summaryrefslogtreecommitdiff
path: root/sys/lib/python
diff options
context:
space:
mode:
authorOri Bernstein <ori@eigenstate.org>2021-06-14 00:00:37 +0000
committerOri Bernstein <ori@eigenstate.org>2021-06-14 00:00:37 +0000
commita73a964e51247ed169d322c725a3a18859f109a3 (patch)
tree3f752d117274d444bda44e85609aeac1acf313f3 /sys/lib/python
parente64efe273fcb921a61bf27d33b230c4e64fcd425 (diff)
python, hg: tow outside the environment.
they've served us well, and can ride off into the sunset.
Diffstat (limited to 'sys/lib/python')
-rw-r--r--sys/lib/python/BaseHTTPServer.py578
-rw-r--r--sys/lib/python/Bastion.py177
-rw-r--r--sys/lib/python/CGIHTTPServer.py362
-rw-r--r--sys/lib/python/ConfigParser.py640
-rw-r--r--sys/lib/python/Cookie.py746
-rw-r--r--sys/lib/python/DocXMLRPCServer.py306
-rw-r--r--sys/lib/python/HTMLParser.py369
-rw-r--r--sys/lib/python/MimeWriter.py181
-rw-r--r--sys/lib/python/Queue.py215
-rw-r--r--sys/lib/python/SimpleHTTPServer.py218
-rw-r--r--sys/lib/python/SimpleXMLRPCServer.py595
-rw-r--r--sys/lib/python/SocketServer.py588
-rw-r--r--sys/lib/python/StringIO.py323
-rw-r--r--sys/lib/python/UserDict.py175
-rw-r--r--sys/lib/python/UserList.py85
-rwxr-xr-xsys/lib/python/UserString.py194
-rw-r--r--sys/lib/python/_LWPCookieJar.py170
-rw-r--r--sys/lib/python/_MozillaCookieJar.py149
-rw-r--r--sys/lib/python/__future__.py116
-rw-r--r--sys/lib/python/__phello__.foo.py1
-rw-r--r--sys/lib/python/_strptime.py452
-rw-r--r--sys/lib/python/_threading_local.py241
-rw-r--r--sys/lib/python/aifc.py961
-rw-r--r--sys/lib/python/anydbm.py83
-rw-r--r--sys/lib/python/asynchat.py295
-rw-r--r--sys/lib/python/asyncore.py551
-rw-r--r--sys/lib/python/atexit.py62
-rw-r--r--sys/lib/python/audiodev.py257
-rwxr-xr-xsys/lib/python/base64.py359
-rw-r--r--sys/lib/python/bdb.py613
-rw-r--r--sys/lib/python/binhex.py527
-rw-r--r--sys/lib/python/bisect.py84
-rw-r--r--sys/lib/python/bsddb/__init__.py397
-rw-r--r--sys/lib/python/bsddb/db.py51
-rw-r--r--sys/lib/python/bsddb/dbobj.py254
-rw-r--r--sys/lib/python/bsddb/dbrecio.py190
-rw-r--r--sys/lib/python/bsddb/dbshelve.py299
-rw-r--r--sys/lib/python/bsddb/dbtables.py706
-rw-r--r--sys/lib/python/bsddb/dbutils.py77
-rwxr-xr-xsys/lib/python/cProfile.py190
-rw-r--r--sys/lib/python/calendar.py701
-rwxr-xr-xsys/lib/python/cgi.py1071
-rw-r--r--sys/lib/python/cgitb.py317
-rw-r--r--sys/lib/python/chunk.py167
-rw-r--r--sys/lib/python/cmd.py405
-rw-r--r--sys/lib/python/code.py307
-rw-r--r--sys/lib/python/codecs.py1034
-rw-r--r--sys/lib/python/codeop.py168
-rw-r--r--sys/lib/python/colorsys.py126
-rw-r--r--sys/lib/python/commands.py84
-rw-r--r--sys/lib/python/compileall.py157
-rw-r--r--sys/lib/python/compiler/__init__.py26
-rw-r--r--sys/lib/python/compiler/ast.py1356
-rw-r--r--sys/lib/python/compiler/consts.py21
-rw-r--r--sys/lib/python/compiler/future.py73
-rw-r--r--sys/lib/python/compiler/misc.py73
-rw-r--r--sys/lib/python/compiler/pyassem.py818
-rw-r--r--sys/lib/python/compiler/pycodegen.py1533
-rw-r--r--sys/lib/python/compiler/symbols.py463
-rw-r--r--sys/lib/python/compiler/syntax.py46
-rw-r--r--sys/lib/python/compiler/transformer.py1490
-rw-r--r--sys/lib/python/compiler/visitor.py113
-rw-r--r--sys/lib/python/config/Makefile1088
-rw-r--r--sys/lib/python/contextlib.py154
-rw-r--r--sys/lib/python/cookielib.py1776
-rw-r--r--sys/lib/python/copy.py414
-rw-r--r--sys/lib/python/copy_reg.py200
-rw-r--r--sys/lib/python/csv.py415
-rw-r--r--sys/lib/python/ctypes/__init__.py529
-rw-r--r--sys/lib/python/ctypes/_endian.py60
-rw-r--r--sys/lib/python/ctypes/macholib/README.ctypes7
-rw-r--r--sys/lib/python/ctypes/macholib/__init__.py12
-rw-r--r--sys/lib/python/ctypes/macholib/dyld.py169
-rw-r--r--sys/lib/python/ctypes/macholib/dylib.py66
-rwxr-xr-xsys/lib/python/ctypes/macholib/fetch_macholib2
-rw-r--r--sys/lib/python/ctypes/macholib/fetch_macholib.bat1
-rw-r--r--sys/lib/python/ctypes/macholib/framework.py68
-rw-r--r--sys/lib/python/ctypes/util.py154
-rw-r--r--sys/lib/python/ctypes/wintypes.py172
-rw-r--r--sys/lib/python/curses/__init__.py53
-rw-r--r--sys/lib/python/curses/ascii.py99
-rw-r--r--sys/lib/python/curses/has_key.py192
-rw-r--r--sys/lib/python/curses/panel.py8
-rw-r--r--sys/lib/python/curses/textpad.py173
-rw-r--r--sys/lib/python/curses/wrapper.py50
-rw-r--r--sys/lib/python/dbhash.py16
-rw-r--r--sys/lib/python/decimal.py3137
-rw-r--r--sys/lib/python/difflib.py2019
-rw-r--r--sys/lib/python/dircache.py38
-rw-r--r--sys/lib/python/dis.py223
-rw-r--r--sys/lib/python/distutils/README22
-rw-r--r--sys/lib/python/distutils/__init__.py23
-rw-r--r--sys/lib/python/distutils/archive_util.py173
-rw-r--r--sys/lib/python/distutils/bcppcompiler.py398
-rw-r--r--sys/lib/python/distutils/ccompiler.py1268
-rw-r--r--sys/lib/python/distutils/cmd.py478
-rw-r--r--sys/lib/python/distutils/command/__init__.py33
-rw-r--r--sys/lib/python/distutils/command/bdist.py150
-rw-r--r--sys/lib/python/distutils/command/bdist_dumb.py135
-rw-r--r--sys/lib/python/distutils/command/bdist_msi.py639
-rw-r--r--sys/lib/python/distutils/command/bdist_rpm.py564
-rw-r--r--sys/lib/python/distutils/command/bdist_wininst.py328
-rw-r--r--sys/lib/python/distutils/command/build.py136
-rw-r--r--sys/lib/python/distutils/command/build_clib.py238
-rw-r--r--sys/lib/python/distutils/command/build_ext.py716
-rw-r--r--sys/lib/python/distutils/command/build_py.py435
-rw-r--r--sys/lib/python/distutils/command/build_scripts.py131
-rw-r--r--sys/lib/python/distutils/command/clean.py82
-rw-r--r--sys/lib/python/distutils/command/command_template45
-rw-r--r--sys/lib/python/distutils/command/config.py368
-rw-r--r--sys/lib/python/distutils/command/install.py607
-rw-r--r--sys/lib/python/distutils/command/install_data.py85
-rw-r--r--sys/lib/python/distutils/command/install_egg_info.py78
-rw-r--r--sys/lib/python/distutils/command/install_headers.py53
-rw-r--r--sys/lib/python/distutils/command/install_lib.py223
-rw-r--r--sys/lib/python/distutils/command/install_scripts.py66
-rw-r--r--sys/lib/python/distutils/command/register.py294
-rw-r--r--sys/lib/python/distutils/command/sdist.py465
-rw-r--r--sys/lib/python/distutils/command/upload.py199
-rw-r--r--sys/lib/python/distutils/core.py242
-rw-r--r--sys/lib/python/distutils/cygwinccompiler.py441
-rw-r--r--sys/lib/python/distutils/debug.py9
-rw-r--r--sys/lib/python/distutils/dep_util.py95
-rw-r--r--sys/lib/python/distutils/dir_util.py227
-rw-r--r--sys/lib/python/distutils/dist.py1222
-rw-r--r--sys/lib/python/distutils/emxccompiler.py315
-rw-r--r--sys/lib/python/distutils/errors.py99
-rw-r--r--sys/lib/python/distutils/extension.py246
-rw-r--r--sys/lib/python/distutils/fancy_getopt.py502
-rw-r--r--sys/lib/python/distutils/file_util.py253
-rw-r--r--sys/lib/python/distutils/filelist.py355
-rw-r--r--sys/lib/python/distutils/log.py69
-rw-r--r--sys/lib/python/distutils/msvccompiler.py652
-rw-r--r--sys/lib/python/distutils/mwerkscompiler.py248
-rw-r--r--sys/lib/python/distutils/spawn.py201
-rw-r--r--sys/lib/python/distutils/sysconfig.py538
-rw-r--r--sys/lib/python/distutils/tests/__init__.py35
-rw-r--r--sys/lib/python/distutils/tests/support.py54
-rw-r--r--sys/lib/python/distutils/tests/test_build_py.py61
-rw-r--r--sys/lib/python/distutils/tests/test_build_scripts.py81
-rw-r--r--sys/lib/python/distutils/tests/test_dist.py189
-rw-r--r--sys/lib/python/distutils/tests/test_install.py55
-rw-r--r--sys/lib/python/distutils/tests/test_install_scripts.py79
-rw-r--r--sys/lib/python/distutils/tests/test_versionpredicate.py9
-rw-r--r--sys/lib/python/distutils/text_file.py382
-rw-r--r--sys/lib/python/distutils/unixccompiler.py315
-rw-r--r--sys/lib/python/distutils/util.py513
-rw-r--r--sys/lib/python/distutils/version.py299
-rw-r--r--sys/lib/python/distutils/versionpredicate.py164
-rw-r--r--sys/lib/python/doctest.py2637
-rw-r--r--sys/lib/python/dumbdbm.py233
-rw-r--r--sys/lib/python/dummy_thread.py152
-rw-r--r--sys/lib/python/dummy_threading.py83
-rw-r--r--sys/lib/python/email/__init__.py123
-rw-r--r--sys/lib/python/email/_parseaddr.py480
-rw-r--r--sys/lib/python/email/base64mime.py184
-rw-r--r--sys/lib/python/email/charset.py388
-rw-r--r--sys/lib/python/email/encoders.py88
-rw-r--r--sys/lib/python/email/errors.py57
-rw-r--r--sys/lib/python/email/feedparser.py480
-rw-r--r--sys/lib/python/email/generator.py348
-rw-r--r--sys/lib/python/email/header.py503
-rw-r--r--sys/lib/python/email/iterators.py73
-rw-r--r--sys/lib/python/email/message.py786
-rw-r--r--sys/lib/python/email/mime/__init__.py0
-rw-r--r--sys/lib/python/email/mime/application.py36
-rw-r--r--sys/lib/python/email/mime/audio.py73
-rw-r--r--sys/lib/python/email/mime/base.py26
-rw-r--r--sys/lib/python/email/mime/image.py46
-rw-r--r--sys/lib/python/email/mime/message.py34
-rw-r--r--sys/lib/python/email/mime/multipart.py41
-rw-r--r--sys/lib/python/email/mime/nonmultipart.py26
-rw-r--r--sys/lib/python/email/mime/text.py30
-rw-r--r--sys/lib/python/email/parser.py91
-rw-r--r--sys/lib/python/email/quoprimime.py336
-rw-r--r--sys/lib/python/email/utils.py323
-rw-r--r--sys/lib/python/encodings/__init__.py154
-rw-r--r--sys/lib/python/encodings/aliases.py508
-rw-r--r--sys/lib/python/encodings/ascii.py50
-rw-r--r--sys/lib/python/encodings/base64_codec.py79
-rw-r--r--sys/lib/python/encodings/big5.py39
-rw-r--r--sys/lib/python/encodings/big5hkscs.py39
-rw-r--r--sys/lib/python/encodings/bz2_codec.py102
-rw-r--r--sys/lib/python/encodings/charmap.py69
-rw-r--r--sys/lib/python/encodings/cp037.py307
-rw-r--r--sys/lib/python/encodings/cp1006.py307
-rw-r--r--sys/lib/python/encodings/cp1026.py307
-rw-r--r--sys/lib/python/encodings/cp1140.py307
-rw-r--r--sys/lib/python/encodings/cp1250.py307
-rw-r--r--sys/lib/python/encodings/cp1251.py307
-rw-r--r--sys/lib/python/encodings/cp1252.py307
-rw-r--r--sys/lib/python/encodings/cp1253.py307
-rw-r--r--sys/lib/python/encodings/cp1254.py307
-rw-r--r--sys/lib/python/encodings/cp1255.py307
-rw-r--r--sys/lib/python/encodings/cp1256.py307
-rw-r--r--sys/lib/python/encodings/cp1257.py307
-rw-r--r--sys/lib/python/encodings/cp1258.py307
-rw-r--r--sys/lib/python/encodings/cp424.py307
-rw-r--r--sys/lib/python/encodings/cp437.py698
-rw-r--r--sys/lib/python/encodings/cp500.py307
-rw-r--r--sys/lib/python/encodings/cp737.py698
-rw-r--r--sys/lib/python/encodings/cp775.py697
-rw-r--r--sys/lib/python/encodings/cp850.py698
-rw-r--r--sys/lib/python/encodings/cp852.py698
-rw-r--r--sys/lib/python/encodings/cp855.py698
-rw-r--r--sys/lib/python/encodings/cp856.py307
-rw-r--r--sys/lib/python/encodings/cp857.py694
-rw-r--r--sys/lib/python/encodings/cp860.py698
-rw-r--r--sys/lib/python/encodings/cp861.py698
-rw-r--r--sys/lib/python/encodings/cp862.py698
-rw-r--r--sys/lib/python/encodings/cp863.py698
-rw-r--r--sys/lib/python/encodings/cp864.py690
-rw-r--r--sys/lib/python/encodings/cp865.py698
-rw-r--r--sys/lib/python/encodings/cp866.py698
-rw-r--r--sys/lib/python/encodings/cp869.py689
-rw-r--r--sys/lib/python/encodings/cp874.py307
-rw-r--r--sys/lib/python/encodings/cp875.py307
-rw-r--r--sys/lib/python/encodings/cp932.py39
-rw-r--r--sys/lib/python/encodings/cp949.py39
-rw-r--r--sys/lib/python/encodings/cp950.py39
-rw-r--r--sys/lib/python/encodings/euc_jis_2004.py39
-rw-r--r--sys/lib/python/encodings/euc_jisx0213.py39
-rw-r--r--sys/lib/python/encodings/euc_jp.py39
-rw-r--r--sys/lib/python/encodings/euc_kr.py39
-rw-r--r--sys/lib/python/encodings/gb18030.py39
-rw-r--r--sys/lib/python/encodings/gb2312.py39
-rw-r--r--sys/lib/python/encodings/gbk.py39
-rw-r--r--sys/lib/python/encodings/hex_codec.py79
-rw-r--r--sys/lib/python/encodings/hp_roman8.py152
-rw-r--r--sys/lib/python/encodings/hz.py39
-rw-r--r--sys/lib/python/encodings/idna.py288
-rw-r--r--sys/lib/python/encodings/iso2022_jp.py39
-rw-r--r--sys/lib/python/encodings/iso2022_jp_1.py39
-rw-r--r--sys/lib/python/encodings/iso2022_jp_2.py39
-rw-r--r--sys/lib/python/encodings/iso2022_jp_2004.py39
-rw-r--r--sys/lib/python/encodings/iso2022_jp_3.py39
-rw-r--r--sys/lib/python/encodings/iso2022_jp_ext.py39
-rw-r--r--sys/lib/python/encodings/iso2022_kr.py39
-rw-r--r--sys/lib/python/encodings/iso8859_1.py307
-rw-r--r--sys/lib/python/encodings/iso8859_10.py307
-rw-r--r--sys/lib/python/encodings/iso8859_11.py307
-rw-r--r--sys/lib/python/encodings/iso8859_13.py307
-rw-r--r--sys/lib/python/encodings/iso8859_14.py307
-rw-r--r--sys/lib/python/encodings/iso8859_15.py307
-rw-r--r--sys/lib/python/encodings/iso8859_16.py307
-rw-r--r--sys/lib/python/encodings/iso8859_2.py307
-rw-r--r--sys/lib/python/encodings/iso8859_3.py307
-rw-r--r--sys/lib/python/encodings/iso8859_4.py307
-rw-r--r--sys/lib/python/encodings/iso8859_5.py307
-rw-r--r--sys/lib/python/encodings/iso8859_6.py307
-rw-r--r--sys/lib/python/encodings/iso8859_7.py307
-rw-r--r--sys/lib/python/encodings/iso8859_8.py307
-rw-r--r--sys/lib/python/encodings/iso8859_9.py307
-rw-r--r--sys/lib/python/encodings/johab.py39
-rw-r--r--sys/lib/python/encodings/koi8_r.py307
-rw-r--r--sys/lib/python/encodings/koi8_u.py307
-rw-r--r--sys/lib/python/encodings/latin_1.py50
-rw-r--r--sys/lib/python/encodings/mac_arabic.py698
-rw-r--r--sys/lib/python/encodings/mac_centeuro.py307
-rw-r--r--sys/lib/python/encodings/mac_croatian.py307
-rw-r--r--sys/lib/python/encodings/mac_cyrillic.py307
-rw-r--r--sys/lib/python/encodings/mac_farsi.py307
-rw-r--r--sys/lib/python/encodings/mac_greek.py307
-rw-r--r--sys/lib/python/encodings/mac_iceland.py307
-rw-r--r--sys/lib/python/encodings/mac_latin2.py183
-rw-r--r--sys/lib/python/encodings/mac_roman.py307
-rw-r--r--sys/lib/python/encodings/mac_romanian.py307
-rw-r--r--sys/lib/python/encodings/mac_turkish.py307
-rw-r--r--sys/lib/python/encodings/mbcs.py47
-rw-r--r--sys/lib/python/encodings/palmos.py83
-rw-r--r--sys/lib/python/encodings/ptcp154.py175
-rw-r--r--sys/lib/python/encodings/punycode.py238
-rw-r--r--sys/lib/python/encodings/quopri_codec.py74
-rw-r--r--sys/lib/python/encodings/raw_unicode_escape.py45
-rw-r--r--sys/lib/python/encodings/rot_13.py118
-rw-r--r--sys/lib/python/encodings/shift_jis.py39
-rw-r--r--sys/lib/python/encodings/shift_jis_2004.py39
-rw-r--r--sys/lib/python/encodings/shift_jisx0213.py39
-rw-r--r--sys/lib/python/encodings/string_escape.py38
-rw-r--r--sys/lib/python/encodings/tis_620.py307
-rw-r--r--sys/lib/python/encodings/undefined.py49
-rw-r--r--sys/lib/python/encodings/unicode_escape.py45
-rw-r--r--sys/lib/python/encodings/unicode_internal.py45
-rw-r--r--sys/lib/python/encodings/utf_16.py104
-rw-r--r--sys/lib/python/encodings/utf_16_be.py42
-rw-r--r--sys/lib/python/encodings/utf_16_le.py42
-rw-r--r--sys/lib/python/encodings/utf_7.py41
-rw-r--r--sys/lib/python/encodings/utf_8.py42
-rw-r--r--sys/lib/python/encodings/utf_8_sig.py100
-rw-r--r--sys/lib/python/encodings/uu_codec.py128
-rw-r--r--sys/lib/python/encodings/zlib_codec.py102
-rw-r--r--sys/lib/python/factotum.py102
-rw-r--r--sys/lib/python/filecmp.py297
-rw-r--r--sys/lib/python/fileinput.py413
-rw-r--r--sys/lib/python/fnmatch.py107
-rw-r--r--sys/lib/python/formatter.py447
-rw-r--r--sys/lib/python/fpformat.py142
-rw-r--r--sys/lib/python/ftplib.py823
-rw-r--r--sys/lib/python/functools.py51
-rw-r--r--sys/lib/python/getopt.py211
-rw-r--r--sys/lib/python/getpass.py127
-rw-r--r--sys/lib/python/gettext.py591
-rw-r--r--sys/lib/python/glob.py74
-rw-r--r--sys/lib/python/gopherlib.py209
-rw-r--r--sys/lib/python/gzip.py490
-rw-r--r--sys/lib/python/hashlib.py82
-rw-r--r--sys/lib/python/heapq.py343
-rw-r--r--sys/lib/python/hgext/__init__.py1
-rw-r--r--sys/lib/python/hgext/acl.py107
-rw-r--r--sys/lib/python/hgext/bookmarks.py340
-rw-r--r--sys/lib/python/hgext/bugzilla.py439
-rw-r--r--sys/lib/python/hgext/children.py44
-rw-r--r--sys/lib/python/hgext/churn.py174
-rw-r--r--sys/lib/python/hgext/color.py286
-rw-r--r--sys/lib/python/hgext/convert/__init__.py296
-rw-r--r--sys/lib/python/hgext/convert/bzr.py259
-rw-r--r--sys/lib/python/hgext/convert/common.py389
-rw-r--r--sys/lib/python/hgext/convert/convcmd.py396
-rw-r--r--sys/lib/python/hgext/convert/cvs.py372
-rw-r--r--sys/lib/python/hgext/convert/cvsps.py831
-rw-r--r--sys/lib/python/hgext/convert/darcs.py135
-rw-r--r--sys/lib/python/hgext/convert/filemap.py359
-rw-r--r--sys/lib/python/hgext/convert/git.py152
-rw-r--r--sys/lib/python/hgext/convert/gnuarch.py342
-rw-r--r--sys/lib/python/hgext/convert/hg.py363
-rw-r--r--sys/lib/python/hgext/convert/monotone.py217
-rw-r--r--sys/lib/python/hgext/convert/p4.py205
-rw-r--r--sys/lib/python/hgext/convert/subversion.py1136
-rw-r--r--sys/lib/python/hgext/convert/transport.py128
-rw-r--r--sys/lib/python/hgext/extdiff.py228
-rw-r--r--sys/lib/python/hgext/fetch.py148
-rw-r--r--sys/lib/python/hgext/gpg.py284
-rw-r--r--sys/lib/python/hgext/graphlog.py378
-rw-r--r--sys/lib/python/hgext/hgcia.py246
-rw-r--r--sys/lib/python/hgext/hgfactotum.py70
-rw-r--r--sys/lib/python/hgext/hgk.py347
-rw-r--r--sys/lib/python/hgext/hgwebfs.py105
-rw-r--r--sys/lib/python/hgext/highlight/__init__.py60
-rw-r--r--sys/lib/python/hgext/highlight/highlight.py60
-rw-r--r--sys/lib/python/hgext/inotify/__init__.py109
-rw-r--r--sys/lib/python/hgext/inotify/client.py160
-rw-r--r--sys/lib/python/hgext/inotify/common.py51
-rw-r--r--sys/lib/python/hgext/inotify/linux/__init__.py41
-rw-r--r--sys/lib/python/hgext/inotify/linux/_inotify.c608
-rw-r--r--sys/lib/python/hgext/inotify/linux/watcher.py335
-rw-r--r--sys/lib/python/hgext/inotify/server.py874
-rw-r--r--sys/lib/python/hgext/interhg.py80
-rw-r--r--sys/lib/python/hgext/keyword.py555
-rw-r--r--sys/lib/python/hgext/mq.py2653
-rw-r--r--sys/lib/python/hgext/notify.py298
-rw-r--r--sys/lib/python/hgext/pager.py64
-rw-r--r--sys/lib/python/hgext/parentrevspec.py96
-rw-r--r--sys/lib/python/hgext/patchbomb.py513
-rw-r--r--sys/lib/python/hgext/purge.py111
-rw-r--r--sys/lib/python/hgext/rebase.py471
-rw-r--r--sys/lib/python/hgext/record.py551
-rw-r--r--sys/lib/python/hgext/share.py30
-rw-r--r--sys/lib/python/hgext/transplant.py606
-rw-r--r--sys/lib/python/hgext/win32mbcs.py147
-rw-r--r--sys/lib/python/hgext/win32text.py158
-rw-r--r--sys/lib/python/hgext/zeroconf/Zeroconf.py1573
-rw-r--r--sys/lib/python/hgext/zeroconf/__init__.py159
-rw-r--r--sys/lib/python/hmac.py113
-rw-r--r--sys/lib/python/hotshot/__init__.py76
-rw-r--r--sys/lib/python/hotshot/log.py192
-rw-r--r--sys/lib/python/hotshot/stats.py93
-rw-r--r--sys/lib/python/hotshot/stones.py31
-rw-r--r--sys/lib/python/htmlentitydefs.py273
-rw-r--r--sys/lib/python/htmllib.py486
-rw-r--r--sys/lib/python/httplib.py1428
-rw-r--r--sys/lib/python/idlelib/AutoComplete.py226
-rw-r--r--sys/lib/python/idlelib/AutoCompleteWindow.py393
-rw-r--r--sys/lib/python/idlelib/AutoExpand.py83
-rw-r--r--sys/lib/python/idlelib/Bindings.py111
-rw-r--r--sys/lib/python/idlelib/CREDITS.txt36
-rw-r--r--sys/lib/python/idlelib/CallTipWindow.py171
-rw-r--r--sys/lib/python/idlelib/CallTips.py212
-rw-r--r--sys/lib/python/idlelib/ChangeLog1591
-rw-r--r--sys/lib/python/idlelib/ClassBrowser.py221
-rw-r--r--sys/lib/python/idlelib/CodeContext.py167
-rw-r--r--sys/lib/python/idlelib/ColorDelegator.py263
-rw-r--r--sys/lib/python/idlelib/Debugger.py481
-rw-r--r--sys/lib/python/idlelib/Delegator.py33
-rw-r--r--sys/lib/python/idlelib/EditorWindow.py1511
-rw-r--r--sys/lib/python/idlelib/FileList.py124
-rw-r--r--sys/lib/python/idlelib/FormatParagraph.py148
-rw-r--r--sys/lib/python/idlelib/GrepDialog.py133
-rw-r--r--sys/lib/python/idlelib/HISTORY.txt296
-rw-r--r--sys/lib/python/idlelib/HyperParser.py241
-rw-r--r--sys/lib/python/idlelib/IOBinding.py584
-rw-r--r--sys/lib/python/idlelib/Icons/folder.gifbin120 -> 0 bytes
-rw-r--r--sys/lib/python/idlelib/Icons/idle.icnsbin57435 -> 0 bytes
-rw-r--r--sys/lib/python/idlelib/Icons/minusnode.gifbin96 -> 0 bytes
-rw-r--r--sys/lib/python/idlelib/Icons/openfolder.gifbin125 -> 0 bytes
-rw-r--r--sys/lib/python/idlelib/Icons/plusnode.gifbin79 -> 0 bytes
-rw-r--r--sys/lib/python/idlelib/Icons/python.gifbin125 -> 0 bytes
-rw-r--r--sys/lib/python/idlelib/Icons/tk.gifbin85 -> 0 bytes
-rw-r--r--sys/lib/python/idlelib/IdleHistory.py88
-rw-r--r--sys/lib/python/idlelib/MultiCall.py406
-rw-r--r--sys/lib/python/idlelib/MultiStatusBar.py32
-rw-r--r--sys/lib/python/idlelib/NEWS.txt613
-rw-r--r--sys/lib/python/idlelib/ObjectBrowser.py151
-rw-r--r--sys/lib/python/idlelib/OutputWindow.py157
-rw-r--r--sys/lib/python/idlelib/ParenMatch.py172
-rw-r--r--sys/lib/python/idlelib/PathBrowser.py95
-rw-r--r--sys/lib/python/idlelib/Percolator.py85
-rw-r--r--sys/lib/python/idlelib/PyParse.py594
-rw-r--r--sys/lib/python/idlelib/PyShell.py1441
-rw-r--r--sys/lib/python/idlelib/README.txt63
-rw-r--r--sys/lib/python/idlelib/RemoteDebugger.py381
-rw-r--r--sys/lib/python/idlelib/RemoteObjectBrowser.py36
-rw-r--r--sys/lib/python/idlelib/ReplaceDialog.py167
-rw-r--r--sys/lib/python/idlelib/ScriptBinding.py210
-rw-r--r--sys/lib/python/idlelib/ScrolledList.py139
-rw-r--r--sys/lib/python/idlelib/SearchDialog.py68
-rw-r--r--sys/lib/python/idlelib/SearchDialogBase.py140
-rw-r--r--sys/lib/python/idlelib/SearchEngine.py220
-rw-r--r--sys/lib/python/idlelib/StackViewer.py137
-rw-r--r--sys/lib/python/idlelib/TODO.txt210
-rw-r--r--sys/lib/python/idlelib/ToolTip.py89
-rw-r--r--sys/lib/python/idlelib/TreeWidget.py478
-rw-r--r--sys/lib/python/idlelib/UndoDelegator.py352
-rw-r--r--sys/lib/python/idlelib/WidgetRedirector.py92
-rw-r--r--sys/lib/python/idlelib/WindowList.py90
-rw-r--r--sys/lib/python/idlelib/ZoomHeight.py50
-rw-r--r--sys/lib/python/idlelib/__init__.py1
-rw-r--r--sys/lib/python/idlelib/aboutDialog.py163
-rw-r--r--sys/lib/python/idlelib/config-extensions.def88
-rw-r--r--sys/lib/python/idlelib/config-highlight.def64
-rw-r--r--sys/lib/python/idlelib/config-keys.def214
-rw-r--r--sys/lib/python/idlelib/config-main.def79
-rw-r--r--sys/lib/python/idlelib/configDialog.py1147
-rw-r--r--sys/lib/python/idlelib/configHandler.py696
-rw-r--r--sys/lib/python/idlelib/configHelpSourceEdit.py169
-rw-r--r--sys/lib/python/idlelib/configSectionNameDialog.py97
-rw-r--r--sys/lib/python/idlelib/dynOptionMenuWidget.py35
-rw-r--r--sys/lib/python/idlelib/extend.txt83
-rw-r--r--sys/lib/python/idlelib/help.txt213
-rwxr-xr-xsys/lib/python/idlelib/idle.bat3
-rw-r--r--sys/lib/python/idlelib/idle.py21
-rw-r--r--sys/lib/python/idlelib/idle.pyw21
-rw-r--r--sys/lib/python/idlelib/idlever.py1
-rw-r--r--sys/lib/python/idlelib/keybindingDialog.py268
-rw-r--r--sys/lib/python/idlelib/macosxSupport.py112
-rw-r--r--sys/lib/python/idlelib/rpc.py602
-rw-r--r--sys/lib/python/idlelib/run.py327
-rw-r--r--sys/lib/python/idlelib/tabpage.py113
-rw-r--r--sys/lib/python/idlelib/testcode.py31
-rw-r--r--sys/lib/python/idlelib/textView.py78
-rw-r--r--sys/lib/python/ihooks.py520
-rw-r--r--sys/lib/python/imaplib.py1499
-rw-r--r--sys/lib/python/imghdr.py161
-rw-r--r--sys/lib/python/imputil.py731
-rw-r--r--sys/lib/python/inspect.py889
-rwxr-xr-xsys/lib/python/keyword.py95
-rw-r--r--sys/lib/python/lib-tk/Canvas.py190
-rw-r--r--sys/lib/python/lib-tk/Dialog.py49
-rw-r--r--sys/lib/python/lib-tk/FileDialog.py274
-rw-r--r--sys/lib/python/lib-tk/FixTk.py37
-rw-r--r--sys/lib/python/lib-tk/ScrolledText.py43
-rw-r--r--sys/lib/python/lib-tk/SimpleDialog.py112
-rwxr-xr-xsys/lib/python/lib-tk/Tix.py1891
-rw-r--r--sys/lib/python/lib-tk/Tkconstants.py110
-rw-r--r--sys/lib/python/lib-tk/Tkdnd.py321
-rw-r--r--sys/lib/python/lib-tk/Tkinter.py3759
-rw-r--r--sys/lib/python/lib-tk/tkColorChooser.py70
-rw-r--r--sys/lib/python/lib-tk/tkCommonDialog.py60
-rw-r--r--sys/lib/python/lib-tk/tkFileDialog.py215
-rw-r--r--sys/lib/python/lib-tk/tkFont.py216
-rw-r--r--sys/lib/python/lib-tk/tkMessageBox.py122
-rw-r--r--sys/lib/python/lib-tk/tkSimpleDialog.py320
-rw-r--r--sys/lib/python/lib-tk/turtle.py956
-rw-r--r--sys/lib/python/linecache.py136
-rw-r--r--sys/lib/python/locale.py1562
-rw-r--r--sys/lib/python/logging/__init__.py1372
-rw-r--r--sys/lib/python/logging/config.py348
-rw-r--r--sys/lib/python/logging/handlers.py1019
-rw-r--r--sys/lib/python/macpath.py275
-rw-r--r--sys/lib/python/macurl2path.py97
-rwxr-xr-xsys/lib/python/mailbox.py2090
-rw-r--r--sys/lib/python/mailcap.py255
-rw-r--r--sys/lib/python/markupbase.py392
-rw-r--r--sys/lib/python/md5.py10
-rw-r--r--sys/lib/python/mercurial/__init__.py0
-rw-r--r--sys/lib/python/mercurial/ancestor.py85
-rw-r--r--sys/lib/python/mercurial/archival.py226
-rw-r--r--sys/lib/python/mercurial/base85.c155
-rw-r--r--sys/lib/python/mercurial/bdiff.c401
-rw-r--r--sys/lib/python/mercurial/bundlerepo.py303
-rw-r--r--sys/lib/python/mercurial/byterange.py468
-rw-r--r--sys/lib/python/mercurial/changegroup.py140
-rw-r--r--sys/lib/python/mercurial/changelog.py228
-rw-r--r--sys/lib/python/mercurial/cmdutil.py1254
-rw-r--r--sys/lib/python/mercurial/commands.py3565
-rw-r--r--sys/lib/python/mercurial/config.py137
-rw-r--r--sys/lib/python/mercurial/context.py818
-rw-r--r--sys/lib/python/mercurial/copies.py233
-rw-r--r--sys/lib/python/mercurial/demandimport.py139
-rw-r--r--sys/lib/python/mercurial/diffhelpers.c156
-rw-r--r--sys/lib/python/mercurial/dirstate.py601
-rw-r--r--sys/lib/python/mercurial/dispatch.py501
-rw-r--r--sys/lib/python/mercurial/encoding.py75
-rw-r--r--sys/lib/python/mercurial/error.py72
-rw-r--r--sys/lib/python/mercurial/extensions.py178
-rw-r--r--sys/lib/python/mercurial/fancyopts.py110
-rw-r--r--sys/lib/python/mercurial/filelog.py68
-rw-r--r--sys/lib/python/mercurial/filemerge.py231
-rw-r--r--sys/lib/python/mercurial/graphmod.py119
-rw-r--r--sys/lib/python/mercurial/hbisect.py145
-rw-r--r--sys/lib/python/mercurial/help.py527
-rw-r--r--sys/lib/python/mercurial/hg.py367
-rw-r--r--sys/lib/python/mercurial/hgweb/__init__.py16
-rw-r--r--sys/lib/python/mercurial/hgweb/common.py105
-rw-r--r--sys/lib/python/mercurial/hgweb/hgweb_mod.py315
-rw-r--r--sys/lib/python/mercurial/hgweb/hgwebdir_mod.py333
-rw-r--r--sys/lib/python/mercurial/hgweb/protocol.py206
-rw-r--r--sys/lib/python/mercurial/hgweb/request.py134
-rw-r--r--sys/lib/python/mercurial/hgweb/server.py298
-rw-r--r--sys/lib/python/mercurial/hgweb/webcommands.py690
-rw-r--r--sys/lib/python/mercurial/hgweb/webutil.py218
-rw-r--r--sys/lib/python/mercurial/hgweb/wsgicgi.py70
-rw-r--r--sys/lib/python/mercurial/hook.py135
-rw-r--r--sys/lib/python/mercurial/httprepo.py258
-rw-r--r--sys/lib/python/mercurial/i18n.py52
-rw-r--r--sys/lib/python/mercurial/ignore.py103
-rw-r--r--sys/lib/python/mercurial/keepalive.py671
-rw-r--r--sys/lib/python/mercurial/localrepo.py2156
-rw-r--r--sys/lib/python/mercurial/lock.py137
-rw-r--r--sys/lib/python/mercurial/lsprof.py113
-rw-r--r--sys/lib/python/mercurial/lsprofcalltree.py86
-rw-r--r--sys/lib/python/mercurial/mail.py190
-rw-r--r--sys/lib/python/mercurial/manifest.py201
-rw-r--r--sys/lib/python/mercurial/match.py249
-rw-r--r--sys/lib/python/mercurial/mdiff.py269
-rw-r--r--sys/lib/python/mercurial/merge.py481
-rw-r--r--sys/lib/python/mercurial/minirst.py343
-rw-r--r--sys/lib/python/mercurial/mpatch.c444
-rw-r--r--sys/lib/python/mercurial/node.py18
-rw-r--r--sys/lib/python/mercurial/osutil.c534
-rw-r--r--sys/lib/python/mercurial/parsers.c435
-rw-r--r--sys/lib/python/mercurial/patch.py1454
-rw-r--r--sys/lib/python/mercurial/posix.py252
-rw-r--r--sys/lib/python/mercurial/pure/base85.py74
-rw-r--r--sys/lib/python/mercurial/pure/bdiff.py76
-rw-r--r--sys/lib/python/mercurial/pure/diffhelpers.py56
-rw-r--r--sys/lib/python/mercurial/pure/mpatch.py116
-rw-r--r--sys/lib/python/mercurial/pure/osutil.py52
-rw-r--r--sys/lib/python/mercurial/pure/parsers.py90
-rw-r--r--sys/lib/python/mercurial/repair.py145
-rw-r--r--sys/lib/python/mercurial/repo.py43
-rw-r--r--sys/lib/python/mercurial/revlog.py1376
-rw-r--r--sys/lib/python/mercurial/simplemerge.py451
-rw-r--r--sys/lib/python/mercurial/sshrepo.py260
-rw-r--r--sys/lib/python/mercurial/sshserver.py225
-rw-r--r--sys/lib/python/mercurial/statichttprepo.py134
-rw-r--r--sys/lib/python/mercurial/store.py333
-rw-r--r--sys/lib/python/mercurial/streamclone.py67
-rw-r--r--sys/lib/python/mercurial/strutil.py34
-rw-r--r--sys/lib/python/mercurial/subrepo.py197
-rw-r--r--sys/lib/python/mercurial/tags.py338
-rw-r--r--sys/lib/python/mercurial/templatefilters.py211
-rw-r--r--sys/lib/python/mercurial/templater.py245
-rw-r--r--sys/lib/python/mercurial/templates/atom/changelog.tmpl10
-rw-r--r--sys/lib/python/mercurial/templates/atom/changelogentry.tmpl16
-rw-r--r--sys/lib/python/mercurial/templates/atom/error.tmpl17
-rw-r--r--sys/lib/python/mercurial/templates/atom/filelog.tmpl8
-rw-r--r--sys/lib/python/mercurial/templates/atom/header.tmpl2
-rw-r--r--sys/lib/python/mercurial/templates/atom/map11
-rw-r--r--sys/lib/python/mercurial/templates/atom/tagentry.tmpl8
-rw-r--r--sys/lib/python/mercurial/templates/atom/tags.tmpl11
-rw-r--r--sys/lib/python/mercurial/templates/coal/header.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/coal/map191
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/branches.tmpl30
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/changelog.tmpl39
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/changelogentry.tmpl14
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/changeset.tmpl50
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/error.tmpl25
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/fileannotate.tmpl61
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/filediff.tmpl47
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/filelog.tmpl40
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/filerevision.tmpl60
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/footer.tmpl11
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/graph.tmpl121
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/header.tmpl8
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/index.tmpl26
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/manifest.tmpl38
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/map248
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/notfound.tmpl18
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/search.tmpl36
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/shortlog.tmpl41
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/summary.tmpl58
-rw-r--r--sys/lib/python/mercurial/templates/gitweb/tags.tmpl30
-rw-r--r--sys/lib/python/mercurial/templates/map-cmdline.changelog14
-rw-r--r--sys/lib/python/mercurial/templates/map-cmdline.compact9
-rw-r--r--sys/lib/python/mercurial/templates/map-cmdline.default24
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/branches.tmpl36
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/changelog.tmpl40
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/changelogentry.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/changeset.tmpl63
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/error.tmpl34
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/fileannotate.tmpl63
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/filediff.tmpl54
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/filelog.tmpl49
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/filerevision.tmpl63
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/footer.tmpl22
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/graph.tmpl118
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/header.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/index.tmpl39
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/manifest.tmpl51
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/map214
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/notfound.tmpl35
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/search.tmpl34
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/shortlog.tmpl41
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/summary.tmpl66
-rw-r--r--sys/lib/python/mercurial/templates/monoblue/tags.tmpl36
-rw-r--r--sys/lib/python/mercurial/templates/paper/branches.tmpl45
-rw-r--r--sys/lib/python/mercurial/templates/paper/changeset.tmpl71
-rw-r--r--sys/lib/python/mercurial/templates/paper/error.tmpl43
-rw-r--r--sys/lib/python/mercurial/templates/paper/fileannotate.tmpl77
-rw-r--r--sys/lib/python/mercurial/templates/paper/filediff.tmpl72
-rw-r--r--sys/lib/python/mercurial/templates/paper/filelog.tmpl60
-rw-r--r--sys/lib/python/mercurial/templates/paper/filelogentry.tmpl5
-rw-r--r--sys/lib/python/mercurial/templates/paper/filerevision.tmpl72
-rw-r--r--sys/lib/python/mercurial/templates/paper/footer.tmpl4
-rw-r--r--sys/lib/python/mercurial/templates/paper/graph.tmpl132
-rw-r--r--sys/lib/python/mercurial/templates/paper/header.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/paper/index.tmpl26
-rw-r--r--sys/lib/python/mercurial/templates/paper/manifest.tmpl54
-rw-r--r--sys/lib/python/mercurial/templates/paper/map191
-rw-r--r--sys/lib/python/mercurial/templates/paper/notfound.tmpl12
-rw-r--r--sys/lib/python/mercurial/templates/paper/search.tmpl43
-rw-r--r--sys/lib/python/mercurial/templates/paper/shortlog.tmpl57
-rw-r--r--sys/lib/python/mercurial/templates/paper/shortlogentry.tmpl5
-rw-r--r--sys/lib/python/mercurial/templates/paper/tags.tmpl45
-rw-r--r--sys/lib/python/mercurial/templates/raw/changeset.tmpl9
-rw-r--r--sys/lib/python/mercurial/templates/raw/error.tmpl2
-rw-r--r--sys/lib/python/mercurial/templates/raw/fileannotate.tmpl5
-rw-r--r--sys/lib/python/mercurial/templates/raw/filediff.tmpl5
-rw-r--r--sys/lib/python/mercurial/templates/raw/index.tmpl2
-rw-r--r--sys/lib/python/mercurial/templates/raw/manifest.tmpl3
-rw-r--r--sys/lib/python/mercurial/templates/raw/map23
-rw-r--r--sys/lib/python/mercurial/templates/raw/notfound.tmpl2
-rw-r--r--sys/lib/python/mercurial/templates/rss/changelog.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/rss/changelogentry.tmpl7
-rw-r--r--sys/lib/python/mercurial/templates/rss/error.tmpl10
-rw-r--r--sys/lib/python/mercurial/templates/rss/filelog.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/rss/filelogentry.tmpl7
-rw-r--r--sys/lib/python/mercurial/templates/rss/header.tmpl5
-rw-r--r--sys/lib/python/mercurial/templates/rss/map10
-rw-r--r--sys/lib/python/mercurial/templates/rss/tagentry.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/rss/tags.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/spartan/branches.tmpl26
-rw-r--r--sys/lib/python/mercurial/templates/spartan/changelog.tmpl43
-rw-r--r--sys/lib/python/mercurial/templates/spartan/changelogentry.tmpl25
-rw-r--r--sys/lib/python/mercurial/templates/spartan/changeset.tmpl51
-rw-r--r--sys/lib/python/mercurial/templates/spartan/error.tmpl15
-rw-r--r--sys/lib/python/mercurial/templates/spartan/fileannotate.tmpl48
-rw-r--r--sys/lib/python/mercurial/templates/spartan/filediff.tmpl36
-rw-r--r--sys/lib/python/mercurial/templates/spartan/filelog.tmpl28
-rw-r--r--sys/lib/python/mercurial/templates/spartan/filelogentry.tmpl25
-rw-r--r--sys/lib/python/mercurial/templates/spartan/filerevision.tmpl46
-rw-r--r--sys/lib/python/mercurial/templates/spartan/footer.tmpl8
-rw-r--r--sys/lib/python/mercurial/templates/spartan/graph.tmpl96
-rw-r--r--sys/lib/python/mercurial/templates/spartan/header.tmpl6
-rw-r--r--sys/lib/python/mercurial/templates/spartan/index.tmpl19
-rw-r--r--sys/lib/python/mercurial/templates/spartan/manifest.tmpl28
-rw-r--r--sys/lib/python/mercurial/templates/spartan/map178
-rw-r--r--sys/lib/python/mercurial/templates/spartan/notfound.tmpl12
-rw-r--r--sys/lib/python/mercurial/templates/spartan/search.tmpl36
-rw-r--r--sys/lib/python/mercurial/templates/spartan/shortlog.tmpl43
-rw-r--r--sys/lib/python/mercurial/templates/spartan/shortlogentry.tmpl7
-rw-r--r--sys/lib/python/mercurial/templates/spartan/tags.tmpl26
-rw-r--r--sys/lib/python/mercurial/templates/static/background.pngbin603 -> 0 bytes
-rw-r--r--sys/lib/python/mercurial/templates/static/coal-file.pngbin273 -> 0 bytes
-rw-r--r--sys/lib/python/mercurial/templates/static/coal-folder.pngbin284 -> 0 bytes
-rw-r--r--sys/lib/python/mercurial/templates/static/excanvas.js19
-rw-r--r--sys/lib/python/mercurial/templates/static/graph.js137
-rw-r--r--sys/lib/python/mercurial/templates/static/hgicon.pngbin792 -> 0 bytes
-rw-r--r--sys/lib/python/mercurial/templates/static/hglogo.pngbin4123 -> 0 bytes
-rw-r--r--sys/lib/python/mercurial/templates/static/style-coal.css265
-rw-r--r--sys/lib/python/mercurial/templates/static/style-gitweb.css123
-rw-r--r--sys/lib/python/mercurial/templates/static/style-monoblue.css472
-rw-r--r--sys/lib/python/mercurial/templates/static/style-paper.css254
-rw-r--r--sys/lib/python/mercurial/templates/static/style.css105
-rw-r--r--sys/lib/python/mercurial/templates/template-vars.txt37
-rw-r--r--sys/lib/python/mercurial/transaction.py165
-rw-r--r--sys/lib/python/mercurial/ui.py381
-rw-r--r--sys/lib/python/mercurial/url.py533
-rw-r--r--sys/lib/python/mercurial/util.py1284
-rw-r--r--sys/lib/python/mercurial/verify.py258
-rw-r--r--sys/lib/python/mercurial/win32.py144
-rw-r--r--sys/lib/python/mercurial/windows.py292
-rw-r--r--sys/lib/python/mhlib.py1001
-rw-r--r--sys/lib/python/mimetools.py241
-rw-r--r--sys/lib/python/mimetypes.py533
-rwxr-xr-xsys/lib/python/mimify.py464
-rw-r--r--sys/lib/python/modulefinder.py595
-rw-r--r--sys/lib/python/msilib/__init__.py463
-rw-r--r--sys/lib/python/msilib/schema.py1007
-rw-r--r--sys/lib/python/msilib/sequence.py126
-rw-r--r--sys/lib/python/msilib/text.py129
-rw-r--r--sys/lib/python/multifile.py158
-rw-r--r--sys/lib/python/mutex.py52
-rw-r--r--sys/lib/python/netrc.py111
-rw-r--r--sys/lib/python/new.py17
-rw-r--r--sys/lib/python/nntplib.py628
-rw-r--r--sys/lib/python/ntpath.py511
-rw-r--r--sys/lib/python/nturl2path.py63
-rw-r--r--sys/lib/python/opcode.py185
-rw-r--r--sys/lib/python/optparse.py1682
-rw-r--r--sys/lib/python/os.py738
-rw-r--r--sys/lib/python/os2emxpath.py423
-rw-r--r--sys/lib/python/pdb.doc192
-rwxr-xr-xsys/lib/python/pdb.py1234
-rw-r--r--sys/lib/python/pickle.py1383
-rw-r--r--sys/lib/python/pickletools.py2246
-rw-r--r--sys/lib/python/pipes.py298
-rw-r--r--sys/lib/python/pkgutil.py546
-rw-r--r--sys/lib/python/plat-aix3/IN.py126
-rwxr-xr-xsys/lib/python/plat-aix3/regen8
-rw-r--r--sys/lib/python/plat-aix4/IN.py165
-rwxr-xr-xsys/lib/python/plat-aix4/regen8
-rw-r--r--sys/lib/python/plat-atheos/IN.py944
-rw-r--r--sys/lib/python/plat-atheos/TYPES.py142
-rw-r--r--sys/lib/python/plat-atheos/regen3
-rw-r--r--sys/lib/python/plat-beos5/IN.py327
-rwxr-xr-xsys/lib/python/plat-beos5/regen7
-rw-r--r--sys/lib/python/plat-darwin/IN.py357
-rwxr-xr-xsys/lib/python/plat-darwin/regen3
-rw-r--r--sys/lib/python/plat-freebsd2/IN.py187
-rwxr-xr-xsys/lib/python/plat-freebsd2/regen3
-rw-r--r--sys/lib/python/plat-freebsd3/IN.py189
-rwxr-xr-xsys/lib/python/plat-freebsd3/regen4
-rw-r--r--sys/lib/python/plat-freebsd4/IN.py355
-rw-r--r--sys/lib/python/plat-freebsd4/regen3
-rw-r--r--sys/lib/python/plat-freebsd5/IN.py355
-rw-r--r--sys/lib/python/plat-freebsd5/regen3
-rw-r--r--sys/lib/python/plat-freebsd6/IN.py515
-rw-r--r--sys/lib/python/plat-freebsd6/regen3
-rw-r--r--sys/lib/python/plat-freebsd7/IN.py535
-rw-r--r--sys/lib/python/plat-freebsd7/regen3
-rwxr-xr-xsys/lib/python/plat-generic/regen3
-rwxr-xr-xsys/lib/python/plat-irix5/AL.py61
-rwxr-xr-xsys/lib/python/plat-irix5/CD.py34
-rwxr-xr-xsys/lib/python/plat-irix5/CL.py24
-rwxr-xr-xsys/lib/python/plat-irix5/CL_old.py236
-rwxr-xr-xsys/lib/python/plat-irix5/DEVICE.py400
-rwxr-xr-xsys/lib/python/plat-irix5/ERRNO.py147
-rwxr-xr-xsys/lib/python/plat-irix5/FILE.py239
-rwxr-xr-xsys/lib/python/plat-irix5/FL.py289
-rwxr-xr-xsys/lib/python/plat-irix5/GET.py59
-rwxr-xr-xsys/lib/python/plat-irix5/GL.py393
-rwxr-xr-xsys/lib/python/plat-irix5/GLWS.py12
-rwxr-xr-xsys/lib/python/plat-irix5/IN.py141
-rwxr-xr-xsys/lib/python/plat-irix5/IOCTL.py233
-rwxr-xr-xsys/lib/python/plat-irix5/SV.py120
-rwxr-xr-xsys/lib/python/plat-irix5/WAIT.py14
-rwxr-xr-xsys/lib/python/plat-irix5/cddb.py204
-rwxr-xr-xsys/lib/python/plat-irix5/cdplayer.py88
-rwxr-xr-xsys/lib/python/plat-irix5/flp.doc117
-rwxr-xr-xsys/lib/python/plat-irix5/flp.py451
-rwxr-xr-xsys/lib/python/plat-irix5/jpeg.py111
-rwxr-xr-xsys/lib/python/plat-irix5/panel.py281
-rwxr-xr-xsys/lib/python/plat-irix5/panelparser.py128
-rwxr-xr-xsys/lib/python/plat-irix5/readcd.doc104
-rwxr-xr-xsys/lib/python/plat-irix5/readcd.py244
-rwxr-xr-xsys/lib/python/plat-irix5/regen10
-rwxr-xr-xsys/lib/python/plat-irix5/torgb.py99
-rw-r--r--sys/lib/python/plat-irix6/AL.py61
-rw-r--r--sys/lib/python/plat-irix6/CD.py34
-rw-r--r--sys/lib/python/plat-irix6/CL.py24
-rw-r--r--sys/lib/python/plat-irix6/DEVICE.py400
-rw-r--r--sys/lib/python/plat-irix6/ERRNO.py180
-rw-r--r--sys/lib/python/plat-irix6/FILE.py674
-rw-r--r--sys/lib/python/plat-irix6/FL.py289
-rw-r--r--sys/lib/python/plat-irix6/GET.py59
-rw-r--r--sys/lib/python/plat-irix6/GL.py393
-rw-r--r--sys/lib/python/plat-irix6/GLWS.py12
-rw-r--r--sys/lib/python/plat-irix6/IN.py385
-rw-r--r--sys/lib/python/plat-irix6/IOCTL.py233
-rw-r--r--sys/lib/python/plat-irix6/SV.py120
-rw-r--r--sys/lib/python/plat-irix6/WAIT.py335
-rw-r--r--sys/lib/python/plat-irix6/cddb.py204
-rw-r--r--sys/lib/python/plat-irix6/cdplayer.py88
-rw-r--r--sys/lib/python/plat-irix6/flp.doc117
-rw-r--r--sys/lib/python/plat-irix6/flp.py450
-rw-r--r--sys/lib/python/plat-irix6/jpeg.py111
-rw-r--r--sys/lib/python/plat-irix6/panel.py281
-rw-r--r--sys/lib/python/plat-irix6/panelparser.py128
-rw-r--r--sys/lib/python/plat-irix6/readcd.doc104
-rw-r--r--sys/lib/python/plat-irix6/readcd.py244
-rwxr-xr-xsys/lib/python/plat-irix6/regen11
-rw-r--r--sys/lib/python/plat-irix6/torgb.py99
-rw-r--r--sys/lib/python/plat-linux2/CDROM.py207
-rw-r--r--sys/lib/python/plat-linux2/DLFCN.py83
-rw-r--r--sys/lib/python/plat-linux2/IN.py615
-rw-r--r--sys/lib/python/plat-linux2/TYPES.py170
-rwxr-xr-xsys/lib/python/plat-linux2/regen8
-rw-r--r--sys/lib/python/plat-netbsd1/IN.py56
-rwxr-xr-xsys/lib/python/plat-netbsd1/regen3
-rwxr-xr-xsys/lib/python/plat-next3/regen6
-rw-r--r--sys/lib/python/plat-os2emx/IN.py82
-rw-r--r--sys/lib/python/plat-os2emx/SOCKET.py106
-rw-r--r--sys/lib/python/plat-os2emx/_emx_link.py79
-rw-r--r--sys/lib/python/plat-os2emx/grp.py182
-rw-r--r--sys/lib/python/plat-os2emx/pwd.py208
-rw-r--r--sys/lib/python/plat-os2emx/regen7
-rw-r--r--sys/lib/python/plat-riscos/riscosenviron.py45
-rw-r--r--sys/lib/python/plat-riscos/riscospath.py378
-rw-r--r--sys/lib/python/plat-riscos/rourl2path.py71
-rw-r--r--sys/lib/python/plat-sunos5/CDIO.py73
-rw-r--r--sys/lib/python/plat-sunos5/DLFCN.py27
-rwxr-xr-xsys/lib/python/plat-sunos5/IN.py1421
-rw-r--r--sys/lib/python/plat-sunos5/STROPTS.py1813
-rwxr-xr-xsys/lib/python/plat-sunos5/SUNAUDIODEV.py40
-rw-r--r--sys/lib/python/plat-sunos5/TYPES.py313
-rwxr-xr-xsys/lib/python/plat-sunos5/regen9
-rw-r--r--sys/lib/python/plat-unixware7/IN.py836
-rw-r--r--sys/lib/python/plat-unixware7/STROPTS.py328
-rwxr-xr-xsys/lib/python/plat-unixware7/regen9
-rwxr-xr-xsys/lib/python/platform.py1254
-rw-r--r--sys/lib/python/popen2.py244
-rw-r--r--sys/lib/python/poplib.py423
-rw-r--r--sys/lib/python/posixfile.py235
-rw-r--r--sys/lib/python/posixpath.py453
-rw-r--r--sys/lib/python/pprint.py315
-rwxr-xr-xsys/lib/python/profile.py619
-rw-r--r--sys/lib/python/pstats.py684
-rw-r--r--sys/lib/python/pty.py174
-rw-r--r--sys/lib/python/py_compile.py164
-rw-r--r--sys/lib/python/pyclbr.py340
-rwxr-xr-xsys/lib/python/pydoc.py2255
-rwxr-xr-xsys/lib/python/quopri.py237
-rw-r--r--sys/lib/python/random.py862
-rw-r--r--sys/lib/python/re.py315
-rw-r--r--sys/lib/python/repr.py122
-rw-r--r--sys/lib/python/rexec.py585
-rw-r--r--sys/lib/python/rfc822.py1007
-rw-r--r--sys/lib/python/rlcompleter.py154
-rw-r--r--sys/lib/python/robotparser.py292
-rwxr-xr-xsys/lib/python/runpy.py104
-rw-r--r--sys/lib/python/sched.py117
-rw-r--r--sys/lib/python/sets.py577
-rw-r--r--sys/lib/python/sgmllib.py548
-rw-r--r--sys/lib/python/sha.py11
-rw-r--r--sys/lib/python/shelve.py225
-rw-r--r--sys/lib/python/shlex.py292
-rw-r--r--sys/lib/python/shutil.py203
-rw-r--r--sys/lib/python/site-packages/README2
-rw-r--r--sys/lib/python/site.py424
-rwxr-xr-xsys/lib/python/smtpd.py549
-rwxr-xr-xsys/lib/python/smtplib.py743
-rw-r--r--sys/lib/python/sndhdr.py228
-rw-r--r--sys/lib/python/socket.py414
-rw-r--r--sys/lib/python/sqlite3/__init__.py24
-rw-r--r--sys/lib/python/sqlite3/dbapi2.py88
-rw-r--r--sys/lib/python/sre.py13
-rw-r--r--sys/lib/python/sre_compile.py531
-rw-r--r--sys/lib/python/sre_constants.py261
-rw-r--r--sys/lib/python/sre_parse.py796
-rw-r--r--sys/lib/python/stat.py86
-rw-r--r--sys/lib/python/statvfs.py15
-rw-r--r--sys/lib/python/string.py529
-rw-r--r--sys/lib/python/stringold.py429
-rw-r--r--sys/lib/python/stringprep.py272
-rw-r--r--sys/lib/python/struct.py99
-rw-r--r--sys/lib/python/subprocess.py1249
-rw-r--r--sys/lib/python/sunau.py474
-rw-r--r--sys/lib/python/sunaudio.py44
-rwxr-xr-xsys/lib/python/symbol.py113
-rw-r--r--sys/lib/python/symtable.py252
-rwxr-xr-xsys/lib/python/tabnanny.py329
-rw-r--r--sys/lib/python/tarfile.py2176
-rw-r--r--sys/lib/python/telnetlib.py669
-rw-r--r--sys/lib/python/tempfile.py472
-rw-r--r--sys/lib/python/textwrap.py374
-rw-r--r--sys/lib/python/this.py28
-rw-r--r--sys/lib/python/threading.py816
-rw-r--r--sys/lib/python/timeit.py285
-rw-r--r--sys/lib/python/toaiff.py107
-rwxr-xr-xsys/lib/python/token.py141
-rw-r--r--sys/lib/python/tokenize.py345
-rw-r--r--sys/lib/python/trace.py792
-rw-r--r--sys/lib/python/traceback.py312
-rw-r--r--sys/lib/python/tty.py36
-rw-r--r--sys/lib/python/types.py101
-rw-r--r--sys/lib/python/unittest.py816
-rw-r--r--sys/lib/python/urllib.py1538
-rw-r--r--sys/lib/python/urllib2.py1353
-rw-r--r--sys/lib/python/urlparse.py375
-rw-r--r--sys/lib/python/user.py45
-rwxr-xr-xsys/lib/python/uu.py186
-rw-r--r--sys/lib/python/uuid.py541
-rw-r--r--sys/lib/python/warnings.py264
-rw-r--r--sys/lib/python/wave.py499
-rw-r--r--sys/lib/python/weakref.py355
-rw-r--r--sys/lib/python/webbrowser.py651
-rw-r--r--sys/lib/python/whichdb.py117
-rw-r--r--sys/lib/python/wsgiref.egg-info8
-rw-r--r--sys/lib/python/wsgiref/__init__.py23
-rw-r--r--sys/lib/python/wsgiref/handlers.py492
-rw-r--r--sys/lib/python/wsgiref/headers.py205
-rw-r--r--sys/lib/python/wsgiref/simple_server.py205
-rw-r--r--sys/lib/python/wsgiref/util.py205
-rw-r--r--sys/lib/python/wsgiref/validate.py432
-rw-r--r--sys/lib/python/xdrlib.py287
-rw-r--r--sys/lib/python/xml/__init__.py47
-rw-r--r--sys/lib/python/xml/dom/NodeFilter.py27
-rw-r--r--sys/lib/python/xml/dom/__init__.py139
-rw-r--r--sys/lib/python/xml/dom/domreg.py99
-rw-r--r--sys/lib/python/xml/dom/expatbuilder.py983
-rw-r--r--sys/lib/python/xml/dom/minicompat.py110
-rw-r--r--sys/lib/python/xml/dom/minidom.py1936
-rw-r--r--sys/lib/python/xml/dom/pulldom.py351
-rw-r--r--sys/lib/python/xml/dom/xmlbuilder.py386
-rw-r--r--sys/lib/python/xml/etree/ElementInclude.py143
-rw-r--r--sys/lib/python/xml/etree/ElementPath.py198
-rw-r--r--sys/lib/python/xml/etree/ElementTree.py1260
-rw-r--r--sys/lib/python/xml/etree/__init__.py33
-rw-r--r--sys/lib/python/xml/etree/cElementTree.py3
-rw-r--r--sys/lib/python/xml/parsers/__init__.py8
-rw-r--r--sys/lib/python/xml/parsers/expat.py4
-rw-r--r--sys/lib/python/xml/sax/__init__.py108
-rw-r--r--sys/lib/python/xml/sax/_exceptions.py131
-rw-r--r--sys/lib/python/xml/sax/expatreader.py414
-rw-r--r--sys/lib/python/xml/sax/handler.py342
-rw-r--r--sys/lib/python/xml/sax/saxutils.py302
-rw-r--r--sys/lib/python/xml/sax/xmlreader.py381
-rw-r--r--sys/lib/python/xmllib.py929
-rw-r--r--sys/lib/python/xmlrpclib.py1488
-rw-r--r--sys/lib/python/zipfile.py900
931 files changed, 0 insertions, 270919 deletions
diff --git a/sys/lib/python/BaseHTTPServer.py b/sys/lib/python/BaseHTTPServer.py
deleted file mode 100644
index 396e4d6a3..000000000
--- a/sys/lib/python/BaseHTTPServer.py
+++ /dev/null
@@ -1,578 +0,0 @@
-"""HTTP server base class.
-
-Note: the class in this module doesn't implement any HTTP request; see
-SimpleHTTPServer for simple implementations of GET, HEAD and POST
-(including CGI scripts). It does, however, optionally implement HTTP/1.1
-persistent connections, as of version 0.3.
-
-Contents:
-
-- BaseHTTPRequestHandler: HTTP request handler base class
-- test: test function
-
-XXX To do:
-
-- log requests even later (to capture byte count)
-- log user-agent header and other interesting goodies
-- send error log to separate file
-"""
-
-
-# See also:
-#
-# HTTP Working Group T. Berners-Lee
-# INTERNET-DRAFT R. T. Fielding
-# <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen
-# Expires September 8, 1995 March 8, 1995
-#
-# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
-#
-# and
-#
-# Network Working Group R. Fielding
-# Request for Comments: 2616 et al
-# Obsoletes: 2068 June 1999
-# Category: Standards Track
-#
-# URL: http://www.faqs.org/rfcs/rfc2616.html
-
-# Log files
-# ---------
-#
-# Here's a quote from the NCSA httpd docs about log file format.
-#
-# | The logfile format is as follows. Each line consists of:
-# |
-# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
-# |
-# | host: Either the DNS name or the IP number of the remote client
-# | rfc931: Any information returned by identd for this person,
-# | - otherwise.
-# | authuser: If user sent a userid for authentication, the user name,
-# | - otherwise.
-# | DD: Day
-# | Mon: Month (calendar name)
-# | YYYY: Year
-# | hh: hour (24-hour format, the machine's timezone)
-# | mm: minutes
-# | ss: seconds
-# | request: The first line of the HTTP request as sent by the client.
-# | ddd: the status code returned by the server, - if not available.
-# | bbbb: the total number of bytes sent,
-# | *not including the HTTP/1.0 header*, - if not available
-# |
-# | You can determine the name of the file accessed through request.
-#
-# (Actually, the latter is only true if you know the server configuration
-# at the time the request was made!)
-
-__version__ = "0.3"
-
-__all__ = ["HTTPServer", "BaseHTTPRequestHandler"]
-
-import sys
-import time
-import socket # For gethostbyaddr()
-import mimetools
-import SocketServer
-
-# Default error message
-DEFAULT_ERROR_MESSAGE = """\
-<head>
-<title>Error response</title>
-</head>
-<body>
-<h1>Error response</h1>
-<p>Error code %(code)d.
-<p>Message: %(message)s.
-<p>Error code explanation: %(code)s = %(explain)s.
-</body>
-"""
-
-def _quote_html(html):
- return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
-
-class HTTPServer(SocketServer.TCPServer):
-
- allow_reuse_address = 1 # Seems to make sense in testing environment
-
- def server_bind(self):
- """Override server_bind to store the server name."""
- SocketServer.TCPServer.server_bind(self)
- host, port = self.socket.getsockname()[:2]
- self.server_name = socket.getfqdn(host)
- self.server_port = port
-
-
-class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler):
-
- """HTTP request handler base class.
-
- The following explanation of HTTP serves to guide you through the
- code as well as to expose any misunderstandings I may have about
- HTTP (so you don't need to read the code to figure out I'm wrong
- :-).
-
- HTTP (HyperText Transfer Protocol) is an extensible protocol on
- top of a reliable stream transport (e.g. TCP/IP). The protocol
- recognizes three parts to a request:
-
- 1. One line identifying the request type and path
- 2. An optional set of RFC-822-style headers
- 3. An optional data part
-
- The headers and data are separated by a blank line.
-
- The first line of the request has the form
-
- <command> <path> <version>
-
- where <command> is a (case-sensitive) keyword such as GET or POST,
- <path> is a string containing path information for the request,
- and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
- <path> is encoded using the URL encoding scheme (using %xx to signify
- the ASCII character with hex code xx).
-
- The specification specifies that lines are separated by CRLF but
- for compatibility with the widest range of clients recommends
- servers also handle LF. Similarly, whitespace in the request line
- is treated sensibly (allowing multiple spaces between components
- and allowing trailing whitespace).
-
- Similarly, for output, lines ought to be separated by CRLF pairs
- but most clients grok LF characters just fine.
-
- If the first line of the request has the form
-
- <command> <path>
-
- (i.e. <version> is left out) then this is assumed to be an HTTP
- 0.9 request; this form has no optional headers and data part and
- the reply consists of just the data.
-
- The reply form of the HTTP 1.x protocol again has three parts:
-
- 1. One line giving the response code
- 2. An optional set of RFC-822-style headers
- 3. The data
-
- Again, the headers and data are separated by a blank line.
-
- The response code line has the form
-
- <version> <responsecode> <responsestring>
-
- where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
- <responsecode> is a 3-digit response code indicating success or
- failure of the request, and <responsestring> is an optional
- human-readable string explaining what the response code means.
-
- This server parses the request and the headers, and then calls a
- function specific to the request type (<command>). Specifically,
- a request SPAM will be handled by a method do_SPAM(). If no
- such method exists the server sends an error response to the
- client. If it exists, it is called with no arguments:
-
- do_SPAM()
-
- Note that the request name is case sensitive (i.e. SPAM and spam
- are different requests).
-
- The various request details are stored in instance variables:
-
- - client_address is the client IP address in the form (host,
- port);
-
- - command, path and version are the broken-down request line;
-
- - headers is an instance of mimetools.Message (or a derived
- class) containing the header information;
-
- - rfile is a file object open for reading positioned at the
- start of the optional input data part;
-
- - wfile is a file object open for writing.
-
- IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
-
- The first thing to be written must be the response line. Then
- follow 0 or more header lines, then a blank line, and then the
- actual data (if any). The meaning of the header lines depends on
- the command executed by the server; in most cases, when data is
- returned, there should be at least one header line of the form
-
- Content-type: <type>/<subtype>
-
- where <type> and <subtype> should be registered MIME types,
- e.g. "text/html" or "text/plain".
-
- """
-
- # The Python system version, truncated to its first component.
- sys_version = "Python/" + sys.version.split()[0]
-
- # The server software version. You may want to override this.
- # The format is multiple whitespace-separated strings,
- # where each string is of the form name[/version].
- server_version = "BaseHTTP/" + __version__
-
- def parse_request(self):
- """Parse a request (internal).
-
- The request should be stored in self.raw_requestline; the results
- are in self.command, self.path, self.request_version and
- self.headers.
-
- Return True for success, False for failure; on failure, an
- error is sent back.
-
- """
- self.command = None # set in case of error on the first line
- self.request_version = version = "HTTP/0.9" # Default
- self.close_connection = 1
- requestline = self.raw_requestline
- if requestline[-2:] == '\r\n':
- requestline = requestline[:-2]
- elif requestline[-1:] == '\n':
- requestline = requestline[:-1]
- self.requestline = requestline
- words = requestline.split()
- if len(words) == 3:
- [command, path, version] = words
- if version[:5] != 'HTTP/':
- self.send_error(400, "Bad request version (%r)" % version)
- return False
- try:
- base_version_number = version.split('/', 1)[1]
- version_number = base_version_number.split(".")
- # RFC 2145 section 3.1 says there can be only one "." and
- # - major and minor numbers MUST be treated as
- # separate integers;
- # - HTTP/2.4 is a lower version than HTTP/2.13, which in
- # turn is lower than HTTP/12.3;
- # - Leading zeros MUST be ignored by recipients.
- if len(version_number) != 2:
- raise ValueError
- version_number = int(version_number[0]), int(version_number[1])
- except (ValueError, IndexError):
- self.send_error(400, "Bad request version (%r)" % version)
- return False
- if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
- self.close_connection = 0
- if version_number >= (2, 0):
- self.send_error(505,
- "Invalid HTTP Version (%s)" % base_version_number)
- return False
- elif len(words) == 2:
- [command, path] = words
- self.close_connection = 1
- if command != 'GET':
- self.send_error(400,
- "Bad HTTP/0.9 request type (%r)" % command)
- return False
- elif not words:
- return False
- else:
- self.send_error(400, "Bad request syntax (%r)" % requestline)
- return False
- self.command, self.path, self.request_version = command, path, version
-
- # Examine the headers and look for a Connection directive
- self.headers = self.MessageClass(self.rfile, 0)
-
- conntype = self.headers.get('Connection', "")
- if conntype.lower() == 'close':
- self.close_connection = 1
- elif (conntype.lower() == 'keep-alive' and
- self.protocol_version >= "HTTP/1.1"):
- self.close_connection = 0
- return True
-
- def handle_one_request(self):
- """Handle a single HTTP request.
-
- You normally don't need to override this method; see the class
- __doc__ string for information on how to handle specific HTTP
- commands such as GET and POST.
-
- """
- self.raw_requestline = self.rfile.readline()
- if not self.raw_requestline:
- self.close_connection = 1
- return
- if not self.parse_request(): # An error code has been sent, just exit
- return
- mname = 'do_' + self.command
- if not hasattr(self, mname):
- self.send_error(501, "Unsupported method (%r)" % self.command)
- return
- method = getattr(self, mname)
- method()
-
- def handle(self):
- """Handle multiple requests if necessary."""
- self.close_connection = 1
-
- self.handle_one_request()
- while not self.close_connection:
- self.handle_one_request()
-
- def send_error(self, code, message=None):
- """Send and log an error reply.
-
- Arguments are the error code, and a detailed message.
- The detailed message defaults to the short entry matching the
- response code.
-
- This sends an error response (so it must be called before any
- output has been generated), logs the error, and finally sends
- a piece of HTML explaining the error to the user.
-
- """
-
- try:
- short, long = self.responses[code]
- except KeyError:
- short, long = '???', '???'
- if message is None:
- message = short
- explain = long
- self.log_error("code %d, message %s", code, message)
- # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201)
- content = (self.error_message_format %
- {'code': code, 'message': _quote_html(message), 'explain': explain})
- self.send_response(code, message)
- self.send_header("Content-Type", "text/html")
- self.send_header('Connection', 'close')
- self.end_headers()
- if self.command != 'HEAD' and code >= 200 and code not in (204, 304):
- self.wfile.write(content)
-
- error_message_format = DEFAULT_ERROR_MESSAGE
-
- def send_response(self, code, message=None):
- """Send the response header and log the response code.
-
- Also send two standard headers with the server software
- version and the current date.
-
- """
- self.log_request(code)
- if message is None:
- if code in self.responses:
- message = self.responses[code][0]
- else:
- message = ''
- if self.request_version != 'HTTP/0.9':
- self.wfile.write("%s %d %s\r\n" %
- (self.protocol_version, code, message))
- # print (self.protocol_version, code, message)
- self.send_header('Server', self.version_string())
- self.send_header('Date', self.date_time_string())
-
- def send_header(self, keyword, value):
- """Send a MIME header."""
- if self.request_version != 'HTTP/0.9':
- self.wfile.write("%s: %s\r\n" % (keyword, value))
-
- if keyword.lower() == 'connection':
- if value.lower() == 'close':
- self.close_connection = 1
- elif value.lower() == 'keep-alive':
- self.close_connection = 0
-
- def end_headers(self):
- """Send the blank line ending the MIME headers."""
- if self.request_version != 'HTTP/0.9':
- self.wfile.write("\r\n")
-
- def log_request(self, code='-', size='-'):
- """Log an accepted request.
-
- This is called by send_response().
-
- """
-
- self.log_message('"%s" %s %s',
- self.requestline, str(code), str(size))
-
- def log_error(self, *args):
- """Log an error.
-
- This is called when a request cannot be fulfilled. By
- default it passes the message on to log_message().
-
- Arguments are the same as for log_message().
-
- XXX This should go to the separate error log.
-
- """
-
- self.log_message(*args)
-
- def log_message(self, format, *args):
- """Log an arbitrary message.
-
- This is used by all other logging functions. Override
- it if you have specific logging wishes.
-
- The first argument, FORMAT, is a format string for the
- message to be logged. If the format string contains
- any % escapes requiring parameters, they should be
- specified as subsequent arguments (it's just like
- printf!).
-
- The client host and current date/time are prefixed to
- every message.
-
- """
-
- sys.stderr.write("%s - - [%s] %s\n" %
- (self.address_string(),
- self.log_date_time_string(),
- format%args))
-
- def version_string(self):
- """Return the server software version string."""
- return self.server_version + ' ' + self.sys_version
-
- def date_time_string(self, timestamp=None):
- """Return the current date and time formatted for a message header."""
- if timestamp is None:
- timestamp = time.time()
- year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
- s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
- self.weekdayname[wd],
- day, self.monthname[month], year,
- hh, mm, ss)
- return s
-
- def log_date_time_string(self):
- """Return the current time formatted for logging."""
- now = time.time()
- year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
- s = "%02d/%3s/%04d %02d:%02d:%02d" % (
- day, self.monthname[month], year, hh, mm, ss)
- return s
-
- weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
-
- monthname = [None,
- 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-
- def address_string(self):
- """Return the client address formatted for logging.
-
- This version looks up the full hostname using gethostbyaddr(),
- and tries to find a name that contains at least one dot.
-
- """
-
- host, port = self.client_address[:2]
- return socket.getfqdn(host)
-
- # Essentially static class variables
-
- # The version of the HTTP protocol we support.
- # Set this to HTTP/1.1 to enable automatic keepalive
- protocol_version = "HTTP/1.0"
-
- # The Message-like class used to parse headers
- MessageClass = mimetools.Message
-
- # Table mapping response codes to messages; entries have the
- # form {code: (shortmessage, longmessage)}.
- # See RFC 2616.
- responses = {
- 100: ('Continue', 'Request received, please continue'),
- 101: ('Switching Protocols',
- 'Switching to new protocol; obey Upgrade header'),
-
- 200: ('OK', 'Request fulfilled, document follows'),
- 201: ('Created', 'Document created, URL follows'),
- 202: ('Accepted',
- 'Request accepted, processing continues off-line'),
- 203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
- 204: ('No Content', 'Request fulfilled, nothing follows'),
- 205: ('Reset Content', 'Clear input form for further input.'),
- 206: ('Partial Content', 'Partial content follows.'),
-
- 300: ('Multiple Choices',
- 'Object has several resources -- see URI list'),
- 301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
- 302: ('Found', 'Object moved temporarily -- see URI list'),
- 303: ('See Other', 'Object moved -- see Method and URL list'),
- 304: ('Not Modified',
- 'Document has not changed since given time'),
- 305: ('Use Proxy',
- 'You must use proxy specified in Location to access this '
- 'resource.'),
- 307: ('Temporary Redirect',
- 'Object moved temporarily -- see URI list'),
-
- 400: ('Bad Request',
- 'Bad request syntax or unsupported method'),
- 401: ('Unauthorized',
- 'No permission -- see authorization schemes'),
- 402: ('Payment Required',
- 'No payment -- see charging schemes'),
- 403: ('Forbidden',
- 'Request forbidden -- authorization will not help'),
- 404: ('Not Found', 'Nothing matches the given URI'),
- 405: ('Method Not Allowed',
- 'Specified method is invalid for this server.'),
- 406: ('Not Acceptable', 'URI not available in preferred format.'),
- 407: ('Proxy Authentication Required', 'You must authenticate with '
- 'this proxy before proceeding.'),
- 408: ('Request Timeout', 'Request timed out; try again later.'),
- 409: ('Conflict', 'Request conflict.'),
- 410: ('Gone',
- 'URI no longer exists and has been permanently removed.'),
- 411: ('Length Required', 'Client must specify Content-Length.'),
- 412: ('Precondition Failed', 'Precondition in headers is false.'),
- 413: ('Request Entity Too Large', 'Entity is too large.'),
- 414: ('Request-URI Too Long', 'URI is too long.'),
- 415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
- 416: ('Requested Range Not Satisfiable',
- 'Cannot satisfy request range.'),
- 417: ('Expectation Failed',
- 'Expect condition could not be satisfied.'),
-
- 500: ('Internal Server Error', 'Server got itself in trouble'),
- 501: ('Not Implemented',
- 'Server does not support this operation'),
- 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
- 503: ('Service Unavailable',
- 'The server cannot process the request due to a high load'),
- 504: ('Gateway Timeout',
- 'The gateway server did not receive a timely response'),
- 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
- }
-
-
-def test(HandlerClass = BaseHTTPRequestHandler,
- ServerClass = HTTPServer, protocol="HTTP/1.0"):
- """Test the HTTP request handler class.
-
- This runs an HTTP server on port 8000 (or the first command line
- argument).
-
- """
-
- if sys.argv[1:]:
- port = int(sys.argv[1])
- else:
- port = 8000
- server_address = ('', port)
-
- HandlerClass.protocol_version = protocol
- httpd = ServerClass(server_address, HandlerClass)
-
- sa = httpd.socket.getsockname()
- print "Serving HTTP on", sa[0], "port", sa[1], "..."
- httpd.serve_forever()
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/Bastion.py b/sys/lib/python/Bastion.py
deleted file mode 100644
index 58cce978c..000000000
--- a/sys/lib/python/Bastion.py
+++ /dev/null
@@ -1,177 +0,0 @@
-"""Bastionification utility.
-
-A bastion (for another object -- the 'original') is an object that has
-the same methods as the original but does not give access to its
-instance variables. Bastions have a number of uses, but the most
-obvious one is to provide code executing in restricted mode with a
-safe interface to an object implemented in unrestricted mode.
-
-The bastionification routine has an optional second argument which is
-a filter function. Only those methods for which the filter method
-(called with the method name as argument) returns true are accessible.
-The default filter method returns true unless the method name begins
-with an underscore.
-
-There are a number of possible implementations of bastions. We use a
-'lazy' approach where the bastion's __getattr__() discipline does all
-the work for a particular method the first time it is used. This is
-usually fastest, especially if the user doesn't call all available
-methods. The retrieved methods are stored as instance variables of
-the bastion, so the overhead is only occurred on the first use of each
-method.
-
-Detail: the bastion class has a __repr__() discipline which includes
-the repr() of the original object. This is precomputed when the
-bastion is created.
-
-"""
-
-__all__ = ["BastionClass", "Bastion"]
-
-from types import MethodType
-
-
-class BastionClass:
-
- """Helper class used by the Bastion() function.
-
- You could subclass this and pass the subclass as the bastionclass
- argument to the Bastion() function, as long as the constructor has
- the same signature (a get() function and a name for the object).
-
- """
-
- def __init__(self, get, name):
- """Constructor.
-
- Arguments:
-
- get - a function that gets the attribute value (by name)
- name - a human-readable name for the original object
- (suggestion: use repr(object))
-
- """
- self._get_ = get
- self._name_ = name
-
- def __repr__(self):
- """Return a representation string.
-
- This includes the name passed in to the constructor, so that
- if you print the bastion during debugging, at least you have
- some idea of what it is.
-
- """
- return "<Bastion for %s>" % self._name_
-
- def __getattr__(self, name):
- """Get an as-yet undefined attribute value.
-
- This calls the get() function that was passed to the
- constructor. The result is stored as an instance variable so
- that the next time the same attribute is requested,
- __getattr__() won't be invoked.
-
- If the get() function raises an exception, this is simply
- passed on -- exceptions are not cached.
-
- """
- attribute = self._get_(name)
- self.__dict__[name] = attribute
- return attribute
-
-
-def Bastion(object, filter = lambda name: name[:1] != '_',
- name=None, bastionclass=BastionClass):
- """Create a bastion for an object, using an optional filter.
-
- See the Bastion module's documentation for background.
-
- Arguments:
-
- object - the original object
- filter - a predicate that decides whether a function name is OK;
- by default all names are OK that don't start with '_'
- name - the name of the object; default repr(object)
- bastionclass - class used to create the bastion; default BastionClass
-
- """
-
- raise RuntimeError, "This code is not secure in Python 2.2 and 2.3"
-
- # Note: we define *two* ad-hoc functions here, get1 and get2.
- # Both are intended to be called in the same way: get(name).
- # It is clear that the real work (getting the attribute
- # from the object and calling the filter) is done in get1.
- # Why can't we pass get1 to the bastion? Because the user
- # would be able to override the filter argument! With get2,
- # overriding the default argument is no security loophole:
- # all it does is call it.
- # Also notice that we can't place the object and filter as
- # instance variables on the bastion object itself, since
- # the user has full access to all instance variables!
-
- def get1(name, object=object, filter=filter):
- """Internal function for Bastion(). See source comments."""
- if filter(name):
- attribute = getattr(object, name)
- if type(attribute) == MethodType:
- return attribute
- raise AttributeError, name
-
- def get2(name, get1=get1):
- """Internal function for Bastion(). See source comments."""
- return get1(name)
-
- if name is None:
- name = repr(object)
- return bastionclass(get2, name)
-
-
-def _test():
- """Test the Bastion() function."""
- class Original:
- def __init__(self):
- self.sum = 0
- def add(self, n):
- self._add(n)
- def _add(self, n):
- self.sum = self.sum + n
- def total(self):
- return self.sum
- o = Original()
- b = Bastion(o)
- testcode = """if 1:
- b.add(81)
- b.add(18)
- print "b.total() =", b.total()
- try:
- print "b.sum =", b.sum,
- except:
- print "inaccessible"
- else:
- print "accessible"
- try:
- print "b._add =", b._add,
- except:
- print "inaccessible"
- else:
- print "accessible"
- try:
- print "b._get_.func_defaults =", map(type, b._get_.func_defaults),
- except:
- print "inaccessible"
- else:
- print "accessible"
- \n"""
- exec testcode
- print '='*20, "Using rexec:", '='*20
- import rexec
- r = rexec.RExec()
- m = r.add_module('__main__')
- m.b = b
- r.r_exec(testcode)
-
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/CGIHTTPServer.py b/sys/lib/python/CGIHTTPServer.py
deleted file mode 100644
index c119c9a69..000000000
--- a/sys/lib/python/CGIHTTPServer.py
+++ /dev/null
@@ -1,362 +0,0 @@
-"""CGI-savvy HTTP Server.
-
-This module builds on SimpleHTTPServer by implementing GET and POST
-requests to cgi-bin scripts.
-
-If the os.fork() function is not present (e.g. on Windows),
-os.popen2() is used as a fallback, with slightly altered semantics; if
-that function is not present either (e.g. on Macintosh), only Python
-scripts are supported, and they are executed by the current process.
-
-In all cases, the implementation is intentionally naive -- all
-requests are executed sychronously.
-
-SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
--- it may execute arbitrary Python code or external programs.
-
-Note that status code 200 is sent prior to execution of a CGI script, so
-scripts cannot send other status codes such as 302 (redirect).
-"""
-
-
-__version__ = "0.4"
-
-__all__ = ["CGIHTTPRequestHandler"]
-
-import os
-import sys
-import urllib
-import BaseHTTPServer
-import SimpleHTTPServer
-import select
-
-
-class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
-
- """Complete HTTP server with GET, HEAD and POST commands.
-
- GET and HEAD also support running CGI scripts.
-
- The POST command is *only* implemented for CGI scripts.
-
- """
-
- # Determine platform specifics
- have_fork = hasattr(os, 'fork')
- have_popen2 = hasattr(os, 'popen2')
- have_popen3 = hasattr(os, 'popen3')
-
- # Make rfile unbuffered -- we need to read one line and then pass
- # the rest to a subprocess, so we can't use buffered input.
- rbufsize = 0
-
- def do_POST(self):
- """Serve a POST request.
-
- This is only implemented for CGI scripts.
-
- """
-
- if self.is_cgi():
- self.run_cgi()
- else:
- self.send_error(501, "Can only POST to CGI scripts")
-
- def send_head(self):
- """Version of send_head that support CGI scripts"""
- if self.is_cgi():
- return self.run_cgi()
- else:
- return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
-
- def is_cgi(self):
- """Test whether self.path corresponds to a CGI script.
-
- Return a tuple (dir, rest) if self.path requires running a
- CGI script, None if not. Note that rest begins with a
- slash if it is not empty.
-
- The default implementation tests whether the path
- begins with one of the strings in the list
- self.cgi_directories (and the next character is a '/'
- or the end of the string).
-
- """
-
- path = self.path
-
- for x in self.cgi_directories:
- i = len(x)
- if path[:i] == x and (not path[i:] or path[i] == '/'):
- self.cgi_info = path[:i], path[i+1:]
- return True
- return False
-
- cgi_directories = ['/cgi-bin', '/htbin']
-
- def is_executable(self, path):
- """Test whether argument path is an executable file."""
- return executable(path)
-
- def is_python(self, path):
- """Test whether argument path is a Python script."""
- head, tail = os.path.splitext(path)
- return tail.lower() in (".py", ".pyw")
-
- def run_cgi(self):
- """Execute a CGI script."""
- path = self.path
- dir, rest = self.cgi_info
-
- i = path.find('/', len(dir) + 1)
- while i >= 0:
- nextdir = path[:i]
- nextrest = path[i+1:]
-
- scriptdir = self.translate_path(nextdir)
- if os.path.isdir(scriptdir):
- dir, rest = nextdir, nextrest
- i = path.find('/', len(dir) + 1)
- else:
- break
-
- # find an explicit query string, if present.
- i = rest.rfind('?')
- if i >= 0:
- rest, query = rest[:i], rest[i+1:]
- else:
- query = ''
-
- # dissect the part after the directory name into a script name &
- # a possible additional path, to be stored in PATH_INFO.
- i = rest.find('/')
- if i >= 0:
- script, rest = rest[:i], rest[i:]
- else:
- script, rest = rest, ''
-
- scriptname = dir + '/' + script
- scriptfile = self.translate_path(scriptname)
- if not os.path.exists(scriptfile):
- self.send_error(404, "No such CGI script (%r)" % scriptname)
- return
- if not os.path.isfile(scriptfile):
- self.send_error(403, "CGI script is not a plain file (%r)" %
- scriptname)
- return
- ispy = self.is_python(scriptname)
- if not ispy:
- if not (self.have_fork or self.have_popen2 or self.have_popen3):
- self.send_error(403, "CGI script is not a Python script (%r)" %
- scriptname)
- return
- if not self.is_executable(scriptfile):
- self.send_error(403, "CGI script is not executable (%r)" %
- scriptname)
- return
-
- # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
- # XXX Much of the following could be prepared ahead of time!
- env = {}
- env['SERVER_SOFTWARE'] = self.version_string()
- env['SERVER_NAME'] = self.server.server_name
- env['GATEWAY_INTERFACE'] = 'CGI/1.1'
- env['SERVER_PROTOCOL'] = self.protocol_version
- env['SERVER_PORT'] = str(self.server.server_port)
- env['REQUEST_METHOD'] = self.command
- uqrest = urllib.unquote(rest)
- env['PATH_INFO'] = uqrest
- env['PATH_TRANSLATED'] = self.translate_path(uqrest)
- env['SCRIPT_NAME'] = scriptname
- if query:
- env['QUERY_STRING'] = query
- host = self.address_string()
- if host != self.client_address[0]:
- env['REMOTE_HOST'] = host
- env['REMOTE_ADDR'] = self.client_address[0]
- authorization = self.headers.getheader("authorization")
- if authorization:
- authorization = authorization.split()
- if len(authorization) == 2:
- import base64, binascii
- env['AUTH_TYPE'] = authorization[0]
- if authorization[0].lower() == "basic":
- try:
- authorization = base64.decodestring(authorization[1])
- except binascii.Error:
- pass
- else:
- authorization = authorization.split(':')
- if len(authorization) == 2:
- env['REMOTE_USER'] = authorization[0]
- # XXX REMOTE_IDENT
- if self.headers.typeheader is None:
- env['CONTENT_TYPE'] = self.headers.type
- else:
- env['CONTENT_TYPE'] = self.headers.typeheader
- length = self.headers.getheader('content-length')
- if length:
- env['CONTENT_LENGTH'] = length
- accept = []
- for line in self.headers.getallmatchingheaders('accept'):
- if line[:1] in "\t\n\r ":
- accept.append(line.strip())
- else:
- accept = accept + line[7:].split(',')
- env['HTTP_ACCEPT'] = ','.join(accept)
- ua = self.headers.getheader('user-agent')
- if ua:
- env['HTTP_USER_AGENT'] = ua
- co = filter(None, self.headers.getheaders('cookie'))
- if co:
- env['HTTP_COOKIE'] = ', '.join(co)
- # XXX Other HTTP_* headers
- # Since we're setting the env in the parent, provide empty
- # values to override previously set values
- for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
- 'HTTP_USER_AGENT', 'HTTP_COOKIE'):
- env.setdefault(k, "")
- os.environ.update(env)
-
- self.send_response(200, "Script output follows")
-
- decoded_query = query.replace('+', ' ')
-
- if self.have_fork:
- # Unix -- fork as we should
- args = [script]
- if '=' not in decoded_query:
- args.append(decoded_query)
- nobody = nobody_uid()
- self.wfile.flush() # Always flush before forking
- pid = os.fork()
- if pid != 0:
- # Parent
- pid, sts = os.waitpid(pid, 0)
- # throw away additional data [see bug #427345]
- while select.select([self.rfile], [], [], 0)[0]:
- if not self.rfile.read(1):
- break
- if sts:
- self.log_error("CGI script exit status %#x", sts)
- return
- # Child
- try:
- try:
- os.setuid(nobody)
- except os.error:
- pass
- os.dup2(self.rfile.fileno(), 0)
- os.dup2(self.wfile.fileno(), 1)
- os.execve(scriptfile, args, os.environ)
- except:
- self.server.handle_error(self.request, self.client_address)
- os._exit(127)
-
- elif self.have_popen2 or self.have_popen3:
- # Windows -- use popen2 or popen3 to create a subprocess
- import shutil
- if self.have_popen3:
- popenx = os.popen3
- else:
- popenx = os.popen2
- cmdline = scriptfile
- if self.is_python(scriptfile):
- interp = sys.executable
- if interp.lower().endswith("w.exe"):
- # On Windows, use python.exe, not pythonw.exe
- interp = interp[:-5] + interp[-4:]
- cmdline = "%s -u %s" % (interp, cmdline)
- if '=' not in query and '"' not in query:
- cmdline = '%s "%s"' % (cmdline, query)
- self.log_message("command: %s", cmdline)
- try:
- nbytes = int(length)
- except (TypeError, ValueError):
- nbytes = 0
- files = popenx(cmdline, 'b')
- fi = files[0]
- fo = files[1]
- if self.have_popen3:
- fe = files[2]
- if self.command.lower() == "post" and nbytes > 0:
- data = self.rfile.read(nbytes)
- fi.write(data)
- # throw away additional data [see bug #427345]
- while select.select([self.rfile._sock], [], [], 0)[0]:
- if not self.rfile._sock.recv(1):
- break
- fi.close()
- shutil.copyfileobj(fo, self.wfile)
- if self.have_popen3:
- errors = fe.read()
- fe.close()
- if errors:
- self.log_error('%s', errors)
- sts = fo.close()
- if sts:
- self.log_error("CGI script exit status %#x", sts)
- else:
- self.log_message("CGI script exited OK")
-
- else:
- # Other O.S. -- execute script in this process
- save_argv = sys.argv
- save_stdin = sys.stdin
- save_stdout = sys.stdout
- save_stderr = sys.stderr
- try:
- save_cwd = os.getcwd()
- try:
- sys.argv = [scriptfile]
- if '=' not in decoded_query:
- sys.argv.append(decoded_query)
- sys.stdout = self.wfile
- sys.stdin = self.rfile
- execfile(scriptfile, {"__name__": "__main__"})
- finally:
- sys.argv = save_argv
- sys.stdin = save_stdin
- sys.stdout = save_stdout
- sys.stderr = save_stderr
- os.chdir(save_cwd)
- except SystemExit, sts:
- self.log_error("CGI script exit status %s", str(sts))
- else:
- self.log_message("CGI script exited OK")
-
-
-nobody = None
-
-def nobody_uid():
- """Internal routine to get nobody's uid"""
- global nobody
- if nobody:
- return nobody
- try:
- import pwd
- except ImportError:
- return -1
- try:
- nobody = pwd.getpwnam('nobody')[2]
- except KeyError:
- nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
- return nobody
-
-
-def executable(path):
- """Test for executable file."""
- try:
- st = os.stat(path)
- except os.error:
- return False
- return st.st_mode & 0111 != 0
-
-
-def test(HandlerClass = CGIHTTPRequestHandler,
- ServerClass = BaseHTTPServer.HTTPServer):
- SimpleHTTPServer.test(HandlerClass, ServerClass)
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/ConfigParser.py b/sys/lib/python/ConfigParser.py
deleted file mode 100644
index 6dc53b9e0..000000000
--- a/sys/lib/python/ConfigParser.py
+++ /dev/null
@@ -1,640 +0,0 @@
-"""Configuration file parser.
-
-A setup file consists of sections, lead by a "[section]" header,
-and followed by "name: value" entries, with continuations and such in
-the style of RFC 822.
-
-The option values can contain format strings which refer to other values in
-the same section, or values in a special [DEFAULT] section.
-
-For example:
-
- something: %(dir)s/whatever
-
-would resolve the "%(dir)s" to the value of dir. All reference
-expansions are done late, on demand.
-
-Intrinsic defaults can be specified by passing them into the
-ConfigParser constructor as a dictionary.
-
-class:
-
-ConfigParser -- responsible for parsing a list of
- configuration files, and managing the parsed database.
-
- methods:
-
- __init__(defaults=None)
- create the parser and specify a dictionary of intrinsic defaults. The
- keys must be strings, the values must be appropriate for %()s string
- interpolation. Note that `__name__' is always an intrinsic default;
- its value is the section's name.
-
- sections()
- return all the configuration section names, sans DEFAULT
-
- has_section(section)
- return whether the given section exists
-
- has_option(section, option)
- return whether the given option exists in the given section
-
- options(section)
- return list of configuration options for the named section
-
- read(filenames)
- read and parse the list of named configuration files, given by
- name. A single filename is also allowed. Non-existing files
- are ignored. Return list of successfully read files.
-
- readfp(fp, filename=None)
- read and parse one configuration file, given as a file object.
- The filename defaults to fp.name; it is only used in error
- messages (if fp has no `name' attribute, the string `<???>' is used).
-
- get(section, option, raw=False, vars=None)
- return a string value for the named option. All % interpolations are
- expanded in the return values, based on the defaults passed into the
- constructor and the DEFAULT section. Additional substitutions may be
- provided using the `vars' argument, which must be a dictionary whose
- contents override any pre-existing defaults.
-
- getint(section, options)
- like get(), but convert value to an integer
-
- getfloat(section, options)
- like get(), but convert value to a float
-
- getboolean(section, options)
- like get(), but convert value to a boolean (currently case
- insensitively defined as 0, false, no, off for False, and 1, true,
- yes, on for True). Returns False or True.
-
- items(section, raw=False, vars=None)
- return a list of tuples with (name, value) for each option
- in the section.
-
- remove_section(section)
- remove the given file section and all its options
-
- remove_option(section, option)
- remove the given option from the given section
-
- set(section, option, value)
- set the given option
-
- write(fp)
- write the configuration state in .ini format
-"""
-
-import re
-
-__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
- "InterpolationError", "InterpolationDepthError",
- "InterpolationSyntaxError", "ParsingError",
- "MissingSectionHeaderError",
- "ConfigParser", "SafeConfigParser", "RawConfigParser",
- "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
-
-DEFAULTSECT = "DEFAULT"
-
-MAX_INTERPOLATION_DEPTH = 10
-
-
-
-# exception classes
-class Error(Exception):
- """Base class for ConfigParser exceptions."""
-
- def __init__(self, msg=''):
- self.message = msg
- Exception.__init__(self, msg)
-
- def __repr__(self):
- return self.message
-
- __str__ = __repr__
-
-class NoSectionError(Error):
- """Raised when no section matches a requested option."""
-
- def __init__(self, section):
- Error.__init__(self, 'No section: %r' % (section,))
- self.section = section
-
-class DuplicateSectionError(Error):
- """Raised when a section is multiply-created."""
-
- def __init__(self, section):
- Error.__init__(self, "Section %r already exists" % section)
- self.section = section
-
-class NoOptionError(Error):
- """A requested option was not found."""
-
- def __init__(self, option, section):
- Error.__init__(self, "No option %r in section: %r" %
- (option, section))
- self.option = option
- self.section = section
-
-class InterpolationError(Error):
- """Base class for interpolation-related exceptions."""
-
- def __init__(self, option, section, msg):
- Error.__init__(self, msg)
- self.option = option
- self.section = section
-
-class InterpolationMissingOptionError(InterpolationError):
- """A string substitution required a setting which was not available."""
-
- def __init__(self, option, section, rawval, reference):
- msg = ("Bad value substitution:\n"
- "\tsection: [%s]\n"
- "\toption : %s\n"
- "\tkey : %s\n"
- "\trawval : %s\n"
- % (section, option, reference, rawval))
- InterpolationError.__init__(self, option, section, msg)
- self.reference = reference
-
-class InterpolationSyntaxError(InterpolationError):
- """Raised when the source text into which substitutions are made
- does not conform to the required syntax."""
-
-class InterpolationDepthError(InterpolationError):
- """Raised when substitutions are nested too deeply."""
-
- def __init__(self, option, section, rawval):
- msg = ("Value interpolation too deeply recursive:\n"
- "\tsection: [%s]\n"
- "\toption : %s\n"
- "\trawval : %s\n"
- % (section, option, rawval))
- InterpolationError.__init__(self, option, section, msg)
-
-class ParsingError(Error):
- """Raised when a configuration file does not follow legal syntax."""
-
- def __init__(self, filename):
- Error.__init__(self, 'File contains parsing errors: %s' % filename)
- self.filename = filename
- self.errors = []
-
- def append(self, lineno, line):
- self.errors.append((lineno, line))
- self.message += '\n\t[line %2d]: %s' % (lineno, line)
-
-class MissingSectionHeaderError(ParsingError):
- """Raised when a key-value pair is found before any section header."""
-
- def __init__(self, filename, lineno, line):
- Error.__init__(
- self,
- 'File contains no section headers.\nfile: %s, line: %d\n%r' %
- (filename, lineno, line))
- self.filename = filename
- self.lineno = lineno
- self.line = line
-
-
-
-class RawConfigParser:
- def __init__(self, defaults=None):
- self._sections = {}
- self._defaults = {}
- if defaults:
- for key, value in defaults.items():
- self._defaults[self.optionxform(key)] = value
-
- def defaults(self):
- return self._defaults
-
- def sections(self):
- """Return a list of section names, excluding [DEFAULT]"""
- # self._sections will never have [DEFAULT] in it
- return self._sections.keys()
-
- def add_section(self, section):
- """Create a new section in the configuration.
-
- Raise DuplicateSectionError if a section by the specified name
- already exists.
- """
- if section in self._sections:
- raise DuplicateSectionError(section)
- self._sections[section] = {}
-
- def has_section(self, section):
- """Indicate whether the named section is present in the configuration.
-
- The DEFAULT section is not acknowledged.
- """
- return section in self._sections
-
- def options(self, section):
- """Return a list of option names for the given section name."""
- try:
- opts = self._sections[section].copy()
- except KeyError:
- raise NoSectionError(section)
- opts.update(self._defaults)
- if '__name__' in opts:
- del opts['__name__']
- return opts.keys()
-
- def read(self, filenames):
- """Read and parse a filename or a list of filenames.
-
- Files that cannot be opened are silently ignored; this is
- designed so that you can specify a list of potential
- configuration file locations (e.g. current directory, user's
- home directory, systemwide directory), and all existing
- configuration files in the list will be read. A single
- filename may also be given.
-
- Return list of successfully read files.
- """
- if isinstance(filenames, basestring):
- filenames = [filenames]
- read_ok = []
- for filename in filenames:
- try:
- fp = open(filename)
- except IOError:
- continue
- self._read(fp, filename)
- fp.close()
- read_ok.append(filename)
- return read_ok
-
- def readfp(self, fp, filename=None):
- """Like read() but the argument must be a file-like object.
-
- The `fp' argument must have a `readline' method. Optional
- second argument is the `filename', which if not given, is
- taken from fp.name. If fp has no `name' attribute, `<???>' is
- used.
-
- """
- if filename is None:
- try:
- filename = fp.name
- except AttributeError:
- filename = '<???>'
- self._read(fp, filename)
-
- def get(self, section, option):
- opt = self.optionxform(option)
- if section not in self._sections:
- if section != DEFAULTSECT:
- raise NoSectionError(section)
- if opt in self._defaults:
- return self._defaults[opt]
- else:
- raise NoOptionError(option, section)
- elif opt in self._sections[section]:
- return self._sections[section][opt]
- elif opt in self._defaults:
- return self._defaults[opt]
- else:
- raise NoOptionError(option, section)
-
- def items(self, section):
- try:
- d2 = self._sections[section]
- except KeyError:
- if section != DEFAULTSECT:
- raise NoSectionError(section)
- d2 = {}
- d = self._defaults.copy()
- d.update(d2)
- if "__name__" in d:
- del d["__name__"]
- return d.items()
-
- def _get(self, section, conv, option):
- return conv(self.get(section, option))
-
- def getint(self, section, option):
- return self._get(section, int, option)
-
- def getfloat(self, section, option):
- return self._get(section, float, option)
-
- _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
- '0': False, 'no': False, 'false': False, 'off': False}
-
- def getboolean(self, section, option):
- v = self.get(section, option)
- if v.lower() not in self._boolean_states:
- raise ValueError, 'Not a boolean: %s' % v
- return self._boolean_states[v.lower()]
-
- def optionxform(self, optionstr):
- return optionstr.lower()
-
- def has_option(self, section, option):
- """Check for the existence of a given option in a given section."""
- if not section or section == DEFAULTSECT:
- option = self.optionxform(option)
- return option in self._defaults
- elif section not in self._sections:
- return False
- else:
- option = self.optionxform(option)
- return (option in self._sections[section]
- or option in self._defaults)
-
- def set(self, section, option, value):
- """Set an option."""
- if not section or section == DEFAULTSECT:
- sectdict = self._defaults
- else:
- try:
- sectdict = self._sections[section]
- except KeyError:
- raise NoSectionError(section)
- sectdict[self.optionxform(option)] = value
-
- def write(self, fp):
- """Write an .ini-format representation of the configuration state."""
- if self._defaults:
- fp.write("[%s]\n" % DEFAULTSECT)
- for (key, value) in self._defaults.items():
- fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
- fp.write("\n")
- for section in self._sections:
- fp.write("[%s]\n" % section)
- for (key, value) in self._sections[section].items():
- if key != "__name__":
- fp.write("%s = %s\n" %
- (key, str(value).replace('\n', '\n\t')))
- fp.write("\n")
-
- def remove_option(self, section, option):
- """Remove an option."""
- if not section or section == DEFAULTSECT:
- sectdict = self._defaults
- else:
- try:
- sectdict = self._sections[section]
- except KeyError:
- raise NoSectionError(section)
- option = self.optionxform(option)
- existed = option in sectdict
- if existed:
- del sectdict[option]
- return existed
-
- def remove_section(self, section):
- """Remove a file section."""
- existed = section in self._sections
- if existed:
- del self._sections[section]
- return existed
-
- #
- # Regular expressions for parsing section headers and options.
- #
- SECTCRE = re.compile(
- r'\[' # [
- r'(?P<header>[^]]+)' # very permissive!
- r'\]' # ]
- )
- OPTCRE = re.compile(
- r'(?P<option>[^:=\s][^:=]*)' # very permissive!
- r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
- # followed by separator
- # (either : or =), followed
- # by any # space/tab
- r'(?P<value>.*)$' # everything up to eol
- )
-
- def _read(self, fp, fpname):
- """Parse a sectioned setup file.
-
- The sections in setup file contains a title line at the top,
- indicated by a name in square brackets (`[]'), plus key/value
- options lines, indicated by `name: value' format lines.
- Continuations are represented by an embedded newline then
- leading whitespace. Blank lines, lines beginning with a '#',
- and just about everything else are ignored.
- """
- cursect = None # None, or a dictionary
- optname = None
- lineno = 0
- e = None # None, or an exception
- while True:
- line = fp.readline()
- if not line:
- break
- lineno = lineno + 1
- # comment or blank line?
- if line.strip() == '' or line[0] in '#;':
- continue
- if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
- # no leading whitespace
- continue
- # continuation line?
- if line[0].isspace() and cursect is not None and optname:
- value = line.strip()
- if value:
- cursect[optname] = "%s\n%s" % (cursect[optname], value)
- # a section header or option header?
- else:
- # is it a section header?
- mo = self.SECTCRE.match(line)
- if mo:
- sectname = mo.group('header')
- if sectname in self._sections:
- cursect = self._sections[sectname]
- elif sectname == DEFAULTSECT:
- cursect = self._defaults
- else:
- cursect = {'__name__': sectname}
- self._sections[sectname] = cursect
- # So sections can't start with a continuation line
- optname = None
- # no section header in the file?
- elif cursect is None:
- raise MissingSectionHeaderError(fpname, lineno, line)
- # an option line?
- else:
- mo = self.OPTCRE.match(line)
- if mo:
- optname, vi, optval = mo.group('option', 'vi', 'value')
- if vi in ('=', ':') and ';' in optval:
- # ';' is a comment delimiter only if it follows
- # a spacing character
- pos = optval.find(';')
- if pos != -1 and optval[pos-1].isspace():
- optval = optval[:pos]
- optval = optval.strip()
- # allow empty values
- if optval == '""':
- optval = ''
- optname = self.optionxform(optname.rstrip())
- cursect[optname] = optval
- else:
- # a non-fatal parsing error occurred. set up the
- # exception but keep going. the exception will be
- # raised at the end of the file and will contain a
- # list of all bogus lines
- if not e:
- e = ParsingError(fpname)
- e.append(lineno, repr(line))
- # if any parsing errors occurred, raise an exception
- if e:
- raise e
-
-
-class ConfigParser(RawConfigParser):
-
- def get(self, section, option, raw=False, vars=None):
- """Get an option value for a given section.
-
- All % interpolations are expanded in the return values, based on the
- defaults passed into the constructor, unless the optional argument
- `raw' is true. Additional substitutions may be provided using the
- `vars' argument, which must be a dictionary whose contents overrides
- any pre-existing defaults.
-
- The section DEFAULT is special.
- """
- d = self._defaults.copy()
- try:
- d.update(self._sections[section])
- except KeyError:
- if section != DEFAULTSECT:
- raise NoSectionError(section)
- # Update with the entry specific variables
- if vars:
- for key, value in vars.items():
- d[self.optionxform(key)] = value
- option = self.optionxform(option)
- try:
- value = d[option]
- except KeyError:
- raise NoOptionError(option, section)
-
- if raw:
- return value
- else:
- return self._interpolate(section, option, value, d)
-
- def items(self, section, raw=False, vars=None):
- """Return a list of tuples with (name, value) for each option
- in the section.
-
- All % interpolations are expanded in the return values, based on the
- defaults passed into the constructor, unless the optional argument
- `raw' is true. Additional substitutions may be provided using the
- `vars' argument, which must be a dictionary whose contents overrides
- any pre-existing defaults.
-
- The section DEFAULT is special.
- """
- d = self._defaults.copy()
- try:
- d.update(self._sections[section])
- except KeyError:
- if section != DEFAULTSECT:
- raise NoSectionError(section)
- # Update with the entry specific variables
- if vars:
- for key, value in vars.items():
- d[self.optionxform(key)] = value
- options = d.keys()
- if "__name__" in options:
- options.remove("__name__")
- if raw:
- return [(option, d[option])
- for option in options]
- else:
- return [(option, self._interpolate(section, option, d[option], d))
- for option in options]
-
- def _interpolate(self, section, option, rawval, vars):
- # do the string interpolation
- value = rawval
- depth = MAX_INTERPOLATION_DEPTH
- while depth: # Loop through this until it's done
- depth -= 1
- if "%(" in value:
- value = self._KEYCRE.sub(self._interpolation_replace, value)
- try:
- value = value % vars
- except KeyError, e:
- raise InterpolationMissingOptionError(
- option, section, rawval, e[0])
- else:
- break
- if "%(" in value:
- raise InterpolationDepthError(option, section, rawval)
- return value
-
- _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
-
- def _interpolation_replace(self, match):
- s = match.group(1)
- if s is None:
- return match.group()
- else:
- return "%%(%s)s" % self.optionxform(s)
-
-
-class SafeConfigParser(ConfigParser):
-
- def _interpolate(self, section, option, rawval, vars):
- # do the string interpolation
- L = []
- self._interpolate_some(option, L, rawval, section, vars, 1)
- return ''.join(L)
-
- _interpvar_match = re.compile(r"%\(([^)]+)\)s").match
-
- def _interpolate_some(self, option, accum, rest, section, map, depth):
- if depth > MAX_INTERPOLATION_DEPTH:
- raise InterpolationDepthError(option, section, rest)
- while rest:
- p = rest.find("%")
- if p < 0:
- accum.append(rest)
- return
- if p > 0:
- accum.append(rest[:p])
- rest = rest[p:]
- # p is no longer used
- c = rest[1:2]
- if c == "%":
- accum.append("%")
- rest = rest[2:]
- elif c == "(":
- m = self._interpvar_match(rest)
- if m is None:
- raise InterpolationSyntaxError(option, section,
- "bad interpolation variable reference %r" % rest)
- var = self.optionxform(m.group(1))
- rest = rest[m.end():]
- try:
- v = map[var]
- except KeyError:
- raise InterpolationMissingOptionError(
- option, section, rest, var)
- if "%" in v:
- self._interpolate_some(option, accum, v,
- section, map, depth + 1)
- else:
- accum.append(v)
- else:
- raise InterpolationSyntaxError(
- option, section,
- "'%%' must be followed by '%%' or '(', found: %r" % (rest,))
-
- def set(self, section, option, value):
- """Set an option. Extend ConfigParser.set: check for string values."""
- if not isinstance(value, basestring):
- raise TypeError("option values must be strings")
- ConfigParser.set(self, section, option, value)
diff --git a/sys/lib/python/Cookie.py b/sys/lib/python/Cookie.py
deleted file mode 100644
index e1eb7348b..000000000
--- a/sys/lib/python/Cookie.py
+++ /dev/null
@@ -1,746 +0,0 @@
-#!/usr/bin/env python
-#
-
-####
-# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
-#
-# All Rights Reserved
-#
-# Permission to use, copy, modify, and distribute this software
-# and its documentation for any purpose and without fee is hereby
-# granted, provided that the above copyright notice appear in all
-# copies and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Timothy O'Malley not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
-# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
-# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-####
-#
-# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
-# by Timothy O'Malley <timo@alum.mit.edu>
-#
-# Cookie.py is a Python module for the handling of HTTP
-# cookies as a Python dictionary. See RFC 2109 for more
-# information on cookies.
-#
-# The original idea to treat Cookies as a dictionary came from
-# Dave Mitchell (davem@magnet.com) in 1995, when he released the
-# first version of nscookie.py.
-#
-####
-
-r"""
-Here's a sample session to show how to use this module.
-At the moment, this is the only documentation.
-
-The Basics
-----------
-
-Importing is easy..
-
- >>> import Cookie
-
-Most of the time you start by creating a cookie. Cookies come in
-three flavors, each with slightly different encoding semantics, but
-more on that later.
-
- >>> C = Cookie.SimpleCookie()
- >>> C = Cookie.SerialCookie()
- >>> C = Cookie.SmartCookie()
-
-[Note: Long-time users of Cookie.py will remember using
-Cookie.Cookie() to create an Cookie object. Although deprecated, it
-is still supported by the code. See the Backward Compatibility notes
-for more information.]
-
-Once you've created your Cookie, you can add values just as if it were
-a dictionary.
-
- >>> C = Cookie.SmartCookie()
- >>> C["fig"] = "newton"
- >>> C["sugar"] = "wafer"
- >>> C.output()
- 'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
-
-Notice that the printable representation of a Cookie is the
-appropriate format for a Set-Cookie: header. This is the
-default behavior. You can change the header and printed
-attributes by using the .output() function
-
- >>> C = Cookie.SmartCookie()
- >>> C["rocky"] = "road"
- >>> C["rocky"]["path"] = "/cookie"
- >>> print C.output(header="Cookie:")
- Cookie: rocky=road; Path=/cookie
- >>> print C.output(attrs=[], header="Cookie:")
- Cookie: rocky=road
-
-The load() method of a Cookie extracts cookies from a string. In a
-CGI script, you would use this method to extract the cookies from the
-HTTP_COOKIE environment variable.
-
- >>> C = Cookie.SmartCookie()
- >>> C.load("chips=ahoy; vienna=finger")
- >>> C.output()
- 'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
-
-The load() method is darn-tootin smart about identifying cookies
-within a string. Escaped quotation marks, nested semicolons, and other
-such trickeries do not confuse it.
-
- >>> C = Cookie.SmartCookie()
- >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
- >>> print C
- Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
-
-Each element of the Cookie also supports all of the RFC 2109
-Cookie attributes. Here's an example which sets the Path
-attribute.
-
- >>> C = Cookie.SmartCookie()
- >>> C["oreo"] = "doublestuff"
- >>> C["oreo"]["path"] = "/"
- >>> print C
- Set-Cookie: oreo=doublestuff; Path=/
-
-Each dictionary element has a 'value' attribute, which gives you
-back the value associated with the key.
-
- >>> C = Cookie.SmartCookie()
- >>> C["twix"] = "none for you"
- >>> C["twix"].value
- 'none for you'
-
-
-A Bit More Advanced
--------------------
-
-As mentioned before, there are three different flavors of Cookie
-objects, each with different encoding/decoding semantics. This
-section briefly discusses the differences.
-
-SimpleCookie
-
-The SimpleCookie expects that all values should be standard strings.
-Just to be sure, SimpleCookie invokes the str() builtin to convert
-the value to a string, when the values are set dictionary-style.
-
- >>> C = Cookie.SimpleCookie()
- >>> C["number"] = 7
- >>> C["string"] = "seven"
- >>> C["number"].value
- '7'
- >>> C["string"].value
- 'seven'
- >>> C.output()
- 'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
-
-
-SerialCookie
-
-The SerialCookie expects that all values should be serialized using
-cPickle (or pickle, if cPickle isn't available). As a result of
-serializing, SerialCookie can save almost any Python object to a
-value, and recover the exact same object when the cookie has been
-returned. (SerialCookie can yield some strange-looking cookie
-values, however.)
-
- >>> C = Cookie.SerialCookie()
- >>> C["number"] = 7
- >>> C["string"] = "seven"
- >>> C["number"].value
- 7
- >>> C["string"].value
- 'seven'
- >>> C.output()
- 'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string="S\'seven\'\\012p1\\012."'
-
-Be warned, however, if SerialCookie cannot de-serialize a value (because
-it isn't a valid pickle'd object), IT WILL RAISE AN EXCEPTION.
-
-
-SmartCookie
-
-The SmartCookie combines aspects of each of the other two flavors.
-When setting a value in a dictionary-fashion, the SmartCookie will
-serialize (ala cPickle) the value *if and only if* it isn't a
-Python string. String objects are *not* serialized. Similarly,
-when the load() method parses out values, it attempts to de-serialize
-the value. If it fails, then it fallsback to treating the value
-as a string.
-
- >>> C = Cookie.SmartCookie()
- >>> C["number"] = 7
- >>> C["string"] = "seven"
- >>> C["number"].value
- 7
- >>> C["string"].value
- 'seven'
- >>> C.output()
- 'Set-Cookie: number="I7\\012."\r\nSet-Cookie: string=seven'
-
-
-Backwards Compatibility
------------------------
-
-In order to keep compatibilty with earlier versions of Cookie.py,
-it is still possible to use Cookie.Cookie() to create a Cookie. In
-fact, this simply returns a SmartCookie.
-
- >>> C = Cookie.Cookie()
- >>> print C.__class__.__name__
- SmartCookie
-
-
-Finis.
-""" #"
-# ^
-# |----helps out font-lock
-
-#
-# Import our required modules
-#
-import string
-
-try:
- from cPickle import dumps, loads
-except ImportError:
- from pickle import dumps, loads
-
-import re, warnings
-
-__all__ = ["CookieError","BaseCookie","SimpleCookie","SerialCookie",
- "SmartCookie","Cookie"]
-
-_nulljoin = ''.join
-_semispacejoin = '; '.join
-_spacejoin = ' '.join
-
-#
-# Define an exception visible to External modules
-#
-class CookieError(Exception):
- pass
-
-
-# These quoting routines conform to the RFC2109 specification, which in
-# turn references the character definitions from RFC2068. They provide
-# a two-way quoting algorithm. Any non-text character is translated
-# into a 4 character sequence: a forward-slash followed by the
-# three-digit octal equivalent of the character. Any '\' or '"' is
-# quoted with a preceeding '\' slash.
-#
-# These are taken from RFC2068 and RFC2109.
-# _LegalChars is the list of chars which don't require "'s
-# _Translator hash-table for fast quoting
-#
-_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~"
-_Translator = {
- '\000' : '\\000', '\001' : '\\001', '\002' : '\\002',
- '\003' : '\\003', '\004' : '\\004', '\005' : '\\005',
- '\006' : '\\006', '\007' : '\\007', '\010' : '\\010',
- '\011' : '\\011', '\012' : '\\012', '\013' : '\\013',
- '\014' : '\\014', '\015' : '\\015', '\016' : '\\016',
- '\017' : '\\017', '\020' : '\\020', '\021' : '\\021',
- '\022' : '\\022', '\023' : '\\023', '\024' : '\\024',
- '\025' : '\\025', '\026' : '\\026', '\027' : '\\027',
- '\030' : '\\030', '\031' : '\\031', '\032' : '\\032',
- '\033' : '\\033', '\034' : '\\034', '\035' : '\\035',
- '\036' : '\\036', '\037' : '\\037',
-
- '"' : '\\"', '\\' : '\\\\',
-
- '\177' : '\\177', '\200' : '\\200', '\201' : '\\201',
- '\202' : '\\202', '\203' : '\\203', '\204' : '\\204',
- '\205' : '\\205', '\206' : '\\206', '\207' : '\\207',
- '\210' : '\\210', '\211' : '\\211', '\212' : '\\212',
- '\213' : '\\213', '\214' : '\\214', '\215' : '\\215',
- '\216' : '\\216', '\217' : '\\217', '\220' : '\\220',
- '\221' : '\\221', '\222' : '\\222', '\223' : '\\223',
- '\224' : '\\224', '\225' : '\\225', '\226' : '\\226',
- '\227' : '\\227', '\230' : '\\230', '\231' : '\\231',
- '\232' : '\\232', '\233' : '\\233', '\234' : '\\234',
- '\235' : '\\235', '\236' : '\\236', '\237' : '\\237',
- '\240' : '\\240', '\241' : '\\241', '\242' : '\\242',
- '\243' : '\\243', '\244' : '\\244', '\245' : '\\245',
- '\246' : '\\246', '\247' : '\\247', '\250' : '\\250',
- '\251' : '\\251', '\252' : '\\252', '\253' : '\\253',
- '\254' : '\\254', '\255' : '\\255', '\256' : '\\256',
- '\257' : '\\257', '\260' : '\\260', '\261' : '\\261',
- '\262' : '\\262', '\263' : '\\263', '\264' : '\\264',
- '\265' : '\\265', '\266' : '\\266', '\267' : '\\267',
- '\270' : '\\270', '\271' : '\\271', '\272' : '\\272',
- '\273' : '\\273', '\274' : '\\274', '\275' : '\\275',
- '\276' : '\\276', '\277' : '\\277', '\300' : '\\300',
- '\301' : '\\301', '\302' : '\\302', '\303' : '\\303',
- '\304' : '\\304', '\305' : '\\305', '\306' : '\\306',
- '\307' : '\\307', '\310' : '\\310', '\311' : '\\311',
- '\312' : '\\312', '\313' : '\\313', '\314' : '\\314',
- '\315' : '\\315', '\316' : '\\316', '\317' : '\\317',
- '\320' : '\\320', '\321' : '\\321', '\322' : '\\322',
- '\323' : '\\323', '\324' : '\\324', '\325' : '\\325',
- '\326' : '\\326', '\327' : '\\327', '\330' : '\\330',
- '\331' : '\\331', '\332' : '\\332', '\333' : '\\333',
- '\334' : '\\334', '\335' : '\\335', '\336' : '\\336',
- '\337' : '\\337', '\340' : '\\340', '\341' : '\\341',
- '\342' : '\\342', '\343' : '\\343', '\344' : '\\344',
- '\345' : '\\345', '\346' : '\\346', '\347' : '\\347',
- '\350' : '\\350', '\351' : '\\351', '\352' : '\\352',
- '\353' : '\\353', '\354' : '\\354', '\355' : '\\355',
- '\356' : '\\356', '\357' : '\\357', '\360' : '\\360',
- '\361' : '\\361', '\362' : '\\362', '\363' : '\\363',
- '\364' : '\\364', '\365' : '\\365', '\366' : '\\366',
- '\367' : '\\367', '\370' : '\\370', '\371' : '\\371',
- '\372' : '\\372', '\373' : '\\373', '\374' : '\\374',
- '\375' : '\\375', '\376' : '\\376', '\377' : '\\377'
- }
-
-_idmap = ''.join(chr(x) for x in xrange(256))
-
-def _quote(str, LegalChars=_LegalChars,
- idmap=_idmap, translate=string.translate):
- #
- # If the string does not need to be double-quoted,
- # then just return the string. Otherwise, surround
- # the string in doublequotes and precede quote (with a \)
- # special characters.
- #
- if "" == translate(str, idmap, LegalChars):
- return str
- else:
- return '"' + _nulljoin( map(_Translator.get, str, str) ) + '"'
-# end _quote
-
-
-_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
-_QuotePatt = re.compile(r"[\\].")
-
-def _unquote(str):
- # If there aren't any doublequotes,
- # then there can't be any special characters. See RFC 2109.
- if len(str) < 2:
- return str
- if str[0] != '"' or str[-1] != '"':
- return str
-
- # We have to assume that we must decode this string.
- # Down to work.
-
- # Remove the "s
- str = str[1:-1]
-
- # Check for special sequences. Examples:
- # \012 --> \n
- # \" --> "
- #
- i = 0
- n = len(str)
- res = []
- while 0 <= i < n:
- Omatch = _OctalPatt.search(str, i)
- Qmatch = _QuotePatt.search(str, i)
- if not Omatch and not Qmatch: # Neither matched
- res.append(str[i:])
- break
- # else:
- j = k = -1
- if Omatch: j = Omatch.start(0)
- if Qmatch: k = Qmatch.start(0)
- if Qmatch and ( not Omatch or k < j ): # QuotePatt matched
- res.append(str[i:k])
- res.append(str[k+1])
- i = k+2
- else: # OctalPatt matched
- res.append(str[i:j])
- res.append( chr( int(str[j+1:j+4], 8) ) )
- i = j+4
- return _nulljoin(res)
-# end _unquote
-
-# The _getdate() routine is used to set the expiration time in
-# the cookie's HTTP header. By default, _getdate() returns the
-# current time in the appropriate "expires" format for a
-# Set-Cookie header. The one optional argument is an offset from
-# now, in seconds. For example, an offset of -3600 means "one hour ago".
-# The offset may be a floating point number.
-#
-
-_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
-
-_monthname = [None,
- 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-
-def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
- from time import gmtime, time
- now = time()
- year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
- return "%s, %02d-%3s-%4d %02d:%02d:%02d GMT" % \
- (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
-
-
-#
-# A class to hold ONE key,value pair.
-# In a cookie, each such pair may have several attributes.
-# so this class is used to keep the attributes associated
-# with the appropriate key,value pair.
-# This class also includes a coded_value attribute, which
-# is used to hold the network representation of the
-# value. This is most useful when Python objects are
-# pickled for network transit.
-#
-
-class Morsel(dict):
- # RFC 2109 lists these attributes as reserved:
- # path comment domain
- # max-age secure version
- #
- # For historical reasons, these attributes are also reserved:
- # expires
- #
- # This dictionary provides a mapping from the lowercase
- # variant on the left to the appropriate traditional
- # formatting on the right.
- _reserved = { "expires" : "expires",
- "path" : "Path",
- "comment" : "Comment",
- "domain" : "Domain",
- "max-age" : "Max-Age",
- "secure" : "secure",
- "version" : "Version",
- }
-
- def __init__(self):
- # Set defaults
- self.key = self.value = self.coded_value = None
-
- # Set default attributes
- for K in self._reserved:
- dict.__setitem__(self, K, "")
- # end __init__
-
- def __setitem__(self, K, V):
- K = K.lower()
- if not K in self._reserved:
- raise CookieError("Invalid Attribute %s" % K)
- dict.__setitem__(self, K, V)
- # end __setitem__
-
- def isReservedKey(self, K):
- return K.lower() in self._reserved
- # end isReservedKey
-
- def set(self, key, val, coded_val,
- LegalChars=_LegalChars,
- idmap=_idmap, translate=string.translate):
- # First we verify that the key isn't a reserved word
- # Second we make sure it only contains legal characters
- if key.lower() in self._reserved:
- raise CookieError("Attempt to set a reserved key: %s" % key)
- if "" != translate(key, idmap, LegalChars):
- raise CookieError("Illegal key value: %s" % key)
-
- # It's a good key, so save it.
- self.key = key
- self.value = val
- self.coded_value = coded_val
- # end set
-
- def output(self, attrs=None, header = "Set-Cookie:"):
- return "%s %s" % ( header, self.OutputString(attrs) )
-
- __str__ = output
-
- def __repr__(self):
- return '<%s: %s=%s>' % (self.__class__.__name__,
- self.key, repr(self.value) )
-
- def js_output(self, attrs=None):
- # Print javascript
- return """
- <script type="text/javascript">
- <!-- begin hiding
- document.cookie = \"%s\";
- // end hiding -->
- </script>
- """ % ( self.OutputString(attrs), )
- # end js_output()
-
- def OutputString(self, attrs=None):
- # Build up our result
- #
- result = []
- RA = result.append
-
- # First, the key=value pair
- RA("%s=%s" % (self.key, self.coded_value))
-
- # Now add any defined attributes
- if attrs is None:
- attrs = self._reserved
- items = self.items()
- items.sort()
- for K,V in items:
- if V == "": continue
- if K not in attrs: continue
- if K == "expires" and type(V) == type(1):
- RA("%s=%s" % (self._reserved[K], _getdate(V)))
- elif K == "max-age" and type(V) == type(1):
- RA("%s=%d" % (self._reserved[K], V))
- elif K == "secure":
- RA(str(self._reserved[K]))
- else:
- RA("%s=%s" % (self._reserved[K], V))
-
- # Return the result
- return _semispacejoin(result)
- # end OutputString
-# end Morsel class
-
-
-
-#
-# Pattern for finding cookie
-#
-# This used to be strict parsing based on the RFC2109 and RFC2068
-# specifications. I have since discovered that MSIE 3.0x doesn't
-# follow the character rules outlined in those specs. As a
-# result, the parsing rules here are less strict.
-#
-
-_LegalCharsPatt = r"[\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
-_CookiePattern = re.compile(
- r"(?x)" # This is a Verbose pattern
- r"(?P<key>" # Start of group 'key'
- ""+ _LegalCharsPatt +"+?" # Any word of at least one letter, nongreedy
- r")" # End of group 'key'
- r"\s*=\s*" # Equal Sign
- r"(?P<val>" # Start of group 'val'
- r'"(?:[^\\"]|\\.)*"' # Any doublequoted string
- r"|" # or
- ""+ _LegalCharsPatt +"*" # Any word or empty string
- r")" # End of group 'val'
- r"\s*;?" # Probably ending in a semi-colon
- )
-
-
-# At long last, here is the cookie class.
-# Using this class is almost just like using a dictionary.
-# See this module's docstring for example usage.
-#
-class BaseCookie(dict):
- # A container class for a set of Morsels
- #
-
- def value_decode(self, val):
- """real_value, coded_value = value_decode(STRING)
- Called prior to setting a cookie's value from the network
- representation. The VALUE is the value read from HTTP
- header.
- Override this function to modify the behavior of cookies.
- """
- return val, val
- # end value_encode
-
- def value_encode(self, val):
- """real_value, coded_value = value_encode(VALUE)
- Called prior to setting a cookie's value from the dictionary
- representation. The VALUE is the value being assigned.
- Override this function to modify the behavior of cookies.
- """
- strval = str(val)
- return strval, strval
- # end value_encode
-
- def __init__(self, input=None):
- if input: self.load(input)
- # end __init__
-
- def __set(self, key, real_value, coded_value):
- """Private method for setting a cookie's value"""
- M = self.get(key, Morsel())
- M.set(key, real_value, coded_value)
- dict.__setitem__(self, key, M)
- # end __set
-
- def __setitem__(self, key, value):
- """Dictionary style assignment."""
- rval, cval = self.value_encode(value)
- self.__set(key, rval, cval)
- # end __setitem__
-
- def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
- """Return a string suitable for HTTP."""
- result = []
- items = self.items()
- items.sort()
- for K,V in items:
- result.append( V.output(attrs, header) )
- return sep.join(result)
- # end output
-
- __str__ = output
-
- def __repr__(self):
- L = []
- items = self.items()
- items.sort()
- for K,V in items:
- L.append( '%s=%s' % (K,repr(V.value) ) )
- return '<%s: %s>' % (self.__class__.__name__, _spacejoin(L))
-
- def js_output(self, attrs=None):
- """Return a string suitable for JavaScript."""
- result = []
- items = self.items()
- items.sort()
- for K,V in items:
- result.append( V.js_output(attrs) )
- return _nulljoin(result)
- # end js_output
-
- def load(self, rawdata):
- """Load cookies from a string (presumably HTTP_COOKIE) or
- from a dictionary. Loading cookies from a dictionary 'd'
- is equivalent to calling:
- map(Cookie.__setitem__, d.keys(), d.values())
- """
- if type(rawdata) == type(""):
- self.__ParseString(rawdata)
- else:
- self.update(rawdata)
- return
- # end load()
-
- def __ParseString(self, str, patt=_CookiePattern):
- i = 0 # Our starting point
- n = len(str) # Length of string
- M = None # current morsel
-
- while 0 <= i < n:
- # Start looking for a cookie
- match = patt.search(str, i)
- if not match: break # No more cookies
-
- K,V = match.group("key"), match.group("val")
- i = match.end(0)
-
- # Parse the key, value in case it's metainfo
- if K[0] == "$":
- # We ignore attributes which pertain to the cookie
- # mechanism as a whole. See RFC 2109.
- # (Does anyone care?)
- if M:
- M[ K[1:] ] = V
- elif K.lower() in Morsel._reserved:
- if M:
- M[ K ] = _unquote(V)
- else:
- rval, cval = self.value_decode(V)
- self.__set(K, rval, cval)
- M = self[K]
- # end __ParseString
-# end BaseCookie class
-
-class SimpleCookie(BaseCookie):
- """SimpleCookie
- SimpleCookie supports strings as cookie values. When setting
- the value using the dictionary assignment notation, SimpleCookie
- calls the builtin str() to convert the value to a string. Values
- received from HTTP are kept as strings.
- """
- def value_decode(self, val):
- return _unquote( val ), val
- def value_encode(self, val):
- strval = str(val)
- return strval, _quote( strval )
-# end SimpleCookie
-
-class SerialCookie(BaseCookie):
- """SerialCookie
- SerialCookie supports arbitrary objects as cookie values. All
- values are serialized (using cPickle) before being sent to the
- client. All incoming values are assumed to be valid Pickle
- representations. IF AN INCOMING VALUE IS NOT IN A VALID PICKLE
- FORMAT, THEN AN EXCEPTION WILL BE RAISED.
-
- Note: Large cookie values add overhead because they must be
- retransmitted on every HTTP transaction.
-
- Note: HTTP has a 2k limit on the size of a cookie. This class
- does not check for this limit, so be careful!!!
- """
- def __init__(self, input=None):
- warnings.warn("SerialCookie class is insecure; do not use it",
- DeprecationWarning)
- BaseCookie.__init__(self, input)
- # end __init__
- def value_decode(self, val):
- # This could raise an exception!
- return loads( _unquote(val) ), val
- def value_encode(self, val):
- return val, _quote( dumps(val) )
-# end SerialCookie
-
-class SmartCookie(BaseCookie):
- """SmartCookie
- SmartCookie supports arbitrary objects as cookie values. If the
- object is a string, then it is quoted. If the object is not a
- string, however, then SmartCookie will use cPickle to serialize
- the object into a string representation.
-
- Note: Large cookie values add overhead because they must be
- retransmitted on every HTTP transaction.
-
- Note: HTTP has a 2k limit on the size of a cookie. This class
- does not check for this limit, so be careful!!!
- """
- def __init__(self, input=None):
- warnings.warn("Cookie/SmartCookie class is insecure; do not use it",
- DeprecationWarning)
- BaseCookie.__init__(self, input)
- # end __init__
- def value_decode(self, val):
- strval = _unquote(val)
- try:
- return loads(strval), val
- except:
- return strval, val
- def value_encode(self, val):
- if type(val) == type(""):
- return val, _quote(val)
- else:
- return val, _quote( dumps(val) )
-# end SmartCookie
-
-
-###########################################################
-# Backwards Compatibility: Don't break any existing code!
-
-# We provide Cookie() as an alias for SmartCookie()
-Cookie = SmartCookie
-
-#
-###########################################################
-
-def _test():
- import doctest, Cookie
- return doctest.testmod(Cookie)
-
-if __name__ == "__main__":
- _test()
-
-
-#Local Variables:
-#tab-width: 4
-#end:
diff --git a/sys/lib/python/DocXMLRPCServer.py b/sys/lib/python/DocXMLRPCServer.py
deleted file mode 100644
index 86ed32b6f..000000000
--- a/sys/lib/python/DocXMLRPCServer.py
+++ /dev/null
@@ -1,306 +0,0 @@
-"""Self documenting XML-RPC Server.
-
-This module can be used to create XML-RPC servers that
-serve pydoc-style documentation in response to HTTP
-GET requests. This documentation is dynamically generated
-based on the functions and methods registered with the
-server.
-
-This module is built upon the pydoc and SimpleXMLRPCServer
-modules.
-"""
-
-import pydoc
-import inspect
-import re
-import sys
-
-from SimpleXMLRPCServer import (SimpleXMLRPCServer,
- SimpleXMLRPCRequestHandler,
- CGIXMLRPCRequestHandler,
- resolve_dotted_attribute)
-
-class ServerHTMLDoc(pydoc.HTMLDoc):
- """Class used to generate pydoc HTML document for a server"""
-
- def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
- """Mark up some plain text, given a context of symbols to look for.
- Each context dictionary maps object names to anchor names."""
- escape = escape or self.escape
- results = []
- here = 0
-
- # XXX Note that this regular expressions does not allow for the
- # hyperlinking of arbitrary strings being used as method
- # names. Only methods with names consisting of word characters
- # and '.'s are hyperlinked.
- pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
- r'RFC[- ]?(\d+)|'
- r'PEP[- ]?(\d+)|'
- r'(self\.)?((?:\w|\.)+))\b')
- while 1:
- match = pattern.search(text, here)
- if not match: break
- start, end = match.span()
- results.append(escape(text[here:start]))
-
- all, scheme, rfc, pep, selfdot, name = match.groups()
- if scheme:
- url = escape(all).replace('"', '&quot;')
- results.append('<a href="%s">%s</a>' % (url, url))
- elif rfc:
- url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
- results.append('<a href="%s">%s</a>' % (url, escape(all)))
- elif pep:
- url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
- results.append('<a href="%s">%s</a>' % (url, escape(all)))
- elif text[end:end+1] == '(':
- results.append(self.namelink(name, methods, funcs, classes))
- elif selfdot:
- results.append('self.<strong>%s</strong>' % name)
- else:
- results.append(self.namelink(name, classes))
- here = end
- results.append(escape(text[here:]))
- return ''.join(results)
-
- def docroutine(self, object, name=None, mod=None,
- funcs={}, classes={}, methods={}, cl=None):
- """Produce HTML documentation for a function or method object."""
-
- anchor = (cl and cl.__name__ or '') + '-' + name
- note = ''
-
- title = '<a name="%s"><strong>%s</strong></a>' % (anchor, name)
-
- if inspect.ismethod(object):
- args, varargs, varkw, defaults = inspect.getargspec(object.im_func)
- # exclude the argument bound to the instance, it will be
- # confusing to the non-Python user
- argspec = inspect.formatargspec (
- args[1:],
- varargs,
- varkw,
- defaults,
- formatvalue=self.formatvalue
- )
- elif inspect.isfunction(object):
- args, varargs, varkw, defaults = inspect.getargspec(object)
- argspec = inspect.formatargspec(
- args, varargs, varkw, defaults, formatvalue=self.formatvalue)
- else:
- argspec = '(...)'
-
- if isinstance(object, tuple):
- argspec = object[0] or argspec
- docstring = object[1] or ""
- else:
- docstring = pydoc.getdoc(object)
-
- decl = title + argspec + (note and self.grey(
- '<font face="helvetica, arial">%s</font>' % note))
-
- doc = self.markup(
- docstring, self.preformat, funcs, classes, methods)
- doc = doc and '<dd><tt>%s</tt></dd>' % doc
- return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
-
- def docserver(self, server_name, package_documentation, methods):
- """Produce HTML documentation for an XML-RPC server."""
-
- fdict = {}
- for key, value in methods.items():
- fdict[key] = '#-' + key
- fdict[value] = fdict[key]
-
- head = '<big><big><strong>%s</strong></big></big>' % server_name
- result = self.heading(head, '#ffffff', '#7799ee')
-
- doc = self.markup(package_documentation, self.preformat, fdict)
- doc = doc and '<tt>%s</tt>' % doc
- result = result + '<p>%s</p>\n' % doc
-
- contents = []
- method_items = methods.items()
- method_items.sort()
- for key, value in method_items:
- contents.append(self.docroutine(value, key, funcs=fdict))
- result = result + self.bigsection(
- 'Methods', '#ffffff', '#eeaa77', pydoc.join(contents))
-
- return result
-
-class XMLRPCDocGenerator:
- """Generates documentation for an XML-RPC server.
-
- This class is designed as mix-in and should not
- be constructed directly.
- """
-
- def __init__(self):
- # setup variables used for HTML documentation
- self.server_name = 'XML-RPC Server Documentation'
- self.server_documentation = \
- "This server exports the following methods through the XML-RPC "\
- "protocol."
- self.server_title = 'XML-RPC Server Documentation'
-
- def set_server_title(self, server_title):
- """Set the HTML title of the generated server documentation"""
-
- self.server_title = server_title
-
- def set_server_name(self, server_name):
- """Set the name of the generated HTML server documentation"""
-
- self.server_name = server_name
-
- def set_server_documentation(self, server_documentation):
- """Set the documentation string for the entire server."""
-
- self.server_documentation = server_documentation
-
- def generate_html_documentation(self):
- """generate_html_documentation() => html documentation for the server
-
- Generates HTML documentation for the server using introspection for
- installed functions and instances that do not implement the
- _dispatch method. Alternatively, instances can choose to implement
- the _get_method_argstring(method_name) method to provide the
- argument string used in the documentation and the
- _methodHelp(method_name) method to provide the help text used
- in the documentation."""
-
- methods = {}
-
- for method_name in self.system_listMethods():
- if self.funcs.has_key(method_name):
- method = self.funcs[method_name]
- elif self.instance is not None:
- method_info = [None, None] # argspec, documentation
- if hasattr(self.instance, '_get_method_argstring'):
- method_info[0] = self.instance._get_method_argstring(method_name)
- if hasattr(self.instance, '_methodHelp'):
- method_info[1] = self.instance._methodHelp(method_name)
-
- method_info = tuple(method_info)
- if method_info != (None, None):
- method = method_info
- elif not hasattr(self.instance, '_dispatch'):
- try:
- method = resolve_dotted_attribute(
- self.instance,
- method_name
- )
- except AttributeError:
- method = method_info
- else:
- method = method_info
- else:
- assert 0, "Could not find method in self.functions and no "\
- "instance installed"
-
- methods[method_name] = method
-
- documenter = ServerHTMLDoc()
- documentation = documenter.docserver(
- self.server_name,
- self.server_documentation,
- methods
- )
-
- return documenter.page(self.server_title, documentation)
-
-class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
- """XML-RPC and documentation request handler class.
-
- Handles all HTTP POST requests and attempts to decode them as
- XML-RPC requests.
-
- Handles all HTTP GET requests and interprets them as requests
- for documentation.
- """
-
- def do_GET(self):
- """Handles the HTTP GET request.
-
- Interpret all HTTP GET requests as requests for server
- documentation.
- """
- # Check that the path is legal
- if not self.is_rpc_path_valid():
- self.report_404()
- return
-
- response = self.server.generate_html_documentation()
- self.send_response(200)
- self.send_header("Content-type", "text/html")
- self.send_header("Content-length", str(len(response)))
- self.end_headers()
- self.wfile.write(response)
-
- # shut down the connection
- self.wfile.flush()
- self.connection.shutdown(1)
-
-class DocXMLRPCServer( SimpleXMLRPCServer,
- XMLRPCDocGenerator):
- """XML-RPC and HTML documentation server.
-
- Adds the ability to serve server documentation to the capabilities
- of SimpleXMLRPCServer.
- """
-
- def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
- logRequests=1):
- SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests)
- XMLRPCDocGenerator.__init__(self)
-
-class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
- XMLRPCDocGenerator):
- """Handler for XML-RPC data and documentation requests passed through
- CGI"""
-
- def handle_get(self):
- """Handles the HTTP GET request.
-
- Interpret all HTTP GET requests as requests for server
- documentation.
- """
-
- response = self.generate_html_documentation()
-
- print 'Content-Type: text/html'
- print 'Content-Length: %d' % len(response)
- print
- sys.stdout.write(response)
-
- def __init__(self):
- CGIXMLRPCRequestHandler.__init__(self)
- XMLRPCDocGenerator.__init__(self)
-
-if __name__ == '__main__':
- def deg_to_rad(deg):
- """deg_to_rad(90) => 1.5707963267948966
-
- Converts an angle in degrees to an angle in radians"""
- import math
- return deg * math.pi / 180
-
- server = DocXMLRPCServer(("localhost", 8000))
-
- server.set_server_title("Math Server")
- server.set_server_name("Math XML-RPC Server")
- server.set_server_documentation("""This server supports various mathematical functions.
-
-You can use it from Python as follows:
-
->>> from xmlrpclib import ServerProxy
->>> s = ServerProxy("http://localhost:8000")
->>> s.deg_to_rad(90.0)
-1.5707963267948966""")
-
- server.register_function(deg_to_rad)
- server.register_introspection_functions()
-
- server.serve_forever()
diff --git a/sys/lib/python/HTMLParser.py b/sys/lib/python/HTMLParser.py
deleted file mode 100644
index 8380466e3..000000000
--- a/sys/lib/python/HTMLParser.py
+++ /dev/null
@@ -1,369 +0,0 @@
-"""A parser for HTML and XHTML."""
-
-# This file is based on sgmllib.py, but the API is slightly different.
-
-# XXX There should be a way to distinguish between PCDATA (parsed
-# character data -- the normal case), RCDATA (replaceable character
-# data -- only char and entity references and end tags are special)
-# and CDATA (character data -- only end tags are special).
-
-
-import markupbase
-import re
-
-# Regular expressions used for parsing
-
-interesting_normal = re.compile('[&<]')
-interesting_cdata = re.compile(r'<(/|\Z)')
-incomplete = re.compile('&[a-zA-Z#]')
-
-entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
-charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
-
-starttagopen = re.compile('<[a-zA-Z]')
-piclose = re.compile('>')
-commentclose = re.compile(r'--\s*>')
-tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
-attrfind = re.compile(
- r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
- r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~@]*))?')
-
-locatestarttagend = re.compile(r"""
- <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
- (?:\s+ # whitespace before attribute name
- (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
- (?:\s*=\s* # value indicator
- (?:'[^']*' # LITA-enclosed value
- |\"[^\"]*\" # LIT-enclosed value
- |[^'\">\s]+ # bare value
- )
- )?
- )
- )*
- \s* # trailing whitespace
-""", re.VERBOSE)
-endendtag = re.compile('>')
-endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
-
-
-class HTMLParseError(Exception):
- """Exception raised for all parse errors."""
-
- def __init__(self, msg, position=(None, None)):
- assert msg
- self.msg = msg
- self.lineno = position[0]
- self.offset = position[1]
-
- def __str__(self):
- result = self.msg
- if self.lineno is not None:
- result = result + ", at line %d" % self.lineno
- if self.offset is not None:
- result = result + ", column %d" % (self.offset + 1)
- return result
-
-
-class HTMLParser(markupbase.ParserBase):
- """Find tags and other markup and call handler functions.
-
- Usage:
- p = HTMLParser()
- p.feed(data)
- ...
- p.close()
-
- Start tags are handled by calling self.handle_starttag() or
- self.handle_startendtag(); end tags by self.handle_endtag(). The
- data between tags is passed from the parser to the derived class
- by calling self.handle_data() with the data as argument (the data
- may be split up in arbitrary chunks). Entity references are
- passed by calling self.handle_entityref() with the entity
- reference as the argument. Numeric character references are
- passed to self.handle_charref() with the string containing the
- reference as the argument.
- """
-
- CDATA_CONTENT_ELEMENTS = ("script", "style")
-
-
- def __init__(self):
- """Initialize and reset this instance."""
- self.reset()
-
- def reset(self):
- """Reset this instance. Loses all unprocessed data."""
- self.rawdata = ''
- self.lasttag = '???'
- self.interesting = interesting_normal
- markupbase.ParserBase.reset(self)
-
- def feed(self, data):
- """Feed data to the parser.
-
- Call this as often as you want, with as little or as much text
- as you want (may include '\n').
- """
- self.rawdata = self.rawdata + data
- self.goahead(0)
-
- def close(self):
- """Handle any buffered data."""
- self.goahead(1)
-
- def error(self, message):
- raise HTMLParseError(message, self.getpos())
-
- __starttag_text = None
-
- def get_starttag_text(self):
- """Return full source of start tag: '<...>'."""
- return self.__starttag_text
-
- def set_cdata_mode(self):
- self.interesting = interesting_cdata
-
- def clear_cdata_mode(self):
- self.interesting = interesting_normal
-
- # Internal -- handle data as far as reasonable. May leave state
- # and data to be processed by a subsequent call. If 'end' is
- # true, force handling all data as if followed by EOF marker.
- def goahead(self, end):
- rawdata = self.rawdata
- i = 0
- n = len(rawdata)
- while i < n:
- match = self.interesting.search(rawdata, i) # < or &
- if match:
- j = match.start()
- else:
- j = n
- if i < j: self.handle_data(rawdata[i:j])
- i = self.updatepos(i, j)
- if i == n: break
- startswith = rawdata.startswith
- if startswith('<', i):
- if starttagopen.match(rawdata, i): # < + letter
- k = self.parse_starttag(i)
- elif startswith("</", i):
- k = self.parse_endtag(i)
- elif startswith("<!--", i):
- k = self.parse_comment(i)
- elif startswith("<?", i):
- k = self.parse_pi(i)
- elif startswith("<!", i):
- k = self.parse_declaration(i)
- elif (i + 1) < n:
- self.handle_data("<")
- k = i + 1
- else:
- break
- if k < 0:
- if end:
- self.error("EOF in middle of construct")
- break
- i = self.updatepos(i, k)
- elif startswith("&#", i):
- match = charref.match(rawdata, i)
- if match:
- name = match.group()[2:-1]
- self.handle_charref(name)
- k = match.end()
- if not startswith(';', k-1):
- k = k - 1
- i = self.updatepos(i, k)
- continue
- else:
- break
- elif startswith('&', i):
- match = entityref.match(rawdata, i)
- if match:
- name = match.group(1)
- self.handle_entityref(name)
- k = match.end()
- if not startswith(';', k-1):
- k = k - 1
- i = self.updatepos(i, k)
- continue
- match = incomplete.match(rawdata, i)
- if match:
- # match.group() will contain at least 2 chars
- if end and match.group() == rawdata[i:]:
- self.error("EOF in middle of entity or char ref")
- # incomplete
- break
- elif (i + 1) < n:
- # not the end of the buffer, and can't be confused
- # with some other construct
- self.handle_data("&")
- i = self.updatepos(i, i + 1)
- else:
- break
- else:
- assert 0, "interesting.search() lied"
- # end while
- if end and i < n:
- self.handle_data(rawdata[i:n])
- i = self.updatepos(i, n)
- self.rawdata = rawdata[i:]
-
- # Internal -- parse processing instr, return end or -1 if not terminated
- def parse_pi(self, i):
- rawdata = self.rawdata
- assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
- match = piclose.search(rawdata, i+2) # >
- if not match:
- return -1
- j = match.start()
- self.handle_pi(rawdata[i+2: j])
- j = match.end()
- return j
-
- # Internal -- handle starttag, return end or -1 if not terminated
- def parse_starttag(self, i):
- self.__starttag_text = None
- endpos = self.check_for_whole_start_tag(i)
- if endpos < 0:
- return endpos
- rawdata = self.rawdata
- self.__starttag_text = rawdata[i:endpos]
-
- # Now parse the data between i+1 and j into a tag and attrs
- attrs = []
- match = tagfind.match(rawdata, i+1)
- assert match, 'unexpected call to parse_starttag()'
- k = match.end()
- self.lasttag = tag = rawdata[i+1:k].lower()
-
- while k < endpos:
- m = attrfind.match(rawdata, k)
- if not m:
- break
- attrname, rest, attrvalue = m.group(1, 2, 3)
- if not rest:
- attrvalue = None
- elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
- attrvalue[:1] == '"' == attrvalue[-1:]:
- attrvalue = attrvalue[1:-1]
- attrvalue = self.unescape(attrvalue)
- attrs.append((attrname.lower(), attrvalue))
- k = m.end()
-
- end = rawdata[k:endpos].strip()
- if end not in (">", "/>"):
- lineno, offset = self.getpos()
- if "\n" in self.__starttag_text:
- lineno = lineno + self.__starttag_text.count("\n")
- offset = len(self.__starttag_text) \
- - self.__starttag_text.rfind("\n")
- else:
- offset = offset + len(self.__starttag_text)
- self.error("junk characters in start tag: %r"
- % (rawdata[k:endpos][:20],))
- if end.endswith('/>'):
- # XHTML-style empty tag: <span attr="value" />
- self.handle_startendtag(tag, attrs)
- else:
- self.handle_starttag(tag, attrs)
- if tag in self.CDATA_CONTENT_ELEMENTS:
- self.set_cdata_mode()
- return endpos
-
- # Internal -- check to see if we have a complete starttag; return end
- # or -1 if incomplete.
- def check_for_whole_start_tag(self, i):
- rawdata = self.rawdata
- m = locatestarttagend.match(rawdata, i)
- if m:
- j = m.end()
- next = rawdata[j:j+1]
- if next == ">":
- return j + 1
- if next == "/":
- if rawdata.startswith("/>", j):
- return j + 2
- if rawdata.startswith("/", j):
- # buffer boundary
- return -1
- # else bogus input
- self.updatepos(i, j + 1)
- self.error("malformed empty start tag")
- if next == "":
- # end of input
- return -1
- if next in ("abcdefghijklmnopqrstuvwxyz=/"
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
- # end of input in or before attribute value, or we have the
- # '/' from a '/>' ending
- return -1
- self.updatepos(i, j)
- self.error("malformed start tag")
- raise AssertionError("we should not get here!")
-
- # Internal -- parse endtag, return end or -1 if incomplete
- def parse_endtag(self, i):
- rawdata = self.rawdata
- assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
- match = endendtag.search(rawdata, i+1) # >
- if not match:
- return -1
- j = match.end()
- match = endtagfind.match(rawdata, i) # </ + tag + >
- if not match:
- self.error("bad end tag: %r" % (rawdata[i:j],))
- tag = match.group(1)
- self.handle_endtag(tag.lower())
- self.clear_cdata_mode()
- return j
-
- # Overridable -- finish processing of start+end tag: <tag.../>
- def handle_startendtag(self, tag, attrs):
- self.handle_starttag(tag, attrs)
- self.handle_endtag(tag)
-
- # Overridable -- handle start tag
- def handle_starttag(self, tag, attrs):
- pass
-
- # Overridable -- handle end tag
- def handle_endtag(self, tag):
- pass
-
- # Overridable -- handle character reference
- def handle_charref(self, name):
- pass
-
- # Overridable -- handle entity reference
- def handle_entityref(self, name):
- pass
-
- # Overridable -- handle data
- def handle_data(self, data):
- pass
-
- # Overridable -- handle comment
- def handle_comment(self, data):
- pass
-
- # Overridable -- handle declaration
- def handle_decl(self, decl):
- pass
-
- # Overridable -- handle processing instruction
- def handle_pi(self, data):
- pass
-
- def unknown_decl(self, data):
- self.error("unknown declaration: %r" % (data,))
-
- # Internal -- helper to remove special character quoting
- def unescape(self, s):
- if '&' not in s:
- return s
- s = s.replace("&lt;", "<")
- s = s.replace("&gt;", ">")
- s = s.replace("&apos;", "'")
- s = s.replace("&quot;", '"')
- s = s.replace("&amp;", "&") # Must be last
- return s
diff --git a/sys/lib/python/MimeWriter.py b/sys/lib/python/MimeWriter.py
deleted file mode 100644
index 58c0a0bcf..000000000
--- a/sys/lib/python/MimeWriter.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""Generic MIME writer.
-
-This module defines the class MimeWriter. The MimeWriter class implements
-a basic formatter for creating MIME multi-part files. It doesn't seek around
-the output file nor does it use large amounts of buffer space. You must write
-the parts out in the order that they should occur in the final file.
-MimeWriter does buffer the headers you add, allowing you to rearrange their
-order.
-
-"""
-
-
-import mimetools
-
-__all__ = ["MimeWriter"]
-
-class MimeWriter:
-
- """Generic MIME writer.
-
- Methods:
-
- __init__()
- addheader()
- flushheaders()
- startbody()
- startmultipartbody()
- nextpart()
- lastpart()
-
- A MIME writer is much more primitive than a MIME parser. It
- doesn't seek around on the output file, and it doesn't use large
- amounts of buffer space, so you have to write the parts in the
- order they should occur on the output file. It does buffer the
- headers you add, allowing you to rearrange their order.
-
- General usage is:
-
- f = <open the output file>
- w = MimeWriter(f)
- ...call w.addheader(key, value) 0 or more times...
-
- followed by either:
-
- f = w.startbody(content_type)
- ...call f.write(data) for body data...
-
- or:
-
- w.startmultipartbody(subtype)
- for each part:
- subwriter = w.nextpart()
- ...use the subwriter's methods to create the subpart...
- w.lastpart()
-
- The subwriter is another MimeWriter instance, and should be
- treated in the same way as the toplevel MimeWriter. This way,
- writing recursive body parts is easy.
-
- Warning: don't forget to call lastpart()!
-
- XXX There should be more state so calls made in the wrong order
- are detected.
-
- Some special cases:
-
- - startbody() just returns the file passed to the constructor;
- but don't use this knowledge, as it may be changed.
-
- - startmultipartbody() actually returns a file as well;
- this can be used to write the initial 'if you can read this your
- mailer is not MIME-aware' message.
-
- - If you call flushheaders(), the headers accumulated so far are
- written out (and forgotten); this is useful if you don't need a
- body part at all, e.g. for a subpart of type message/rfc822
- that's (mis)used to store some header-like information.
-
- - Passing a keyword argument 'prefix=<flag>' to addheader(),
- start*body() affects where the header is inserted; 0 means
- append at the end, 1 means insert at the start; default is
- append for addheader(), but insert for start*body(), which use
- it to determine where the Content-Type header goes.
-
- """
-
- def __init__(self, fp):
- self._fp = fp
- self._headers = []
-
- def addheader(self, key, value, prefix=0):
- """Add a header line to the MIME message.
-
- The key is the name of the header, where the value obviously provides
- the value of the header. The optional argument prefix determines
- where the header is inserted; 0 means append at the end, 1 means
- insert at the start. The default is to append.
-
- """
- lines = value.split("\n")
- while lines and not lines[-1]: del lines[-1]
- while lines and not lines[0]: del lines[0]
- for i in range(1, len(lines)):
- lines[i] = " " + lines[i].strip()
- value = "\n".join(lines) + "\n"
- line = key + ": " + value
- if prefix:
- self._headers.insert(0, line)
- else:
- self._headers.append(line)
-
- def flushheaders(self):
- """Writes out and forgets all headers accumulated so far.
-
- This is useful if you don't need a body part at all; for example,
- for a subpart of type message/rfc822 that's (mis)used to store some
- header-like information.
-
- """
- self._fp.writelines(self._headers)
- self._headers = []
-
- def startbody(self, ctype, plist=[], prefix=1):
- """Returns a file-like object for writing the body of the message.
-
- The content-type is set to the provided ctype, and the optional
- parameter, plist, provides additional parameters for the
- content-type declaration. The optional argument prefix determines
- where the header is inserted; 0 means append at the end, 1 means
- insert at the start. The default is to insert at the start.
-
- """
- for name, value in plist:
- ctype = ctype + ';\n %s=\"%s\"' % (name, value)
- self.addheader("Content-Type", ctype, prefix=prefix)
- self.flushheaders()
- self._fp.write("\n")
- return self._fp
-
- def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
- """Returns a file-like object for writing the body of the message.
-
- Additionally, this method initializes the multi-part code, where the
- subtype parameter provides the multipart subtype, the boundary
- parameter may provide a user-defined boundary specification, and the
- plist parameter provides optional parameters for the subtype. The
- optional argument, prefix, determines where the header is inserted;
- 0 means append at the end, 1 means insert at the start. The default
- is to insert at the start. Subparts should be created using the
- nextpart() method.
-
- """
- self._boundary = boundary or mimetools.choose_boundary()
- return self.startbody("multipart/" + subtype,
- [("boundary", self._boundary)] + plist,
- prefix=prefix)
-
- def nextpart(self):
- """Returns a new instance of MimeWriter which represents an
- individual part in a multipart message.
-
- This may be used to write the part as well as used for creating
- recursively complex multipart messages. The message must first be
- initialized with the startmultipartbody() method before using the
- nextpart() method.
-
- """
- self._fp.write("\n--" + self._boundary + "\n")
- return self.__class__(self._fp)
-
- def lastpart(self):
- """This is used to designate the last part of a multipart message.
-
- It should always be used when writing multipart messages.
-
- """
- self._fp.write("\n--" + self._boundary + "--\n")
-
-
-if __name__ == '__main__':
- import test.test_MimeWriter
diff --git a/sys/lib/python/Queue.py b/sys/lib/python/Queue.py
deleted file mode 100644
index 79b0abf14..000000000
--- a/sys/lib/python/Queue.py
+++ /dev/null
@@ -1,215 +0,0 @@
-"""A multi-producer, multi-consumer queue."""
-
-from time import time as _time
-from collections import deque
-
-__all__ = ['Empty', 'Full', 'Queue']
-
-class Empty(Exception):
- "Exception raised by Queue.get(block=0)/get_nowait()."
- pass
-
-class Full(Exception):
- "Exception raised by Queue.put(block=0)/put_nowait()."
- pass
-
-class Queue:
- """Create a queue object with a given maximum size.
-
- If maxsize is <= 0, the queue size is infinite.
- """
- def __init__(self, maxsize=0):
- try:
- import threading
- except ImportError:
- import dummy_threading as threading
- self._init(maxsize)
- # mutex must be held whenever the queue is mutating. All methods
- # that acquire mutex must release it before returning. mutex
- # is shared between the three conditions, so acquiring and
- # releasing the conditions also acquires and releases mutex.
- self.mutex = threading.Lock()
- # Notify not_empty whenever an item is added to the queue; a
- # thread waiting to get is notified then.
- self.not_empty = threading.Condition(self.mutex)
- # Notify not_full whenever an item is removed from the queue;
- # a thread waiting to put is notified then.
- self.not_full = threading.Condition(self.mutex)
- # Notify all_tasks_done whenever the number of unfinished tasks
- # drops to zero; thread waiting to join() is notified to resume
- self.all_tasks_done = threading.Condition(self.mutex)
- self.unfinished_tasks = 0
-
- def task_done(self):
- """Indicate that a formerly enqueued task is complete.
-
- Used by Queue consumer threads. For each get() used to fetch a task,
- a subsequent call to task_done() tells the queue that the processing
- on the task is complete.
-
- If a join() is currently blocking, it will resume when all items
- have been processed (meaning that a task_done() call was received
- for every item that had been put() into the queue).
-
- Raises a ValueError if called more times than there were items
- placed in the queue.
- """
- self.all_tasks_done.acquire()
- try:
- unfinished = self.unfinished_tasks - 1
- if unfinished <= 0:
- if unfinished < 0:
- raise ValueError('task_done() called too many times')
- self.all_tasks_done.notifyAll()
- self.unfinished_tasks = unfinished
- finally:
- self.all_tasks_done.release()
-
- def join(self):
- """Blocks until all items in the Queue have been gotten and processed.
-
- The count of unfinished tasks goes up whenever an item is added to the
- queue. The count goes down whenever a consumer thread calls task_done()
- to indicate the item was retrieved and all work on it is complete.
-
- When the count of unfinished tasks drops to zero, join() unblocks.
- """
- self.all_tasks_done.acquire()
- try:
- while self.unfinished_tasks:
- self.all_tasks_done.wait()
- finally:
- self.all_tasks_done.release()
-
- def qsize(self):
- """Return the approximate size of the queue (not reliable!)."""
- self.mutex.acquire()
- n = self._qsize()
- self.mutex.release()
- return n
-
- def empty(self):
- """Return True if the queue is empty, False otherwise (not reliable!)."""
- self.mutex.acquire()
- n = self._empty()
- self.mutex.release()
- return n
-
- def full(self):
- """Return True if the queue is full, False otherwise (not reliable!)."""
- self.mutex.acquire()
- n = self._full()
- self.mutex.release()
- return n
-
- def put(self, item, block=True, timeout=None):
- """Put an item into the queue.
-
- If optional args 'block' is true and 'timeout' is None (the default),
- block if necessary until a free slot is available. If 'timeout' is
- a positive number, it blocks at most 'timeout' seconds and raises
- the Full exception if no free slot was available within that time.
- Otherwise ('block' is false), put an item on the queue if a free slot
- is immediately available, else raise the Full exception ('timeout'
- is ignored in that case).
- """
- self.not_full.acquire()
- try:
- if not block:
- if self._full():
- raise Full
- elif timeout is None:
- while self._full():
- self.not_full.wait()
- else:
- if timeout < 0:
- raise ValueError("'timeout' must be a positive number")
- endtime = _time() + timeout
- while self._full():
- remaining = endtime - _time()
- if remaining <= 0.0:
- raise Full
- self.not_full.wait(remaining)
- self._put(item)
- self.unfinished_tasks += 1
- self.not_empty.notify()
- finally:
- self.not_full.release()
-
- def put_nowait(self, item):
- """Put an item into the queue without blocking.
-
- Only enqueue the item if a free slot is immediately available.
- Otherwise raise the Full exception.
- """
- return self.put(item, False)
-
- def get(self, block=True, timeout=None):
- """Remove and return an item from the queue.
-
- If optional args 'block' is true and 'timeout' is None (the default),
- block if necessary until an item is available. If 'timeout' is
- a positive number, it blocks at most 'timeout' seconds and raises
- the Empty exception if no item was available within that time.
- Otherwise ('block' is false), return an item if one is immediately
- available, else raise the Empty exception ('timeout' is ignored
- in that case).
- """
- self.not_empty.acquire()
- try:
- if not block:
- if self._empty():
- raise Empty
- elif timeout is None:
- while self._empty():
- self.not_empty.wait()
- else:
- if timeout < 0:
- raise ValueError("'timeout' must be a positive number")
- endtime = _time() + timeout
- while self._empty():
- remaining = endtime - _time()
- if remaining <= 0.0:
- raise Empty
- self.not_empty.wait(remaining)
- item = self._get()
- self.not_full.notify()
- return item
- finally:
- self.not_empty.release()
-
- def get_nowait(self):
- """Remove and return an item from the queue without blocking.
-
- Only get an item if one is immediately available. Otherwise
- raise the Empty exception.
- """
- return self.get(False)
-
- # Override these methods to implement other queue organizations
- # (e.g. stack or priority queue).
- # These will only be called with appropriate locks held
-
- # Initialize the queue representation
- def _init(self, maxsize):
- self.maxsize = maxsize
- self.queue = deque()
-
- def _qsize(self):
- return len(self.queue)
-
- # Check whether the queue is empty
- def _empty(self):
- return not self.queue
-
- # Check whether the queue is full
- def _full(self):
- return self.maxsize > 0 and len(self.queue) == self.maxsize
-
- # Put a new item in the queue
- def _put(self, item):
- self.queue.append(item)
-
- # Get an item from the queue
- def _get(self):
- return self.queue.popleft()
diff --git a/sys/lib/python/SimpleHTTPServer.py b/sys/lib/python/SimpleHTTPServer.py
deleted file mode 100644
index 86c669ea4..000000000
--- a/sys/lib/python/SimpleHTTPServer.py
+++ /dev/null
@@ -1,218 +0,0 @@
-"""Simple HTTP Server.
-
-This module builds on BaseHTTPServer by implementing the standard GET
-and HEAD requests in a fairly straightforward manner.
-
-"""
-
-
-__version__ = "0.6"
-
-__all__ = ["SimpleHTTPRequestHandler"]
-
-import os
-import posixpath
-import BaseHTTPServer
-import urllib
-import urlparse
-import cgi
-import shutil
-import mimetypes
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-
-class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
-
- """Simple HTTP request handler with GET and HEAD commands.
-
- This serves files from the current directory and any of its
- subdirectories. The MIME type for files is determined by
- calling the .guess_type() method.
-
- The GET and HEAD requests are identical except that the HEAD
- request omits the actual contents of the file.
-
- """
-
- server_version = "SimpleHTTP/" + __version__
-
- def do_GET(self):
- """Serve a GET request."""
- f = self.send_head()
- if f:
- self.copyfile(f, self.wfile)
- f.close()
-
- def do_HEAD(self):
- """Serve a HEAD request."""
- f = self.send_head()
- if f:
- f.close()
-
- def send_head(self):
- """Common code for GET and HEAD commands.
-
- This sends the response code and MIME headers.
-
- Return value is either a file object (which has to be copied
- to the outputfile by the caller unless the command was HEAD,
- and must be closed by the caller under all circumstances), or
- None, in which case the caller has nothing further to do.
-
- """
- path = self.translate_path(self.path)
- f = None
- if os.path.isdir(path):
- if not self.path.endswith('/'):
- # redirect browser - doing basically what apache does
- self.send_response(301)
- self.send_header("Location", self.path + "/")
- self.end_headers()
- return None
- for index in "index.html", "index.htm":
- index = os.path.join(path, index)
- if os.path.exists(index):
- path = index
- break
- else:
- return self.list_directory(path)
- ctype = self.guess_type(path)
- if ctype.startswith('text/'):
- mode = 'r'
- else:
- mode = 'rb'
- try:
- f = open(path, mode)
- except IOError:
- self.send_error(404, "File not found")
- return None
- self.send_response(200)
- self.send_header("Content-type", ctype)
- fs = os.fstat(f.fileno())
- self.send_header("Content-Length", str(fs[6]))
- self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
- self.end_headers()
- return f
-
- def list_directory(self, path):
- """Helper to produce a directory listing (absent index.html).
-
- Return value is either a file object, or None (indicating an
- error). In either case, the headers are sent, making the
- interface the same as for send_head().
-
- """
- try:
- list = os.listdir(path)
- except os.error:
- self.send_error(404, "No permission to list directory")
- return None
- list.sort(key=lambda a: a.lower())
- f = StringIO()
- displaypath = cgi.escape(urllib.unquote(self.path))
- f.write("<title>Directory listing for %s</title>\n" % displaypath)
- f.write("<h2>Directory listing for %s</h2>\n" % displaypath)
- f.write("<hr>\n<ul>\n")
- for name in list:
- fullname = os.path.join(path, name)
- displayname = linkname = name
- # Append / for directories or @ for symbolic links
- if os.path.isdir(fullname):
- displayname = name + "/"
- linkname = name + "/"
- if os.path.islink(fullname):
- displayname = name + "@"
- # Note: a link to a directory displays with @ and links with /
- f.write('<li><a href="%s">%s</a>\n'
- % (urllib.quote(linkname), cgi.escape(displayname)))
- f.write("</ul>\n<hr>\n")
- length = f.tell()
- f.seek(0)
- self.send_response(200)
- self.send_header("Content-type", "text/html")
- self.send_header("Content-Length", str(length))
- self.end_headers()
- return f
-
- def translate_path(self, path):
- """Translate a /-separated PATH to the local filename syntax.
-
- Components that mean special things to the local file system
- (e.g. drive or directory names) are ignored. (XXX They should
- probably be diagnosed.)
-
- """
- # abandon query parameters
- path = urlparse.urlparse(path)[2]
- path = posixpath.normpath(urllib.unquote(path))
- words = path.split('/')
- words = filter(None, words)
- path = os.getcwd()
- for word in words:
- drive, word = os.path.splitdrive(word)
- head, word = os.path.split(word)
- if word in (os.curdir, os.pardir): continue
- path = os.path.join(path, word)
- return path
-
- def copyfile(self, source, outputfile):
- """Copy all data between two file objects.
-
- The SOURCE argument is a file object open for reading
- (or anything with a read() method) and the DESTINATION
- argument is a file object open for writing (or
- anything with a write() method).
-
- The only reason for overriding this would be to change
- the block size or perhaps to replace newlines by CRLF
- -- note however that this the default server uses this
- to copy binary data as well.
-
- """
- shutil.copyfileobj(source, outputfile)
-
- def guess_type(self, path):
- """Guess the type of a file.
-
- Argument is a PATH (a filename).
-
- Return value is a string of the form type/subtype,
- usable for a MIME Content-type header.
-
- The default implementation looks the file's extension
- up in the table self.extensions_map, using application/octet-stream
- as a default; however it would be permissible (if
- slow) to look inside the data to make a better guess.
-
- """
-
- base, ext = posixpath.splitext(path)
- if ext in self.extensions_map:
- return self.extensions_map[ext]
- ext = ext.lower()
- if ext in self.extensions_map:
- return self.extensions_map[ext]
- else:
- return self.extensions_map['']
-
- if not mimetypes.inited:
- mimetypes.init() # try to read system mime.types
- extensions_map = mimetypes.types_map.copy()
- extensions_map.update({
- '': 'application/octet-stream', # Default
- '.py': 'text/plain',
- '.c': 'text/plain',
- '.h': 'text/plain',
- })
-
-
-def test(HandlerClass = SimpleHTTPRequestHandler,
- ServerClass = BaseHTTPServer.HTTPServer):
- BaseHTTPServer.test(HandlerClass, ServerClass)
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/SimpleXMLRPCServer.py b/sys/lib/python/SimpleXMLRPCServer.py
deleted file mode 100644
index 7a9f26faa..000000000
--- a/sys/lib/python/SimpleXMLRPCServer.py
+++ /dev/null
@@ -1,595 +0,0 @@
-"""Simple XML-RPC Server.
-
-This module can be used to create simple XML-RPC servers
-by creating a server and either installing functions, a
-class instance, or by extending the SimpleXMLRPCServer
-class.
-
-It can also be used to handle XML-RPC requests in a CGI
-environment using CGIXMLRPCRequestHandler.
-
-A list of possible usage patterns follows:
-
-1. Install functions:
-
-server = SimpleXMLRPCServer(("localhost", 8000))
-server.register_function(pow)
-server.register_function(lambda x,y: x+y, 'add')
-server.serve_forever()
-
-2. Install an instance:
-
-class MyFuncs:
- def __init__(self):
- # make all of the string functions available through
- # string.func_name
- import string
- self.string = string
- def _listMethods(self):
- # implement this method so that system.listMethods
- # knows to advertise the strings methods
- return list_public_methods(self) + \
- ['string.' + method for method in list_public_methods(self.string)]
- def pow(self, x, y): return pow(x, y)
- def add(self, x, y) : return x + y
-
-server = SimpleXMLRPCServer(("localhost", 8000))
-server.register_introspection_functions()
-server.register_instance(MyFuncs())
-server.serve_forever()
-
-3. Install an instance with custom dispatch method:
-
-class Math:
- def _listMethods(self):
- # this method must be present for system.listMethods
- # to work
- return ['add', 'pow']
- def _methodHelp(self, method):
- # this method must be present for system.methodHelp
- # to work
- if method == 'add':
- return "add(2,3) => 5"
- elif method == 'pow':
- return "pow(x, y[, z]) => number"
- else:
- # By convention, return empty
- # string if no help is available
- return ""
- def _dispatch(self, method, params):
- if method == 'pow':
- return pow(*params)
- elif method == 'add':
- return params[0] + params[1]
- else:
- raise 'bad method'
-
-server = SimpleXMLRPCServer(("localhost", 8000))
-server.register_introspection_functions()
-server.register_instance(Math())
-server.serve_forever()
-
-4. Subclass SimpleXMLRPCServer:
-
-class MathServer(SimpleXMLRPCServer):
- def _dispatch(self, method, params):
- try:
- # We are forcing the 'export_' prefix on methods that are
- # callable through XML-RPC to prevent potential security
- # problems
- func = getattr(self, 'export_' + method)
- except AttributeError:
- raise Exception('method "%s" is not supported' % method)
- else:
- return func(*params)
-
- def export_add(self, x, y):
- return x + y
-
-server = MathServer(("localhost", 8000))
-server.serve_forever()
-
-5. CGI script:
-
-server = CGIXMLRPCRequestHandler()
-server.register_function(pow)
-server.handle_request()
-"""
-
-# Written by Brian Quinlan (brian@sweetapp.com).
-# Based on code written by Fredrik Lundh.
-
-import xmlrpclib
-from xmlrpclib import Fault
-import SocketServer
-import BaseHTTPServer
-import sys
-import os
-try:
- import fcntl
-except ImportError:
- fcntl = None
-
-def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
- """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d
-
- Resolves a dotted attribute name to an object. Raises
- an AttributeError if any attribute in the chain starts with a '_'.
-
- If the optional allow_dotted_names argument is false, dots are not
- supported and this function operates similar to getattr(obj, attr).
- """
-
- if allow_dotted_names:
- attrs = attr.split('.')
- else:
- attrs = [attr]
-
- for i in attrs:
- if i.startswith('_'):
- raise AttributeError(
- 'attempt to access private attribute "%s"' % i
- )
- else:
- obj = getattr(obj,i)
- return obj
-
-def list_public_methods(obj):
- """Returns a list of attribute strings, found in the specified
- object, which represent callable attributes"""
-
- return [member for member in dir(obj)
- if not member.startswith('_') and
- callable(getattr(obj, member))]
-
-def remove_duplicates(lst):
- """remove_duplicates([2,2,2,1,3,3]) => [3,1,2]
-
- Returns a copy of a list without duplicates. Every list
- item must be hashable and the order of the items in the
- resulting list is not defined.
- """
- u = {}
- for x in lst:
- u[x] = 1
-
- return u.keys()
-
-class SimpleXMLRPCDispatcher:
- """Mix-in class that dispatches XML-RPC requests.
-
- This class is used to register XML-RPC method handlers
- and then to dispatch them. There should never be any
- reason to instantiate this class directly.
- """
-
- def __init__(self, allow_none, encoding):
- self.funcs = {}
- self.instance = None
- self.allow_none = allow_none
- self.encoding = encoding
-
- def register_instance(self, instance, allow_dotted_names=False):
- """Registers an instance to respond to XML-RPC requests.
-
- Only one instance can be installed at a time.
-
- If the registered instance has a _dispatch method then that
- method will be called with the name of the XML-RPC method and
- its parameters as a tuple
- e.g. instance._dispatch('add',(2,3))
-
- If the registered instance does not have a _dispatch method
- then the instance will be searched to find a matching method
- and, if found, will be called. Methods beginning with an '_'
- are considered private and will not be called by
- SimpleXMLRPCServer.
-
- If a registered function matches a XML-RPC request, then it
- will be called instead of the registered instance.
-
- If the optional allow_dotted_names argument is true and the
- instance does not have a _dispatch method, method names
- containing dots are supported and resolved, as long as none of
- the name segments start with an '_'.
-
- *** SECURITY WARNING: ***
-
- Enabling the allow_dotted_names options allows intruders
- to access your module's global variables and may allow
- intruders to execute arbitrary code on your machine. Only
- use this option on a secure, closed network.
-
- """
-
- self.instance = instance
- self.allow_dotted_names = allow_dotted_names
-
- def register_function(self, function, name = None):
- """Registers a function to respond to XML-RPC requests.
-
- The optional name argument can be used to set a Unicode name
- for the function.
- """
-
- if name is None:
- name = function.__name__
- self.funcs[name] = function
-
- def register_introspection_functions(self):
- """Registers the XML-RPC introspection methods in the system
- namespace.
-
- see http://xmlrpc.usefulinc.com/doc/reserved.html
- """
-
- self.funcs.update({'system.listMethods' : self.system_listMethods,
- 'system.methodSignature' : self.system_methodSignature,
- 'system.methodHelp' : self.system_methodHelp})
-
- def register_multicall_functions(self):
- """Registers the XML-RPC multicall method in the system
- namespace.
-
- see http://www.xmlrpc.com/discuss/msgReader$1208"""
-
- self.funcs.update({'system.multicall' : self.system_multicall})
-
- def _marshaled_dispatch(self, data, dispatch_method = None):
- """Dispatches an XML-RPC method from marshalled (XML) data.
-
- XML-RPC methods are dispatched from the marshalled (XML) data
- using the _dispatch method and the result is returned as
- marshalled data. For backwards compatibility, a dispatch
- function can be provided as an argument (see comment in
- SimpleXMLRPCRequestHandler.do_POST) but overriding the
- existing method through subclassing is the prefered means
- of changing method dispatch behavior.
- """
-
- try:
- params, method = xmlrpclib.loads(data)
-
- # generate response
- if dispatch_method is not None:
- response = dispatch_method(method, params)
- else:
- response = self._dispatch(method, params)
- # wrap response in a singleton tuple
- response = (response,)
- response = xmlrpclib.dumps(response, methodresponse=1,
- allow_none=self.allow_none, encoding=self.encoding)
- except Fault, fault:
- response = xmlrpclib.dumps(fault, allow_none=self.allow_none,
- encoding=self.encoding)
- except:
- # report exception back to server
- response = xmlrpclib.dumps(
- xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)),
- encoding=self.encoding, allow_none=self.allow_none,
- )
-
- return response
-
- def system_listMethods(self):
- """system.listMethods() => ['add', 'subtract', 'multiple']
-
- Returns a list of the methods supported by the server."""
-
- methods = self.funcs.keys()
- if self.instance is not None:
- # Instance can implement _listMethod to return a list of
- # methods
- if hasattr(self.instance, '_listMethods'):
- methods = remove_duplicates(
- methods + self.instance._listMethods()
- )
- # if the instance has a _dispatch method then we
- # don't have enough information to provide a list
- # of methods
- elif not hasattr(self.instance, '_dispatch'):
- methods = remove_duplicates(
- methods + list_public_methods(self.instance)
- )
- methods.sort()
- return methods
-
- def system_methodSignature(self, method_name):
- """system.methodSignature('add') => [double, int, int]
-
- Returns a list describing the signature of the method. In the
- above example, the add method takes two integers as arguments
- and returns a double result.
-
- This server does NOT support system.methodSignature."""
-
- # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
-
- return 'signatures not supported'
-
- def system_methodHelp(self, method_name):
- """system.methodHelp('add') => "Adds two integers together"
-
- Returns a string containing documentation for the specified method."""
-
- method = None
- if self.funcs.has_key(method_name):
- method = self.funcs[method_name]
- elif self.instance is not None:
- # Instance can implement _methodHelp to return help for a method
- if hasattr(self.instance, '_methodHelp'):
- return self.instance._methodHelp(method_name)
- # if the instance has a _dispatch method then we
- # don't have enough information to provide help
- elif not hasattr(self.instance, '_dispatch'):
- try:
- method = resolve_dotted_attribute(
- self.instance,
- method_name,
- self.allow_dotted_names
- )
- except AttributeError:
- pass
-
- # Note that we aren't checking that the method actually
- # be a callable object of some kind
- if method is None:
- return ""
- else:
- import pydoc
- return pydoc.getdoc(method)
-
- def system_multicall(self, call_list):
- """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
-[[4], ...]
-
- Allows the caller to package multiple XML-RPC calls into a single
- request.
-
- See http://www.xmlrpc.com/discuss/msgReader$1208
- """
-
- results = []
- for call in call_list:
- method_name = call['methodName']
- params = call['params']
-
- try:
- # XXX A marshalling error in any response will fail the entire
- # multicall. If someone cares they should fix this.
- results.append([self._dispatch(method_name, params)])
- except Fault, fault:
- results.append(
- {'faultCode' : fault.faultCode,
- 'faultString' : fault.faultString}
- )
- except:
- results.append(
- {'faultCode' : 1,
- 'faultString' : "%s:%s" % (sys.exc_type, sys.exc_value)}
- )
- return results
-
- def _dispatch(self, method, params):
- """Dispatches the XML-RPC method.
-
- XML-RPC calls are forwarded to a registered function that
- matches the called XML-RPC method name. If no such function
- exists then the call is forwarded to the registered instance,
- if available.
-
- If the registered instance has a _dispatch method then that
- method will be called with the name of the XML-RPC method and
- its parameters as a tuple
- e.g. instance._dispatch('add',(2,3))
-
- If the registered instance does not have a _dispatch method
- then the instance will be searched to find a matching method
- and, if found, will be called.
-
- Methods beginning with an '_' are considered private and will
- not be called.
- """
-
- func = None
- try:
- # check to see if a matching function has been registered
- func = self.funcs[method]
- except KeyError:
- if self.instance is not None:
- # check for a _dispatch method
- if hasattr(self.instance, '_dispatch'):
- return self.instance._dispatch(method, params)
- else:
- # call instance method directly
- try:
- func = resolve_dotted_attribute(
- self.instance,
- method,
- self.allow_dotted_names
- )
- except AttributeError:
- pass
-
- if func is not None:
- return func(*params)
- else:
- raise Exception('method "%s" is not supported' % method)
-
-class SimpleXMLRPCRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
- """Simple XML-RPC request handler class.
-
- Handles all HTTP POST requests and attempts to decode them as
- XML-RPC requests.
- """
-
- # Class attribute listing the accessible path components;
- # paths not on this list will result in a 404 error.
- rpc_paths = ('/', '/RPC2')
-
- def is_rpc_path_valid(self):
- if self.rpc_paths:
- return self.path in self.rpc_paths
- else:
- # If .rpc_paths is empty, just assume all paths are legal
- return True
-
- def do_POST(self):
- """Handles the HTTP POST request.
-
- Attempts to interpret all HTTP POST requests as XML-RPC calls,
- which are forwarded to the server's _dispatch method for handling.
- """
-
- # Check that the path is legal
- if not self.is_rpc_path_valid():
- self.report_404()
- return
-
- try:
- # Get arguments by reading body of request.
- # We read this in chunks to avoid straining
- # socket.read(); around the 10 or 15Mb mark, some platforms
- # begin to have problems (bug #792570).
- max_chunk_size = 10*1024*1024
- size_remaining = int(self.headers["content-length"])
- L = []
- while size_remaining:
- chunk_size = min(size_remaining, max_chunk_size)
- L.append(self.rfile.read(chunk_size))
- size_remaining -= len(L[-1])
- data = ''.join(L)
-
- # In previous versions of SimpleXMLRPCServer, _dispatch
- # could be overridden in this class, instead of in
- # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
- # check to see if a subclass implements _dispatch and dispatch
- # using that method if present.
- response = self.server._marshaled_dispatch(
- data, getattr(self, '_dispatch', None)
- )
- except: # This should only happen if the module is buggy
- # internal error, report as HTTP server error
- self.send_response(500)
- self.end_headers()
- else:
- # got a valid XML RPC response
- self.send_response(200)
- self.send_header("Content-type", "text/xml")
- self.send_header("Content-length", str(len(response)))
- self.end_headers()
- self.wfile.write(response)
-
- # shut down the connection
- self.wfile.flush()
- self.connection.shutdown(1)
-
- def report_404 (self):
- # Report a 404 error
- self.send_response(404)
- response = 'No such page'
- self.send_header("Content-type", "text/plain")
- self.send_header("Content-length", str(len(response)))
- self.end_headers()
- self.wfile.write(response)
- # shut down the connection
- self.wfile.flush()
- self.connection.shutdown(1)
-
- def log_request(self, code='-', size='-'):
- """Selectively log an accepted request."""
-
- if self.server.logRequests:
- BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)
-
-class SimpleXMLRPCServer(SocketServer.TCPServer,
- SimpleXMLRPCDispatcher):
- """Simple XML-RPC server.
-
- Simple XML-RPC server that allows functions and a single instance
- to be installed to handle requests. The default implementation
- attempts to dispatch XML-RPC calls to the functions or instance
- installed in the server. Override the _dispatch method inhereted
- from SimpleXMLRPCDispatcher to change this behavior.
- """
-
- allow_reuse_address = True
-
- def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
- logRequests=True, allow_none=False, encoding=None):
- self.logRequests = logRequests
-
- SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
- SocketServer.TCPServer.__init__(self, addr, requestHandler)
-
- # [Bug #1222790] If possible, set close-on-exec flag; if a
- # method spawns a subprocess, the subprocess shouldn't have
- # the listening socket open.
- if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
- flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
- flags |= fcntl.FD_CLOEXEC
- fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
-
-class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
- """Simple handler for XML-RPC data passed through CGI."""
-
- def __init__(self, allow_none=False, encoding=None):
- SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
-
- def handle_xmlrpc(self, request_text):
- """Handle a single XML-RPC request"""
-
- response = self._marshaled_dispatch(request_text)
-
- print 'Content-Type: text/xml'
- print 'Content-Length: %d' % len(response)
- print
- sys.stdout.write(response)
-
- def handle_get(self):
- """Handle a single HTTP GET request.
-
- Default implementation indicates an error because
- XML-RPC uses the POST method.
- """
-
- code = 400
- message, explain = \
- BaseHTTPServer.BaseHTTPRequestHandler.responses[code]
-
- response = BaseHTTPServer.DEFAULT_ERROR_MESSAGE % \
- {
- 'code' : code,
- 'message' : message,
- 'explain' : explain
- }
- print 'Status: %d %s' % (code, message)
- print 'Content-Type: text/html'
- print 'Content-Length: %d' % len(response)
- print
- sys.stdout.write(response)
-
- def handle_request(self, request_text = None):
- """Handle a single XML-RPC request passed through a CGI post method.
-
- If no XML data is given then it is read from stdin. The resulting
- XML-RPC response is printed to stdout along with the correct HTTP
- headers.
- """
-
- if request_text is None and \
- os.environ.get('REQUEST_METHOD', None) == 'GET':
- self.handle_get()
- else:
- # POST data is normally available through stdin
- if request_text is None:
- request_text = sys.stdin.read()
-
- self.handle_xmlrpc(request_text)
-
-if __name__ == '__main__':
- print 'Running XML-RPC server on port 8000'
- server = SimpleXMLRPCServer(("localhost", 8000))
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.serve_forever()
diff --git a/sys/lib/python/SocketServer.py b/sys/lib/python/SocketServer.py
deleted file mode 100644
index 7d9b9a536..000000000
--- a/sys/lib/python/SocketServer.py
+++ /dev/null
@@ -1,588 +0,0 @@
-"""Generic socket server classes.
-
-This module tries to capture the various aspects of defining a server:
-
-For socket-based servers:
-
-- address family:
- - AF_INET{,6}: IP (Internet Protocol) sockets (default)
- - AF_UNIX: Unix domain sockets
- - others, e.g. AF_DECNET are conceivable (see <socket.h>
-- socket type:
- - SOCK_STREAM (reliable stream, e.g. TCP)
- - SOCK_DGRAM (datagrams, e.g. UDP)
-
-For request-based servers (including socket-based):
-
-- client address verification before further looking at the request
- (This is actually a hook for any processing that needs to look
- at the request before anything else, e.g. logging)
-- how to handle multiple requests:
- - synchronous (one request is handled at a time)
- - forking (each request is handled by a new process)
- - threading (each request is handled by a new thread)
-
-The classes in this module favor the server type that is simplest to
-write: a synchronous TCP/IP server. This is bad class design, but
-save some typing. (There's also the issue that a deep class hierarchy
-slows down method lookups.)
-
-There are five classes in an inheritance diagram, four of which represent
-synchronous servers of four types:
-
- +------------+
- | BaseServer |
- +------------+
- |
- v
- +-----------+ +------------------+
- | TCPServer |------->| UnixStreamServer |
- +-----------+ +------------------+
- |
- v
- +-----------+ +--------------------+
- | UDPServer |------->| UnixDatagramServer |
- +-----------+ +--------------------+
-
-Note that UnixDatagramServer derives from UDPServer, not from
-UnixStreamServer -- the only difference between an IP and a Unix
-stream server is the address family, which is simply repeated in both
-unix server classes.
-
-Forking and threading versions of each type of server can be created
-using the ForkingMixIn and ThreadingMixIn mix-in classes. For
-instance, a threading UDP server class is created as follows:
-
- class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
-
-The Mix-in class must come first, since it overrides a method defined
-in UDPServer! Setting the various member variables also changes
-the behavior of the underlying server mechanism.
-
-To implement a service, you must derive a class from
-BaseRequestHandler and redefine its handle() method. You can then run
-various versions of the service by combining one of the server classes
-with your request handler class.
-
-The request handler class must be different for datagram or stream
-services. This can be hidden by using the request handler
-subclasses StreamRequestHandler or DatagramRequestHandler.
-
-Of course, you still have to use your head!
-
-For instance, it makes no sense to use a forking server if the service
-contains state in memory that can be modified by requests (since the
-modifications in the child process would never reach the initial state
-kept in the parent process and passed to each child). In this case,
-you can use a threading server, but you will probably have to use
-locks to avoid two requests that come in nearly simultaneous to apply
-conflicting changes to the server state.
-
-On the other hand, if you are building e.g. an HTTP server, where all
-data is stored externally (e.g. in the file system), a synchronous
-class will essentially render the service "deaf" while one request is
-being handled -- which may be for a very long time if a client is slow
-to reqd all the data it has requested. Here a threading or forking
-server is appropriate.
-
-In some cases, it may be appropriate to process part of a request
-synchronously, but to finish processing in a forked child depending on
-the request data. This can be implemented by using a synchronous
-server and doing an explicit fork in the request handler class
-handle() method.
-
-Another approach to handling multiple simultaneous requests in an
-environment that supports neither threads nor fork (or where these are
-too expensive or inappropriate for the service) is to maintain an
-explicit table of partially finished requests and to use select() to
-decide which request to work on next (or whether to handle a new
-incoming request). This is particularly important for stream services
-where each client can potentially be connected for a long time (if
-threads or subprocesses cannot be used).
-
-Future work:
-- Standard classes for Sun RPC (which uses either UDP or TCP)
-- Standard mix-in classes to implement various authentication
- and encryption schemes
-- Standard framework for select-based multiplexing
-
-XXX Open problems:
-- What to do with out-of-band data?
-
-BaseServer:
-- split generic "request" functionality out into BaseServer class.
- Copyright (C) 2000 Luke Kenneth Casson Leighton <lkcl@samba.org>
-
- example: read entries from a SQL database (requires overriding
- get_request() to return a table entry from the database).
- entry is processed by a RequestHandlerClass.
-
-"""
-
-# Author of the BaseServer patch: Luke Kenneth Casson Leighton
-
-# XXX Warning!
-# There is a test suite for this module, but it cannot be run by the
-# standard regression test.
-# To run it manually, run Lib/test/test_socketserver.py.
-
-__version__ = "0.4"
-
-
-import socket
-import sys
-import os
-
-__all__ = ["TCPServer","UDPServer","ForkingUDPServer","ForkingTCPServer",
- "ThreadingUDPServer","ThreadingTCPServer","BaseRequestHandler",
- "StreamRequestHandler","DatagramRequestHandler",
- "ThreadingMixIn", "ForkingMixIn"]
-if hasattr(socket, "AF_UNIX"):
- __all__.extend(["UnixStreamServer","UnixDatagramServer",
- "ThreadingUnixStreamServer",
- "ThreadingUnixDatagramServer"])
-
-class BaseServer:
-
- """Base class for server classes.
-
- Methods for the caller:
-
- - __init__(server_address, RequestHandlerClass)
- - serve_forever()
- - handle_request() # if you do not use serve_forever()
- - fileno() -> int # for select()
-
- Methods that may be overridden:
-
- - server_bind()
- - server_activate()
- - get_request() -> request, client_address
- - verify_request(request, client_address)
- - server_close()
- - process_request(request, client_address)
- - close_request(request)
- - handle_error()
-
- Methods for derived classes:
-
- - finish_request(request, client_address)
-
- Class variables that may be overridden by derived classes or
- instances:
-
- - address_family
- - socket_type
- - allow_reuse_address
-
- Instance variables:
-
- - RequestHandlerClass
- - socket
-
- """
-
- def __init__(self, server_address, RequestHandlerClass):
- """Constructor. May be extended, do not override."""
- self.server_address = server_address
- self.RequestHandlerClass = RequestHandlerClass
-
- def server_activate(self):
- """Called by constructor to activate the server.
-
- May be overridden.
-
- """
- pass
-
- def serve_forever(self):
- """Handle one request at a time until doomsday."""
- while 1:
- self.handle_request()
-
- # The distinction between handling, getting, processing and
- # finishing a request is fairly arbitrary. Remember:
- #
- # - handle_request() is the top-level call. It calls
- # get_request(), verify_request() and process_request()
- # - get_request() is different for stream or datagram sockets
- # - process_request() is the place that may fork a new process
- # or create a new thread to finish the request
- # - finish_request() instantiates the request handler class;
- # this constructor will handle the request all by itself
-
- def handle_request(self):
- """Handle one request, possibly blocking."""
- try:
- request, client_address = self.get_request()
- except socket.error:
- return
- if self.verify_request(request, client_address):
- try:
- self.process_request(request, client_address)
- except:
- self.handle_error(request, client_address)
- self.close_request(request)
-
- def verify_request(self, request, client_address):
- """Verify the request. May be overridden.
-
- Return True if we should proceed with this request.
-
- """
- return True
-
- def process_request(self, request, client_address):
- """Call finish_request.
-
- Overridden by ForkingMixIn and ThreadingMixIn.
-
- """
- self.finish_request(request, client_address)
- self.close_request(request)
-
- def server_close(self):
- """Called to clean-up the server.
-
- May be overridden.
-
- """
- pass
-
- def finish_request(self, request, client_address):
- """Finish one request by instantiating RequestHandlerClass."""
- self.RequestHandlerClass(request, client_address, self)
-
- def close_request(self, request):
- """Called to clean up an individual request."""
- pass
-
- def handle_error(self, request, client_address):
- """Handle an error gracefully. May be overridden.
-
- The default is to print a traceback and continue.
-
- """
- print '-'*40
- print 'Exception happened during processing of request from',
- print client_address
- import traceback
- traceback.print_exc() # XXX But this goes to stderr!
- print '-'*40
-
-
-class TCPServer(BaseServer):
-
- """Base class for various socket-based server classes.
-
- Defaults to synchronous IP stream (i.e., TCP).
-
- Methods for the caller:
-
- - __init__(server_address, RequestHandlerClass)
- - serve_forever()
- - handle_request() # if you don't use serve_forever()
- - fileno() -> int # for select()
-
- Methods that may be overridden:
-
- - server_bind()
- - server_activate()
- - get_request() -> request, client_address
- - verify_request(request, client_address)
- - process_request(request, client_address)
- - close_request(request)
- - handle_error()
-
- Methods for derived classes:
-
- - finish_request(request, client_address)
-
- Class variables that may be overridden by derived classes or
- instances:
-
- - address_family
- - socket_type
- - request_queue_size (only for stream sockets)
- - allow_reuse_address
-
- Instance variables:
-
- - server_address
- - RequestHandlerClass
- - socket
-
- """
-
- address_family = socket.AF_INET
-
- socket_type = socket.SOCK_STREAM
-
- request_queue_size = 5
-
- allow_reuse_address = False
-
- def __init__(self, server_address, RequestHandlerClass):
- """Constructor. May be extended, do not override."""
- BaseServer.__init__(self, server_address, RequestHandlerClass)
- self.socket = socket.socket(self.address_family,
- self.socket_type)
- self.server_bind()
- self.server_activate()
-
- def server_bind(self):
- """Called by constructor to bind the socket.
-
- May be overridden.
-
- """
- if self.allow_reuse_address:
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- self.socket.bind(self.server_address)
- self.server_address = self.socket.getsockname()
-
- def server_activate(self):
- """Called by constructor to activate the server.
-
- May be overridden.
-
- """
- self.socket.listen(self.request_queue_size)
-
- def server_close(self):
- """Called to clean-up the server.
-
- May be overridden.
-
- """
- self.socket.close()
-
- def fileno(self):
- """Return socket file number.
-
- Interface required by select().
-
- """
- return self.socket.fileno()
-
- def get_request(self):
- """Get the request and client address from the socket.
-
- May be overridden.
-
- """
- return self.socket.accept()
-
- def close_request(self, request):
- """Called to clean up an individual request."""
- request.close()
-
-
-class UDPServer(TCPServer):
-
- """UDP server class."""
-
- allow_reuse_address = False
-
- socket_type = socket.SOCK_DGRAM
-
- max_packet_size = 8192
-
- def get_request(self):
- data, client_addr = self.socket.recvfrom(self.max_packet_size)
- return (data, self.socket), client_addr
-
- def server_activate(self):
- # No need to call listen() for UDP.
- pass
-
- def close_request(self, request):
- # No need to close anything.
- pass
-
-class ForkingMixIn:
-
- """Mix-in class to handle each request in a new process."""
-
- active_children = None
- max_children = 40
-
- def collect_children(self):
- """Internal routine to wait for died children."""
- while self.active_children:
- if len(self.active_children) < self.max_children:
- options = os.WNOHANG
- else:
- # If the maximum number of children are already
- # running, block while waiting for a child to exit
- options = 0
- try:
- pid, status = os.waitpid(0, options)
- except os.error:
- pid = None
- if not pid: break
- self.active_children.remove(pid)
-
- def process_request(self, request, client_address):
- """Fork a new subprocess to process the request."""
- self.collect_children()
- pid = os.fork()
- if pid:
- # Parent process
- if self.active_children is None:
- self.active_children = []
- self.active_children.append(pid)
- self.close_request(request)
- return
- else:
- # Child process.
- # This must never return, hence os._exit()!
- try:
- self.finish_request(request, client_address)
- os._exit(0)
- except:
- try:
- self.handle_error(request, client_address)
- finally:
- os._exit(1)
-
-
-class ThreadingMixIn:
- """Mix-in class to handle each request in a new thread."""
-
- # Decides how threads will act upon termination of the
- # main process
- daemon_threads = False
-
- def process_request_thread(self, request, client_address):
- """Same as in BaseServer but as a thread.
-
- In addition, exception handling is done here.
-
- """
- try:
- self.finish_request(request, client_address)
- self.close_request(request)
- except:
- self.handle_error(request, client_address)
- self.close_request(request)
-
- def process_request(self, request, client_address):
- """Start a new thread to process the request."""
- import threading
- t = threading.Thread(target = self.process_request_thread,
- args = (request, client_address))
- if self.daemon_threads:
- t.setDaemon (1)
- t.start()
-
-
-class ForkingUDPServer(ForkingMixIn, UDPServer): pass
-class ForkingTCPServer(ForkingMixIn, TCPServer): pass
-
-class ThreadingUDPServer(ThreadingMixIn, UDPServer): pass
-class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass
-
-if hasattr(socket, 'AF_UNIX'):
-
- class UnixStreamServer(TCPServer):
- address_family = socket.AF_UNIX
-
- class UnixDatagramServer(UDPServer):
- address_family = socket.AF_UNIX
-
- class ThreadingUnixStreamServer(ThreadingMixIn, UnixStreamServer): pass
-
- class ThreadingUnixDatagramServer(ThreadingMixIn, UnixDatagramServer): pass
-
-class BaseRequestHandler:
-
- """Base class for request handler classes.
-
- This class is instantiated for each request to be handled. The
- constructor sets the instance variables request, client_address
- and server, and then calls the handle() method. To implement a
- specific service, all you need to do is to derive a class which
- defines a handle() method.
-
- The handle() method can find the request as self.request, the
- client address as self.client_address, and the server (in case it
- needs access to per-server information) as self.server. Since a
- separate instance is created for each request, the handle() method
- can define arbitrary other instance variariables.
-
- """
-
- def __init__(self, request, client_address, server):
- self.request = request
- self.client_address = client_address
- self.server = server
- try:
- self.setup()
- self.handle()
- self.finish()
- finally:
- sys.exc_traceback = None # Help garbage collection
-
- def setup(self):
- pass
-
- def handle(self):
- pass
-
- def finish(self):
- pass
-
-
-# The following two classes make it possible to use the same service
-# class for stream or datagram servers.
-# Each class sets up these instance variables:
-# - rfile: a file object from which receives the request is read
-# - wfile: a file object to which the reply is written
-# When the handle() method returns, wfile is flushed properly
-
-
-class StreamRequestHandler(BaseRequestHandler):
-
- """Define self.rfile and self.wfile for stream sockets."""
-
- # Default buffer sizes for rfile, wfile.
- # We default rfile to buffered because otherwise it could be
- # really slow for large data (a getc() call per byte); we make
- # wfile unbuffered because (a) often after a write() we want to
- # read and we need to flush the line; (b) big writes to unbuffered
- # files are typically optimized by stdio even when big reads
- # aren't.
- rbufsize = -1
- wbufsize = 0
-
- def setup(self):
- self.connection = self.request
- self.rfile = self.connection.makefile('rb', self.rbufsize)
- self.wfile = self.connection.makefile('wb', self.wbufsize)
-
- def finish(self):
- if not self.wfile.closed:
- self.wfile.flush()
- self.wfile.close()
- self.rfile.close()
-
-
-class DatagramRequestHandler(BaseRequestHandler):
-
- # XXX Regrettably, I cannot get this working on Linux;
- # s.recvfrom() doesn't return a meaningful client address.
-
- """Define self.rfile and self.wfile for datagram sockets."""
-
- def setup(self):
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- self.packet, self.socket = self.request
- self.rfile = StringIO(self.packet)
- self.wfile = StringIO()
-
- def finish(self):
- self.socket.sendto(self.wfile.getvalue(), self.client_address)
diff --git a/sys/lib/python/StringIO.py b/sys/lib/python/StringIO.py
deleted file mode 100644
index 232009fb9..000000000
--- a/sys/lib/python/StringIO.py
+++ /dev/null
@@ -1,323 +0,0 @@
-r"""File-like objects that read from or write to a string buffer.
-
-This implements (nearly) all stdio methods.
-
-f = StringIO() # ready for writing
-f = StringIO(buf) # ready for reading
-f.close() # explicitly release resources held
-flag = f.isatty() # always false
-pos = f.tell() # get current position
-f.seek(pos) # set current position
-f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
-buf = f.read() # read until EOF
-buf = f.read(n) # read up to n bytes
-buf = f.readline() # read until end of line ('\n') or EOF
-list = f.readlines()# list of f.readline() results until EOF
-f.truncate([size]) # truncate file at to at most size (default: current pos)
-f.write(buf) # write at current position
-f.writelines(list) # for line in list: f.write(line)
-f.getvalue() # return whole file's contents as a string
-
-Notes:
-- Using a real file is often faster (but less convenient).
-- There's also a much faster implementation in C, called cStringIO, but
- it's not subclassable.
-- fileno() is left unimplemented so that code which uses it triggers
- an exception early.
-- Seeking far beyond EOF and then writing will insert real null
- bytes that occupy space in the buffer.
-- There's a simple test set (see end of this file).
-"""
-try:
- from errno import EINVAL
-except ImportError:
- EINVAL = 22
-
-__all__ = ["StringIO"]
-
-def _complain_ifclosed(closed):
- if closed:
- raise ValueError, "I/O operation on closed file"
-
-class StringIO:
- """class StringIO([buffer])
-
- When a StringIO object is created, it can be initialized to an existing
- string by passing the string to the constructor. If no string is given,
- the StringIO will start empty.
-
- The StringIO object can accept either Unicode or 8-bit strings, but
- mixing the two may take some care. If both are used, 8-bit strings that
- cannot be interpreted as 7-bit ASCII (that use the 8th bit) will cause
- a UnicodeError to be raised when getvalue() is called.
- """
- def __init__(self, buf = ''):
- # Force self.buf to be a string or unicode
- if not isinstance(buf, basestring):
- buf = str(buf)
- self.buf = buf
- self.len = len(buf)
- self.buflist = []
- self.pos = 0
- self.closed = False
- self.softspace = 0
-
- def __iter__(self):
- return self
-
- def next(self):
- """A file object is its own iterator, for example iter(f) returns f
- (unless f is closed). When a file is used as an iterator, typically
- in a for loop (for example, for line in f: print line), the next()
- method is called repeatedly. This method returns the next input line,
- or raises StopIteration when EOF is hit.
- """
- _complain_ifclosed(self.closed)
- r = self.readline()
- if not r:
- raise StopIteration
- return r
-
- def close(self):
- """Free the memory buffer.
- """
- if not self.closed:
- self.closed = True
- del self.buf, self.pos
-
- def isatty(self):
- """Returns False because StringIO objects are not connected to a
- tty-like device.
- """
- _complain_ifclosed(self.closed)
- return False
-
- def seek(self, pos, mode = 0):
- """Set the file's current position.
-
- The mode argument is optional and defaults to 0 (absolute file
- positioning); other values are 1 (seek relative to the current
- position) and 2 (seek relative to the file's end).
-
- There is no return value.
- """
- _complain_ifclosed(self.closed)
- if self.buflist:
- self.buf += ''.join(self.buflist)
- self.buflist = []
- if mode == 1:
- pos += self.pos
- elif mode == 2:
- pos += self.len
- self.pos = max(0, pos)
-
- def tell(self):
- """Return the file's current position."""
- _complain_ifclosed(self.closed)
- return self.pos
-
- def read(self, n = -1):
- """Read at most size bytes from the file
- (less if the read hits EOF before obtaining size bytes).
-
- If the size argument is negative or omitted, read all data until EOF
- is reached. The bytes are returned as a string object. An empty
- string is returned when EOF is encountered immediately.
- """
- _complain_ifclosed(self.closed)
- if self.buflist:
- self.buf += ''.join(self.buflist)
- self.buflist = []
- if n < 0:
- newpos = self.len
- else:
- newpos = min(self.pos+n, self.len)
- r = self.buf[self.pos:newpos]
- self.pos = newpos
- return r
-
- def readline(self, length=None):
- r"""Read one entire line from the file.
-
- A trailing newline character is kept in the string (but may be absent
- when a file ends with an incomplete line). If the size argument is
- present and non-negative, it is a maximum byte count (including the
- trailing newline) and an incomplete line may be returned.
-
- An empty string is returned only when EOF is encountered immediately.
-
- Note: Unlike stdio's fgets(), the returned string contains null
- characters ('\0') if they occurred in the input.
- """
- _complain_ifclosed(self.closed)
- if self.buflist:
- self.buf += ''.join(self.buflist)
- self.buflist = []
- i = self.buf.find('\n', self.pos)
- if i < 0:
- newpos = self.len
- else:
- newpos = i+1
- if length is not None:
- if self.pos + length < newpos:
- newpos = self.pos + length
- r = self.buf[self.pos:newpos]
- self.pos = newpos
- return r
-
- def readlines(self, sizehint = 0):
- """Read until EOF using readline() and return a list containing the
- lines thus read.
-
- If the optional sizehint argument is present, instead of reading up
- to EOF, whole lines totalling approximately sizehint bytes (or more
- to accommodate a final whole line).
- """
- total = 0
- lines = []
- line = self.readline()
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline()
- return lines
-
- def truncate(self, size=None):
- """Truncate the file's size.
-
- If the optional size argument is present, the file is truncated to
- (at most) that size. The size defaults to the current position.
- The current file position is not changed unless the position
- is beyond the new file size.
-
- If the specified size exceeds the file's current size, the
- file remains unchanged.
- """
- _complain_ifclosed(self.closed)
- if size is None:
- size = self.pos
- elif size < 0:
- raise IOError(EINVAL, "Negative size not allowed")
- elif size < self.pos:
- self.pos = size
- self.buf = self.getvalue()[:size]
- self.len = size
-
- def write(self, s):
- """Write a string to the file.
-
- There is no return value.
- """
- _complain_ifclosed(self.closed)
- if not s: return
- # Force s to be a string or unicode
- if not isinstance(s, basestring):
- s = str(s)
- spos = self.pos
- slen = self.len
- if spos == slen:
- self.buflist.append(s)
- self.len = self.pos = spos + len(s)
- return
- if spos > slen:
- self.buflist.append('\0'*(spos - slen))
- slen = spos
- newpos = spos + len(s)
- if spos < slen:
- if self.buflist:
- self.buf += ''.join(self.buflist)
- self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
- self.buf = ''
- if newpos > slen:
- slen = newpos
- else:
- self.buflist.append(s)
- slen = newpos
- self.len = slen
- self.pos = newpos
-
- def writelines(self, iterable):
- """Write a sequence of strings to the file. The sequence can be any
- iterable object producing strings, typically a list of strings. There
- is no return value.
-
- (The name is intended to match readlines(); writelines() does not add
- line separators.)
- """
- write = self.write
- for line in iterable:
- write(line)
-
- def flush(self):
- """Flush the internal buffer
- """
- _complain_ifclosed(self.closed)
-
- def getvalue(self):
- """
- Retrieve the entire contents of the "file" at any time before
- the StringIO object's close() method is called.
-
- The StringIO object can accept either Unicode or 8-bit strings,
- but mixing the two may take some care. If both are used, 8-bit
- strings that cannot be interpreted as 7-bit ASCII (that use the
- 8th bit) will cause a UnicodeError to be raised when getvalue()
- is called.
- """
- if self.buflist:
- self.buf += ''.join(self.buflist)
- self.buflist = []
- return self.buf
-
-
-# A little test suite
-
-def test():
- import sys
- if sys.argv[1:]:
- file = sys.argv[1]
- else:
- file = '/etc/passwd'
- lines = open(file, 'r').readlines()
- text = open(file, 'r').read()
- f = StringIO()
- for line in lines[:-2]:
- f.write(line)
- f.writelines(lines[-2:])
- if f.getvalue() != text:
- raise RuntimeError, 'write failed'
- length = f.tell()
- print 'File length =', length
- f.seek(len(lines[0]))
- f.write(lines[1])
- f.seek(0)
- print 'First line =', repr(f.readline())
- print 'Position =', f.tell()
- line = f.readline()
- print 'Second line =', repr(line)
- f.seek(-len(line), 1)
- line2 = f.read(len(line))
- if line != line2:
- raise RuntimeError, 'bad result after seek back'
- f.seek(len(line2), 1)
- list = f.readlines()
- line = list[-1]
- f.seek(f.tell() - len(line))
- line2 = f.read()
- if line != line2:
- raise RuntimeError, 'bad result after seek back from EOF'
- print 'Read', len(list), 'more lines'
- print 'File length =', f.tell()
- if f.tell() != length:
- raise RuntimeError, 'bad length'
- f.truncate(length/2)
- f.seek(0, 2)
- print 'Truncated length =', f.tell()
- if f.tell() != length/2:
- raise RuntimeError, 'truncate did not adjust length'
- f.close()
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/UserDict.py b/sys/lib/python/UserDict.py
deleted file mode 100644
index 5e97817f0..000000000
--- a/sys/lib/python/UserDict.py
+++ /dev/null
@@ -1,175 +0,0 @@
-"""A more or less complete user-defined wrapper around dictionary objects."""
-
-class UserDict:
- def __init__(self, dict=None, **kwargs):
- self.data = {}
- if dict is not None:
- self.update(dict)
- if len(kwargs):
- self.update(kwargs)
- def __repr__(self): return repr(self.data)
- def __cmp__(self, dict):
- if isinstance(dict, UserDict):
- return cmp(self.data, dict.data)
- else:
- return cmp(self.data, dict)
- def __len__(self): return len(self.data)
- def __getitem__(self, key):
- if key in self.data:
- return self.data[key]
- if hasattr(self.__class__, "__missing__"):
- return self.__class__.__missing__(self, key)
- raise KeyError(key)
- def __setitem__(self, key, item): self.data[key] = item
- def __delitem__(self, key): del self.data[key]
- def clear(self): self.data.clear()
- def copy(self):
- if self.__class__ is UserDict:
- return UserDict(self.data.copy())
- import copy
- data = self.data
- try:
- self.data = {}
- c = copy.copy(self)
- finally:
- self.data = data
- c.update(self)
- return c
- def keys(self): return self.data.keys()
- def items(self): return self.data.items()
- def iteritems(self): return self.data.iteritems()
- def iterkeys(self): return self.data.iterkeys()
- def itervalues(self): return self.data.itervalues()
- def values(self): return self.data.values()
- def has_key(self, key): return self.data.has_key(key)
- def update(self, dict=None, **kwargs):
- if dict is None:
- pass
- elif isinstance(dict, UserDict):
- self.data.update(dict.data)
- elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
- self.data.update(dict)
- else:
- for k, v in dict.items():
- self[k] = v
- if len(kwargs):
- self.data.update(kwargs)
- def get(self, key, failobj=None):
- if not self.has_key(key):
- return failobj
- return self[key]
- def setdefault(self, key, failobj=None):
- if not self.has_key(key):
- self[key] = failobj
- return self[key]
- def pop(self, key, *args):
- return self.data.pop(key, *args)
- def popitem(self):
- return self.data.popitem()
- def __contains__(self, key):
- return key in self.data
- @classmethod
- def fromkeys(cls, iterable, value=None):
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
-class IterableUserDict(UserDict):
- def __iter__(self):
- return iter(self.data)
-
-class DictMixin:
- # Mixin defining all dictionary methods for classes that already have
- # a minimum dictionary interface including getitem, setitem, delitem,
- # and keys. Without knowledge of the subclass constructor, the mixin
- # does not define __init__() or copy(). In addition to the four base
- # methods, progressively more efficiency comes with defining
- # __contains__(), __iter__(), and iteritems().
-
- # second level definitions support higher levels
- def __iter__(self):
- for k in self.keys():
- yield k
- def has_key(self, key):
- try:
- value = self[key]
- except KeyError:
- return False
- return True
- def __contains__(self, key):
- return self.has_key(key)
-
- # third level takes advantage of second level definitions
- def iteritems(self):
- for k in self:
- yield (k, self[k])
- def iterkeys(self):
- return self.__iter__()
-
- # fourth level uses definitions from lower levels
- def itervalues(self):
- for _, v in self.iteritems():
- yield v
- def values(self):
- return [v for _, v in self.iteritems()]
- def items(self):
- return list(self.iteritems())
- def clear(self):
- for key in self.keys():
- del self[key]
- def setdefault(self, key, default=None):
- try:
- return self[key]
- except KeyError:
- self[key] = default
- return default
- def pop(self, key, *args):
- if len(args) > 1:
- raise TypeError, "pop expected at most 2 arguments, got "\
- + repr(1 + len(args))
- try:
- value = self[key]
- except KeyError:
- if args:
- return args[0]
- raise
- del self[key]
- return value
- def popitem(self):
- try:
- k, v = self.iteritems().next()
- except StopIteration:
- raise KeyError, 'container is empty'
- del self[k]
- return (k, v)
- def update(self, other=None, **kwargs):
- # Make progressively weaker assumptions about "other"
- if other is None:
- pass
- elif hasattr(other, 'iteritems'): # iteritems saves memory and lookups
- for k, v in other.iteritems():
- self[k] = v
- elif hasattr(other, 'keys'):
- for k in other.keys():
- self[k] = other[k]
- else:
- for k, v in other:
- self[k] = v
- if kwargs:
- self.update(kwargs)
- def get(self, key, default=None):
- try:
- return self[key]
- except KeyError:
- return default
- def __repr__(self):
- return repr(dict(self.iteritems()))
- def __cmp__(self, other):
- if other is None:
- return 1
- if isinstance(other, DictMixin):
- other = dict(other.iteritems())
- return cmp(dict(self.iteritems()), other)
- def __len__(self):
- return len(self.keys())
diff --git a/sys/lib/python/UserList.py b/sys/lib/python/UserList.py
deleted file mode 100644
index 072f6a732..000000000
--- a/sys/lib/python/UserList.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""A more or less complete user-defined wrapper around list objects."""
-
-class UserList:
- def __init__(self, initlist=None):
- self.data = []
- if initlist is not None:
- # XXX should this accept an arbitrary sequence?
- if type(initlist) == type(self.data):
- self.data[:] = initlist
- elif isinstance(initlist, UserList):
- self.data[:] = initlist.data[:]
- else:
- self.data = list(initlist)
- def __repr__(self): return repr(self.data)
- def __lt__(self, other): return self.data < self.__cast(other)
- def __le__(self, other): return self.data <= self.__cast(other)
- def __eq__(self, other): return self.data == self.__cast(other)
- def __ne__(self, other): return self.data != self.__cast(other)
- def __gt__(self, other): return self.data > self.__cast(other)
- def __ge__(self, other): return self.data >= self.__cast(other)
- def __cast(self, other):
- if isinstance(other, UserList): return other.data
- else: return other
- def __cmp__(self, other):
- return cmp(self.data, self.__cast(other))
- def __contains__(self, item): return item in self.data
- def __len__(self): return len(self.data)
- def __getitem__(self, i): return self.data[i]
- def __setitem__(self, i, item): self.data[i] = item
- def __delitem__(self, i): del self.data[i]
- def __getslice__(self, i, j):
- i = max(i, 0); j = max(j, 0)
- return self.__class__(self.data[i:j])
- def __setslice__(self, i, j, other):
- i = max(i, 0); j = max(j, 0)
- if isinstance(other, UserList):
- self.data[i:j] = other.data
- elif isinstance(other, type(self.data)):
- self.data[i:j] = other
- else:
- self.data[i:j] = list(other)
- def __delslice__(self, i, j):
- i = max(i, 0); j = max(j, 0)
- del self.data[i:j]
- def __add__(self, other):
- if isinstance(other, UserList):
- return self.__class__(self.data + other.data)
- elif isinstance(other, type(self.data)):
- return self.__class__(self.data + other)
- else:
- return self.__class__(self.data + list(other))
- def __radd__(self, other):
- if isinstance(other, UserList):
- return self.__class__(other.data + self.data)
- elif isinstance(other, type(self.data)):
- return self.__class__(other + self.data)
- else:
- return self.__class__(list(other) + self.data)
- def __iadd__(self, other):
- if isinstance(other, UserList):
- self.data += other.data
- elif isinstance(other, type(self.data)):
- self.data += other
- else:
- self.data += list(other)
- return self
- def __mul__(self, n):
- return self.__class__(self.data*n)
- __rmul__ = __mul__
- def __imul__(self, n):
- self.data *= n
- return self
- def append(self, item): self.data.append(item)
- def insert(self, i, item): self.data.insert(i, item)
- def pop(self, i=-1): return self.data.pop(i)
- def remove(self, item): self.data.remove(item)
- def count(self, item): return self.data.count(item)
- def index(self, item, *args): return self.data.index(item, *args)
- def reverse(self): self.data.reverse()
- def sort(self, *args, **kwds): self.data.sort(*args, **kwds)
- def extend(self, other):
- if isinstance(other, UserList):
- self.data.extend(other.data)
- else:
- self.data.extend(other)
diff --git a/sys/lib/python/UserString.py b/sys/lib/python/UserString.py
deleted file mode 100755
index 60dc34bc4..000000000
--- a/sys/lib/python/UserString.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/bin/env python
-## vim:ts=4:et:nowrap
-"""A user-defined wrapper around string objects
-
-Note: string objects have grown methods in Python 1.6
-This module requires Python 1.6 or later.
-"""
-import sys
-
-__all__ = ["UserString","MutableString"]
-
-class UserString:
- def __init__(self, seq):
- if isinstance(seq, basestring):
- self.data = seq
- elif isinstance(seq, UserString):
- self.data = seq.data[:]
- else:
- self.data = str(seq)
- def __str__(self): return str(self.data)
- def __repr__(self): return repr(self.data)
- def __int__(self): return int(self.data)
- def __long__(self): return long(self.data)
- def __float__(self): return float(self.data)
- def __complex__(self): return complex(self.data)
- def __hash__(self): return hash(self.data)
-
- def __cmp__(self, string):
- if isinstance(string, UserString):
- return cmp(self.data, string.data)
- else:
- return cmp(self.data, string)
- def __contains__(self, char):
- return char in self.data
-
- def __len__(self): return len(self.data)
- def __getitem__(self, index): return self.__class__(self.data[index])
- def __getslice__(self, start, end):
- start = max(start, 0); end = max(end, 0)
- return self.__class__(self.data[start:end])
-
- def __add__(self, other):
- if isinstance(other, UserString):
- return self.__class__(self.data + other.data)
- elif isinstance(other, basestring):
- return self.__class__(self.data + other)
- else:
- return self.__class__(self.data + str(other))
- def __radd__(self, other):
- if isinstance(other, basestring):
- return self.__class__(other + self.data)
- else:
- return self.__class__(str(other) + self.data)
- def __mul__(self, n):
- return self.__class__(self.data*n)
- __rmul__ = __mul__
- def __mod__(self, args):
- return self.__class__(self.data % args)
-
- # the following methods are defined in alphabetical order:
- def capitalize(self): return self.__class__(self.data.capitalize())
- def center(self, width, *args):
- return self.__class__(self.data.center(width, *args))
- def count(self, sub, start=0, end=sys.maxint):
- return self.data.count(sub, start, end)
- def decode(self, encoding=None, errors=None): # XXX improve this?
- if encoding:
- if errors:
- return self.__class__(self.data.decode(encoding, errors))
- else:
- return self.__class__(self.data.decode(encoding))
- else:
- return self.__class__(self.data.decode())
- def encode(self, encoding=None, errors=None): # XXX improve this?
- if encoding:
- if errors:
- return self.__class__(self.data.encode(encoding, errors))
- else:
- return self.__class__(self.data.encode(encoding))
- else:
- return self.__class__(self.data.encode())
- def endswith(self, suffix, start=0, end=sys.maxint):
- return self.data.endswith(suffix, start, end)
- def expandtabs(self, tabsize=8):
- return self.__class__(self.data.expandtabs(tabsize))
- def find(self, sub, start=0, end=sys.maxint):
- return self.data.find(sub, start, end)
- def index(self, sub, start=0, end=sys.maxint):
- return self.data.index(sub, start, end)
- def isalpha(self): return self.data.isalpha()
- def isalnum(self): return self.data.isalnum()
- def isdecimal(self): return self.data.isdecimal()
- def isdigit(self): return self.data.isdigit()
- def islower(self): return self.data.islower()
- def isnumeric(self): return self.data.isnumeric()
- def isspace(self): return self.data.isspace()
- def istitle(self): return self.data.istitle()
- def isupper(self): return self.data.isupper()
- def join(self, seq): return self.data.join(seq)
- def ljust(self, width, *args):
- return self.__class__(self.data.ljust(width, *args))
- def lower(self): return self.__class__(self.data.lower())
- def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
- def partition(self, sep):
- return self.data.partition(sep)
- def replace(self, old, new, maxsplit=-1):
- return self.__class__(self.data.replace(old, new, maxsplit))
- def rfind(self, sub, start=0, end=sys.maxint):
- return self.data.rfind(sub, start, end)
- def rindex(self, sub, start=0, end=sys.maxint):
- return self.data.rindex(sub, start, end)
- def rjust(self, width, *args):
- return self.__class__(self.data.rjust(width, *args))
- def rpartition(self, sep):
- return self.data.rpartition(sep)
- def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
- def split(self, sep=None, maxsplit=-1):
- return self.data.split(sep, maxsplit)
- def rsplit(self, sep=None, maxsplit=-1):
- return self.data.rsplit(sep, maxsplit)
- def splitlines(self, keepends=0): return self.data.splitlines(keepends)
- def startswith(self, prefix, start=0, end=sys.maxint):
- return self.data.startswith(prefix, start, end)
- def strip(self, chars=None): return self.__class__(self.data.strip(chars))
- def swapcase(self): return self.__class__(self.data.swapcase())
- def title(self): return self.__class__(self.data.title())
- def translate(self, *args):
- return self.__class__(self.data.translate(*args))
- def upper(self): return self.__class__(self.data.upper())
- def zfill(self, width): return self.__class__(self.data.zfill(width))
-
-class MutableString(UserString):
- """mutable string objects
-
- Python strings are immutable objects. This has the advantage, that
- strings may be used as dictionary keys. If this property isn't needed
- and you insist on changing string values in place instead, you may cheat
- and use MutableString.
-
- But the purpose of this class is an educational one: to prevent
- people from inventing their own mutable string class derived
- from UserString and than forget thereby to remove (override) the
- __hash__ method inherited from UserString. This would lead to
- errors that would be very hard to track down.
-
- A faster and better solution is to rewrite your program using lists."""
- def __init__(self, string=""):
- self.data = string
- def __hash__(self):
- raise TypeError, "unhashable type (it is mutable)"
- def __setitem__(self, index, sub):
- if index < 0:
- index += len(self.data)
- if index < 0 or index >= len(self.data): raise IndexError
- self.data = self.data[:index] + sub + self.data[index+1:]
- def __delitem__(self, index):
- if index < 0:
- index += len(self.data)
- if index < 0 or index >= len(self.data): raise IndexError
- self.data = self.data[:index] + self.data[index+1:]
- def __setslice__(self, start, end, sub):
- start = max(start, 0); end = max(end, 0)
- if isinstance(sub, UserString):
- self.data = self.data[:start]+sub.data+self.data[end:]
- elif isinstance(sub, basestring):
- self.data = self.data[:start]+sub+self.data[end:]
- else:
- self.data = self.data[:start]+str(sub)+self.data[end:]
- def __delslice__(self, start, end):
- start = max(start, 0); end = max(end, 0)
- self.data = self.data[:start] + self.data[end:]
- def immutable(self):
- return UserString(self.data)
- def __iadd__(self, other):
- if isinstance(other, UserString):
- self.data += other.data
- elif isinstance(other, basestring):
- self.data += other
- else:
- self.data += str(other)
- return self
- def __imul__(self, n):
- self.data *= n
- return self
-
-if __name__ == "__main__":
- # execute the regression test to stdout, if called as a script:
- import os
- called_in_dir, called_as = os.path.split(sys.argv[0])
- called_as, py = os.path.splitext(called_as)
- if '-q' in sys.argv:
- from test import test_support
- test_support.verbose = 0
- __import__('test.test_' + called_as.lower())
diff --git a/sys/lib/python/_LWPCookieJar.py b/sys/lib/python/_LWPCookieJar.py
deleted file mode 100644
index 2a4fa7b2f..000000000
--- a/sys/lib/python/_LWPCookieJar.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""Load / save to libwww-perl (LWP) format files.
-
-Actually, the format is slightly extended from that used by LWP's
-(libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information
-not recorded by LWP.
-
-It uses the version string "2.0", though really there isn't an LWP Cookies
-2.0 format. This indicates that there is extra information in here
-(domain_dot and # port_spec) while still being compatible with
-libwww-perl, I hope.
-
-"""
-
-import time, re
-from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
- Cookie, MISSING_FILENAME_TEXT,
- join_header_words, split_header_words,
- iso2time, time2isoz)
-
-def lwp_cookie_str(cookie):
- """Return string representation of Cookie in an the LWP cookie file format.
-
- Actually, the format is extended a bit -- see module docstring.
-
- """
- h = [(cookie.name, cookie.value),
- ("path", cookie.path),
- ("domain", cookie.domain)]
- if cookie.port is not None: h.append(("port", cookie.port))
- if cookie.path_specified: h.append(("path_spec", None))
- if cookie.port_specified: h.append(("port_spec", None))
- if cookie.domain_initial_dot: h.append(("domain_dot", None))
- if cookie.secure: h.append(("secure", None))
- if cookie.expires: h.append(("expires",
- time2isoz(float(cookie.expires))))
- if cookie.discard: h.append(("discard", None))
- if cookie.comment: h.append(("comment", cookie.comment))
- if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
-
- keys = cookie._rest.keys()
- keys.sort()
- for k in keys:
- h.append((k, str(cookie._rest[k])))
-
- h.append(("version", str(cookie.version)))
-
- return join_header_words([h])
-
-class LWPCookieJar(FileCookieJar):
- """
- The LWPCookieJar saves a sequence of"Set-Cookie3" lines.
- "Set-Cookie3" is the format used by the libwww-perl libary, not known
- to be compatible with any browser, but which is easy to read and
- doesn't lose information about RFC 2965 cookies.
-
- Additional methods
-
- as_lwp_str(ignore_discard=True, ignore_expired=True)
-
- """
-
- def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
- """Return cookies as a string of "\n"-separated "Set-Cookie3" headers.
-
- ignore_discard and ignore_expires: see docstring for FileCookieJar.save
-
- """
- now = time.time()
- r = []
- for cookie in self:
- if not ignore_discard and cookie.discard:
- continue
- if not ignore_expires and cookie.is_expired(now):
- continue
- r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
- return "\n".join(r+[""])
-
- def save(self, filename=None, ignore_discard=False, ignore_expires=False):
- if filename is None:
- if self.filename is not None: filename = self.filename
- else: raise ValueError(MISSING_FILENAME_TEXT)
-
- f = open(filename, "w")
- try:
- # There really isn't an LWP Cookies 2.0 format, but this indicates
- # that there is extra information in here (domain_dot and
- # port_spec) while still being compatible with libwww-perl, I hope.
- f.write("#LWP-Cookies-2.0\n")
- f.write(self.as_lwp_str(ignore_discard, ignore_expires))
- finally:
- f.close()
-
- def _really_load(self, f, filename, ignore_discard, ignore_expires):
- magic = f.readline()
- if not re.search(self.magic_re, magic):
- msg = ("%r does not look like a Set-Cookie3 (LWP) format "
- "file" % filename)
- raise LoadError(msg)
-
- now = time.time()
-
- header = "Set-Cookie3:"
- boolean_attrs = ("port_spec", "path_spec", "domain_dot",
- "secure", "discard")
- value_attrs = ("version",
- "port", "path", "domain",
- "expires",
- "comment", "commenturl")
-
- try:
- while 1:
- line = f.readline()
- if line == "": break
- if not line.startswith(header):
- continue
- line = line[len(header):].strip()
-
- for data in split_header_words([line]):
- name, value = data[0]
- standard = {}
- rest = {}
- for k in boolean_attrs:
- standard[k] = False
- for k, v in data[1:]:
- if k is not None:
- lc = k.lower()
- else:
- lc = None
- # don't lose case distinction for unknown fields
- if (lc in value_attrs) or (lc in boolean_attrs):
- k = lc
- if k in boolean_attrs:
- if v is None: v = True
- standard[k] = v
- elif k in value_attrs:
- standard[k] = v
- else:
- rest[k] = v
-
- h = standard.get
- expires = h("expires")
- discard = h("discard")
- if expires is not None:
- expires = iso2time(expires)
- if expires is None:
- discard = True
- domain = h("domain")
- domain_specified = domain.startswith(".")
- c = Cookie(h("version"), name, value,
- h("port"), h("port_spec"),
- domain, domain_specified, h("domain_dot"),
- h("path"), h("path_spec"),
- h("secure"),
- expires,
- discard,
- h("comment"),
- h("commenturl"),
- rest)
- if not ignore_discard and c.discard:
- continue
- if not ignore_expires and c.is_expired(now):
- continue
- self.set_cookie(c)
-
- except IOError:
- raise
- except Exception:
- _warn_unhandled_exception()
- raise LoadError("invalid Set-Cookie3 format file %r: %r" %
- (filename, line))
diff --git a/sys/lib/python/_MozillaCookieJar.py b/sys/lib/python/_MozillaCookieJar.py
deleted file mode 100644
index 4fd6de32c..000000000
--- a/sys/lib/python/_MozillaCookieJar.py
+++ /dev/null
@@ -1,149 +0,0 @@
-"""Mozilla / Netscape cookie loading / saving."""
-
-import re, time
-
-from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError,
- Cookie, MISSING_FILENAME_TEXT)
-
-class MozillaCookieJar(FileCookieJar):
- """
-
- WARNING: you may want to backup your browser's cookies file if you use
- this class to save cookies. I *think* it works, but there have been
- bugs in the past!
-
- This class differs from CookieJar only in the format it uses to save and
- load cookies to and from a file. This class uses the Mozilla/Netscape
- `cookies.txt' format. lynx uses this file format, too.
-
- Don't expect cookies saved while the browser is running to be noticed by
- the browser (in fact, Mozilla on unix will overwrite your saved cookies if
- you change them on disk while it's running; on Windows, you probably can't
- save at all while the browser is running).
-
- Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
- Netscape cookies on saving.
-
- In particular, the cookie version and port number information is lost,
- together with information about whether or not Path, Port and Discard were
- specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
- domain as set in the HTTP header started with a dot (yes, I'm aware some
- domains in Netscape files start with a dot and some don't -- trust me, you
- really don't want to know any more about this).
-
- Note that though Mozilla and Netscape use the same format, they use
- slightly different headers. The class saves cookies using the Netscape
- header by default (Mozilla can cope with that).
-
- """
- magic_re = "#( Netscape)? HTTP Cookie File"
- header = """\
- # Netscape HTTP Cookie File
- # http://www.netscape.com/newsref/std/cookie_spec.html
- # This is a generated file! Do not edit.
-
-"""
-
- def _really_load(self, f, filename, ignore_discard, ignore_expires):
- now = time.time()
-
- magic = f.readline()
- if not re.search(self.magic_re, magic):
- f.close()
- raise LoadError(
- "%r does not look like a Netscape format cookies file" %
- filename)
-
- try:
- while 1:
- line = f.readline()
- if line == "": break
-
- # last field may be absent, so keep any trailing tab
- if line.endswith("\n"): line = line[:-1]
-
- # skip comments and blank lines XXX what is $ for?
- if (line.strip().startswith(("#", "$")) or
- line.strip() == ""):
- continue
-
- domain, domain_specified, path, secure, expires, name, value = \
- line.split("\t")
- secure = (secure == "TRUE")
- domain_specified = (domain_specified == "TRUE")
- if name == "":
- # cookies.txt regards 'Set-Cookie: foo' as a cookie
- # with no name, whereas cookielib regards it as a
- # cookie with no value.
- name = value
- value = None
-
- initial_dot = domain.startswith(".")
- assert domain_specified == initial_dot
-
- discard = False
- if expires == "":
- expires = None
- discard = True
-
- # assume path_specified is false
- c = Cookie(0, name, value,
- None, False,
- domain, domain_specified, initial_dot,
- path, False,
- secure,
- expires,
- discard,
- None,
- None,
- {})
- if not ignore_discard and c.discard:
- continue
- if not ignore_expires and c.is_expired(now):
- continue
- self.set_cookie(c)
-
- except IOError:
- raise
- except Exception:
- _warn_unhandled_exception()
- raise LoadError("invalid Netscape format cookies file %r: %r" %
- (filename, line))
-
- def save(self, filename=None, ignore_discard=False, ignore_expires=False):
- if filename is None:
- if self.filename is not None: filename = self.filename
- else: raise ValueError(MISSING_FILENAME_TEXT)
-
- f = open(filename, "w")
- try:
- f.write(self.header)
- now = time.time()
- for cookie in self:
- if not ignore_discard and cookie.discard:
- continue
- if not ignore_expires and cookie.is_expired(now):
- continue
- if cookie.secure: secure = "TRUE"
- else: secure = "FALSE"
- if cookie.domain.startswith("."): initial_dot = "TRUE"
- else: initial_dot = "FALSE"
- if cookie.expires is not None:
- expires = str(cookie.expires)
- else:
- expires = ""
- if cookie.value is None:
- # cookies.txt regards 'Set-Cookie: foo' as a cookie
- # with no name, whereas cookielib regards it as a
- # cookie with no value.
- name = ""
- value = cookie.name
- else:
- name = cookie.name
- value = cookie.value
- f.write(
- "\t".join([cookie.domain, initial_dot, cookie.path,
- secure, expires, name, value])+
- "\n")
- finally:
- f.close()
diff --git a/sys/lib/python/__future__.py b/sys/lib/python/__future__.py
deleted file mode 100644
index d8e14d157..000000000
--- a/sys/lib/python/__future__.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""Record of phased-in incompatible language changes.
-
-Each line is of the form:
-
- FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
- CompilerFlag ")"
-
-where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
-of the same form as sys.version_info:
-
- (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
- PY_MINOR_VERSION, # the 1; an int
- PY_MICRO_VERSION, # the 0; an int
- PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
- PY_RELEASE_SERIAL # the 3; an int
- )
-
-OptionalRelease records the first release in which
-
- from __future__ import FeatureName
-
-was accepted.
-
-In the case of MandatoryReleases that have not yet occurred,
-MandatoryRelease predicts the release in which the feature will become part
-of the language.
-
-Else MandatoryRelease records when the feature became part of the language;
-in releases at or after that, modules no longer need
-
- from __future__ import FeatureName
-
-to use the feature in question, but may continue to use such imports.
-
-MandatoryRelease may also be None, meaning that a planned feature got
-dropped.
-
-Instances of class _Feature have two corresponding methods,
-.getOptionalRelease() and .getMandatoryRelease().
-
-CompilerFlag is the (bitfield) flag that should be passed in the fourth
-argument to the builtin function compile() to enable the feature in
-dynamically compiled code. This flag is stored in the .compiler_flag
-attribute on _Future instances. These values must match the appropriate
-#defines of CO_xxx flags in Include/compile.h.
-
-No feature line is ever to be deleted from this file.
-"""
-
-all_feature_names = [
- "nested_scopes",
- "generators",
- "division",
- "absolute_import",
- "with_statement",
-]
-
-__all__ = ["all_feature_names"] + all_feature_names
-
-# The CO_xxx symbols are defined here under the same names used by
-# compile.h, so that an editor search will find them here. However,
-# they're not exported in __all__, because they don't really belong to
-# this module.
-CO_NESTED = 0x0010 # nested_scopes
-CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
-CO_FUTURE_DIVISION = 0x2000 # division
-CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
-CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
-
-class _Feature:
- def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
- self.optional = optionalRelease
- self.mandatory = mandatoryRelease
- self.compiler_flag = compiler_flag
-
- def getOptionalRelease(self):
- """Return first release in which this feature was recognized.
-
- This is a 5-tuple, of the same form as sys.version_info.
- """
-
- return self.optional
-
- def getMandatoryRelease(self):
- """Return release in which this feature will become mandatory.
-
- This is a 5-tuple, of the same form as sys.version_info, or, if
- the feature was dropped, is None.
- """
-
- return self.mandatory
-
- def __repr__(self):
- return "_Feature" + repr((self.optional,
- self.mandatory,
- self.compiler_flag))
-
-nested_scopes = _Feature((2, 1, 0, "beta", 1),
- (2, 2, 0, "alpha", 0),
- CO_NESTED)
-
-generators = _Feature((2, 2, 0, "alpha", 1),
- (2, 3, 0, "final", 0),
- CO_GENERATOR_ALLOWED)
-
-division = _Feature((2, 2, 0, "alpha", 2),
- (3, 0, 0, "alpha", 0),
- CO_FUTURE_DIVISION)
-
-absolute_import = _Feature((2, 5, 0, "alpha", 1),
- (2, 7, 0, "alpha", 0),
- CO_FUTURE_ABSOLUTE_IMPORT)
-
-with_statement = _Feature((2, 5, 0, "alpha", 1),
- (2, 6, 0, "alpha", 0),
- CO_FUTURE_WITH_STATEMENT)
diff --git a/sys/lib/python/__phello__.foo.py b/sys/lib/python/__phello__.foo.py
deleted file mode 100644
index 8e8623ee1..000000000
--- a/sys/lib/python/__phello__.foo.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file exists as a helper for the test.test_frozen module.
diff --git a/sys/lib/python/_strptime.py b/sys/lib/python/_strptime.py
deleted file mode 100644
index ef0103621..000000000
--- a/sys/lib/python/_strptime.py
+++ /dev/null
@@ -1,452 +0,0 @@
-"""Strptime-related classes and functions.
-
-CLASSES:
- LocaleTime -- Discovers and stores locale-specific time information
- TimeRE -- Creates regexes for pattern matching a string of text containing
- time information
-
-FUNCTIONS:
- _getlang -- Figure out what language is being used for the locale
- strptime -- Calculates the time struct represented by the passed-in string
-
-"""
-import time
-import locale
-import calendar
-from re import compile as re_compile
-from re import IGNORECASE
-from re import escape as re_escape
-from datetime import date as datetime_date
-try:
- from thread import allocate_lock as _thread_allocate_lock
-except:
- from dummy_thread import allocate_lock as _thread_allocate_lock
-
-__author__ = "Brett Cannon"
-__email__ = "brett@python.org"
-
-__all__ = ['strptime']
-
-def _getlang():
- # Figure out what the current language is set to.
- return locale.getlocale(locale.LC_TIME)
-
-class LocaleTime(object):
- """Stores and handles locale-specific information related to time.
-
- ATTRIBUTES:
- f_weekday -- full weekday names (7-item list)
- a_weekday -- abbreviated weekday names (7-item list)
- f_month -- full month names (13-item list; dummy value in [0], which
- is added by code)
- a_month -- abbreviated month names (13-item list, dummy value in
- [0], which is added by code)
- am_pm -- AM/PM representation (2-item list)
- LC_date_time -- format string for date/time representation (string)
- LC_date -- format string for date representation (string)
- LC_time -- format string for time representation (string)
- timezone -- daylight- and non-daylight-savings timezone representation
- (2-item list of sets)
- lang -- Language used by instance (2-item tuple)
- """
-
- def __init__(self):
- """Set all attributes.
-
- Order of methods called matters for dependency reasons.
-
- The locale language is set at the offset and then checked again before
- exiting. This is to make sure that the attributes were not set with a
- mix of information from more than one locale. This would most likely
- happen when using threads where one thread calls a locale-dependent
- function while another thread changes the locale while the function in
- the other thread is still running. Proper coding would call for
- locks to prevent changing the locale while locale-dependent code is
- running. The check here is done in case someone does not think about
- doing this.
-
- Only other possible issue is if someone changed the timezone and did
- not call tz.tzset . That is an issue for the programmer, though,
- since changing the timezone is worthless without that call.
-
- """
- self.lang = _getlang()
- self.__calc_weekday()
- self.__calc_month()
- self.__calc_am_pm()
- self.__calc_timezone()
- self.__calc_date_time()
- if _getlang() != self.lang:
- raise ValueError("locale changed during initialization")
-
- def __pad(self, seq, front):
- # Add '' to seq to either the front (is True), else the back.
- seq = list(seq)
- if front:
- seq.insert(0, '')
- else:
- seq.append('')
- return seq
-
- def __calc_weekday(self):
- # Set self.a_weekday and self.f_weekday using the calendar
- # module.
- a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
- f_weekday = [calendar.day_name[i].lower() for i in range(7)]
- self.a_weekday = a_weekday
- self.f_weekday = f_weekday
-
- def __calc_month(self):
- # Set self.f_month and self.a_month using the calendar module.
- a_month = [calendar.month_abbr[i].lower() for i in range(13)]
- f_month = [calendar.month_name[i].lower() for i in range(13)]
- self.a_month = a_month
- self.f_month = f_month
-
- def __calc_am_pm(self):
- # Set self.am_pm by using time.strftime().
-
- # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
- # magical; just happened to have used it everywhere else where a
- # static date was needed.
- am_pm = []
- for hour in (01,22):
- time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
- am_pm.append(time.strftime("%p", time_tuple).lower())
- self.am_pm = am_pm
-
- def __calc_date_time(self):
- # Set self.date_time, self.date, & self.time by using
- # time.strftime().
-
- # Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
- # overloaded numbers is minimized. The order in which searches for
- # values within the format string is very important; it eliminates
- # possible ambiguity for what something represents.
- time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
- date_time = [None, None, None]
- date_time[0] = time.strftime("%c", time_tuple).lower()
- date_time[1] = time.strftime("%x", time_tuple).lower()
- date_time[2] = time.strftime("%X", time_tuple).lower()
- replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
- (self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
- (self.a_month[3], '%b'), (self.am_pm[1], '%p'),
- ('1999', '%Y'), ('99', '%y'), ('22', '%H'),
- ('44', '%M'), ('55', '%S'), ('76', '%j'),
- ('17', '%d'), ('03', '%m'), ('3', '%m'),
- # '3' needed for when no leading zero.
- ('2', '%w'), ('10', '%I')]
- replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
- for tz in tz_values])
- for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
- current_format = date_time[offset]
- for old, new in replacement_pairs:
- # Must deal with possible lack of locale info
- # manifesting itself as the empty string (e.g., Swedish's
- # lack of AM/PM info) or a platform returning a tuple of empty
- # strings (e.g., MacOS 9 having timezone as ('','')).
- if old:
- current_format = current_format.replace(old, new)
- # If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
- # 2005-01-03 occurs before the first Monday of the year. Otherwise
- # %U is used.
- time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
- if '00' in time.strftime(directive, time_tuple):
- U_W = '%W'
- else:
- U_W = '%U'
- date_time[offset] = current_format.replace('11', U_W)
- self.LC_date_time = date_time[0]
- self.LC_date = date_time[1]
- self.LC_time = date_time[2]
-
- def __calc_timezone(self):
- # Set self.timezone by using time.tzname.
- # Do not worry about possibility of time.tzname[0] == timetzname[1]
- # and time.daylight; handle that in strptime .
- try:
- time.tzset()
- except AttributeError:
- pass
- no_saving = frozenset(["utc", "gmt", time.tzname[0].lower()])
- if time.daylight:
- has_saving = frozenset([time.tzname[1].lower()])
- else:
- has_saving = frozenset()
- self.timezone = (no_saving, has_saving)
-
-
-class TimeRE(dict):
- """Handle conversion from format directives to regexes."""
-
- def __init__(self, locale_time=None):
- """Create keys/values.
-
- Order of execution is important for dependency reasons.
-
- """
- if locale_time:
- self.locale_time = locale_time
- else:
- self.locale_time = LocaleTime()
- base = super(TimeRE, self)
- base.__init__({
- # The " \d" part of the regex is to make %c from ANSI C work
- 'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
- 'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
- 'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
- 'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
- 'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
- 'M': r"(?P<M>[0-5]\d|\d)",
- 'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
- 'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
- 'w': r"(?P<w>[0-6])",
- # W is set below by using 'U'
- 'y': r"(?P<y>\d\d)",
- #XXX: Does 'Y' need to worry about having less or more than
- # 4 digits?
- 'Y': r"(?P<Y>\d\d\d\d)",
- 'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
- 'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
- 'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
- 'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
- 'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
- 'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
- for tz in tz_names),
- 'Z'),
- '%': '%'})
- base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
- base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
- base.__setitem__('x', self.pattern(self.locale_time.LC_date))
- base.__setitem__('X', self.pattern(self.locale_time.LC_time))
-
- def __seqToRE(self, to_convert, directive):
- """Convert a list to a regex string for matching a directive.
-
- Want possible matching values to be from longest to shortest. This
- prevents the possibility of a match occuring for a value that also
- a substring of a larger value that should have matched (e.g., 'abc'
- matching when 'abcdef' should have been the match).
-
- """
- to_convert = sorted(to_convert, key=len, reverse=True)
- for value in to_convert:
- if value != '':
- break
- else:
- return ''
- regex = '|'.join(re_escape(stuff) for stuff in to_convert)
- regex = '(?P<%s>%s' % (directive, regex)
- return '%s)' % regex
-
- def pattern(self, format):
- """Return regex pattern for the format string.
-
- Need to make sure that any characters that might be interpreted as
- regex syntax are escaped.
-
- """
- processed_format = ''
- # The sub() call escapes all characters that might be misconstrued
- # as regex syntax. Cannot use re.escape since we have to deal with
- # format directives (%m, etc.).
- regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
- format = regex_chars.sub(r"\\\1", format)
- whitespace_replacement = re_compile('\s+')
- format = whitespace_replacement.sub('\s*', format)
- while '%' in format:
- directive_index = format.index('%')+1
- processed_format = "%s%s%s" % (processed_format,
- format[:directive_index-1],
- self[format[directive_index]])
- format = format[directive_index+1:]
- return "%s%s" % (processed_format, format)
-
- def compile(self, format):
- """Return a compiled re object for the format string."""
- return re_compile(self.pattern(format), IGNORECASE)
-
-_cache_lock = _thread_allocate_lock()
-# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
-# first!
-_TimeRE_cache = TimeRE()
-_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
-_regex_cache = {}
-
-def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
- """Calculate the Julian day based on the year, week of the year, and day of
- the week, with week_start_day representing whether the week of the year
- assumes the week starts on Sunday or Monday (6 or 0)."""
- first_weekday = datetime_date(year, 1, 1).weekday()
- # If we are dealing with the %U directive (week starts on Sunday), it's
- # easier to just shift the view to Sunday being the first day of the
- # week.
- if not week_starts_Mon:
- first_weekday = (first_weekday + 1) % 7
- day_of_week = (day_of_week + 1) % 7
- # Need to watch out for a week 0 (when the first day of the year is not
- # the same as that specified by %U or %W).
- week_0_length = (7 - first_weekday) % 7
- if week_of_year == 0:
- return 1 + day_of_week - first_weekday
- else:
- days_to_week = week_0_length + (7 * (week_of_year - 1))
- return 1 + days_to_week + day_of_week
-
-
-def strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
- """Return a time struct based on the input string and the format string."""
- global _TimeRE_cache, _regex_cache
- _cache_lock.acquire()
- try:
- time_re = _TimeRE_cache
- locale_time = time_re.locale_time
- if _getlang() != locale_time.lang:
- _TimeRE_cache = TimeRE()
- _regex_cache = {}
- if len(_regex_cache) > _CACHE_MAX_SIZE:
- _regex_cache.clear()
- format_regex = _regex_cache.get(format)
- if not format_regex:
- try:
- format_regex = time_re.compile(format)
- # KeyError raised when a bad format is found; can be specified as
- # \\, in which case it was a stray % but with a space after it
- except KeyError, err:
- bad_directive = err.args[0]
- if bad_directive == "\\":
- bad_directive = "%"
- del err
- raise ValueError("'%s' is a bad directive in format '%s'" %
- (bad_directive, format))
- # IndexError only occurs when the format string is "%"
- except IndexError:
- raise ValueError("stray %% in format '%s'" % format)
- _regex_cache[format] = format_regex
- finally:
- _cache_lock.release()
- found = format_regex.match(data_string)
- if not found:
- raise ValueError("time data did not match format: data=%s fmt=%s" %
- (data_string, format))
- if len(data_string) != found.end():
- raise ValueError("unconverted data remains: %s" %
- data_string[found.end():])
- year = 1900
- month = day = 1
- hour = minute = second = 0
- tz = -1
- # Default to -1 to signify that values not known; not critical to have,
- # though
- week_of_year = -1
- week_of_year_start = -1
- # weekday and julian defaulted to -1 so as to signal need to calculate
- # values
- weekday = julian = -1
- found_dict = found.groupdict()
- for group_key in found_dict.iterkeys():
- # Directives not explicitly handled below:
- # c, x, X
- # handled by making out of other directives
- # U, W
- # worthless without day of the week
- if group_key == 'y':
- year = int(found_dict['y'])
- # Open Group specification for strptime() states that a %y
- #value in the range of [00, 68] is in the century 2000, while
- #[69,99] is in the century 1900
- if year <= 68:
- year += 2000
- else:
- year += 1900
- elif group_key == 'Y':
- year = int(found_dict['Y'])
- elif group_key == 'm':
- month = int(found_dict['m'])
- elif group_key == 'B':
- month = locale_time.f_month.index(found_dict['B'].lower())
- elif group_key == 'b':
- month = locale_time.a_month.index(found_dict['b'].lower())
- elif group_key == 'd':
- day = int(found_dict['d'])
- elif group_key == 'H':
- hour = int(found_dict['H'])
- elif group_key == 'I':
- hour = int(found_dict['I'])
- ampm = found_dict.get('p', '').lower()
- # If there was no AM/PM indicator, we'll treat this like AM
- if ampm in ('', locale_time.am_pm[0]):
- # We're in AM so the hour is correct unless we're
- # looking at 12 midnight.
- # 12 midnight == 12 AM == hour 0
- if hour == 12:
- hour = 0
- elif ampm == locale_time.am_pm[1]:
- # We're in PM so we need to add 12 to the hour unless
- # we're looking at 12 noon.
- # 12 noon == 12 PM == hour 12
- if hour != 12:
- hour += 12
- elif group_key == 'M':
- minute = int(found_dict['M'])
- elif group_key == 'S':
- second = int(found_dict['S'])
- elif group_key == 'A':
- weekday = locale_time.f_weekday.index(found_dict['A'].lower())
- elif group_key == 'a':
- weekday = locale_time.a_weekday.index(found_dict['a'].lower())
- elif group_key == 'w':
- weekday = int(found_dict['w'])
- if weekday == 0:
- weekday = 6
- else:
- weekday -= 1
- elif group_key == 'j':
- julian = int(found_dict['j'])
- elif group_key in ('U', 'W'):
- week_of_year = int(found_dict[group_key])
- if group_key == 'U':
- # U starts week on Sunday.
- week_of_year_start = 6
- else:
- # W starts week on Monday.
- week_of_year_start = 0
- elif group_key == 'Z':
- # Since -1 is default value only need to worry about setting tz if
- # it can be something other than -1.
- found_zone = found_dict['Z'].lower()
- for value, tz_values in enumerate(locale_time.timezone):
- if found_zone in tz_values:
- # Deal with bad locale setup where timezone names are the
- # same and yet time.daylight is true; too ambiguous to
- # be able to tell what timezone has daylight savings
- if (time.tzname[0] == time.tzname[1] and
- time.daylight and found_zone not in ("utc", "gmt")):
- break
- else:
- tz = value
- break
- # If we know the week of the year and what day of that week, we can figure
- # out the Julian day of the year.
- if julian == -1 and week_of_year != -1 and weekday != -1:
- week_starts_Mon = True if week_of_year_start == 0 else False
- julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
- week_starts_Mon)
- # Cannot pre-calculate datetime_date() since can change in Julian
- # calculation and thus could have different value for the day of the week
- # calculation.
- if julian == -1:
- # Need to add 1 to result since first day of the year is 1, not 0.
- julian = datetime_date(year, month, day).toordinal() - \
- datetime_date(year, 1, 1).toordinal() + 1
- else: # Assume that if they bothered to include Julian day it will
- # be accurate.
- datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
- year = datetime_result.year
- month = datetime_result.month
- day = datetime_result.day
- if weekday == -1:
- weekday = datetime_date(year, month, day).weekday()
- return time.struct_time((year, month, day,
- hour, minute, second,
- weekday, julian, tz))
diff --git a/sys/lib/python/_threading_local.py b/sys/lib/python/_threading_local.py
deleted file mode 100644
index f0ce85716..000000000
--- a/sys/lib/python/_threading_local.py
+++ /dev/null
@@ -1,241 +0,0 @@
-"""Thread-local objects.
-
-(Note that this module provides a Python version of the threading.local
- class. Depending on the version of Python you're using, there may be a
- faster one available. You should always import the `local` class from
- `threading`.)
-
-Thread-local objects support the management of thread-local data.
-If you have data that you want to be local to a thread, simply create
-a thread-local object and use its attributes:
-
- >>> mydata = local()
- >>> mydata.number = 42
- >>> mydata.number
- 42
-
-You can also access the local-object's dictionary:
-
- >>> mydata.__dict__
- {'number': 42}
- >>> mydata.__dict__.setdefault('widgets', [])
- []
- >>> mydata.widgets
- []
-
-What's important about thread-local objects is that their data are
-local to a thread. If we access the data in a different thread:
-
- >>> log = []
- >>> def f():
- ... items = mydata.__dict__.items()
- ... items.sort()
- ... log.append(items)
- ... mydata.number = 11
- ... log.append(mydata.number)
-
- >>> import threading
- >>> thread = threading.Thread(target=f)
- >>> thread.start()
- >>> thread.join()
- >>> log
- [[], 11]
-
-we get different data. Furthermore, changes made in the other thread
-don't affect data seen in this thread:
-
- >>> mydata.number
- 42
-
-Of course, values you get from a local object, including a __dict__
-attribute, are for whatever thread was current at the time the
-attribute was read. For that reason, you generally don't want to save
-these values across threads, as they apply only to the thread they
-came from.
-
-You can create custom local objects by subclassing the local class:
-
- >>> class MyLocal(local):
- ... number = 2
- ... initialized = False
- ... def __init__(self, **kw):
- ... if self.initialized:
- ... raise SystemError('__init__ called too many times')
- ... self.initialized = True
- ... self.__dict__.update(kw)
- ... def squared(self):
- ... return self.number ** 2
-
-This can be useful to support default values, methods and
-initialization. Note that if you define an __init__ method, it will be
-called each time the local object is used in a separate thread. This
-is necessary to initialize each thread's dictionary.
-
-Now if we create a local object:
-
- >>> mydata = MyLocal(color='red')
-
-Now we have a default number:
-
- >>> mydata.number
- 2
-
-an initial color:
-
- >>> mydata.color
- 'red'
- >>> del mydata.color
-
-And a method that operates on the data:
-
- >>> mydata.squared()
- 4
-
-As before, we can access the data in a separate thread:
-
- >>> log = []
- >>> thread = threading.Thread(target=f)
- >>> thread.start()
- >>> thread.join()
- >>> log
- [[('color', 'red'), ('initialized', True)], 11]
-
-without affecting this thread's data:
-
- >>> mydata.number
- 2
- >>> mydata.color
- Traceback (most recent call last):
- ...
- AttributeError: 'MyLocal' object has no attribute 'color'
-
-Note that subclasses can define slots, but they are not thread
-local. They are shared across threads:
-
- >>> class MyLocal(local):
- ... __slots__ = 'number'
-
- >>> mydata = MyLocal()
- >>> mydata.number = 42
- >>> mydata.color = 'red'
-
-So, the separate thread:
-
- >>> thread = threading.Thread(target=f)
- >>> thread.start()
- >>> thread.join()
-
-affects what we see:
-
- >>> mydata.number
- 11
-
->>> del mydata
-"""
-
-__all__ = ["local"]
-
-# We need to use objects from the threading module, but the threading
-# module may also want to use our `local` class, if support for locals
-# isn't compiled in to the `thread` module. This creates potential problems
-# with circular imports. For that reason, we don't import `threading`
-# until the bottom of this file (a hack sufficient to worm around the
-# potential problems). Note that almost all platforms do have support for
-# locals in the `thread` module, and there is no circular import problem
-# then, so problems introduced by fiddling the order of imports here won't
-# manifest on most boxes.
-
-class _localbase(object):
- __slots__ = '_local__key', '_local__args', '_local__lock'
-
- def __new__(cls, *args, **kw):
- self = object.__new__(cls)
- key = '_local__key', 'thread.local.' + str(id(self))
- object.__setattr__(self, '_local__key', key)
- object.__setattr__(self, '_local__args', (args, kw))
- object.__setattr__(self, '_local__lock', RLock())
-
- if args or kw and (cls.__init__ is object.__init__):
- raise TypeError("Initialization arguments are not supported")
-
- # We need to create the thread dict in anticipation of
- # __init__ being called, to make sure we don't call it
- # again ourselves.
- dict = object.__getattribute__(self, '__dict__')
- currentThread().__dict__[key] = dict
-
- return self
-
-def _patch(self):
- key = object.__getattribute__(self, '_local__key')
- d = currentThread().__dict__.get(key)
- if d is None:
- d = {}
- currentThread().__dict__[key] = d
- object.__setattr__(self, '__dict__', d)
-
- # we have a new instance dict, so call out __init__ if we have
- # one
- cls = type(self)
- if cls.__init__ is not object.__init__:
- args, kw = object.__getattribute__(self, '_local__args')
- cls.__init__(self, *args, **kw)
- else:
- object.__setattr__(self, '__dict__', d)
-
-class local(_localbase):
-
- def __getattribute__(self, name):
- lock = object.__getattribute__(self, '_local__lock')
- lock.acquire()
- try:
- _patch(self)
- return object.__getattribute__(self, name)
- finally:
- lock.release()
-
- def __setattr__(self, name, value):
- lock = object.__getattribute__(self, '_local__lock')
- lock.acquire()
- try:
- _patch(self)
- return object.__setattr__(self, name, value)
- finally:
- lock.release()
-
- def __delattr__(self, name):
- lock = object.__getattribute__(self, '_local__lock')
- lock.acquire()
- try:
- _patch(self)
- return object.__delattr__(self, name)
- finally:
- lock.release()
-
- def __del__(self):
- import threading
-
- key = object.__getattribute__(self, '_local__key')
-
- try:
- threads = list(threading.enumerate())
- except:
- # If enumerate fails, as it seems to do during
- # shutdown, we'll skip cleanup under the assumption
- # that there is nothing to clean up.
- return
-
- for thread in threads:
- try:
- __dict__ = thread.__dict__
- except AttributeError:
- # Thread is dying, rest in peace.
- continue
-
- if key in __dict__:
- try:
- del __dict__[key]
- except KeyError:
- pass # didn't have anything in this thread
-
-from threading import currentThread, RLock
diff --git a/sys/lib/python/aifc.py b/sys/lib/python/aifc.py
deleted file mode 100644
index a5f86be45..000000000
--- a/sys/lib/python/aifc.py
+++ /dev/null
@@ -1,961 +0,0 @@
-"""Stuff to parse AIFF-C and AIFF files.
-
-Unless explicitly stated otherwise, the description below is true
-both for AIFF-C files and AIFF files.
-
-An AIFF-C file has the following structure.
-
- +-----------------+
- | FORM |
- +-----------------+
- | <size> |
- +----+------------+
- | | AIFC |
- | +------------+
- | | <chunks> |
- | | . |
- | | . |
- | | . |
- +----+------------+
-
-An AIFF file has the string "AIFF" instead of "AIFC".
-
-A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
-big endian order), followed by the data. The size field does not include
-the size of the 8 byte header.
-
-The following chunk types are recognized.
-
- FVER
- <version number of AIFF-C defining document> (AIFF-C only).
- MARK
- <# of markers> (2 bytes)
- list of markers:
- <marker ID> (2 bytes, must be > 0)
- <position> (4 bytes)
- <marker name> ("pstring")
- COMM
- <# of channels> (2 bytes)
- <# of sound frames> (4 bytes)
- <size of the samples> (2 bytes)
- <sampling frequency> (10 bytes, IEEE 80-bit extended
- floating point)
- in AIFF-C files only:
- <compression type> (4 bytes)
- <human-readable version of compression type> ("pstring")
- SSND
- <offset> (4 bytes, not used by this program)
- <blocksize> (4 bytes, not used by this program)
- <sound data>
-
-A pstring consists of 1 byte length, a string of characters, and 0 or 1
-byte pad to make the total length even.
-
-Usage.
-
-Reading AIFF files:
- f = aifc.open(file, 'r')
-where file is either the name of a file or an open file pointer.
-The open file pointer must have methods read(), seek(), and close().
-In some types of audio files, if the setpos() method is not used,
-the seek() method is not necessary.
-
-This returns an instance of a class with the following public methods:
- getnchannels() -- returns number of audio channels (1 for
- mono, 2 for stereo)
- getsampwidth() -- returns sample width in bytes
- getframerate() -- returns sampling frequency
- getnframes() -- returns number of audio frames
- getcomptype() -- returns compression type ('NONE' for AIFF files)
- getcompname() -- returns human-readable version of
- compression type ('not compressed' for AIFF files)
- getparams() -- returns a tuple consisting of all of the
- above in the above order
- getmarkers() -- get the list of marks in the audio file or None
- if there are no marks
- getmark(id) -- get mark with the specified id (raises an error
- if the mark does not exist)
- readframes(n) -- returns at most n frames of audio
- rewind() -- rewind to the beginning of the audio stream
- setpos(pos) -- seek to the specified position
- tell() -- return the current position
- close() -- close the instance (make it unusable)
-The position returned by tell(), the position given to setpos() and
-the position of marks are all compatible and have nothing to do with
-the actual position in the file.
-The close() method is called automatically when the class instance
-is destroyed.
-
-Writing AIFF files:
- f = aifc.open(file, 'w')
-where file is either the name of a file or an open file pointer.
-The open file pointer must have methods write(), tell(), seek(), and
-close().
-
-This returns an instance of a class with the following public methods:
- aiff() -- create an AIFF file (AIFF-C default)
- aifc() -- create an AIFF-C file
- setnchannels(n) -- set the number of channels
- setsampwidth(n) -- set the sample width
- setframerate(n) -- set the frame rate
- setnframes(n) -- set the number of frames
- setcomptype(type, name)
- -- set the compression type and the
- human-readable compression type
- setparams(tuple)
- -- set all parameters at once
- setmark(id, pos, name)
- -- add specified mark to the list of marks
- tell() -- return current position in output file (useful
- in combination with setmark())
- writeframesraw(data)
- -- write audio frames without pathing up the
- file header
- writeframes(data)
- -- write audio frames and patch up the file header
- close() -- patch up the file header and close the
- output file
-You should set the parameters before the first writeframesraw or
-writeframes. The total number of frames does not need to be set,
-but when it is set to the correct value, the header does not have to
-be patched up.
-It is best to first set all parameters, perhaps possibly the
-compression type, and then write audio frames using writeframesraw.
-When all frames have been written, either call writeframes('') or
-close() to patch up the sizes in the header.
-Marks can be added anytime. If there are any marks, ypu must call
-close() after all frames have been written.
-The close() method is called automatically when the class instance
-is destroyed.
-
-When a file is opened with the extension '.aiff', an AIFF file is
-written, otherwise an AIFF-C file is written. This default can be
-changed by calling aiff() or aifc() before the first writeframes or
-writeframesraw.
-"""
-
-import struct
-import __builtin__
-
-__all__ = ["Error","open","openfp"]
-
-class Error(Exception):
- pass
-
-_AIFC_version = 0xA2805140L # Version 1 of AIFF-C
-
-_skiplist = 'COMT', 'INST', 'MIDI', 'AESD', \
- 'APPL', 'NAME', 'AUTH', '(c) ', 'ANNO'
-
-def _read_long(file):
- try:
- return struct.unpack('>l', file.read(4))[0]
- except struct.error:
- raise EOFError
-
-def _read_ulong(file):
- try:
- return struct.unpack('>L', file.read(4))[0]
- except struct.error:
- raise EOFError
-
-def _read_short(file):
- try:
- return struct.unpack('>h', file.read(2))[0]
- except struct.error:
- raise EOFError
-
-def _read_string(file):
- length = ord(file.read(1))
- if length == 0:
- data = ''
- else:
- data = file.read(length)
- if length & 1 == 0:
- dummy = file.read(1)
- return data
-
-_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
-
-def _read_float(f): # 10 bytes
- expon = _read_short(f) # 2 bytes
- sign = 1
- if expon < 0:
- sign = -1
- expon = expon + 0x8000
- himant = _read_ulong(f) # 4 bytes
- lomant = _read_ulong(f) # 4 bytes
- if expon == himant == lomant == 0:
- f = 0.0
- elif expon == 0x7FFF:
- f = _HUGE_VAL
- else:
- expon = expon - 16383
- f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
- return sign * f
-
-def _write_short(f, x):
- f.write(struct.pack('>h', x))
-
-def _write_long(f, x):
- f.write(struct.pack('>L', x))
-
-def _write_string(f, s):
- if len(s) > 255:
- raise ValueError("string exceeds maximum pstring length")
- f.write(chr(len(s)))
- f.write(s)
- if len(s) & 1 == 0:
- f.write(chr(0))
-
-def _write_float(f, x):
- import math
- if x < 0:
- sign = 0x8000
- x = x * -1
- else:
- sign = 0
- if x == 0:
- expon = 0
- himant = 0
- lomant = 0
- else:
- fmant, expon = math.frexp(x)
- if expon > 16384 or fmant >= 1: # Infinity or NaN
- expon = sign|0x7FFF
- himant = 0
- lomant = 0
- else: # Finite
- expon = expon + 16382
- if expon < 0: # denormalized
- fmant = math.ldexp(fmant, expon)
- expon = 0
- expon = expon | sign
- fmant = math.ldexp(fmant, 32)
- fsmant = math.floor(fmant)
- himant = long(fsmant)
- fmant = math.ldexp(fmant - fsmant, 32)
- fsmant = math.floor(fmant)
- lomant = long(fsmant)
- _write_short(f, expon)
- _write_long(f, himant)
- _write_long(f, lomant)
-
-from chunk import Chunk
-
-class Aifc_read:
- # Variables used in this class:
- #
- # These variables are available to the user though appropriate
- # methods of this class:
- # _file -- the open file with methods read(), close(), and seek()
- # set through the __init__() method
- # _nchannels -- the number of audio channels
- # available through the getnchannels() method
- # _nframes -- the number of audio frames
- # available through the getnframes() method
- # _sampwidth -- the number of bytes per audio sample
- # available through the getsampwidth() method
- # _framerate -- the sampling frequency
- # available through the getframerate() method
- # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
- # available through the getcomptype() method
- # _compname -- the human-readable AIFF-C compression type
- # available through the getcomptype() method
- # _markers -- the marks in the audio file
- # available through the getmarkers() and getmark()
- # methods
- # _soundpos -- the position in the audio stream
- # available through the tell() method, set through the
- # setpos() method
- #
- # These variables are used internally only:
- # _version -- the AIFF-C version number
- # _decomp -- the decompressor from builtin module cl
- # _comm_chunk_read -- 1 iff the COMM chunk has been read
- # _aifc -- 1 iff reading an AIFF-C file
- # _ssnd_seek_needed -- 1 iff positioned correctly in audio
- # file for readframes()
- # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
- # _framesize -- size of one frame in the file
-
- def initfp(self, file):
- self._version = 0
- self._decomp = None
- self._convert = None
- self._markers = []
- self._soundpos = 0
- self._file = Chunk(file)
- if self._file.getname() != 'FORM':
- raise Error, 'file does not start with FORM id'
- formdata = self._file.read(4)
- if formdata == 'AIFF':
- self._aifc = 0
- elif formdata == 'AIFC':
- self._aifc = 1
- else:
- raise Error, 'not an AIFF or AIFF-C file'
- self._comm_chunk_read = 0
- while 1:
- self._ssnd_seek_needed = 1
- try:
- chunk = Chunk(self._file)
- except EOFError:
- break
- chunkname = chunk.getname()
- if chunkname == 'COMM':
- self._read_comm_chunk(chunk)
- self._comm_chunk_read = 1
- elif chunkname == 'SSND':
- self._ssnd_chunk = chunk
- dummy = chunk.read(8)
- self._ssnd_seek_needed = 0
- elif chunkname == 'FVER':
- self._version = _read_ulong(chunk)
- elif chunkname == 'MARK':
- self._readmark(chunk)
- elif chunkname in _skiplist:
- pass
- else:
- raise Error, 'unrecognized chunk type '+chunk.chunkname
- chunk.skip()
- if not self._comm_chunk_read or not self._ssnd_chunk:
- raise Error, 'COMM chunk and/or SSND chunk missing'
- if self._aifc and self._decomp:
- import cl
- params = [cl.ORIGINAL_FORMAT, 0,
- cl.BITS_PER_COMPONENT, self._sampwidth * 8,
- cl.FRAME_RATE, self._framerate]
- if self._nchannels == 1:
- params[1] = cl.MONO
- elif self._nchannels == 2:
- params[1] = cl.STEREO_INTERLEAVED
- else:
- raise Error, 'cannot compress more than 2 channels'
- self._decomp.SetParams(params)
-
- def __init__(self, f):
- if type(f) == type(''):
- f = __builtin__.open(f, 'rb')
- # else, assume it is an open file object already
- self.initfp(f)
-
- #
- # User visible methods.
- #
- def getfp(self):
- return self._file
-
- def rewind(self):
- self._ssnd_seek_needed = 1
- self._soundpos = 0
-
- def close(self):
- if self._decomp:
- self._decomp.CloseDecompressor()
- self._decomp = None
- self._file = None
-
- def tell(self):
- return self._soundpos
-
- def getnchannels(self):
- return self._nchannels
-
- def getnframes(self):
- return self._nframes
-
- def getsampwidth(self):
- return self._sampwidth
-
- def getframerate(self):
- return self._framerate
-
- def getcomptype(self):
- return self._comptype
-
- def getcompname(self):
- return self._compname
-
-## def getversion(self):
-## return self._version
-
- def getparams(self):
- return self.getnchannels(), self.getsampwidth(), \
- self.getframerate(), self.getnframes(), \
- self.getcomptype(), self.getcompname()
-
- def getmarkers(self):
- if len(self._markers) == 0:
- return None
- return self._markers
-
- def getmark(self, id):
- for marker in self._markers:
- if id == marker[0]:
- return marker
- raise Error, 'marker %r does not exist' % (id,)
-
- def setpos(self, pos):
- if pos < 0 or pos > self._nframes:
- raise Error, 'position not in range'
- self._soundpos = pos
- self._ssnd_seek_needed = 1
-
- def readframes(self, nframes):
- if self._ssnd_seek_needed:
- self._ssnd_chunk.seek(0)
- dummy = self._ssnd_chunk.read(8)
- pos = self._soundpos * self._framesize
- if pos:
- self._ssnd_chunk.seek(pos + 8)
- self._ssnd_seek_needed = 0
- if nframes == 0:
- return ''
- data = self._ssnd_chunk.read(nframes * self._framesize)
- if self._convert and data:
- data = self._convert(data)
- self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth)
- return data
-
- #
- # Internal methods.
- #
-
- def _decomp_data(self, data):
- import cl
- dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
- len(data) * 2)
- return self._decomp.Decompress(len(data) / self._nchannels,
- data)
-
- def _ulaw2lin(self, data):
- import audioop
- return audioop.ulaw2lin(data, 2)
-
- def _adpcm2lin(self, data):
- import audioop
- if not hasattr(self, '_adpcmstate'):
- # first time
- self._adpcmstate = None
- data, self._adpcmstate = audioop.adpcm2lin(data, 2,
- self._adpcmstate)
- return data
-
- def _read_comm_chunk(self, chunk):
- self._nchannels = _read_short(chunk)
- self._nframes = _read_long(chunk)
- self._sampwidth = (_read_short(chunk) + 7) / 8
- self._framerate = int(_read_float(chunk))
- self._framesize = self._nchannels * self._sampwidth
- if self._aifc:
- #DEBUG: SGI's soundeditor produces a bad size :-(
- kludge = 0
- if chunk.chunksize == 18:
- kludge = 1
- print 'Warning: bad COMM chunk size'
- chunk.chunksize = 23
- #DEBUG end
- self._comptype = chunk.read(4)
- #DEBUG start
- if kludge:
- length = ord(chunk.file.read(1))
- if length & 1 == 0:
- length = length + 1
- chunk.chunksize = chunk.chunksize + length
- chunk.file.seek(-1, 1)
- #DEBUG end
- self._compname = _read_string(chunk)
- if self._comptype != 'NONE':
- if self._comptype == 'G722':
- try:
- import audioop
- except ImportError:
- pass
- else:
- self._convert = self._adpcm2lin
- self._framesize = self._framesize / 4
- return
- # for ULAW and ALAW try Compression Library
- try:
- import cl
- except ImportError:
- if self._comptype == 'ULAW':
- try:
- import audioop
- self._convert = self._ulaw2lin
- self._framesize = self._framesize / 2
- return
- except ImportError:
- pass
- raise Error, 'cannot read compressed AIFF-C files'
- if self._comptype == 'ULAW':
- scheme = cl.G711_ULAW
- self._framesize = self._framesize / 2
- elif self._comptype == 'ALAW':
- scheme = cl.G711_ALAW
- self._framesize = self._framesize / 2
- else:
- raise Error, 'unsupported compression type'
- self._decomp = cl.OpenDecompressor(scheme)
- self._convert = self._decomp_data
- else:
- self._comptype = 'NONE'
- self._compname = 'not compressed'
-
- def _readmark(self, chunk):
- nmarkers = _read_short(chunk)
- # Some files appear to contain invalid counts.
- # Cope with this by testing for EOF.
- try:
- for i in range(nmarkers):
- id = _read_short(chunk)
- pos = _read_long(chunk)
- name = _read_string(chunk)
- if pos or name:
- # some files appear to have
- # dummy markers consisting of
- # a position 0 and name ''
- self._markers.append((id, pos, name))
- except EOFError:
- print 'Warning: MARK chunk contains only',
- print len(self._markers),
- if len(self._markers) == 1: print 'marker',
- else: print 'markers',
- print 'instead of', nmarkers
-
-class Aifc_write:
- # Variables used in this class:
- #
- # These variables are user settable through appropriate methods
- # of this class:
- # _file -- the open file with methods write(), close(), tell(), seek()
- # set through the __init__() method
- # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
- # set through the setcomptype() or setparams() method
- # _compname -- the human-readable AIFF-C compression type
- # set through the setcomptype() or setparams() method
- # _nchannels -- the number of audio channels
- # set through the setnchannels() or setparams() method
- # _sampwidth -- the number of bytes per audio sample
- # set through the setsampwidth() or setparams() method
- # _framerate -- the sampling frequency
- # set through the setframerate() or setparams() method
- # _nframes -- the number of audio frames written to the header
- # set through the setnframes() or setparams() method
- # _aifc -- whether we're writing an AIFF-C file or an AIFF file
- # set through the aifc() method, reset through the
- # aiff() method
- #
- # These variables are used internally only:
- # _version -- the AIFF-C version number
- # _comp -- the compressor from builtin module cl
- # _nframeswritten -- the number of audio frames actually written
- # _datalength -- the size of the audio samples written to the header
- # _datawritten -- the size of the audio samples actually written
-
- def __init__(self, f):
- if type(f) == type(''):
- filename = f
- f = __builtin__.open(f, 'wb')
- else:
- # else, assume it is an open file object already
- filename = '???'
- self.initfp(f)
- if filename[-5:] == '.aiff':
- self._aifc = 0
- else:
- self._aifc = 1
-
- def initfp(self, file):
- self._file = file
- self._version = _AIFC_version
- self._comptype = 'NONE'
- self._compname = 'not compressed'
- self._comp = None
- self._convert = None
- self._nchannels = 0
- self._sampwidth = 0
- self._framerate = 0
- self._nframes = 0
- self._nframeswritten = 0
- self._datawritten = 0
- self._datalength = 0
- self._markers = []
- self._marklength = 0
- self._aifc = 1 # AIFF-C is default
-
- def __del__(self):
- if self._file:
- self.close()
-
- #
- # User visible methods.
- #
- def aiff(self):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- self._aifc = 0
-
- def aifc(self):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- self._aifc = 1
-
- def setnchannels(self, nchannels):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- if nchannels < 1:
- raise Error, 'bad # of channels'
- self._nchannels = nchannels
-
- def getnchannels(self):
- if not self._nchannels:
- raise Error, 'number of channels not set'
- return self._nchannels
-
- def setsampwidth(self, sampwidth):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- if sampwidth < 1 or sampwidth > 4:
- raise Error, 'bad sample width'
- self._sampwidth = sampwidth
-
- def getsampwidth(self):
- if not self._sampwidth:
- raise Error, 'sample width not set'
- return self._sampwidth
-
- def setframerate(self, framerate):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- if framerate <= 0:
- raise Error, 'bad frame rate'
- self._framerate = framerate
-
- def getframerate(self):
- if not self._framerate:
- raise Error, 'frame rate not set'
- return self._framerate
-
- def setnframes(self, nframes):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- self._nframes = nframes
-
- def getnframes(self):
- return self._nframeswritten
-
- def setcomptype(self, comptype, compname):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
- raise Error, 'unsupported compression type'
- self._comptype = comptype
- self._compname = compname
-
- def getcomptype(self):
- return self._comptype
-
- def getcompname(self):
- return self._compname
-
-## def setversion(self, version):
-## if self._nframeswritten:
-## raise Error, 'cannot change parameters after starting to write'
-## self._version = version
-
- def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
- raise Error, 'unsupported compression type'
- self.setnchannels(nchannels)
- self.setsampwidth(sampwidth)
- self.setframerate(framerate)
- self.setnframes(nframes)
- self.setcomptype(comptype, compname)
-
- def getparams(self):
- if not self._nchannels or not self._sampwidth or not self._framerate:
- raise Error, 'not all parameters set'
- return self._nchannels, self._sampwidth, self._framerate, \
- self._nframes, self._comptype, self._compname
-
- def setmark(self, id, pos, name):
- if id <= 0:
- raise Error, 'marker ID must be > 0'
- if pos < 0:
- raise Error, 'marker position must be >= 0'
- if type(name) != type(''):
- raise Error, 'marker name must be a string'
- for i in range(len(self._markers)):
- if id == self._markers[i][0]:
- self._markers[i] = id, pos, name
- return
- self._markers.append((id, pos, name))
-
- def getmark(self, id):
- for marker in self._markers:
- if id == marker[0]:
- return marker
- raise Error, 'marker %r does not exist' % (id,)
-
- def getmarkers(self):
- if len(self._markers) == 0:
- return None
- return self._markers
-
- def tell(self):
- return self._nframeswritten
-
- def writeframesraw(self, data):
- self._ensure_header_written(len(data))
- nframes = len(data) / (self._sampwidth * self._nchannels)
- if self._convert:
- data = self._convert(data)
- self._file.write(data)
- self._nframeswritten = self._nframeswritten + nframes
- self._datawritten = self._datawritten + len(data)
-
- def writeframes(self, data):
- self.writeframesraw(data)
- if self._nframeswritten != self._nframes or \
- self._datalength != self._datawritten:
- self._patchheader()
-
- def close(self):
- self._ensure_header_written(0)
- if self._datawritten & 1:
- # quick pad to even size
- self._file.write(chr(0))
- self._datawritten = self._datawritten + 1
- self._writemarkers()
- if self._nframeswritten != self._nframes or \
- self._datalength != self._datawritten or \
- self._marklength:
- self._patchheader()
- if self._comp:
- self._comp.CloseCompressor()
- self._comp = None
- self._file.flush()
- self._file = None
-
- #
- # Internal methods.
- #
-
- def _comp_data(self, data):
- import cl
- dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
- dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
- return self._comp.Compress(self._nframes, data)
-
- def _lin2ulaw(self, data):
- import audioop
- return audioop.lin2ulaw(data, 2)
-
- def _lin2adpcm(self, data):
- import audioop
- if not hasattr(self, '_adpcmstate'):
- self._adpcmstate = None
- data, self._adpcmstate = audioop.lin2adpcm(data, 2,
- self._adpcmstate)
- return data
-
- def _ensure_header_written(self, datasize):
- if not self._nframeswritten:
- if self._comptype in ('ULAW', 'ALAW'):
- if not self._sampwidth:
- self._sampwidth = 2
- if self._sampwidth != 2:
- raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
- if self._comptype == 'G722':
- if not self._sampwidth:
- self._sampwidth = 2
- if self._sampwidth != 2:
- raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
- if not self._nchannels:
- raise Error, '# channels not specified'
- if not self._sampwidth:
- raise Error, 'sample width not specified'
- if not self._framerate:
- raise Error, 'sampling rate not specified'
- self._write_header(datasize)
-
- def _init_compression(self):
- if self._comptype == 'G722':
- self._convert = self._lin2adpcm
- return
- try:
- import cl
- except ImportError:
- if self._comptype == 'ULAW':
- try:
- import audioop
- self._convert = self._lin2ulaw
- return
- except ImportError:
- pass
- raise Error, 'cannot write compressed AIFF-C files'
- if self._comptype == 'ULAW':
- scheme = cl.G711_ULAW
- elif self._comptype == 'ALAW':
- scheme = cl.G711_ALAW
- else:
- raise Error, 'unsupported compression type'
- self._comp = cl.OpenCompressor(scheme)
- params = [cl.ORIGINAL_FORMAT, 0,
- cl.BITS_PER_COMPONENT, self._sampwidth * 8,
- cl.FRAME_RATE, self._framerate,
- cl.FRAME_BUFFER_SIZE, 100,
- cl.COMPRESSED_BUFFER_SIZE, 100]
- if self._nchannels == 1:
- params[1] = cl.MONO
- elif self._nchannels == 2:
- params[1] = cl.STEREO_INTERLEAVED
- else:
- raise Error, 'cannot compress more than 2 channels'
- self._comp.SetParams(params)
- # the compressor produces a header which we ignore
- dummy = self._comp.Compress(0, '')
- self._convert = self._comp_data
-
- def _write_header(self, initlength):
- if self._aifc and self._comptype != 'NONE':
- self._init_compression()
- self._file.write('FORM')
- if not self._nframes:
- self._nframes = initlength / (self._nchannels * self._sampwidth)
- self._datalength = self._nframes * self._nchannels * self._sampwidth
- if self._datalength & 1:
- self._datalength = self._datalength + 1
- if self._aifc:
- if self._comptype in ('ULAW', 'ALAW'):
- self._datalength = self._datalength / 2
- if self._datalength & 1:
- self._datalength = self._datalength + 1
- elif self._comptype == 'G722':
- self._datalength = (self._datalength + 3) / 4
- if self._datalength & 1:
- self._datalength = self._datalength + 1
- self._form_length_pos = self._file.tell()
- commlength = self._write_form_length(self._datalength)
- if self._aifc:
- self._file.write('AIFC')
- self._file.write('FVER')
- _write_long(self._file, 4)
- _write_long(self._file, self._version)
- else:
- self._file.write('AIFF')
- self._file.write('COMM')
- _write_long(self._file, commlength)
- _write_short(self._file, self._nchannels)
- self._nframes_pos = self._file.tell()
- _write_long(self._file, self._nframes)
- _write_short(self._file, self._sampwidth * 8)
- _write_float(self._file, self._framerate)
- if self._aifc:
- self._file.write(self._comptype)
- _write_string(self._file, self._compname)
- self._file.write('SSND')
- self._ssnd_length_pos = self._file.tell()
- _write_long(self._file, self._datalength + 8)
- _write_long(self._file, 0)
- _write_long(self._file, 0)
-
- def _write_form_length(self, datalength):
- if self._aifc:
- commlength = 18 + 5 + len(self._compname)
- if commlength & 1:
- commlength = commlength + 1
- verslength = 12
- else:
- commlength = 18
- verslength = 0
- _write_long(self._file, 4 + verslength + self._marklength + \
- 8 + commlength + 16 + datalength)
- return commlength
-
- def _patchheader(self):
- curpos = self._file.tell()
- if self._datawritten & 1:
- datalength = self._datawritten + 1
- self._file.write(chr(0))
- else:
- datalength = self._datawritten
- if datalength == self._datalength and \
- self._nframes == self._nframeswritten and \
- self._marklength == 0:
- self._file.seek(curpos, 0)
- return
- self._file.seek(self._form_length_pos, 0)
- dummy = self._write_form_length(datalength)
- self._file.seek(self._nframes_pos, 0)
- _write_long(self._file, self._nframeswritten)
- self._file.seek(self._ssnd_length_pos, 0)
- _write_long(self._file, datalength + 8)
- self._file.seek(curpos, 0)
- self._nframes = self._nframeswritten
- self._datalength = datalength
-
- def _writemarkers(self):
- if len(self._markers) == 0:
- return
- self._file.write('MARK')
- length = 2
- for marker in self._markers:
- id, pos, name = marker
- length = length + len(name) + 1 + 6
- if len(name) & 1 == 0:
- length = length + 1
- _write_long(self._file, length)
- self._marklength = length + 8
- _write_short(self._file, len(self._markers))
- for marker in self._markers:
- id, pos, name = marker
- _write_short(self._file, id)
- _write_long(self._file, pos)
- _write_string(self._file, name)
-
-def open(f, mode=None):
- if mode is None:
- if hasattr(f, 'mode'):
- mode = f.mode
- else:
- mode = 'rb'
- if mode in ('r', 'rb'):
- return Aifc_read(f)
- elif mode in ('w', 'wb'):
- return Aifc_write(f)
- else:
- raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
-
-openfp = open # B/W compatibility
-
-if __name__ == '__main__':
- import sys
- if not sys.argv[1:]:
- sys.argv.append('/usr/demos/data/audio/bach.aiff')
- fn = sys.argv[1]
- f = open(fn, 'r')
- print "Reading", fn
- print "nchannels =", f.getnchannels()
- print "nframes =", f.getnframes()
- print "sampwidth =", f.getsampwidth()
- print "framerate =", f.getframerate()
- print "comptype =", f.getcomptype()
- print "compname =", f.getcompname()
- if sys.argv[2:]:
- gn = sys.argv[2]
- print "Writing", gn
- g = open(gn, 'w')
- g.setparams(f.getparams())
- while 1:
- data = f.readframes(1024)
- if not data:
- break
- g.writeframes(data)
- g.close()
- f.close()
- print "Done."
diff --git a/sys/lib/python/anydbm.py b/sys/lib/python/anydbm.py
deleted file mode 100644
index 8b01ef3ea..000000000
--- a/sys/lib/python/anydbm.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""Generic interface to all dbm clones.
-
-Instead of
-
- import dbm
- d = dbm.open(file, 'w', 0666)
-
-use
-
- import anydbm
- d = anydbm.open(file, 'w')
-
-The returned object is a dbhash, gdbm, dbm or dumbdbm object,
-dependent on the type of database being opened (determined by whichdb
-module) in the case of an existing dbm. If the dbm does not exist and
-the create or new flag ('c' or 'n') was specified, the dbm type will
-be determined by the availability of the modules (tested in the above
-order).
-
-It has the following interface (key and data are strings):
-
- d[key] = data # store data at key (may override data at
- # existing key)
- data = d[key] # retrieve data at key (raise KeyError if no
- # such key)
- del d[key] # delete data stored at key (raises KeyError
- # if no such key)
- flag = key in d # true if the key exists
- list = d.keys() # return a list of all existing keys (slow!)
-
-Future versions may change the order in which implementations are
-tested for existence, add interfaces to other dbm-like
-implementations.
-
-The open function has an optional second argument. This can be 'r',
-for read-only access, 'w', for read-write access of an existing
-database, 'c' for read-write access to a new or existing database, and
-'n' for read-write access to a new database. The default is 'r'.
-
-Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
-only if it doesn't exist; and 'n' always creates a new database.
-
-"""
-
-class error(Exception):
- pass
-
-_names = ['dbhash', 'gdbm', 'dbm', 'dumbdbm']
-_errors = [error]
-_defaultmod = None
-
-for _name in _names:
- try:
- _mod = __import__(_name)
- except ImportError:
- continue
- if not _defaultmod:
- _defaultmod = _mod
- _errors.append(_mod.error)
-
-if not _defaultmod:
- raise ImportError, "no dbm clone found; tried %s" % _names
-
-error = tuple(_errors)
-
-def open(file, flag = 'r', mode = 0666):
- # guess the type of an existing database
- from whichdb import whichdb
- result=whichdb(file)
- if result is None:
- # db doesn't exist
- if 'c' in flag or 'n' in flag:
- # file doesn't exist and the new
- # flag was used so use default type
- mod = _defaultmod
- else:
- raise error, "need 'c' or 'n' flag to open new db"
- elif result == "":
- # db type cannot be determined
- raise error, "db type could not be determined"
- else:
- mod = __import__(result)
- return mod.open(file, flag, mode)
diff --git a/sys/lib/python/asynchat.py b/sys/lib/python/asynchat.py
deleted file mode 100644
index 6f99ba106..000000000
--- a/sys/lib/python/asynchat.py
+++ /dev/null
@@ -1,295 +0,0 @@
-# -*- Mode: Python; tab-width: 4 -*-
-# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
-# Author: Sam Rushing <rushing@nightmare.com>
-
-# ======================================================================
-# Copyright 1996 by Sam Rushing
-#
-# All Rights Reserved
-#
-# Permission to use, copy, modify, and distribute this software and
-# its documentation for any purpose and without fee is hereby
-# granted, provided that the above copyright notice appear in all
-# copies and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of Sam
-# Rushing not be used in advertising or publicity pertaining to
-# distribution of the software without specific, written prior
-# permission.
-#
-# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
-# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# ======================================================================
-
-r"""A class supporting chat-style (command/response) protocols.
-
-This class adds support for 'chat' style protocols - where one side
-sends a 'command', and the other sends a response (examples would be
-the common internet protocols - smtp, nntp, ftp, etc..).
-
-The handle_read() method looks at the input stream for the current
-'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
-for multi-line output), calling self.found_terminator() on its
-receipt.
-
-for example:
-Say you build an async nntp client using this class. At the start
-of the connection, you'll have self.terminator set to '\r\n', in
-order to process the single-line greeting. Just before issuing a
-'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
-command will be accumulated (using your own 'collect_incoming_data'
-method) up to the terminator, and then control will be returned to
-you - by calling your self.found_terminator() method.
-"""
-
-import socket
-import asyncore
-from collections import deque
-
-class async_chat (asyncore.dispatcher):
- """This is an abstract class. You must derive from this class, and add
- the two methods collect_incoming_data() and found_terminator()"""
-
- # these are overridable defaults
-
- ac_in_buffer_size = 4096
- ac_out_buffer_size = 4096
-
- def __init__ (self, conn=None):
- self.ac_in_buffer = ''
- self.ac_out_buffer = ''
- self.producer_fifo = fifo()
- asyncore.dispatcher.__init__ (self, conn)
-
- def collect_incoming_data(self, data):
- raise NotImplementedError, "must be implemented in subclass"
-
- def found_terminator(self):
- raise NotImplementedError, "must be implemented in subclass"
-
- def set_terminator (self, term):
- "Set the input delimiter. Can be a fixed string of any length, an integer, or None"
- self.terminator = term
-
- def get_terminator (self):
- return self.terminator
-
- # grab some more data from the socket,
- # throw it to the collector method,
- # check for the terminator,
- # if found, transition to the next state.
-
- def handle_read (self):
-
- try:
- data = self.recv (self.ac_in_buffer_size)
- except socket.error, why:
- self.handle_error()
- return
-
- self.ac_in_buffer = self.ac_in_buffer + data
-
- # Continue to search for self.terminator in self.ac_in_buffer,
- # while calling self.collect_incoming_data. The while loop
- # is necessary because we might read several data+terminator
- # combos with a single recv(1024).
-
- while self.ac_in_buffer:
- lb = len(self.ac_in_buffer)
- terminator = self.get_terminator()
- if not terminator:
- # no terminator, collect it all
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
- elif isinstance(terminator, int) or isinstance(terminator, long):
- # numeric terminator
- n = terminator
- if lb < n:
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
- self.terminator = self.terminator - lb
- else:
- self.collect_incoming_data (self.ac_in_buffer[:n])
- self.ac_in_buffer = self.ac_in_buffer[n:]
- self.terminator = 0
- self.found_terminator()
- else:
- # 3 cases:
- # 1) end of buffer matches terminator exactly:
- # collect data, transition
- # 2) end of buffer matches some prefix:
- # collect data to the prefix
- # 3) end of buffer does not match any prefix:
- # collect data
- terminator_len = len(terminator)
- index = self.ac_in_buffer.find(terminator)
- if index != -1:
- # we found the terminator
- if index > 0:
- # don't bother reporting the empty string (source of subtle bugs)
- self.collect_incoming_data (self.ac_in_buffer[:index])
- self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
- # This does the Right Thing if the terminator is changed here.
- self.found_terminator()
- else:
- # check for a prefix of the terminator
- index = find_prefix_at_end (self.ac_in_buffer, terminator)
- if index:
- if index != lb:
- # we found a prefix, collect up to the prefix
- self.collect_incoming_data (self.ac_in_buffer[:-index])
- self.ac_in_buffer = self.ac_in_buffer[-index:]
- break
- else:
- # no prefix, collect it all
- self.collect_incoming_data (self.ac_in_buffer)
- self.ac_in_buffer = ''
-
- def handle_write (self):
- self.initiate_send ()
-
- def handle_close (self):
- self.close()
-
- def push (self, data):
- self.producer_fifo.push (simple_producer (data))
- self.initiate_send()
-
- def push_with_producer (self, producer):
- self.producer_fifo.push (producer)
- self.initiate_send()
-
- def readable (self):
- "predicate for inclusion in the readable for select()"
- return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
-
- def writable (self):
- "predicate for inclusion in the writable for select()"
- # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
- # this is about twice as fast, though not as clear.
- return not (
- (self.ac_out_buffer == '') and
- self.producer_fifo.is_empty() and
- self.connected
- )
-
- def close_when_done (self):
- "automatically close this channel once the outgoing queue is empty"
- self.producer_fifo.push (None)
-
- # refill the outgoing buffer by calling the more() method
- # of the first producer in the queue
- def refill_buffer (self):
- while 1:
- if len(self.producer_fifo):
- p = self.producer_fifo.first()
- # a 'None' in the producer fifo is a sentinel,
- # telling us to close the channel.
- if p is None:
- if not self.ac_out_buffer:
- self.producer_fifo.pop()
- self.close()
- return
- elif isinstance(p, str):
- self.producer_fifo.pop()
- self.ac_out_buffer = self.ac_out_buffer + p
- return
- data = p.more()
- if data:
- self.ac_out_buffer = self.ac_out_buffer + data
- return
- else:
- self.producer_fifo.pop()
- else:
- return
-
- def initiate_send (self):
- obs = self.ac_out_buffer_size
- # try to refill the buffer
- if (len (self.ac_out_buffer) < obs):
- self.refill_buffer()
-
- if self.ac_out_buffer and self.connected:
- # try to send the buffer
- try:
- num_sent = self.send (self.ac_out_buffer[:obs])
- if num_sent:
- self.ac_out_buffer = self.ac_out_buffer[num_sent:]
-
- except socket.error, why:
- self.handle_error()
- return
-
- def discard_buffers (self):
- # Emergencies only!
- self.ac_in_buffer = ''
- self.ac_out_buffer = ''
- while self.producer_fifo:
- self.producer_fifo.pop()
-
-
-class simple_producer:
-
- def __init__ (self, data, buffer_size=512):
- self.data = data
- self.buffer_size = buffer_size
-
- def more (self):
- if len (self.data) > self.buffer_size:
- result = self.data[:self.buffer_size]
- self.data = self.data[self.buffer_size:]
- return result
- else:
- result = self.data
- self.data = ''
- return result
-
-class fifo:
- def __init__ (self, list=None):
- if not list:
- self.list = deque()
- else:
- self.list = deque(list)
-
- def __len__ (self):
- return len(self.list)
-
- def is_empty (self):
- return not self.list
-
- def first (self):
- return self.list[0]
-
- def push (self, data):
- self.list.append(data)
-
- def pop (self):
- if self.list:
- return (1, self.list.popleft())
- else:
- return (0, None)
-
-# Given 'haystack', see if any prefix of 'needle' is at its end. This
-# assumes an exact match has already been checked. Return the number of
-# characters matched.
-# for example:
-# f_p_a_e ("qwerty\r", "\r\n") => 1
-# f_p_a_e ("qwertydkjf", "\r\n") => 0
-# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
-
-# this could maybe be made faster with a computed regex?
-# [answer: no; circa Python-2.0, Jan 2001]
-# new python: 28961/s
-# old python: 18307/s
-# re: 12820/s
-# regex: 14035/s
-
-def find_prefix_at_end (haystack, needle):
- l = len(needle) - 1
- while l and not haystack.endswith(needle[:l]):
- l -= 1
- return l
diff --git a/sys/lib/python/asyncore.py b/sys/lib/python/asyncore.py
deleted file mode 100644
index 886c84545..000000000
--- a/sys/lib/python/asyncore.py
+++ /dev/null
@@ -1,551 +0,0 @@
-# -*- Mode: Python -*-
-# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
-# Author: Sam Rushing <rushing@nightmare.com>
-
-# ======================================================================
-# Copyright 1996 by Sam Rushing
-#
-# All Rights Reserved
-#
-# Permission to use, copy, modify, and distribute this software and
-# its documentation for any purpose and without fee is hereby
-# granted, provided that the above copyright notice appear in all
-# copies and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of Sam
-# Rushing not be used in advertising or publicity pertaining to
-# distribution of the software without specific, written prior
-# permission.
-#
-# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
-# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
-# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
-# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# ======================================================================
-
-"""Basic infrastructure for asynchronous socket service clients and servers.
-
-There are only two ways to have a program on a single processor do "more
-than one thing at a time". Multi-threaded programming is the simplest and
-most popular way to do it, but there is another very different technique,
-that lets you have nearly all the advantages of multi-threading, without
-actually using multiple threads. it's really only practical if your program
-is largely I/O bound. If your program is CPU bound, then pre-emptive
-scheduled threads are probably what you really need. Network servers are
-rarely CPU-bound, however.
-
-If your operating system supports the select() system call in its I/O
-library (and nearly all do), then you can use it to juggle multiple
-communication channels at once; doing other work while your I/O is taking
-place in the "background." Although this strategy can seem strange and
-complex, especially at first, it is in many ways easier to understand and
-control than multi-threaded programming. The module documented here solves
-many of the difficult problems for you, making the task of building
-sophisticated high-performance network servers and clients a snap.
-"""
-
-import select
-import socket
-import sys
-import time
-
-import os
-from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
- ENOTCONN, ESHUTDOWN, EINTR, EISCONN, errorcode
-
-try:
- socket_map
-except NameError:
- socket_map = {}
-
-class ExitNow(Exception):
- pass
-
-def read(obj):
- try:
- obj.handle_read_event()
- except ExitNow:
- raise
- except:
- obj.handle_error()
-
-def write(obj):
- try:
- obj.handle_write_event()
- except ExitNow:
- raise
- except:
- obj.handle_error()
-
-def _exception (obj):
- try:
- obj.handle_expt_event()
- except ExitNow:
- raise
- except:
- obj.handle_error()
-
-def readwrite(obj, flags):
- try:
- if flags & (select.POLLIN | select.POLLPRI):
- obj.handle_read_event()
- if flags & select.POLLOUT:
- obj.handle_write_event()
- if flags & (select.POLLERR | select.POLLHUP | select.POLLNVAL):
- obj.handle_expt_event()
- except ExitNow:
- raise
- except:
- obj.handle_error()
-
-def poll(timeout=0.0, map=None):
- if map is None:
- map = socket_map
- if map:
- r = []; w = []; e = []
- for fd, obj in map.items():
- is_r = obj.readable()
- is_w = obj.writable()
- if is_r:
- r.append(fd)
- if is_w:
- w.append(fd)
- if is_r or is_w:
- e.append(fd)
- if [] == r == w == e:
- time.sleep(timeout)
- else:
- try:
- r, w, e = select.select(r, w, e, timeout)
- except select.error, err:
- if err[0] != EINTR:
- raise
- else:
- return
-
- for fd in r:
- obj = map.get(fd)
- if obj is None:
- continue
- read(obj)
-
- for fd in w:
- obj = map.get(fd)
- if obj is None:
- continue
- write(obj)
-
- for fd in e:
- obj = map.get(fd)
- if obj is None:
- continue
- _exception(obj)
-
-def poll2(timeout=0.0, map=None):
- # Use the poll() support added to the select module in Python 2.0
- if map is None:
- map = socket_map
- if timeout is not None:
- # timeout is in milliseconds
- timeout = int(timeout*1000)
- pollster = select.poll()
- if map:
- for fd, obj in map.items():
- flags = 0
- if obj.readable():
- flags |= select.POLLIN | select.POLLPRI
- if obj.writable():
- flags |= select.POLLOUT
- if flags:
- # Only check for exceptions if object was either readable
- # or writable.
- flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
- pollster.register(fd, flags)
- try:
- r = pollster.poll(timeout)
- except select.error, err:
- if err[0] != EINTR:
- raise
- r = []
- for fd, flags in r:
- obj = map.get(fd)
- if obj is None:
- continue
- readwrite(obj, flags)
-
-poll3 = poll2 # Alias for backward compatibility
-
-def loop(timeout=30.0, use_poll=False, map=None, count=None):
- if map is None:
- map = socket_map
-
- if use_poll and hasattr(select, 'poll'):
- poll_fun = poll2
- else:
- poll_fun = poll
-
- if count is None:
- while map:
- poll_fun(timeout, map)
-
- else:
- while map and count > 0:
- poll_fun(timeout, map)
- count = count - 1
-
-class dispatcher:
-
- debug = False
- connected = False
- accepting = False
- closing = False
- addr = None
-
- def __init__(self, sock=None, map=None):
- if map is None:
- self._map = socket_map
- else:
- self._map = map
-
- if sock:
- self.set_socket(sock, map)
- # I think it should inherit this anyway
- self.socket.setblocking(0)
- self.connected = True
- # XXX Does the constructor require that the socket passed
- # be connected?
- try:
- self.addr = sock.getpeername()
- except socket.error:
- # The addr isn't crucial
- pass
- else:
- self.socket = None
-
- def __repr__(self):
- status = [self.__class__.__module__+"."+self.__class__.__name__]
- if self.accepting and self.addr:
- status.append('listening')
- elif self.connected:
- status.append('connected')
- if self.addr is not None:
- try:
- status.append('%s:%d' % self.addr)
- except TypeError:
- status.append(repr(self.addr))
- return '<%s at %#x>' % (' '.join(status), id(self))
-
- def add_channel(self, map=None):
- #self.log_info('adding channel %s' % self)
- if map is None:
- map = self._map
- map[self._fileno] = self
-
- def del_channel(self, map=None):
- fd = self._fileno
- if map is None:
- map = self._map
- if map.has_key(fd):
- #self.log_info('closing channel %d:%s' % (fd, self))
- del map[fd]
- self._fileno = None
-
- def create_socket(self, family, type):
- self.family_and_type = family, type
- self.socket = socket.socket(family, type)
- self.socket.setblocking(0)
- self._fileno = self.socket.fileno()
- self.add_channel()
-
- def set_socket(self, sock, map=None):
- self.socket = sock
-## self.__dict__['socket'] = sock
- self._fileno = sock.fileno()
- self.add_channel(map)
-
- def set_reuse_addr(self):
- # try to re-use a server port if possible
- try:
- self.socket.setsockopt(
- socket.SOL_SOCKET, socket.SO_REUSEADDR,
- self.socket.getsockopt(socket.SOL_SOCKET,
- socket.SO_REUSEADDR) | 1
- )
- except socket.error:
- pass
-
- # ==================================================
- # predicates for select()
- # these are used as filters for the lists of sockets
- # to pass to select().
- # ==================================================
-
- def readable(self):
- return True
-
- def writable(self):
- return True
-
- # ==================================================
- # socket object methods.
- # ==================================================
-
- def listen(self, num):
- self.accepting = True
- if os.name == 'nt' and num > 5:
- num = 1
- return self.socket.listen(num)
-
- def bind(self, addr):
- self.addr = addr
- return self.socket.bind(addr)
-
- def connect(self, address):
- self.connected = False
- err = self.socket.connect_ex(address)
- # XXX Should interpret Winsock return values
- if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
- return
- if err in (0, EISCONN):
- self.addr = address
- self.connected = True
- self.handle_connect()
- else:
- raise socket.error, (err, errorcode[err])
-
- def accept(self):
- # XXX can return either an address pair or None
- try:
- conn, addr = self.socket.accept()
- return conn, addr
- except socket.error, why:
- if why[0] == EWOULDBLOCK:
- pass
- else:
- raise
-
- def send(self, data):
- try:
- result = self.socket.send(data)
- return result
- except socket.error, why:
- if why[0] == EWOULDBLOCK:
- return 0
- else:
- raise
- return 0
-
- def recv(self, buffer_size):
- try:
- data = self.socket.recv(buffer_size)
- if not data:
- # a closed connection is indicated by signaling
- # a read condition, and having recv() return 0.
- self.handle_close()
- return ''
- else:
- return data
- except socket.error, why:
- # winsock sometimes throws ENOTCONN
- if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
- self.handle_close()
- return ''
- else:
- raise
-
- def close(self):
- self.del_channel()
- self.socket.close()
-
- # cheap inheritance, used to pass all other attribute
- # references to the underlying socket object.
- def __getattr__(self, attr):
- return getattr(self.socket, attr)
-
- # log and log_info may be overridden to provide more sophisticated
- # logging and warning methods. In general, log is for 'hit' logging
- # and 'log_info' is for informational, warning and error logging.
-
- def log(self, message):
- sys.stderr.write('log: %s\n' % str(message))
-
- def log_info(self, message, type='info'):
- if __debug__ or type != 'info':
- print '%s: %s' % (type, message)
-
- def handle_read_event(self):
- if self.accepting:
- # for an accepting socket, getting a read implies
- # that we are connected
- if not self.connected:
- self.connected = True
- self.handle_accept()
- elif not self.connected:
- self.handle_connect()
- self.connected = True
- self.handle_read()
- else:
- self.handle_read()
-
- def handle_write_event(self):
- # getting a write implies that we are connected
- if not self.connected:
- self.handle_connect()
- self.connected = True
- self.handle_write()
-
- def handle_expt_event(self):
- self.handle_expt()
-
- def handle_error(self):
- nil, t, v, tbinfo = compact_traceback()
-
- # sometimes a user repr method will crash.
- try:
- self_repr = repr(self)
- except:
- self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
-
- self.log_info(
- 'uncaptured python exception, closing channel %s (%s:%s %s)' % (
- self_repr,
- t,
- v,
- tbinfo
- ),
- 'error'
- )
- self.close()
-
- def handle_expt(self):
- self.log_info('unhandled exception', 'warning')
-
- def handle_read(self):
- self.log_info('unhandled read event', 'warning')
-
- def handle_write(self):
- self.log_info('unhandled write event', 'warning')
-
- def handle_connect(self):
- self.log_info('unhandled connect event', 'warning')
-
- def handle_accept(self):
- self.log_info('unhandled accept event', 'warning')
-
- def handle_close(self):
- self.log_info('unhandled close event', 'warning')
- self.close()
-
-# ---------------------------------------------------------------------------
-# adds simple buffered output capability, useful for simple clients.
-# [for more sophisticated usage use asynchat.async_chat]
-# ---------------------------------------------------------------------------
-
-class dispatcher_with_send(dispatcher):
-
- def __init__(self, sock=None, map=None):
- dispatcher.__init__(self, sock, map)
- self.out_buffer = ''
-
- def initiate_send(self):
- num_sent = 0
- num_sent = dispatcher.send(self, self.out_buffer[:512])
- self.out_buffer = self.out_buffer[num_sent:]
-
- def handle_write(self):
- self.initiate_send()
-
- def writable(self):
- return (not self.connected) or len(self.out_buffer)
-
- def send(self, data):
- if self.debug:
- self.log_info('sending %s' % repr(data))
- self.out_buffer = self.out_buffer + data
- self.initiate_send()
-
-# ---------------------------------------------------------------------------
-# used for debugging.
-# ---------------------------------------------------------------------------
-
-def compact_traceback():
- t, v, tb = sys.exc_info()
- tbinfo = []
- assert tb # Must have a traceback
- while tb:
- tbinfo.append((
- tb.tb_frame.f_code.co_filename,
- tb.tb_frame.f_code.co_name,
- str(tb.tb_lineno)
- ))
- tb = tb.tb_next
-
- # just to be safe
- del tb
-
- file, function, line = tbinfo[-1]
- info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
- return (file, function, line), t, v, info
-
-def close_all(map=None):
- if map is None:
- map = socket_map
- for x in map.values():
- x.socket.close()
- map.clear()
-
-# Asynchronous File I/O:
-#
-# After a little research (reading man pages on various unixen, and
-# digging through the linux kernel), I've determined that select()
-# isn't meant for doing asynchronous file i/o.
-# Heartening, though - reading linux/mm/filemap.c shows that linux
-# supports asynchronous read-ahead. So _MOST_ of the time, the data
-# will be sitting in memory for us already when we go to read it.
-#
-# What other OS's (besides NT) support async file i/o? [VMS?]
-#
-# Regardless, this is useful for pipes, and stdin/stdout...
-
-if os.name == 'posix':
- import fcntl
-
- class file_wrapper:
- # here we override just enough to make a file
- # look like a socket for the purposes of asyncore.
-
- def __init__(self, fd):
- self.fd = fd
-
- def recv(self, *args):
- return os.read(self.fd, *args)
-
- def send(self, *args):
- return os.write(self.fd, *args)
-
- read = recv
- write = send
-
- def close(self):
- os.close(self.fd)
-
- def fileno(self):
- return self.fd
-
- class file_dispatcher(dispatcher):
-
- def __init__(self, fd, map=None):
- dispatcher.__init__(self, None, map)
- self.connected = True
- self.set_file(fd)
- # set it to non-blocking mode
- flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
- flags = flags | os.O_NONBLOCK
- fcntl.fcntl(fd, fcntl.F_SETFL, flags)
-
- def set_file(self, fd):
- self._fileno = fd
- self.socket = file_wrapper(fd)
- self.add_channel()
diff --git a/sys/lib/python/atexit.py b/sys/lib/python/atexit.py
deleted file mode 100644
index c9f4cc677..000000000
--- a/sys/lib/python/atexit.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-atexit.py - allow programmer to define multiple exit functions to be executed
-upon normal program termination.
-
-One public function, register, is defined.
-"""
-
-__all__ = ["register"]
-
-import sys
-
-_exithandlers = []
-def _run_exitfuncs():
- """run any registered exit functions
-
- _exithandlers is traversed in reverse order so functions are executed
- last in, first out.
- """
-
- exc_info = None
- while _exithandlers:
- func, targs, kargs = _exithandlers.pop()
- try:
- func(*targs, **kargs)
- except SystemExit:
- exc_info = sys.exc_info()
- except:
- import traceback
- print >> sys.stderr, "Error in atexit._run_exitfuncs:"
- traceback.print_exc()
- exc_info = sys.exc_info()
-
- if exc_info is not None:
- raise exc_info[0], exc_info[1], exc_info[2]
-
-
-def register(func, *targs, **kargs):
- """register a function to be executed upon normal program termination
-
- func - function to be called at exit
- targs - optional arguments to pass to func
- kargs - optional keyword arguments to pass to func
- """
- _exithandlers.append((func, targs, kargs))
-
-if hasattr(sys, "exitfunc"):
- # Assume it's another registered exit function - append it to our list
- register(sys.exitfunc)
-sys.exitfunc = _run_exitfuncs
-
-if __name__ == "__main__":
- def x1():
- print "running x1"
- def x2(n):
- print "running x2(%r)" % (n,)
- def x3(n, kwd=None):
- print "running x3(%r, kwd=%r)" % (n, kwd)
-
- register(x1)
- register(x2, 12)
- register(x3, 5, "bar")
- register(x3, "no kwd args")
diff --git a/sys/lib/python/audiodev.py b/sys/lib/python/audiodev.py
deleted file mode 100644
index 8945c983c..000000000
--- a/sys/lib/python/audiodev.py
+++ /dev/null
@@ -1,257 +0,0 @@
-"""Classes for manipulating audio devices (currently only for Sun and SGI)"""
-
-__all__ = ["error","AudioDev"]
-
-class error(Exception):
- pass
-
-class Play_Audio_sgi:
- # Private instance variables
-## if 0: access frameratelist, nchannelslist, sampwidthlist, oldparams, \
-## params, config, inited_outrate, inited_width, \
-## inited_nchannels, port, converter, classinited: private
-
- classinited = 0
- frameratelist = nchannelslist = sampwidthlist = None
-
- def initclass(self):
- import AL
- self.frameratelist = [
- (48000, AL.RATE_48000),
- (44100, AL.RATE_44100),
- (32000, AL.RATE_32000),
- (22050, AL.RATE_22050),
- (16000, AL.RATE_16000),
- (11025, AL.RATE_11025),
- ( 8000, AL.RATE_8000),
- ]
- self.nchannelslist = [
- (1, AL.MONO),
- (2, AL.STEREO),
- (4, AL.QUADRO),
- ]
- self.sampwidthlist = [
- (1, AL.SAMPLE_8),
- (2, AL.SAMPLE_16),
- (3, AL.SAMPLE_24),
- ]
- self.classinited = 1
-
- def __init__(self):
- import al, AL
- if not self.classinited:
- self.initclass()
- self.oldparams = []
- self.params = [AL.OUTPUT_RATE, 0]
- self.config = al.newconfig()
- self.inited_outrate = 0
- self.inited_width = 0
- self.inited_nchannels = 0
- self.converter = None
- self.port = None
- return
-
- def __del__(self):
- if self.port:
- self.stop()
- if self.oldparams:
- import al, AL
- al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
- self.oldparams = []
-
- def wait(self):
- if not self.port:
- return
- import time
- while self.port.getfilled() > 0:
- time.sleep(0.1)
- self.stop()
-
- def stop(self):
- if self.port:
- self.port.closeport()
- self.port = None
- if self.oldparams:
- import al, AL
- al.setparams(AL.DEFAULT_DEVICE, self.oldparams)
- self.oldparams = []
-
- def setoutrate(self, rate):
- for (raw, cooked) in self.frameratelist:
- if rate == raw:
- self.params[1] = cooked
- self.inited_outrate = 1
- break
- else:
- raise error, 'bad output rate'
-
- def setsampwidth(self, width):
- for (raw, cooked) in self.sampwidthlist:
- if width == raw:
- self.config.setwidth(cooked)
- self.inited_width = 1
- break
- else:
- if width == 0:
- import AL
- self.inited_width = 0
- self.config.setwidth(AL.SAMPLE_16)
- self.converter = self.ulaw2lin
- else:
- raise error, 'bad sample width'
-
- def setnchannels(self, nchannels):
- for (raw, cooked) in self.nchannelslist:
- if nchannels == raw:
- self.config.setchannels(cooked)
- self.inited_nchannels = 1
- break
- else:
- raise error, 'bad # of channels'
-
- def writeframes(self, data):
- if not (self.inited_outrate and self.inited_nchannels):
- raise error, 'params not specified'
- if not self.port:
- import al, AL
- self.port = al.openport('Python', 'w', self.config)
- self.oldparams = self.params[:]
- al.getparams(AL.DEFAULT_DEVICE, self.oldparams)
- al.setparams(AL.DEFAULT_DEVICE, self.params)
- if self.converter:
- data = self.converter(data)
- self.port.writesamps(data)
-
- def getfilled(self):
- if self.port:
- return self.port.getfilled()
- else:
- return 0
-
- def getfillable(self):
- if self.port:
- return self.port.getfillable()
- else:
- return self.config.getqueuesize()
-
- # private methods
-## if 0: access *: private
-
- def ulaw2lin(self, data):
- import audioop
- return audioop.ulaw2lin(data, 2)
-
-class Play_Audio_sun:
-## if 0: access outrate, sampwidth, nchannels, inited_outrate, inited_width, \
-## inited_nchannels, converter: private
-
- def __init__(self):
- self.outrate = 0
- self.sampwidth = 0
- self.nchannels = 0
- self.inited_outrate = 0
- self.inited_width = 0
- self.inited_nchannels = 0
- self.converter = None
- self.port = None
- return
-
- def __del__(self):
- self.stop()
-
- def setoutrate(self, rate):
- self.outrate = rate
- self.inited_outrate = 1
-
- def setsampwidth(self, width):
- self.sampwidth = width
- self.inited_width = 1
-
- def setnchannels(self, nchannels):
- self.nchannels = nchannels
- self.inited_nchannels = 1
-
- def writeframes(self, data):
- if not (self.inited_outrate and self.inited_width and self.inited_nchannels):
- raise error, 'params not specified'
- if not self.port:
- import sunaudiodev, SUNAUDIODEV
- self.port = sunaudiodev.open('w')
- info = self.port.getinfo()
- info.o_sample_rate = self.outrate
- info.o_channels = self.nchannels
- if self.sampwidth == 0:
- info.o_precision = 8
- self.o_encoding = SUNAUDIODEV.ENCODING_ULAW
- # XXX Hack, hack -- leave defaults
- else:
- info.o_precision = 8 * self.sampwidth
- info.o_encoding = SUNAUDIODEV.ENCODING_LINEAR
- self.port.setinfo(info)
- if self.converter:
- data = self.converter(data)
- self.port.write(data)
-
- def wait(self):
- if not self.port:
- return
- self.port.drain()
- self.stop()
-
- def stop(self):
- if self.port:
- self.port.flush()
- self.port.close()
- self.port = None
-
- def getfilled(self):
- if self.port:
- return self.port.obufcount()
- else:
- return 0
-
-## # Nobody remembers what this method does, and it's broken. :-(
-## def getfillable(self):
-## return BUFFERSIZE - self.getfilled()
-
-def AudioDev():
- # Dynamically try to import and use a platform specific module.
- try:
- import al
- except ImportError:
- try:
- import sunaudiodev
- return Play_Audio_sun()
- except ImportError:
- try:
- import Audio_mac
- except ImportError:
- raise error, 'no audio device'
- else:
- return Audio_mac.Play_Audio_mac()
- else:
- return Play_Audio_sgi()
-
-def test(fn = None):
- import sys
- if sys.argv[1:]:
- fn = sys.argv[1]
- else:
- fn = 'f:just samples:just.aif'
- import aifc
- af = aifc.open(fn, 'r')
- print fn, af.getparams()
- p = AudioDev()
- p.setoutrate(af.getframerate())
- p.setsampwidth(af.getsampwidth())
- p.setnchannels(af.getnchannels())
- BUFSIZ = af.getframerate()/af.getsampwidth()/af.getnchannels()
- while 1:
- data = af.readframes(BUFSIZ)
- if not data: break
- print len(data)
- p.writeframes(data)
- p.wait()
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/base64.py b/sys/lib/python/base64.py
deleted file mode 100755
index c196cd8a8..000000000
--- a/sys/lib/python/base64.py
+++ /dev/null
@@ -1,359 +0,0 @@
-#! /usr/bin/env python
-
-"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
-
-# Modified 04-Oct-1995 by Jack Jansen to use binascii module
-# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
-
-import re
-import struct
-import binascii
-
-
-__all__ = [
- # Legacy interface exports traditional RFC 1521 Base64 encodings
- 'encode', 'decode', 'encodestring', 'decodestring',
- # Generalized interface for other encodings
- 'b64encode', 'b64decode', 'b32encode', 'b32decode',
- 'b16encode', 'b16decode',
- # Standard Base64 encoding
- 'standard_b64encode', 'standard_b64decode',
- # Some common Base64 alternatives. As referenced by RFC 3458, see thread
- # starting at:
- #
- # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
- 'urlsafe_b64encode', 'urlsafe_b64decode',
- ]
-
-_translation = [chr(_x) for _x in range(256)]
-EMPTYSTRING = ''
-
-
-def _translate(s, altchars):
- translation = _translation[:]
- for k, v in altchars.items():
- translation[ord(k)] = v
- return s.translate(''.join(translation))
-
-
-
-# Base64 encoding/decoding uses binascii
-
-def b64encode(s, altchars=None):
- """Encode a string using Base64.
-
- s is the string to encode. Optional altchars must be a string of at least
- length 2 (additional characters are ignored) which specifies an
- alternative alphabet for the '+' and '/' characters. This allows an
- application to e.g. generate url or filesystem safe Base64 strings.
-
- The encoded string is returned.
- """
- # Strip off the trailing newline
- encoded = binascii.b2a_base64(s)[:-1]
- if altchars is not None:
- return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
- return encoded
-
-
-def b64decode(s, altchars=None):
- """Decode a Base64 encoded string.
-
- s is the string to decode. Optional altchars must be a string of at least
- length 2 (additional characters are ignored) which specifies the
- alternative alphabet used instead of the '+' and '/' characters.
-
- The decoded string is returned. A TypeError is raised if s were
- incorrectly padded or if there are non-alphabet characters present in the
- string.
- """
- if altchars is not None:
- s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
- try:
- return binascii.a2b_base64(s)
- except binascii.Error, msg:
- # Transform this exception for consistency
- raise TypeError(msg)
-
-
-def standard_b64encode(s):
- """Encode a string using the standard Base64 alphabet.
-
- s is the string to encode. The encoded string is returned.
- """
- return b64encode(s)
-
-def standard_b64decode(s):
- """Decode a string encoded with the standard Base64 alphabet.
-
- s is the string to decode. The decoded string is returned. A TypeError
- is raised if the string is incorrectly padded or if there are non-alphabet
- characters present in the string.
- """
- return b64decode(s)
-
-def urlsafe_b64encode(s):
- """Encode a string using a url-safe Base64 alphabet.
-
- s is the string to encode. The encoded string is returned. The alphabet
- uses '-' instead of '+' and '_' instead of '/'.
- """
- return b64encode(s, '-_')
-
-def urlsafe_b64decode(s):
- """Decode a string encoded with the standard Base64 alphabet.
-
- s is the string to decode. The decoded string is returned. A TypeError
- is raised if the string is incorrectly padded or if there are non-alphabet
- characters present in the string.
-
- The alphabet uses '-' instead of '+' and '_' instead of '/'.
- """
- return b64decode(s, '-_')
-
-
-
-# Base32 encoding/decoding must be done in Python
-_b32alphabet = {
- 0: 'A', 9: 'J', 18: 'S', 27: '3',
- 1: 'B', 10: 'K', 19: 'T', 28: '4',
- 2: 'C', 11: 'L', 20: 'U', 29: '5',
- 3: 'D', 12: 'M', 21: 'V', 30: '6',
- 4: 'E', 13: 'N', 22: 'W', 31: '7',
- 5: 'F', 14: 'O', 23: 'X',
- 6: 'G', 15: 'P', 24: 'Y',
- 7: 'H', 16: 'Q', 25: 'Z',
- 8: 'I', 17: 'R', 26: '2',
- }
-
-_b32tab = _b32alphabet.items()
-_b32tab.sort()
-_b32tab = [v for k, v in _b32tab]
-_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
-
-
-def b32encode(s):
- """Encode a string using Base32.
-
- s is the string to encode. The encoded string is returned.
- """
- parts = []
- quanta, leftover = divmod(len(s), 5)
- # Pad the last quantum with zero bits if necessary
- if leftover:
- s += ('\0' * (5 - leftover))
- quanta += 1
- for i in range(quanta):
- # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
- # code is to process the 40 bits in units of 5 bits. So we take the 1
- # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
- # bits of c2 and tack them onto c3. The shifts and masks are intended
- # to give us values of exactly 5 bits in width.
- c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
- c2 += (c1 & 1) << 16 # 17 bits wide
- c3 += (c2 & 3) << 8 # 10 bits wide
- parts.extend([_b32tab[c1 >> 11], # bits 1 - 5
- _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
- _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
- _b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
- _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
- _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
- _b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
- _b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
- ])
- encoded = EMPTYSTRING.join(parts)
- # Adjust for any leftover partial quanta
- if leftover == 1:
- return encoded[:-6] + '======'
- elif leftover == 2:
- return encoded[:-4] + '===='
- elif leftover == 3:
- return encoded[:-3] + '==='
- elif leftover == 4:
- return encoded[:-1] + '='
- return encoded
-
-
-def b32decode(s, casefold=False, map01=None):
- """Decode a Base32 encoded string.
-
- s is the string to decode. Optional casefold is a flag specifying whether
- a lowercase alphabet is acceptable as input. For security purposes, the
- default is False.
-
- RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
- (oh), and for optional mapping of the digit 1 (one) to either the letter I
- (eye) or letter L (el). The optional argument map01 when not None,
- specifies which letter the digit 1 should be mapped to (when map01 is not
- None, the digit 0 is always mapped to the letter O). For security
- purposes the default is None, so that 0 and 1 are not allowed in the
- input.
-
- The decoded string is returned. A TypeError is raised if s were
- incorrectly padded or if there are non-alphabet characters present in the
- string.
- """
- quanta, leftover = divmod(len(s), 8)
- if leftover:
- raise TypeError('Incorrect padding')
- # Handle section 2.4 zero and one mapping. The flag map01 will be either
- # False, or the character to map the digit 1 (one) to. It should be
- # either L (el) or I (eye).
- if map01:
- s = _translate(s, {'0': 'O', '1': map01})
- if casefold:
- s = s.upper()
- # Strip off pad characters from the right. We need to count the pad
- # characters because this will tell us how many null bytes to remove from
- # the end of the decoded string.
- padchars = 0
- mo = re.search('(?P<pad>[=]*)$', s)
- if mo:
- padchars = len(mo.group('pad'))
- if padchars > 0:
- s = s[:-padchars]
- # Now decode the full quanta
- parts = []
- acc = 0
- shift = 35
- for c in s:
- val = _b32rev.get(c)
- if val is None:
- raise TypeError('Non-base32 digit found')
- acc += _b32rev[c] << shift
- shift -= 5
- if shift < 0:
- parts.append(binascii.unhexlify('%010x' % acc))
- acc = 0
- shift = 35
- # Process the last, partial quanta
- last = binascii.unhexlify('%010x' % acc)
- if padchars == 0:
- last = '' # No characters
- elif padchars == 1:
- last = last[:-1]
- elif padchars == 3:
- last = last[:-2]
- elif padchars == 4:
- last = last[:-3]
- elif padchars == 6:
- last = last[:-4]
- else:
- raise TypeError('Incorrect padding')
- parts.append(last)
- return EMPTYSTRING.join(parts)
-
-
-
-# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
-# lowercase. The RFC also recommends against accepting input case
-# insensitively.
-def b16encode(s):
- """Encode a string using Base16.
-
- s is the string to encode. The encoded string is returned.
- """
- return binascii.hexlify(s).upper()
-
-
-def b16decode(s, casefold=False):
- """Decode a Base16 encoded string.
-
- s is the string to decode. Optional casefold is a flag specifying whether
- a lowercase alphabet is acceptable as input. For security purposes, the
- default is False.
-
- The decoded string is returned. A TypeError is raised if s were
- incorrectly padded or if there are non-alphabet characters present in the
- string.
- """
- if casefold:
- s = s.upper()
- if re.search('[^0-9A-F]', s):
- raise TypeError('Non-base16 digit found')
- return binascii.unhexlify(s)
-
-
-
-# Legacy interface. This code could be cleaned up since I don't believe
-# binascii has any line length limitations. It just doesn't seem worth it
-# though.
-
-MAXLINESIZE = 76 # Excluding the CRLF
-MAXBINSIZE = (MAXLINESIZE//4)*3
-
-def encode(input, output):
- """Encode a file."""
- while True:
- s = input.read(MAXBINSIZE)
- if not s:
- break
- while len(s) < MAXBINSIZE:
- ns = input.read(MAXBINSIZE-len(s))
- if not ns:
- break
- s += ns
- line = binascii.b2a_base64(s)
- output.write(line)
-
-
-def decode(input, output):
- """Decode a file."""
- while True:
- line = input.readline()
- if not line:
- break
- s = binascii.a2b_base64(line)
- output.write(s)
-
-
-def encodestring(s):
- """Encode a string."""
- pieces = []
- for i in range(0, len(s), MAXBINSIZE):
- chunk = s[i : i + MAXBINSIZE]
- pieces.append(binascii.b2a_base64(chunk))
- return "".join(pieces)
-
-
-def decodestring(s):
- """Decode a string."""
- return binascii.a2b_base64(s)
-
-
-
-# Useable as a script...
-def test():
- """Small test program"""
- import sys, getopt
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'deut')
- except getopt.error, msg:
- sys.stdout = sys.stderr
- print msg
- print """usage: %s [-d|-e|-u|-t] [file|-]
- -d, -u: decode
- -e: encode (default)
- -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
- sys.exit(2)
- func = encode
- for o, a in opts:
- if o == '-e': func = encode
- if o == '-d': func = decode
- if o == '-u': func = decode
- if o == '-t': test1(); return
- if args and args[0] != '-':
- func(open(args[0], 'rb'), sys.stdout)
- else:
- func(sys.stdin, sys.stdout)
-
-
-def test1():
- s0 = "Aladdin:open sesame"
- s1 = encodestring(s0)
- s2 = decodestring(s1)
- print s0, repr(s1), s2
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/bdb.py b/sys/lib/python/bdb.py
deleted file mode 100644
index 0c56b63fa..000000000
--- a/sys/lib/python/bdb.py
+++ /dev/null
@@ -1,613 +0,0 @@
-"""Debugger basics"""
-
-import sys
-import os
-import types
-
-__all__ = ["BdbQuit","Bdb","Breakpoint"]
-
-class BdbQuit(Exception):
- """Exception to give up completely"""
-
-
-class Bdb:
-
- """Generic Python debugger base class.
-
- This class takes care of details of the trace facility;
- a derived class should implement user interaction.
- The standard debugger class (pdb.Pdb) is an example.
- """
-
- def __init__(self):
- self.breaks = {}
- self.fncache = {}
-
- def canonic(self, filename):
- if filename == "<" + filename[1:-1] + ">":
- return filename
- canonic = self.fncache.get(filename)
- if not canonic:
- canonic = os.path.abspath(filename)
- canonic = os.path.normcase(canonic)
- self.fncache[filename] = canonic
- return canonic
-
- def reset(self):
- import linecache
- linecache.checkcache()
- self.botframe = None
- self.stopframe = None
- self.returnframe = None
- self.quitting = 0
-
- def trace_dispatch(self, frame, event, arg):
- if self.quitting:
- return # None
- if event == 'line':
- return self.dispatch_line(frame)
- if event == 'call':
- return self.dispatch_call(frame, arg)
- if event == 'return':
- return self.dispatch_return(frame, arg)
- if event == 'exception':
- return self.dispatch_exception(frame, arg)
- if event == 'c_call':
- return self.trace_dispatch
- if event == 'c_exception':
- return self.trace_dispatch
- if event == 'c_return':
- return self.trace_dispatch
- print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
- return self.trace_dispatch
-
- def dispatch_line(self, frame):
- if self.stop_here(frame) or self.break_here(frame):
- self.user_line(frame)
- if self.quitting: raise BdbQuit
- return self.trace_dispatch
-
- def dispatch_call(self, frame, arg):
- # XXX 'arg' is no longer used
- if self.botframe is None:
- # First call of dispatch since reset()
- self.botframe = frame.f_back # (CT) Note that this may also be None!
- return self.trace_dispatch
- if not (self.stop_here(frame) or self.break_anywhere(frame)):
- # No need to trace this function
- return # None
- self.user_call(frame, arg)
- if self.quitting: raise BdbQuit
- return self.trace_dispatch
-
- def dispatch_return(self, frame, arg):
- if self.stop_here(frame) or frame == self.returnframe:
- self.user_return(frame, arg)
- if self.quitting: raise BdbQuit
- return self.trace_dispatch
-
- def dispatch_exception(self, frame, arg):
- if self.stop_here(frame):
- self.user_exception(frame, arg)
- if self.quitting: raise BdbQuit
- return self.trace_dispatch
-
- # Normally derived classes don't override the following
- # methods, but they may if they want to redefine the
- # definition of stopping and breakpoints.
-
- def stop_here(self, frame):
- # (CT) stopframe may now also be None, see dispatch_call.
- # (CT) the former test for None is therefore removed from here.
- if frame is self.stopframe:
- return True
- while frame is not None and frame is not self.stopframe:
- if frame is self.botframe:
- return True
- frame = frame.f_back
- return False
-
- def break_here(self, frame):
- filename = self.canonic(frame.f_code.co_filename)
- if not filename in self.breaks:
- return False
- lineno = frame.f_lineno
- if not lineno in self.breaks[filename]:
- # The line itself has no breakpoint, but maybe the line is the
- # first line of a function with breakpoint set by function name.
- lineno = frame.f_code.co_firstlineno
- if not lineno in self.breaks[filename]:
- return False
-
- # flag says ok to delete temp. bp
- (bp, flag) = effective(filename, lineno, frame)
- if bp:
- self.currentbp = bp.number
- if (flag and bp.temporary):
- self.do_clear(str(bp.number))
- return True
- else:
- return False
-
- def do_clear(self, arg):
- raise NotImplementedError, "subclass of bdb must implement do_clear()"
-
- def break_anywhere(self, frame):
- return self.breaks.has_key(
- self.canonic(frame.f_code.co_filename))
-
- # Derived classes should override the user_* methods
- # to gain control.
-
- def user_call(self, frame, argument_list):
- """This method is called when there is the remote possibility
- that we ever need to stop in this function."""
- pass
-
- def user_line(self, frame):
- """This method is called when we stop or break at this line."""
- pass
-
- def user_return(self, frame, return_value):
- """This method is called when a return trap is set here."""
- pass
-
- def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
- """This method is called if an exception occurs,
- but only if we are to stop at or just below this level."""
- pass
-
- # Derived classes and clients can call the following methods
- # to affect the stepping state.
-
- def set_step(self):
- """Stop after one line of code."""
- self.stopframe = None
- self.returnframe = None
- self.quitting = 0
-
- def set_next(self, frame):
- """Stop on the next line in or below the given frame."""
- self.stopframe = frame
- self.returnframe = None
- self.quitting = 0
-
- def set_return(self, frame):
- """Stop when returning from the given frame."""
- self.stopframe = frame.f_back
- self.returnframe = frame
- self.quitting = 0
-
- def set_trace(self, frame=None):
- """Start debugging from `frame`.
-
- If frame is not specified, debugging starts from caller's frame.
- """
- if frame is None:
- frame = sys._getframe().f_back
- self.reset()
- while frame:
- frame.f_trace = self.trace_dispatch
- self.botframe = frame
- frame = frame.f_back
- self.set_step()
- sys.settrace(self.trace_dispatch)
-
- def set_continue(self):
- # Don't stop except at breakpoints or when finished
- self.stopframe = self.botframe
- self.returnframe = None
- self.quitting = 0
- if not self.breaks:
- # no breakpoints; run without debugger overhead
- sys.settrace(None)
- frame = sys._getframe().f_back
- while frame and frame is not self.botframe:
- del frame.f_trace
- frame = frame.f_back
-
- def set_quit(self):
- self.stopframe = self.botframe
- self.returnframe = None
- self.quitting = 1
- sys.settrace(None)
-
- # Derived classes and clients can call the following methods
- # to manipulate breakpoints. These methods return an
- # error message is something went wrong, None if all is well.
- # Set_break prints out the breakpoint line and file:lineno.
- # Call self.get_*break*() to see the breakpoints or better
- # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
-
- def set_break(self, filename, lineno, temporary=0, cond = None,
- funcname=None):
- filename = self.canonic(filename)
- import linecache # Import as late as possible
- line = linecache.getline(filename, lineno)
- if not line:
- return 'Line %s:%d does not exist' % (filename,
- lineno)
- if not filename in self.breaks:
- self.breaks[filename] = []
- list = self.breaks[filename]
- if not lineno in list:
- list.append(lineno)
- bp = Breakpoint(filename, lineno, temporary, cond, funcname)
-
- def clear_break(self, filename, lineno):
- filename = self.canonic(filename)
- if not filename in self.breaks:
- return 'There are no breakpoints in %s' % filename
- if lineno not in self.breaks[filename]:
- return 'There is no breakpoint at %s:%d' % (filename,
- lineno)
- # If there's only one bp in the list for that file,line
- # pair, then remove the breaks entry
- for bp in Breakpoint.bplist[filename, lineno][:]:
- bp.deleteMe()
- if not Breakpoint.bplist.has_key((filename, lineno)):
- self.breaks[filename].remove(lineno)
- if not self.breaks[filename]:
- del self.breaks[filename]
-
- def clear_bpbynumber(self, arg):
- try:
- number = int(arg)
- except:
- return 'Non-numeric breakpoint number (%s)' % arg
- try:
- bp = Breakpoint.bpbynumber[number]
- except IndexError:
- return 'Breakpoint number (%d) out of range' % number
- if not bp:
- return 'Breakpoint (%d) already deleted' % number
- self.clear_break(bp.file, bp.line)
-
- def clear_all_file_breaks(self, filename):
- filename = self.canonic(filename)
- if not filename in self.breaks:
- return 'There are no breakpoints in %s' % filename
- for line in self.breaks[filename]:
- blist = Breakpoint.bplist[filename, line]
- for bp in blist:
- bp.deleteMe()
- del self.breaks[filename]
-
- def clear_all_breaks(self):
- if not self.breaks:
- return 'There are no breakpoints'
- for bp in Breakpoint.bpbynumber:
- if bp:
- bp.deleteMe()
- self.breaks = {}
-
- def get_break(self, filename, lineno):
- filename = self.canonic(filename)
- return filename in self.breaks and \
- lineno in self.breaks[filename]
-
- def get_breaks(self, filename, lineno):
- filename = self.canonic(filename)
- return filename in self.breaks and \
- lineno in self.breaks[filename] and \
- Breakpoint.bplist[filename, lineno] or []
-
- def get_file_breaks(self, filename):
- filename = self.canonic(filename)
- if filename in self.breaks:
- return self.breaks[filename]
- else:
- return []
-
- def get_all_breaks(self):
- return self.breaks
-
- # Derived classes and clients can call the following method
- # to get a data structure representing a stack trace.
-
- def get_stack(self, f, t):
- stack = []
- if t and t.tb_frame is f:
- t = t.tb_next
- while f is not None:
- stack.append((f, f.f_lineno))
- if f is self.botframe:
- break
- f = f.f_back
- stack.reverse()
- i = max(0, len(stack) - 1)
- while t is not None:
- stack.append((t.tb_frame, t.tb_lineno))
- t = t.tb_next
- return stack, i
-
- #
-
- def format_stack_entry(self, frame_lineno, lprefix=': '):
- import linecache, repr
- frame, lineno = frame_lineno
- filename = self.canonic(frame.f_code.co_filename)
- s = '%s(%r)' % (filename, lineno)
- if frame.f_code.co_name:
- s = s + frame.f_code.co_name
- else:
- s = s + "<lambda>"
- if '__args__' in frame.f_locals:
- args = frame.f_locals['__args__']
- else:
- args = None
- if args:
- s = s + repr.repr(args)
- else:
- s = s + '()'
- if '__return__' in frame.f_locals:
- rv = frame.f_locals['__return__']
- s = s + '->'
- s = s + repr.repr(rv)
- line = linecache.getline(filename, lineno)
- if line: s = s + lprefix + line.strip()
- return s
-
- # The following two methods can be called by clients to use
- # a debugger to debug a statement, given as a string.
-
- def run(self, cmd, globals=None, locals=None):
- if globals is None:
- import __main__
- globals = __main__.__dict__
- if locals is None:
- locals = globals
- self.reset()
- sys.settrace(self.trace_dispatch)
- if not isinstance(cmd, types.CodeType):
- cmd = cmd+'\n'
- try:
- try:
- exec cmd in globals, locals
- except BdbQuit:
- pass
- finally:
- self.quitting = 1
- sys.settrace(None)
-
- def runeval(self, expr, globals=None, locals=None):
- if globals is None:
- import __main__
- globals = __main__.__dict__
- if locals is None:
- locals = globals
- self.reset()
- sys.settrace(self.trace_dispatch)
- if not isinstance(expr, types.CodeType):
- expr = expr+'\n'
- try:
- try:
- return eval(expr, globals, locals)
- except BdbQuit:
- pass
- finally:
- self.quitting = 1
- sys.settrace(None)
-
- def runctx(self, cmd, globals, locals):
- # B/W compatibility
- self.run(cmd, globals, locals)
-
- # This method is more useful to debug a single function call.
-
- def runcall(self, func, *args, **kwds):
- self.reset()
- sys.settrace(self.trace_dispatch)
- res = None
- try:
- try:
- res = func(*args, **kwds)
- except BdbQuit:
- pass
- finally:
- self.quitting = 1
- sys.settrace(None)
- return res
-
-
-def set_trace():
- Bdb().set_trace()
-
-
-class Breakpoint:
-
- """Breakpoint class
-
- Implements temporary breakpoints, ignore counts, disabling and
- (re)-enabling, and conditionals.
-
- Breakpoints are indexed by number through bpbynumber and by
- the file,line tuple using bplist. The former points to a
- single instance of class Breakpoint. The latter points to a
- list of such instances since there may be more than one
- breakpoint per line.
-
- """
-
- # XXX Keeping state in the class is a mistake -- this means
- # you cannot have more than one active Bdb instance.
-
- next = 1 # Next bp to be assigned
- bplist = {} # indexed by (file, lineno) tuple
- bpbynumber = [None] # Each entry is None or an instance of Bpt
- # index 0 is unused, except for marking an
- # effective break .... see effective()
-
- def __init__(self, file, line, temporary=0, cond=None, funcname=None):
- self.funcname = funcname
- # Needed if funcname is not None.
- self.func_first_executable_line = None
- self.file = file # This better be in canonical form!
- self.line = line
- self.temporary = temporary
- self.cond = cond
- self.enabled = 1
- self.ignore = 0
- self.hits = 0
- self.number = Breakpoint.next
- Breakpoint.next = Breakpoint.next + 1
- # Build the two lists
- self.bpbynumber.append(self)
- if self.bplist.has_key((file, line)):
- self.bplist[file, line].append(self)
- else:
- self.bplist[file, line] = [self]
-
-
- def deleteMe(self):
- index = (self.file, self.line)
- self.bpbynumber[self.number] = None # No longer in list
- self.bplist[index].remove(self)
- if not self.bplist[index]:
- # No more bp for this f:l combo
- del self.bplist[index]
-
- def enable(self):
- self.enabled = 1
-
- def disable(self):
- self.enabled = 0
-
- def bpprint(self, out=None):
- if out is None:
- out = sys.stdout
- if self.temporary:
- disp = 'del '
- else:
- disp = 'keep '
- if self.enabled:
- disp = disp + 'yes '
- else:
- disp = disp + 'no '
- print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
- self.file, self.line)
- if self.cond:
- print >>out, '\tstop only if %s' % (self.cond,)
- if self.ignore:
- print >>out, '\tignore next %d hits' % (self.ignore)
- if (self.hits):
- if (self.hits > 1): ss = 's'
- else: ss = ''
- print >>out, ('\tbreakpoint already hit %d time%s' %
- (self.hits, ss))
-
-# -----------end of Breakpoint class----------
-
-def checkfuncname(b, frame):
- """Check whether we should break here because of `b.funcname`."""
- if not b.funcname:
- # Breakpoint was set via line number.
- if b.line != frame.f_lineno:
- # Breakpoint was set at a line with a def statement and the function
- # defined is called: don't break.
- return False
- return True
-
- # Breakpoint set via function name.
-
- if frame.f_code.co_name != b.funcname:
- # It's not a function call, but rather execution of def statement.
- return False
-
- # We are in the right frame.
- if not b.func_first_executable_line:
- # The function is entered for the 1st time.
- b.func_first_executable_line = frame.f_lineno
-
- if b.func_first_executable_line != frame.f_lineno:
- # But we are not at the first line number: don't break.
- return False
- return True
-
-# Determines if there is an effective (active) breakpoint at this
-# line of code. Returns breakpoint number or 0 if none
-def effective(file, line, frame):
- """Determine which breakpoint for this file:line is to be acted upon.
-
- Called only if we know there is a bpt at this
- location. Returns breakpoint that was triggered and a flag
- that indicates if it is ok to delete a temporary bp.
-
- """
- possibles = Breakpoint.bplist[file,line]
- for i in range(0, len(possibles)):
- b = possibles[i]
- if b.enabled == 0:
- continue
- if not checkfuncname(b, frame):
- continue
- # Count every hit when bp is enabled
- b.hits = b.hits + 1
- if not b.cond:
- # If unconditional, and ignoring,
- # go on to next, else break
- if b.ignore > 0:
- b.ignore = b.ignore -1
- continue
- else:
- # breakpoint and marker that's ok
- # to delete if temporary
- return (b,1)
- else:
- # Conditional bp.
- # Ignore count applies only to those bpt hits where the
- # condition evaluates to true.
- try:
- val = eval(b.cond, frame.f_globals,
- frame.f_locals)
- if val:
- if b.ignore > 0:
- b.ignore = b.ignore -1
- # continue
- else:
- return (b,1)
- # else:
- # continue
- except:
- # if eval fails, most conservative
- # thing is to stop on breakpoint
- # regardless of ignore count.
- # Don't delete temporary,
- # as another hint to user.
- return (b,0)
- return (None, None)
-
-# -------------------- testing --------------------
-
-class Tdb(Bdb):
- def user_call(self, frame, args):
- name = frame.f_code.co_name
- if not name: name = '???'
- print '+++ call', name, args
- def user_line(self, frame):
- import linecache
- name = frame.f_code.co_name
- if not name: name = '???'
- fn = self.canonic(frame.f_code.co_filename)
- line = linecache.getline(fn, frame.f_lineno)
- print '+++', fn, frame.f_lineno, name, ':', line.strip()
- def user_return(self, frame, retval):
- print '+++ return', retval
- def user_exception(self, frame, exc_stuff):
- print '+++ exception', exc_stuff
- self.set_continue()
-
-def foo(n):
- print 'foo(', n, ')'
- x = bar(n*10)
- print 'bar returned', x
-
-def bar(a):
- print 'bar(', a, ')'
- return a/2
-
-def test():
- t = Tdb()
- t.run('import bdb; bdb.foo(10)')
-
-# end
diff --git a/sys/lib/python/binhex.py b/sys/lib/python/binhex.py
deleted file mode 100644
index 0f3e3c47d..000000000
--- a/sys/lib/python/binhex.py
+++ /dev/null
@@ -1,527 +0,0 @@
-"""Macintosh binhex compression/decompression.
-
-easy interface:
-binhex(inputfilename, outputfilename)
-hexbin(inputfilename, outputfilename)
-"""
-
-#
-# Jack Jansen, CWI, August 1995.
-#
-# The module is supposed to be as compatible as possible. Especially the
-# easy interface should work "as expected" on any platform.
-# XXXX Note: currently, textfiles appear in mac-form on all platforms.
-# We seem to lack a simple character-translate in python.
-# (we should probably use ISO-Latin-1 on all but the mac platform).
-# XXXX The simple routines are too simple: they expect to hold the complete
-# files in-core. Should be fixed.
-# XXXX It would be nice to handle AppleDouble format on unix
-# (for servers serving macs).
-# XXXX I don't understand what happens when you get 0x90 times the same byte on
-# input. The resulting code (xx 90 90) would appear to be interpreted as an
-# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
-#
-import sys
-import os
-import struct
-import binascii
-
-__all__ = ["binhex","hexbin","Error"]
-
-class Error(Exception):
- pass
-
-# States (what have we written)
-[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
-
-# Various constants
-REASONABLY_LARGE=32768 # Minimal amount we pass the rle-coder
-LINELEN=64
-RUNCHAR=chr(0x90) # run-length introducer
-
-#
-# This code is no longer byte-order dependent
-
-#
-# Workarounds for non-mac machines.
-try:
- from Carbon.File import FSSpec, FInfo
- from MacOS import openrf
-
- def getfileinfo(name):
- finfo = FSSpec(name).FSpGetFInfo()
- dir, file = os.path.split(name)
- # XXX Get resource/data sizes
- fp = open(name, 'rb')
- fp.seek(0, 2)
- dlen = fp.tell()
- fp = openrf(name, '*rb')
- fp.seek(0, 2)
- rlen = fp.tell()
- return file, finfo, dlen, rlen
-
- def openrsrc(name, *mode):
- if not mode:
- mode = '*rb'
- else:
- mode = '*' + mode[0]
- return openrf(name, mode)
-
-except ImportError:
- #
- # Glue code for non-macintosh usage
- #
-
- class FInfo:
- def __init__(self):
- self.Type = '????'
- self.Creator = '????'
- self.Flags = 0
-
- def getfileinfo(name):
- finfo = FInfo()
- # Quick check for textfile
- fp = open(name)
- data = open(name).read(256)
- for c in data:
- if not c.isspace() and (c<' ' or ord(c) > 0x7f):
- break
- else:
- finfo.Type = 'TEXT'
- fp.seek(0, 2)
- dsize = fp.tell()
- fp.close()
- dir, file = os.path.split(name)
- file = file.replace(':', '-', 1)
- return file, finfo, dsize, 0
-
- class openrsrc:
- def __init__(self, *args):
- pass
-
- def read(self, *args):
- return ''
-
- def write(self, *args):
- pass
-
- def close(self):
- pass
-
-class _Hqxcoderengine:
- """Write data to the coder in 3-byte chunks"""
-
- def __init__(self, ofp):
- self.ofp = ofp
- self.data = ''
- self.hqxdata = ''
- self.linelen = LINELEN-1
-
- def write(self, data):
- self.data = self.data + data
- datalen = len(self.data)
- todo = (datalen//3)*3
- data = self.data[:todo]
- self.data = self.data[todo:]
- if not data:
- return
- self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
- self._flush(0)
-
- def _flush(self, force):
- first = 0
- while first <= len(self.hqxdata)-self.linelen:
- last = first + self.linelen
- self.ofp.write(self.hqxdata[first:last]+'\n')
- self.linelen = LINELEN
- first = last
- self.hqxdata = self.hqxdata[first:]
- if force:
- self.ofp.write(self.hqxdata + ':\n')
-
- def close(self):
- if self.data:
- self.hqxdata = \
- self.hqxdata + binascii.b2a_hqx(self.data)
- self._flush(1)
- self.ofp.close()
- del self.ofp
-
-class _Rlecoderengine:
- """Write data to the RLE-coder in suitably large chunks"""
-
- def __init__(self, ofp):
- self.ofp = ofp
- self.data = ''
-
- def write(self, data):
- self.data = self.data + data
- if len(self.data) < REASONABLY_LARGE:
- return
- rledata = binascii.rlecode_hqx(self.data)
- self.ofp.write(rledata)
- self.data = ''
-
- def close(self):
- if self.data:
- rledata = binascii.rlecode_hqx(self.data)
- self.ofp.write(rledata)
- self.ofp.close()
- del self.ofp
-
-class BinHex:
- def __init__(self, (name, finfo, dlen, rlen), ofp):
- if type(ofp) == type(''):
- ofname = ofp
- ofp = open(ofname, 'w')
- if os.name == 'mac':
- fss = FSSpec(ofname)
- fss.SetCreatorType('BnHq', 'TEXT')
- ofp.write('(This file must be converted with BinHex 4.0)\n\n:')
- hqxer = _Hqxcoderengine(ofp)
- self.ofp = _Rlecoderengine(hqxer)
- self.crc = 0
- if finfo is None:
- finfo = FInfo()
- self.dlen = dlen
- self.rlen = rlen
- self._writeinfo(name, finfo)
- self.state = _DID_HEADER
-
- def _writeinfo(self, name, finfo):
- nl = len(name)
- if nl > 63:
- raise Error, 'Filename too long'
- d = chr(nl) + name + '\0'
- d2 = finfo.Type + finfo.Creator
-
- # Force all structs to be packed with big-endian
- d3 = struct.pack('>h', finfo.Flags)
- d4 = struct.pack('>ii', self.dlen, self.rlen)
- info = d + d2 + d3 + d4
- self._write(info)
- self._writecrc()
-
- def _write(self, data):
- self.crc = binascii.crc_hqx(data, self.crc)
- self.ofp.write(data)
-
- def _writecrc(self):
- # XXXX Should this be here??
- # self.crc = binascii.crc_hqx('\0\0', self.crc)
- if self.crc < 0:
- fmt = '>h'
- else:
- fmt = '>H'
- self.ofp.write(struct.pack(fmt, self.crc))
- self.crc = 0
-
- def write(self, data):
- if self.state != _DID_HEADER:
- raise Error, 'Writing data at the wrong time'
- self.dlen = self.dlen - len(data)
- self._write(data)
-
- def close_data(self):
- if self.dlen != 0:
- raise Error, 'Incorrect data size, diff=%r' % (self.rlen,)
- self._writecrc()
- self.state = _DID_DATA
-
- def write_rsrc(self, data):
- if self.state < _DID_DATA:
- self.close_data()
- if self.state != _DID_DATA:
- raise Error, 'Writing resource data at the wrong time'
- self.rlen = self.rlen - len(data)
- self._write(data)
-
- def close(self):
- if self.state < _DID_DATA:
- self.close_data()
- if self.state != _DID_DATA:
- raise Error, 'Close at the wrong time'
- if self.rlen != 0:
- raise Error, \
- "Incorrect resource-datasize, diff=%r" % (self.rlen,)
- self._writecrc()
- self.ofp.close()
- self.state = None
- del self.ofp
-
-def binhex(inp, out):
- """(infilename, outfilename) - Create binhex-encoded copy of a file"""
- finfo = getfileinfo(inp)
- ofp = BinHex(finfo, out)
-
- ifp = open(inp, 'rb')
- # XXXX Do textfile translation on non-mac systems
- while 1:
- d = ifp.read(128000)
- if not d: break
- ofp.write(d)
- ofp.close_data()
- ifp.close()
-
- ifp = openrsrc(inp, 'rb')
- while 1:
- d = ifp.read(128000)
- if not d: break
- ofp.write_rsrc(d)
- ofp.close()
- ifp.close()
-
-class _Hqxdecoderengine:
- """Read data via the decoder in 4-byte chunks"""
-
- def __init__(self, ifp):
- self.ifp = ifp
- self.eof = 0
-
- def read(self, totalwtd):
- """Read at least wtd bytes (or until EOF)"""
- decdata = ''
- wtd = totalwtd
- #
- # The loop here is convoluted, since we don't really now how
- # much to decode: there may be newlines in the incoming data.
- while wtd > 0:
- if self.eof: return decdata
- wtd = ((wtd+2)//3)*4
- data = self.ifp.read(wtd)
- #
- # Next problem: there may not be a complete number of
- # bytes in what we pass to a2b. Solve by yet another
- # loop.
- #
- while 1:
- try:
- decdatacur, self.eof = \
- binascii.a2b_hqx(data)
- break
- except binascii.Incomplete:
- pass
- newdata = self.ifp.read(1)
- if not newdata:
- raise Error, \
- 'Premature EOF on binhex file'
- data = data + newdata
- decdata = decdata + decdatacur
- wtd = totalwtd - len(decdata)
- if not decdata and not self.eof:
- raise Error, 'Premature EOF on binhex file'
- return decdata
-
- def close(self):
- self.ifp.close()
-
-class _Rledecoderengine:
- """Read data via the RLE-coder"""
-
- def __init__(self, ifp):
- self.ifp = ifp
- self.pre_buffer = ''
- self.post_buffer = ''
- self.eof = 0
-
- def read(self, wtd):
- if wtd > len(self.post_buffer):
- self._fill(wtd-len(self.post_buffer))
- rv = self.post_buffer[:wtd]
- self.post_buffer = self.post_buffer[wtd:]
- return rv
-
- def _fill(self, wtd):
- self.pre_buffer = self.pre_buffer + self.ifp.read(wtd+4)
- if self.ifp.eof:
- self.post_buffer = self.post_buffer + \
- binascii.rledecode_hqx(self.pre_buffer)
- self.pre_buffer = ''
- return
-
- #
- # Obfuscated code ahead. We have to take care that we don't
- # end up with an orphaned RUNCHAR later on. So, we keep a couple
- # of bytes in the buffer, depending on what the end of
- # the buffer looks like:
- # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
- # '?\220' - Keep 2 bytes: repeated something-else
- # '\220\0' - Escaped \220: Keep 2 bytes.
- # '?\220?' - Complete repeat sequence: decode all
- # otherwise: keep 1 byte.
- #
- mark = len(self.pre_buffer)
- if self.pre_buffer[-3:] == RUNCHAR + '\0' + RUNCHAR:
- mark = mark - 3
- elif self.pre_buffer[-1] == RUNCHAR:
- mark = mark - 2
- elif self.pre_buffer[-2:] == RUNCHAR + '\0':
- mark = mark - 2
- elif self.pre_buffer[-2] == RUNCHAR:
- pass # Decode all
- else:
- mark = mark - 1
-
- self.post_buffer = self.post_buffer + \
- binascii.rledecode_hqx(self.pre_buffer[:mark])
- self.pre_buffer = self.pre_buffer[mark:]
-
- def close(self):
- self.ifp.close()
-
-class HexBin:
- def __init__(self, ifp):
- if type(ifp) == type(''):
- ifp = open(ifp)
- #
- # Find initial colon.
- #
- while 1:
- ch = ifp.read(1)
- if not ch:
- raise Error, "No binhex data found"
- # Cater for \r\n terminated lines (which show up as \n\r, hence
- # all lines start with \r)
- if ch == '\r':
- continue
- if ch == ':':
- break
- if ch != '\n':
- dummy = ifp.readline()
-
- hqxifp = _Hqxdecoderengine(ifp)
- self.ifp = _Rledecoderengine(hqxifp)
- self.crc = 0
- self._readheader()
-
- def _read(self, len):
- data = self.ifp.read(len)
- self.crc = binascii.crc_hqx(data, self.crc)
- return data
-
- def _checkcrc(self):
- filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
- #self.crc = binascii.crc_hqx('\0\0', self.crc)
- # XXXX Is this needed??
- self.crc = self.crc & 0xffff
- if filecrc != self.crc:
- raise Error, 'CRC error, computed %x, read %x' \
- %(self.crc, filecrc)
- self.crc = 0
-
- def _readheader(self):
- len = self._read(1)
- fname = self._read(ord(len))
- rest = self._read(1+4+4+2+4+4)
- self._checkcrc()
-
- type = rest[1:5]
- creator = rest[5:9]
- flags = struct.unpack('>h', rest[9:11])[0]
- self.dlen = struct.unpack('>l', rest[11:15])[0]
- self.rlen = struct.unpack('>l', rest[15:19])[0]
-
- self.FName = fname
- self.FInfo = FInfo()
- self.FInfo.Creator = creator
- self.FInfo.Type = type
- self.FInfo.Flags = flags
-
- self.state = _DID_HEADER
-
- def read(self, *n):
- if self.state != _DID_HEADER:
- raise Error, 'Read data at wrong time'
- if n:
- n = n[0]
- n = min(n, self.dlen)
- else:
- n = self.dlen
- rv = ''
- while len(rv) < n:
- rv = rv + self._read(n-len(rv))
- self.dlen = self.dlen - n
- return rv
-
- def close_data(self):
- if self.state != _DID_HEADER:
- raise Error, 'close_data at wrong time'
- if self.dlen:
- dummy = self._read(self.dlen)
- self._checkcrc()
- self.state = _DID_DATA
-
- def read_rsrc(self, *n):
- if self.state == _DID_HEADER:
- self.close_data()
- if self.state != _DID_DATA:
- raise Error, 'Read resource data at wrong time'
- if n:
- n = n[0]
- n = min(n, self.rlen)
- else:
- n = self.rlen
- self.rlen = self.rlen - n
- return self._read(n)
-
- def close(self):
- if self.rlen:
- dummy = self.read_rsrc(self.rlen)
- self._checkcrc()
- self.state = _DID_RSRC
- self.ifp.close()
-
-def hexbin(inp, out):
- """(infilename, outfilename) - Decode binhexed file"""
- ifp = HexBin(inp)
- finfo = ifp.FInfo
- if not out:
- out = ifp.FName
- if os.name == 'mac':
- ofss = FSSpec(out)
- out = ofss.as_pathname()
-
- ofp = open(out, 'wb')
- # XXXX Do translation on non-mac systems
- while 1:
- d = ifp.read(128000)
- if not d: break
- ofp.write(d)
- ofp.close()
- ifp.close_data()
-
- d = ifp.read_rsrc(128000)
- if d:
- ofp = openrsrc(out, 'wb')
- ofp.write(d)
- while 1:
- d = ifp.read_rsrc(128000)
- if not d: break
- ofp.write(d)
- ofp.close()
-
- if os.name == 'mac':
- nfinfo = ofss.GetFInfo()
- nfinfo.Creator = finfo.Creator
- nfinfo.Type = finfo.Type
- nfinfo.Flags = finfo.Flags
- ofss.SetFInfo(nfinfo)
-
- ifp.close()
-
-def _test():
- if os.name == 'mac':
- import macfs
- fss, ok = macfs.PromptGetFile('File to convert:')
- if not ok:
- sys.exit(0)
- fname = fss.as_pathname()
- else:
- fname = sys.argv[1]
- binhex(fname, fname+'.hqx')
- hexbin(fname+'.hqx', fname+'.viahqx')
- #hexbin(fname, fname+'.unpacked')
- sys.exit(1)
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/bisect.py b/sys/lib/python/bisect.py
deleted file mode 100644
index e4a21336f..000000000
--- a/sys/lib/python/bisect.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""Bisection algorithms."""
-
-def insort_right(a, x, lo=0, hi=None):
- """Insert item x in list a, and keep it sorted assuming a is sorted.
-
- If x is already in a, insert it to the right of the rightmost x.
-
- Optional args lo (default 0) and hi (default len(a)) bound the
- slice of a to be searched.
- """
-
- if hi is None:
- hi = len(a)
- while lo < hi:
- mid = (lo+hi)//2
- if x < a[mid]: hi = mid
- else: lo = mid+1
- a.insert(lo, x)
-
-insort = insort_right # backward compatibility
-
-def bisect_right(a, x, lo=0, hi=None):
- """Return the index where to insert item x in list a, assuming a is sorted.
-
- The return value i is such that all e in a[:i] have e <= x, and all e in
- a[i:] have e > x. So if x already appears in the list, a.insert(x) will
- insert just after the rightmost x already there.
-
- Optional args lo (default 0) and hi (default len(a)) bound the
- slice of a to be searched.
- """
-
- if hi is None:
- hi = len(a)
- while lo < hi:
- mid = (lo+hi)//2
- if x < a[mid]: hi = mid
- else: lo = mid+1
- return lo
-
-bisect = bisect_right # backward compatibility
-
-def insort_left(a, x, lo=0, hi=None):
- """Insert item x in list a, and keep it sorted assuming a is sorted.
-
- If x is already in a, insert it to the left of the leftmost x.
-
- Optional args lo (default 0) and hi (default len(a)) bound the
- slice of a to be searched.
- """
-
- if hi is None:
- hi = len(a)
- while lo < hi:
- mid = (lo+hi)//2
- if a[mid] < x: lo = mid+1
- else: hi = mid
- a.insert(lo, x)
-
-
-def bisect_left(a, x, lo=0, hi=None):
- """Return the index where to insert item x in list a, assuming a is sorted.
-
- The return value i is such that all e in a[:i] have e < x, and all e in
- a[i:] have e >= x. So if x already appears in the list, a.insert(x) will
- insert just before the leftmost x already there.
-
- Optional args lo (default 0) and hi (default len(a)) bound the
- slice of a to be searched.
- """
-
- if hi is None:
- hi = len(a)
- while lo < hi:
- mid = (lo+hi)//2
- if a[mid] < x: lo = mid+1
- else: hi = mid
- return lo
-
-# Overwrite above definitions with a fast C implementation
-try:
- from _bisect import bisect_right, bisect_left, insort_left, insort_right, insort, bisect
-except ImportError:
- pass
diff --git a/sys/lib/python/bsddb/__init__.py b/sys/lib/python/bsddb/__init__.py
deleted file mode 100644
index cf3266886..000000000
--- a/sys/lib/python/bsddb/__init__.py
+++ /dev/null
@@ -1,397 +0,0 @@
-#----------------------------------------------------------------------
-# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
-# and Andrew Kuchling. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# o Redistributions of source code must retain the above copyright
-# notice, this list of conditions, and the disclaimer that follows.
-#
-# o Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions, and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# o Neither the name of Digital Creations nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
-# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
-# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-# DAMAGE.
-#----------------------------------------------------------------------
-
-
-"""Support for BerkeleyDB 3.3 through 4.4 with a simple interface.
-
-For the full featured object oriented interface use the bsddb.db module
-instead. It mirrors the Sleepycat BerkeleyDB C API.
-"""
-
-try:
- if __name__ == 'bsddb3':
- # import _pybsddb binary as it should be the more recent version from
- # a standalone pybsddb addon package than the version included with
- # python as bsddb._bsddb.
- import _pybsddb
- _bsddb = _pybsddb
- from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
- else:
- import _bsddb
- from bsddb.dbutils import DeadlockWrap as _DeadlockWrap
-except ImportError:
- # Remove ourselves from sys.modules
- import sys
- del sys.modules[__name__]
- raise
-
-# bsddb3 calls it db, but provide _db for backwards compatibility
-db = _db = _bsddb
-__version__ = db.__version__
-
-error = db.DBError # So bsddb.error will mean something...
-
-#----------------------------------------------------------------------
-
-import sys, os
-
-# for backwards compatibility with python versions older than 2.3, the
-# iterator interface is dynamically defined and added using a mixin
-# class. old python can't tokenize it due to the yield keyword.
-if sys.version >= '2.3':
- import UserDict
- from weakref import ref
- exec """
-class _iter_mixin(UserDict.DictMixin):
- def _make_iter_cursor(self):
- cur = _DeadlockWrap(self.db.cursor)
- key = id(cur)
- self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
- return cur
-
- def _gen_cref_cleaner(self, key):
- # use generate the function for the weakref callback here
- # to ensure that we do not hold a strict reference to cur
- # in the callback.
- return lambda ref: self._cursor_refs.pop(key, None)
-
- def __iter__(self):
- try:
- cur = self._make_iter_cursor()
-
- # FIXME-20031102-greg: race condition. cursor could
- # be closed by another thread before this call.
-
- # since we're only returning keys, we call the cursor
- # methods with flags=0, dlen=0, dofs=0
- key = _DeadlockWrap(cur.first, 0,0,0)[0]
- yield key
-
- next = cur.next
- while 1:
- try:
- key = _DeadlockWrap(next, 0,0,0)[0]
- yield key
- except _bsddb.DBCursorClosedError:
- cur = self._make_iter_cursor()
- # FIXME-20031101-greg: race condition. cursor could
- # be closed by another thread before this call.
- _DeadlockWrap(cur.set, key,0,0,0)
- next = cur.next
- except _bsddb.DBNotFoundError:
- return
- except _bsddb.DBCursorClosedError:
- # the database was modified during iteration. abort.
- return
-
- def iteritems(self):
- if not self.db:
- return
- try:
- cur = self._make_iter_cursor()
-
- # FIXME-20031102-greg: race condition. cursor could
- # be closed by another thread before this call.
-
- kv = _DeadlockWrap(cur.first)
- key = kv[0]
- yield kv
-
- next = cur.next
- while 1:
- try:
- kv = _DeadlockWrap(next)
- key = kv[0]
- yield kv
- except _bsddb.DBCursorClosedError:
- cur = self._make_iter_cursor()
- # FIXME-20031101-greg: race condition. cursor could
- # be closed by another thread before this call.
- _DeadlockWrap(cur.set, key,0,0,0)
- next = cur.next
- except _bsddb.DBNotFoundError:
- return
- except _bsddb.DBCursorClosedError:
- # the database was modified during iteration. abort.
- return
-"""
-else:
- class _iter_mixin: pass
-
-
-class _DBWithCursor(_iter_mixin):
- """
- A simple wrapper around DB that makes it look like the bsddbobject in
- the old module. It uses a cursor as needed to provide DB traversal.
- """
- def __init__(self, db):
- self.db = db
- self.db.set_get_returns_none(0)
-
- # FIXME-20031101-greg: I believe there is still the potential
- # for deadlocks in a multithreaded environment if someone
- # attempts to use the any of the cursor interfaces in one
- # thread while doing a put or delete in another thread. The
- # reason is that _checkCursor and _closeCursors are not atomic
- # operations. Doing our own locking around self.dbc,
- # self.saved_dbc_key and self._cursor_refs could prevent this.
- # TODO: A test case demonstrating the problem needs to be written.
-
- # self.dbc is a DBCursor object used to implement the
- # first/next/previous/last/set_location methods.
- self.dbc = None
- self.saved_dbc_key = None
-
- # a collection of all DBCursor objects currently allocated
- # by the _iter_mixin interface.
- self._cursor_refs = {}
-
- def __del__(self):
- self.close()
-
- def _checkCursor(self):
- if self.dbc is None:
- self.dbc = _DeadlockWrap(self.db.cursor)
- if self.saved_dbc_key is not None:
- _DeadlockWrap(self.dbc.set, self.saved_dbc_key)
- self.saved_dbc_key = None
-
- # This method is needed for all non-cursor DB calls to avoid
- # BerkeleyDB deadlocks (due to being opened with DB_INIT_LOCK
- # and DB_THREAD to be thread safe) when intermixing database
- # operations that use the cursor internally with those that don't.
- def _closeCursors(self, save=1):
- if self.dbc:
- c = self.dbc
- self.dbc = None
- if save:
- try:
- self.saved_dbc_key = _DeadlockWrap(c.current, 0,0,0)[0]
- except db.DBError:
- pass
- _DeadlockWrap(c.close)
- del c
- for cref in self._cursor_refs.values():
- c = cref()
- if c is not None:
- _DeadlockWrap(c.close)
-
- def _checkOpen(self):
- if self.db is None:
- raise error, "BSDDB object has already been closed"
-
- def isOpen(self):
- return self.db is not None
-
- def __len__(self):
- self._checkOpen()
- return _DeadlockWrap(lambda: len(self.db)) # len(self.db)
-
- def __getitem__(self, key):
- self._checkOpen()
- return _DeadlockWrap(lambda: self.db[key]) # self.db[key]
-
- def __setitem__(self, key, value):
- self._checkOpen()
- self._closeCursors()
- def wrapF():
- self.db[key] = value
- _DeadlockWrap(wrapF) # self.db[key] = value
-
- def __delitem__(self, key):
- self._checkOpen()
- self._closeCursors()
- def wrapF():
- del self.db[key]
- _DeadlockWrap(wrapF) # del self.db[key]
-
- def close(self):
- self._closeCursors(save=0)
- if self.dbc is not None:
- _DeadlockWrap(self.dbc.close)
- v = 0
- if self.db is not None:
- v = _DeadlockWrap(self.db.close)
- self.dbc = None
- self.db = None
- return v
-
- def keys(self):
- self._checkOpen()
- return _DeadlockWrap(self.db.keys)
-
- def has_key(self, key):
- self._checkOpen()
- return _DeadlockWrap(self.db.has_key, key)
-
- def set_location(self, key):
- self._checkOpen()
- self._checkCursor()
- return _DeadlockWrap(self.dbc.set_range, key)
-
- def next(self):
- self._checkOpen()
- self._checkCursor()
- rv = _DeadlockWrap(self.dbc.next)
- return rv
-
- def previous(self):
- self._checkOpen()
- self._checkCursor()
- rv = _DeadlockWrap(self.dbc.prev)
- return rv
-
- def first(self):
- self._checkOpen()
- self._checkCursor()
- rv = _DeadlockWrap(self.dbc.first)
- return rv
-
- def last(self):
- self._checkOpen()
- self._checkCursor()
- rv = _DeadlockWrap(self.dbc.last)
- return rv
-
- def sync(self):
- self._checkOpen()
- return _DeadlockWrap(self.db.sync)
-
-
-#----------------------------------------------------------------------
-# Compatibility object factory functions
-
-def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
- cachesize=None, lorder=None, hflags=0):
-
- flags = _checkflag(flag, file)
- e = _openDBEnv(cachesize)
- d = db.DB(e)
- d.set_flags(hflags)
- if pgsize is not None: d.set_pagesize(pgsize)
- if lorder is not None: d.set_lorder(lorder)
- if ffactor is not None: d.set_h_ffactor(ffactor)
- if nelem is not None: d.set_h_nelem(nelem)
- d.open(file, db.DB_HASH, flags, mode)
- return _DBWithCursor(d)
-
-#----------------------------------------------------------------------
-
-def btopen(file, flag='c', mode=0666,
- btflags=0, cachesize=None, maxkeypage=None, minkeypage=None,
- pgsize=None, lorder=None):
-
- flags = _checkflag(flag, file)
- e = _openDBEnv(cachesize)
- d = db.DB(e)
- if pgsize is not None: d.set_pagesize(pgsize)
- if lorder is not None: d.set_lorder(lorder)
- d.set_flags(btflags)
- if minkeypage is not None: d.set_bt_minkey(minkeypage)
- if maxkeypage is not None: d.set_bt_maxkey(maxkeypage)
- d.open(file, db.DB_BTREE, flags, mode)
- return _DBWithCursor(d)
-
-#----------------------------------------------------------------------
-
-
-def rnopen(file, flag='c', mode=0666,
- rnflags=0, cachesize=None, pgsize=None, lorder=None,
- rlen=None, delim=None, source=None, pad=None):
-
- flags = _checkflag(flag, file)
- e = _openDBEnv(cachesize)
- d = db.DB(e)
- if pgsize is not None: d.set_pagesize(pgsize)
- if lorder is not None: d.set_lorder(lorder)
- d.set_flags(rnflags)
- if delim is not None: d.set_re_delim(delim)
- if rlen is not None: d.set_re_len(rlen)
- if source is not None: d.set_re_source(source)
- if pad is not None: d.set_re_pad(pad)
- d.open(file, db.DB_RECNO, flags, mode)
- return _DBWithCursor(d)
-
-#----------------------------------------------------------------------
-
-def _openDBEnv(cachesize):
- e = db.DBEnv()
- if cachesize is not None:
- if cachesize >= 20480:
- e.set_cachesize(0, cachesize)
- else:
- raise error, "cachesize must be >= 20480"
- e.set_lk_detect(db.DB_LOCK_DEFAULT)
- e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
- return e
-
-def _checkflag(flag, file):
- if flag == 'r':
- flags = db.DB_RDONLY
- elif flag == 'rw':
- flags = 0
- elif flag == 'w':
- flags = db.DB_CREATE
- elif flag == 'c':
- flags = db.DB_CREATE
- elif flag == 'n':
- flags = db.DB_CREATE
- #flags = db.DB_CREATE | db.DB_TRUNCATE
- # we used db.DB_TRUNCATE flag for this before but BerkeleyDB
- # 4.2.52 changed to disallowed truncate with txn environments.
- if file is not None and os.path.isfile(file):
- os.unlink(file)
- else:
- raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
- return flags | db.DB_THREAD
-
-#----------------------------------------------------------------------
-
-
-# This is a silly little hack that allows apps to continue to use the
-# DB_THREAD flag even on systems without threads without freaking out
-# BerkeleyDB.
-#
-# This assumes that if Python was built with thread support then
-# BerkeleyDB was too.
-
-try:
- import thread
- del thread
- if db.version() < (3, 3, 0):
- db.DB_THREAD = 0
-except ImportError:
- db.DB_THREAD = 0
-
-#----------------------------------------------------------------------
diff --git a/sys/lib/python/bsddb/db.py b/sys/lib/python/bsddb/db.py
deleted file mode 100644
index 3bd0c8ba4..000000000
--- a/sys/lib/python/bsddb/db.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#----------------------------------------------------------------------
-# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
-# and Andrew Kuchling. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# o Redistributions of source code must retain the above copyright
-# notice, this list of conditions, and the disclaimer that follows.
-#
-# o Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions, and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# o Neither the name of Digital Creations nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
-# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
-# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
-# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
-# DAMAGE.
-#----------------------------------------------------------------------
-
-
-# This module is just a placeholder for possible future expansion, in
-# case we ever want to augment the stuff in _db in any way. For now
-# it just simply imports everything from _db.
-
-if __name__.startswith('bsddb3.'):
- # import _pybsddb binary as it should be the more recent version from
- # a standalone pybsddb addon package than the version included with
- # python as bsddb._bsddb.
- from _pybsddb import *
- from _pybsddb import __version__
-else:
- from _bsddb import *
- from _bsddb import __version__
-
-if version() < (3, 2, 0):
- raise ImportError, "correct BerkeleyDB symbols not found. Perhaps python was statically linked with an older version?"
diff --git a/sys/lib/python/bsddb/dbobj.py b/sys/lib/python/bsddb/dbobj.py
deleted file mode 100644
index b74ee72f7..000000000
--- a/sys/lib/python/bsddb/dbobj.py
+++ /dev/null
@@ -1,254 +0,0 @@
-#-------------------------------------------------------------------------
-# This file contains real Python object wrappers for DB and DBEnv
-# C "objects" that can be usefully subclassed. The previous SWIG
-# based interface allowed this thanks to SWIG's shadow classes.
-# -- Gregory P. Smith
-#-------------------------------------------------------------------------
-#
-# (C) Copyright 2001 Autonomous Zone Industries
-#
-# License: This is free software. You may use this software for any
-# purpose including modification/redistribution, so long as
-# this header remains intact and that you do not claim any
-# rights of ownership or authorship of this software. This
-# software has been tested, but no warranty is expressed or
-# implied.
-#
-
-#
-# TODO it would be *really nice* to have an automatic shadow class populator
-# so that new methods don't need to be added here manually after being
-# added to _bsddb.c.
-#
-
-import db
-
-try:
- from UserDict import DictMixin
-except ImportError:
- # DictMixin is new in Python 2.3
- class DictMixin: pass
-
-class DBEnv:
- def __init__(self, *args, **kwargs):
- self._cobj = apply(db.DBEnv, args, kwargs)
-
- def close(self, *args, **kwargs):
- return apply(self._cobj.close, args, kwargs)
- def open(self, *args, **kwargs):
- return apply(self._cobj.open, args, kwargs)
- def remove(self, *args, **kwargs):
- return apply(self._cobj.remove, args, kwargs)
- def set_shm_key(self, *args, **kwargs):
- return apply(self._cobj.set_shm_key, args, kwargs)
- def set_cachesize(self, *args, **kwargs):
- return apply(self._cobj.set_cachesize, args, kwargs)
- def set_data_dir(self, *args, **kwargs):
- return apply(self._cobj.set_data_dir, args, kwargs)
- def set_flags(self, *args, **kwargs):
- return apply(self._cobj.set_flags, args, kwargs)
- def set_lg_bsize(self, *args, **kwargs):
- return apply(self._cobj.set_lg_bsize, args, kwargs)
- def set_lg_dir(self, *args, **kwargs):
- return apply(self._cobj.set_lg_dir, args, kwargs)
- def set_lg_max(self, *args, **kwargs):
- return apply(self._cobj.set_lg_max, args, kwargs)
- def set_lk_detect(self, *args, **kwargs):
- return apply(self._cobj.set_lk_detect, args, kwargs)
- if db.version() < (4,5):
- def set_lk_max(self, *args, **kwargs):
- return apply(self._cobj.set_lk_max, args, kwargs)
- def set_lk_max_locks(self, *args, **kwargs):
- return apply(self._cobj.set_lk_max_locks, args, kwargs)
- def set_lk_max_lockers(self, *args, **kwargs):
- return apply(self._cobj.set_lk_max_lockers, args, kwargs)
- def set_lk_max_objects(self, *args, **kwargs):
- return apply(self._cobj.set_lk_max_objects, args, kwargs)
- def set_mp_mmapsize(self, *args, **kwargs):
- return apply(self._cobj.set_mp_mmapsize, args, kwargs)
- def set_timeout(self, *args, **kwargs):
- return apply(self._cobj.set_timeout, args, kwargs)
- def set_tmp_dir(self, *args, **kwargs):
- return apply(self._cobj.set_tmp_dir, args, kwargs)
- def txn_begin(self, *args, **kwargs):
- return apply(self._cobj.txn_begin, args, kwargs)
- def txn_checkpoint(self, *args, **kwargs):
- return apply(self._cobj.txn_checkpoint, args, kwargs)
- def txn_stat(self, *args, **kwargs):
- return apply(self._cobj.txn_stat, args, kwargs)
- def set_tx_max(self, *args, **kwargs):
- return apply(self._cobj.set_tx_max, args, kwargs)
- def set_tx_timestamp(self, *args, **kwargs):
- return apply(self._cobj.set_tx_timestamp, args, kwargs)
- def lock_detect(self, *args, **kwargs):
- return apply(self._cobj.lock_detect, args, kwargs)
- def lock_get(self, *args, **kwargs):
- return apply(self._cobj.lock_get, args, kwargs)
- def lock_id(self, *args, **kwargs):
- return apply(self._cobj.lock_id, args, kwargs)
- def lock_put(self, *args, **kwargs):
- return apply(self._cobj.lock_put, args, kwargs)
- def lock_stat(self, *args, **kwargs):
- return apply(self._cobj.lock_stat, args, kwargs)
- def log_archive(self, *args, **kwargs):
- return apply(self._cobj.log_archive, args, kwargs)
-
- def set_get_returns_none(self, *args, **kwargs):
- return apply(self._cobj.set_get_returns_none, args, kwargs)
-
- if db.version() >= (4,0):
- def log_stat(self, *args, **kwargs):
- return apply(self._cobj.log_stat, args, kwargs)
-
- if db.version() >= (4,1):
- def dbremove(self, *args, **kwargs):
- return apply(self._cobj.dbremove, args, kwargs)
- def dbrename(self, *args, **kwargs):
- return apply(self._cobj.dbrename, args, kwargs)
- def set_encrypt(self, *args, **kwargs):
- return apply(self._cobj.set_encrypt, args, kwargs)
-
- if db.version() >= (4,4):
- def lsn_reset(self, *args, **kwargs):
- return apply(self._cobj.lsn_reset, args, kwargs)
-
-
-class DB(DictMixin):
- def __init__(self, dbenv, *args, **kwargs):
- # give it the proper DBEnv C object that its expecting
- self._cobj = apply(db.DB, (dbenv._cobj,) + args, kwargs)
-
- # TODO are there other dict methods that need to be overridden?
- def __len__(self):
- return len(self._cobj)
- def __getitem__(self, arg):
- return self._cobj[arg]
- def __setitem__(self, key, value):
- self._cobj[key] = value
- def __delitem__(self, arg):
- del self._cobj[arg]
-
- def append(self, *args, **kwargs):
- return apply(self._cobj.append, args, kwargs)
- def associate(self, *args, **kwargs):
- return apply(self._cobj.associate, args, kwargs)
- def close(self, *args, **kwargs):
- return apply(self._cobj.close, args, kwargs)
- def consume(self, *args, **kwargs):
- return apply(self._cobj.consume, args, kwargs)
- def consume_wait(self, *args, **kwargs):
- return apply(self._cobj.consume_wait, args, kwargs)
- def cursor(self, *args, **kwargs):
- return apply(self._cobj.cursor, args, kwargs)
- def delete(self, *args, **kwargs):
- return apply(self._cobj.delete, args, kwargs)
- def fd(self, *args, **kwargs):
- return apply(self._cobj.fd, args, kwargs)
- def get(self, *args, **kwargs):
- return apply(self._cobj.get, args, kwargs)
- def pget(self, *args, **kwargs):
- return apply(self._cobj.pget, args, kwargs)
- def get_both(self, *args, **kwargs):
- return apply(self._cobj.get_both, args, kwargs)
- def get_byteswapped(self, *args, **kwargs):
- return apply(self._cobj.get_byteswapped, args, kwargs)
- def get_size(self, *args, **kwargs):
- return apply(self._cobj.get_size, args, kwargs)
- def get_type(self, *args, **kwargs):
- return apply(self._cobj.get_type, args, kwargs)
- def join(self, *args, **kwargs):
- return apply(self._cobj.join, args, kwargs)
- def key_range(self, *args, **kwargs):
- return apply(self._cobj.key_range, args, kwargs)
- def has_key(self, *args, **kwargs):
- return apply(self._cobj.has_key, args, kwargs)
- def items(self, *args, **kwargs):
- return apply(self._cobj.items, args, kwargs)
- def keys(self, *args, **kwargs):
- return apply(self._cobj.keys, args, kwargs)
- def open(self, *args, **kwargs):
- return apply(self._cobj.open, args, kwargs)
- def put(self, *args, **kwargs):
- return apply(self._cobj.put, args, kwargs)
- def remove(self, *args, **kwargs):
- return apply(self._cobj.remove, args, kwargs)
- def rename(self, *args, **kwargs):
- return apply(self._cobj.rename, args, kwargs)
- def set_bt_minkey(self, *args, **kwargs):
- return apply(self._cobj.set_bt_minkey, args, kwargs)
- def set_bt_compare(self, *args, **kwargs):
- return apply(self._cobj.set_bt_compare, args, kwargs)
- def set_cachesize(self, *args, **kwargs):
- return apply(self._cobj.set_cachesize, args, kwargs)
- def set_flags(self, *args, **kwargs):
- return apply(self._cobj.set_flags, args, kwargs)
- def set_h_ffactor(self, *args, **kwargs):
- return apply(self._cobj.set_h_ffactor, args, kwargs)
- def set_h_nelem(self, *args, **kwargs):
- return apply(self._cobj.set_h_nelem, args, kwargs)
- def set_lorder(self, *args, **kwargs):
- return apply(self._cobj.set_lorder, args, kwargs)
- def set_pagesize(self, *args, **kwargs):
- return apply(self._cobj.set_pagesize, args, kwargs)
- def set_re_delim(self, *args, **kwargs):
- return apply(self._cobj.set_re_delim, args, kwargs)
- def set_re_len(self, *args, **kwargs):
- return apply(self._cobj.set_re_len, args, kwargs)
- def set_re_pad(self, *args, **kwargs):
- return apply(self._cobj.set_re_pad, args, kwargs)
- def set_re_source(self, *args, **kwargs):
- return apply(self._cobj.set_re_source, args, kwargs)
- def set_q_extentsize(self, *args, **kwargs):
- return apply(self._cobj.set_q_extentsize, args, kwargs)
- def stat(self, *args, **kwargs):
- return apply(self._cobj.stat, args, kwargs)
- def sync(self, *args, **kwargs):
- return apply(self._cobj.sync, args, kwargs)
- def type(self, *args, **kwargs):
- return apply(self._cobj.type, args, kwargs)
- def upgrade(self, *args, **kwargs):
- return apply(self._cobj.upgrade, args, kwargs)
- def values(self, *args, **kwargs):
- return apply(self._cobj.values, args, kwargs)
- def verify(self, *args, **kwargs):
- return apply(self._cobj.verify, args, kwargs)
- def set_get_returns_none(self, *args, **kwargs):
- return apply(self._cobj.set_get_returns_none, args, kwargs)
-
- if db.version() >= (4,1):
- def set_encrypt(self, *args, **kwargs):
- return apply(self._cobj.set_encrypt, args, kwargs)
-
-
-class DBSequence:
- def __init__(self, *args, **kwargs):
- self._cobj = apply(db.DBSequence, args, kwargs)
-
- def close(self, *args, **kwargs):
- return apply(self._cobj.close, args, kwargs)
- def get(self, *args, **kwargs):
- return apply(self._cobj.get, args, kwargs)
- def get_dbp(self, *args, **kwargs):
- return apply(self._cobj.get_dbp, args, kwargs)
- def get_key(self, *args, **kwargs):
- return apply(self._cobj.get_key, args, kwargs)
- def init_value(self, *args, **kwargs):
- return apply(self._cobj.init_value, args, kwargs)
- def open(self, *args, **kwargs):
- return apply(self._cobj.open, args, kwargs)
- def remove(self, *args, **kwargs):
- return apply(self._cobj.remove, args, kwargs)
- def stat(self, *args, **kwargs):
- return apply(self._cobj.stat, args, kwargs)
- def set_cachesize(self, *args, **kwargs):
- return apply(self._cobj.set_cachesize, args, kwargs)
- def set_flags(self, *args, **kwargs):
- return apply(self._cobj.set_flags, args, kwargs)
- def set_range(self, *args, **kwargs):
- return apply(self._cobj.set_range, args, kwargs)
- def get_cachesize(self, *args, **kwargs):
- return apply(self._cobj.get_cachesize, args, kwargs)
- def get_flags(self, *args, **kwargs):
- return apply(self._cobj.get_flags, args, kwargs)
- def get_range(self, *args, **kwargs):
- return apply(self._cobj.get_range, args, kwargs)
diff --git a/sys/lib/python/bsddb/dbrecio.py b/sys/lib/python/bsddb/dbrecio.py
deleted file mode 100644
index d439f3255..000000000
--- a/sys/lib/python/bsddb/dbrecio.py
+++ /dev/null
@@ -1,190 +0,0 @@
-
-"""
-File-like objects that read from or write to a bsddb record.
-
-This implements (nearly) all stdio methods.
-
-f = DBRecIO(db, key, txn=None)
-f.close() # explicitly release resources held
-flag = f.isatty() # always false
-pos = f.tell() # get current position
-f.seek(pos) # set current position
-f.seek(pos, mode) # mode 0: absolute; 1: relative; 2: relative to EOF
-buf = f.read() # read until EOF
-buf = f.read(n) # read up to n bytes
-f.truncate([size]) # truncate file at to at most size (default: current pos)
-f.write(buf) # write at current position
-f.writelines(list) # for line in list: f.write(line)
-
-Notes:
-- fileno() is left unimplemented so that code which uses it triggers
- an exception early.
-- There's a simple test set (see end of this file) - not yet updated
- for DBRecIO.
-- readline() is not implemented yet.
-
-
-From:
- Itamar Shtull-Trauring <itamar@maxnm.com>
-"""
-
-import errno
-import string
-
-class DBRecIO:
- def __init__(self, db, key, txn=None):
- self.db = db
- self.key = key
- self.txn = txn
- self.len = None
- self.pos = 0
- self.closed = 0
- self.softspace = 0
-
- def close(self):
- if not self.closed:
- self.closed = 1
- del self.db, self.txn
-
- def isatty(self):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- return 0
-
- def seek(self, pos, mode = 0):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if mode == 1:
- pos = pos + self.pos
- elif mode == 2:
- pos = pos + self.len
- self.pos = max(0, pos)
-
- def tell(self):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- return self.pos
-
- def read(self, n = -1):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if n < 0:
- newpos = self.len
- else:
- newpos = min(self.pos+n, self.len)
-
- dlen = newpos - self.pos
-
- r = self.db.get(self.key, txn=self.txn, dlen=dlen, doff=self.pos)
- self.pos = newpos
- return r
-
- __fixme = """
- def readline(self, length=None):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if self.buflist:
- self.buf = self.buf + string.joinfields(self.buflist, '')
- self.buflist = []
- i = string.find(self.buf, '\n', self.pos)
- if i < 0:
- newpos = self.len
- else:
- newpos = i+1
- if length is not None:
- if self.pos + length < newpos:
- newpos = self.pos + length
- r = self.buf[self.pos:newpos]
- self.pos = newpos
- return r
-
- def readlines(self, sizehint = 0):
- total = 0
- lines = []
- line = self.readline()
- while line:
- lines.append(line)
- total += len(line)
- if 0 < sizehint <= total:
- break
- line = self.readline()
- return lines
- """
-
- def truncate(self, size=None):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if size is None:
- size = self.pos
- elif size < 0:
- raise IOError(errno.EINVAL,
- "Negative size not allowed")
- elif size < self.pos:
- self.pos = size
- self.db.put(self.key, "", txn=self.txn, dlen=self.len-size, doff=size)
-
- def write(self, s):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if not s: return
- if self.pos > self.len:
- self.buflist.append('\0'*(self.pos - self.len))
- self.len = self.pos
- newpos = self.pos + len(s)
- self.db.put(self.key, s, txn=self.txn, dlen=len(s), doff=self.pos)
- self.pos = newpos
-
- def writelines(self, list):
- self.write(string.joinfields(list, ''))
-
- def flush(self):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
-
-
-"""
-# A little test suite
-
-def _test():
- import sys
- if sys.argv[1:]:
- file = sys.argv[1]
- else:
- file = '/etc/passwd'
- lines = open(file, 'r').readlines()
- text = open(file, 'r').read()
- f = StringIO()
- for line in lines[:-2]:
- f.write(line)
- f.writelines(lines[-2:])
- if f.getvalue() != text:
- raise RuntimeError, 'write failed'
- length = f.tell()
- print 'File length =', length
- f.seek(len(lines[0]))
- f.write(lines[1])
- f.seek(0)
- print 'First line =', repr(f.readline())
- here = f.tell()
- line = f.readline()
- print 'Second line =', repr(line)
- f.seek(-len(line), 1)
- line2 = f.read(len(line))
- if line != line2:
- raise RuntimeError, 'bad result after seek back'
- f.seek(len(line2), 1)
- list = f.readlines()
- line = list[-1]
- f.seek(f.tell() - len(line))
- line2 = f.read()
- if line != line2:
- raise RuntimeError, 'bad result after seek back from EOF'
- print 'Read', len(list), 'more lines'
- print 'File length =', f.tell()
- if f.tell() != length:
- raise RuntimeError, 'bad length'
- f.close()
-
-if __name__ == '__main__':
- _test()
-"""
diff --git a/sys/lib/python/bsddb/dbshelve.py b/sys/lib/python/bsddb/dbshelve.py
deleted file mode 100644
index d341ab791..000000000
--- a/sys/lib/python/bsddb/dbshelve.py
+++ /dev/null
@@ -1,299 +0,0 @@
-#!/bin/env python
-#------------------------------------------------------------------------
-# Copyright (c) 1997-2001 by Total Control Software
-# All Rights Reserved
-#------------------------------------------------------------------------
-#
-# Module Name: dbShelve.py
-#
-# Description: A reimplementation of the standard shelve.py that
-# forces the use of cPickle, and DB.
-#
-# Creation Date: 11/3/97 3:39:04PM
-#
-# License: This is free software. You may use this software for any
-# purpose including modification/redistribution, so long as
-# this header remains intact and that you do not claim any
-# rights of ownership or authorship of this software. This
-# software has been tested, but no warranty is expressed or
-# implied.
-#
-# 13-Dec-2000: Updated to be used with the new bsddb3 package.
-# Added DBShelfCursor class.
-#
-#------------------------------------------------------------------------
-
-"""Manage shelves of pickled objects using bsddb database files for the
-storage.
-"""
-
-#------------------------------------------------------------------------
-
-import cPickle
-try:
- from UserDict import DictMixin
-except ImportError:
- # DictMixin is new in Python 2.3
- class DictMixin: pass
-import db
-
-#------------------------------------------------------------------------
-
-
-def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
- dbenv=None, dbname=None):
- """
- A simple factory function for compatibility with the standard
- shleve.py module. It can be used like this, where key is a string
- and data is a pickleable object:
-
- from bsddb import dbshelve
- db = dbshelve.open(filename)
-
- db[key] = data
-
- db.close()
- """
- if type(flags) == type(''):
- sflag = flags
- if sflag == 'r':
- flags = db.DB_RDONLY
- elif sflag == 'rw':
- flags = 0
- elif sflag == 'w':
- flags = db.DB_CREATE
- elif sflag == 'c':
- flags = db.DB_CREATE
- elif sflag == 'n':
- flags = db.DB_TRUNCATE | db.DB_CREATE
- else:
- raise db.DBError, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"
-
- d = DBShelf(dbenv)
- d.open(filename, dbname, filetype, flags, mode)
- return d
-
-#---------------------------------------------------------------------------
-
-class DBShelf(DictMixin):
- """A shelf to hold pickled objects, built upon a bsddb DB object. It
- automatically pickles/unpickles data objects going to/from the DB.
- """
- def __init__(self, dbenv=None):
- self.db = db.DB(dbenv)
- self.binary = 1
-
-
- def __del__(self):
- self.close()
-
-
- def __getattr__(self, name):
- """Many methods we can just pass through to the DB object.
- (See below)
- """
- return getattr(self.db, name)
-
-
- #-----------------------------------
- # Dictionary access methods
-
- def __len__(self):
- return len(self.db)
-
-
- def __getitem__(self, key):
- data = self.db[key]
- return cPickle.loads(data)
-
-
- def __setitem__(self, key, value):
- data = cPickle.dumps(value, self.binary)
- self.db[key] = data
-
-
- def __delitem__(self, key):
- del self.db[key]
-
-
- def keys(self, txn=None):
- if txn != None:
- return self.db.keys(txn)
- else:
- return self.db.keys()
-
-
- def items(self, txn=None):
- if txn != None:
- items = self.db.items(txn)
- else:
- items = self.db.items()
- newitems = []
-
- for k, v in items:
- newitems.append( (k, cPickle.loads(v)) )
- return newitems
-
- def values(self, txn=None):
- if txn != None:
- values = self.db.values(txn)
- else:
- values = self.db.values()
-
- return map(cPickle.loads, values)
-
- #-----------------------------------
- # Other methods
-
- def __append(self, value, txn=None):
- data = cPickle.dumps(value, self.binary)
- return self.db.append(data, txn)
-
- def append(self, value, txn=None):
- if self.get_type() != db.DB_RECNO:
- self.append = self.__append
- return self.append(value, txn=txn)
- raise db.DBError, "append() only supported when dbshelve opened with filetype=dbshelve.db.DB_RECNO"
-
-
- def associate(self, secondaryDB, callback, flags=0):
- def _shelf_callback(priKey, priData, realCallback=callback):
- data = cPickle.loads(priData)
- return realCallback(priKey, data)
- return self.db.associate(secondaryDB, _shelf_callback, flags)
-
-
- #def get(self, key, default=None, txn=None, flags=0):
- def get(self, *args, **kw):
- # We do it with *args and **kw so if the default value wasn't
- # given nothing is passed to the extension module. That way
- # an exception can be raised if set_get_returns_none is turned
- # off.
- data = apply(self.db.get, args, kw)
- try:
- return cPickle.loads(data)
- except (TypeError, cPickle.UnpicklingError):
- return data # we may be getting the default value, or None,
- # so it doesn't need unpickled.
-
- def get_both(self, key, value, txn=None, flags=0):
- data = cPickle.dumps(value, self.binary)
- data = self.db.get(key, data, txn, flags)
- return cPickle.loads(data)
-
-
- def cursor(self, txn=None, flags=0):
- c = DBShelfCursor(self.db.cursor(txn, flags))
- c.binary = self.binary
- return c
-
-
- def put(self, key, value, txn=None, flags=0):
- data = cPickle.dumps(value, self.binary)
- return self.db.put(key, data, txn, flags)
-
-
- def join(self, cursorList, flags=0):
- raise NotImplementedError
-
-
- #----------------------------------------------
- # Methods allowed to pass-through to self.db
- #
- # close, delete, fd, get_byteswapped, get_type, has_key,
- # key_range, open, remove, rename, stat, sync,
- # upgrade, verify, and all set_* methods.
-
-
-#---------------------------------------------------------------------------
-
-class DBShelfCursor:
- """
- """
- def __init__(self, cursor):
- self.dbc = cursor
-
- def __del__(self):
- self.close()
-
-
- def __getattr__(self, name):
- """Some methods we can just pass through to the cursor object. (See below)"""
- return getattr(self.dbc, name)
-
-
- #----------------------------------------------
-
- def dup(self, flags=0):
- return DBShelfCursor(self.dbc.dup(flags))
-
-
- def put(self, key, value, flags=0):
- data = cPickle.dumps(value, self.binary)
- return self.dbc.put(key, data, flags)
-
-
- def get(self, *args):
- count = len(args) # a method overloading hack
- method = getattr(self, 'get_%d' % count)
- apply(method, args)
-
- def get_1(self, flags):
- rec = self.dbc.get(flags)
- return self._extract(rec)
-
- def get_2(self, key, flags):
- rec = self.dbc.get(key, flags)
- return self._extract(rec)
-
- def get_3(self, key, value, flags):
- data = cPickle.dumps(value, self.binary)
- rec = self.dbc.get(key, flags)
- return self._extract(rec)
-
-
- def current(self, flags=0): return self.get_1(flags|db.DB_CURRENT)
- def first(self, flags=0): return self.get_1(flags|db.DB_FIRST)
- def last(self, flags=0): return self.get_1(flags|db.DB_LAST)
- def next(self, flags=0): return self.get_1(flags|db.DB_NEXT)
- def prev(self, flags=0): return self.get_1(flags|db.DB_PREV)
- def consume(self, flags=0): return self.get_1(flags|db.DB_CONSUME)
- def next_dup(self, flags=0): return self.get_1(flags|db.DB_NEXT_DUP)
- def next_nodup(self, flags=0): return self.get_1(flags|db.DB_NEXT_NODUP)
- def prev_nodup(self, flags=0): return self.get_1(flags|db.DB_PREV_NODUP)
-
-
- def get_both(self, key, value, flags=0):
- data = cPickle.dumps(value, self.binary)
- rec = self.dbc.get_both(key, flags)
- return self._extract(rec)
-
-
- def set(self, key, flags=0):
- rec = self.dbc.set(key, flags)
- return self._extract(rec)
-
- def set_range(self, key, flags=0):
- rec = self.dbc.set_range(key, flags)
- return self._extract(rec)
-
- def set_recno(self, recno, flags=0):
- rec = self.dbc.set_recno(recno, flags)
- return self._extract(rec)
-
- set_both = get_both
-
- def _extract(self, rec):
- if rec is None:
- return None
- else:
- key, data = rec
- return key, cPickle.loads(data)
-
- #----------------------------------------------
- # Methods allowed to pass-through to self.dbc
- #
- # close, count, delete, get_recno, join_item
-
-
-#---------------------------------------------------------------------------
diff --git a/sys/lib/python/bsddb/dbtables.py b/sys/lib/python/bsddb/dbtables.py
deleted file mode 100644
index 253331169..000000000
--- a/sys/lib/python/bsddb/dbtables.py
+++ /dev/null
@@ -1,706 +0,0 @@
-#-----------------------------------------------------------------------
-#
-# Copyright (C) 2000, 2001 by Autonomous Zone Industries
-# Copyright (C) 2002 Gregory P. Smith
-#
-# License: This is free software. You may use this software for any
-# purpose including modification/redistribution, so long as
-# this header remains intact and that you do not claim any
-# rights of ownership or authorship of this software. This
-# software has been tested, but no warranty is expressed or
-# implied.
-#
-# -- Gregory P. Smith <greg@electricrain.com>
-
-# This provides a simple database table interface built on top of
-# the Python BerkeleyDB 3 interface.
-#
-_cvsid = '$Id: dbtables.py 46858 2006-06-11 08:35:14Z neal.norwitz $'
-
-import re
-import sys
-import copy
-import xdrlib
-import random
-from types import ListType, StringType
-import cPickle as pickle
-
-try:
- # For Pythons w/distutils pybsddb
- from bsddb3.db import *
-except ImportError:
- # For Python 2.3
- from bsddb.db import *
-
-# XXX(nnorwitz): is this correct? DBIncompleteError is conditional in _bsddb.c
-try:
- DBIncompleteError
-except NameError:
- class DBIncompleteError(Exception):
- pass
-
-class TableDBError(StandardError):
- pass
-class TableAlreadyExists(TableDBError):
- pass
-
-
-class Cond:
- """This condition matches everything"""
- def __call__(self, s):
- return 1
-
-class ExactCond(Cond):
- """Acts as an exact match condition function"""
- def __init__(self, strtomatch):
- self.strtomatch = strtomatch
- def __call__(self, s):
- return s == self.strtomatch
-
-class PrefixCond(Cond):
- """Acts as a condition function for matching a string prefix"""
- def __init__(self, prefix):
- self.prefix = prefix
- def __call__(self, s):
- return s[:len(self.prefix)] == self.prefix
-
-class PostfixCond(Cond):
- """Acts as a condition function for matching a string postfix"""
- def __init__(self, postfix):
- self.postfix = postfix
- def __call__(self, s):
- return s[-len(self.postfix):] == self.postfix
-
-class LikeCond(Cond):
- """
- Acts as a function that will match using an SQL 'LIKE' style
- string. Case insensitive and % signs are wild cards.
- This isn't perfect but it should work for the simple common cases.
- """
- def __init__(self, likestr, re_flags=re.IGNORECASE):
- # escape python re characters
- chars_to_escape = '.*+()[]?'
- for char in chars_to_escape :
- likestr = likestr.replace(char, '\\'+char)
- # convert %s to wildcards
- self.likestr = likestr.replace('%', '.*')
- self.re = re.compile('^'+self.likestr+'$', re_flags)
- def __call__(self, s):
- return self.re.match(s)
-
-#
-# keys used to store database metadata
-#
-_table_names_key = '__TABLE_NAMES__' # list of the tables in this db
-_columns = '._COLUMNS__' # table_name+this key contains a list of columns
-
-def _columns_key(table):
- return table + _columns
-
-#
-# these keys are found within table sub databases
-#
-_data = '._DATA_.' # this+column+this+rowid key contains table data
-_rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each
- # row in the table. (no data is stored)
-_rowid_str_len = 8 # length in bytes of the unique rowid strings
-
-def _data_key(table, col, rowid):
- return table + _data + col + _data + rowid
-
-def _search_col_data_key(table, col):
- return table + _data + col + _data
-
-def _search_all_data_key(table):
- return table + _data
-
-def _rowid_key(table, rowid):
- return table + _rowid + rowid + _rowid
-
-def _search_rowid_key(table):
- return table + _rowid
-
-def contains_metastrings(s) :
- """Verify that the given string does not contain any
- metadata strings that might interfere with dbtables database operation.
- """
- if (s.find(_table_names_key) >= 0 or
- s.find(_columns) >= 0 or
- s.find(_data) >= 0 or
- s.find(_rowid) >= 0):
- # Then
- return 1
- else:
- return 0
-
-
-class bsdTableDB :
- def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
- recover=0, dbflags=0):
- """bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
-
- Open database name in the dbhome BerkeleyDB directory.
- Use keyword arguments when calling this constructor.
- """
- self.db = None
- myflags = DB_THREAD
- if create:
- myflags |= DB_CREATE
- flagsforenv = (DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG |
- DB_INIT_TXN | dbflags)
- # DB_AUTO_COMMIT isn't a valid flag for env.open()
- try:
- dbflags |= DB_AUTO_COMMIT
- except AttributeError:
- pass
- if recover:
- flagsforenv = flagsforenv | DB_RECOVER
- self.env = DBEnv()
- # enable auto deadlock avoidance
- self.env.set_lk_detect(DB_LOCK_DEFAULT)
- self.env.open(dbhome, myflags | flagsforenv)
- if truncate:
- myflags |= DB_TRUNCATE
- self.db = DB(self.env)
- # this code relies on DBCursor.set* methods to raise exceptions
- # rather than returning None
- self.db.set_get_returns_none(1)
- # allow duplicate entries [warning: be careful w/ metadata]
- self.db.set_flags(DB_DUP)
- self.db.open(filename, DB_BTREE, dbflags | myflags, mode)
- self.dbfilename = filename
- # Initialize the table names list if this is a new database
- txn = self.env.txn_begin()
- try:
- if not self.db.has_key(_table_names_key, txn):
- self.db.put(_table_names_key, pickle.dumps([], 1), txn=txn)
- # Yes, bare except
- except:
- txn.abort()
- raise
- else:
- txn.commit()
- # TODO verify more of the database's metadata?
- self.__tablecolumns = {}
-
- def __del__(self):
- self.close()
-
- def close(self):
- if self.db is not None:
- self.db.close()
- self.db = None
- if self.env is not None:
- self.env.close()
- self.env = None
-
- def checkpoint(self, mins=0):
- try:
- self.env.txn_checkpoint(mins)
- except DBIncompleteError:
- pass
-
- def sync(self):
- try:
- self.db.sync()
- except DBIncompleteError:
- pass
-
- def _db_print(self) :
- """Print the database to stdout for debugging"""
- print "******** Printing raw database for debugging ********"
- cur = self.db.cursor()
- try:
- key, data = cur.first()
- while 1:
- print repr({key: data})
- next = cur.next()
- if next:
- key, data = next
- else:
- cur.close()
- return
- except DBNotFoundError:
- cur.close()
-
-
- def CreateTable(self, table, columns):
- """CreateTable(table, columns) - Create a new table in the database.
-
- raises TableDBError if it already exists or for other DB errors.
- """
- assert isinstance(columns, ListType)
- txn = None
- try:
- # checking sanity of the table and column names here on
- # table creation will prevent problems elsewhere.
- if contains_metastrings(table):
- raise ValueError(
- "bad table name: contains reserved metastrings")
- for column in columns :
- if contains_metastrings(column):
- raise ValueError(
- "bad column name: contains reserved metastrings")
-
- columnlist_key = _columns_key(table)
- if self.db.has_key(columnlist_key):
- raise TableAlreadyExists, "table already exists"
-
- txn = self.env.txn_begin()
- # store the table's column info
- self.db.put(columnlist_key, pickle.dumps(columns, 1), txn=txn)
-
- # add the table name to the tablelist
- tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn,
- flags=DB_RMW))
- tablelist.append(table)
- # delete 1st, in case we opened with DB_DUP
- self.db.delete(_table_names_key, txn)
- self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
-
- txn.commit()
- txn = None
- except DBError, dberror:
- if txn:
- txn.abort()
- raise TableDBError, dberror[1]
-
-
- def ListTableColumns(self, table):
- """Return a list of columns in the given table.
- [] if the table doesn't exist.
- """
- assert isinstance(table, StringType)
- if contains_metastrings(table):
- raise ValueError, "bad table name: contains reserved metastrings"
-
- columnlist_key = _columns_key(table)
- if not self.db.has_key(columnlist_key):
- return []
- pickledcolumnlist = self.db.get(columnlist_key)
- if pickledcolumnlist:
- return pickle.loads(pickledcolumnlist)
- else:
- return []
-
- def ListTables(self):
- """Return a list of tables in this database."""
- pickledtablelist = self.db.get(_table_names_key)
- if pickledtablelist:
- return pickle.loads(pickledtablelist)
- else:
- return []
-
- def CreateOrExtendTable(self, table, columns):
- """CreateOrExtendTable(table, columns)
-
- Create a new table in the database.
-
- If a table of this name already exists, extend it to have any
- additional columns present in the given list as well as
- all of its current columns.
- """
- assert isinstance(columns, ListType)
- try:
- self.CreateTable(table, columns)
- except TableAlreadyExists:
- # the table already existed, add any new columns
- txn = None
- try:
- columnlist_key = _columns_key(table)
- txn = self.env.txn_begin()
-
- # load the current column list
- oldcolumnlist = pickle.loads(
- self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
- # create a hash table for fast lookups of column names in the
- # loop below
- oldcolumnhash = {}
- for c in oldcolumnlist:
- oldcolumnhash[c] = c
-
- # create a new column list containing both the old and new
- # column names
- newcolumnlist = copy.copy(oldcolumnlist)
- for c in columns:
- if not oldcolumnhash.has_key(c):
- newcolumnlist.append(c)
-
- # store the table's new extended column list
- if newcolumnlist != oldcolumnlist :
- # delete the old one first since we opened with DB_DUP
- self.db.delete(columnlist_key, txn)
- self.db.put(columnlist_key,
- pickle.dumps(newcolumnlist, 1),
- txn=txn)
-
- txn.commit()
- txn = None
-
- self.__load_column_info(table)
- except DBError, dberror:
- if txn:
- txn.abort()
- raise TableDBError, dberror[1]
-
-
- def __load_column_info(self, table) :
- """initialize the self.__tablecolumns dict"""
- # check the column names
- try:
- tcolpickles = self.db.get(_columns_key(table))
- except DBNotFoundError:
- raise TableDBError, "unknown table: %r" % (table,)
- if not tcolpickles:
- raise TableDBError, "unknown table: %r" % (table,)
- self.__tablecolumns[table] = pickle.loads(tcolpickles)
-
- def __new_rowid(self, table, txn) :
- """Create a new unique row identifier"""
- unique = 0
- while not unique:
- # Generate a random 64-bit row ID string
- # (note: this code has <64 bits of randomness
- # but it's plenty for our database id needs!)
- p = xdrlib.Packer()
- p.pack_int(int(random.random()*2147483647))
- p.pack_int(int(random.random()*2147483647))
- newid = p.get_buffer()
-
- # Guarantee uniqueness by adding this key to the database
- try:
- self.db.put(_rowid_key(table, newid), None, txn=txn,
- flags=DB_NOOVERWRITE)
- except DBKeyExistError:
- pass
- else:
- unique = 1
-
- return newid
-
-
- def Insert(self, table, rowdict) :
- """Insert(table, datadict) - Insert a new row into the table
- using the keys+values from rowdict as the column values.
- """
- txn = None
- try:
- if not self.db.has_key(_columns_key(table)):
- raise TableDBError, "unknown table"
-
- # check the validity of each column name
- if not self.__tablecolumns.has_key(table):
- self.__load_column_info(table)
- for column in rowdict.keys() :
- if not self.__tablecolumns[table].count(column):
- raise TableDBError, "unknown column: %r" % (column,)
-
- # get a unique row identifier for this row
- txn = self.env.txn_begin()
- rowid = self.__new_rowid(table, txn=txn)
-
- # insert the row values into the table database
- for column, dataitem in rowdict.items():
- # store the value
- self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
-
- txn.commit()
- txn = None
-
- except DBError, dberror:
- # WIBNI we could just abort the txn and re-raise the exception?
- # But no, because TableDBError is not related to DBError via
- # inheritance, so it would be backwards incompatible. Do the next
- # best thing.
- info = sys.exc_info()
- if txn:
- txn.abort()
- self.db.delete(_rowid_key(table, rowid))
- raise TableDBError, dberror[1], info[2]
-
-
- def Modify(self, table, conditions={}, mappings={}):
- """Modify(table, conditions={}, mappings={}) - Modify items in rows matching 'conditions' using mapping functions in 'mappings'
-
- * table - the table name
- * conditions - a dictionary keyed on column names containing
- a condition callable expecting the data string as an
- argument and returning a boolean.
- * mappings - a dictionary keyed on column names containing a
- condition callable expecting the data string as an argument and
- returning the new string for that column.
- """
- try:
- matching_rowids = self.__Select(table, [], conditions)
-
- # modify only requested columns
- columns = mappings.keys()
- for rowid in matching_rowids.keys():
- txn = None
- try:
- for column in columns:
- txn = self.env.txn_begin()
- # modify the requested column
- try:
- dataitem = self.db.get(
- _data_key(table, column, rowid),
- txn)
- self.db.delete(
- _data_key(table, column, rowid),
- txn)
- except DBNotFoundError:
- # XXXXXXX row key somehow didn't exist, assume no
- # error
- dataitem = None
- dataitem = mappings[column](dataitem)
- if dataitem <> None:
- self.db.put(
- _data_key(table, column, rowid),
- dataitem, txn=txn)
- txn.commit()
- txn = None
-
- # catch all exceptions here since we call unknown callables
- except:
- if txn:
- txn.abort()
- raise
-
- except DBError, dberror:
- raise TableDBError, dberror[1]
-
- def Delete(self, table, conditions={}):
- """Delete(table, conditions) - Delete items matching the given
- conditions from the table.
-
- * conditions - a dictionary keyed on column names containing
- condition functions expecting the data string as an
- argument and returning a boolean.
- """
- try:
- matching_rowids = self.__Select(table, [], conditions)
-
- # delete row data from all columns
- columns = self.__tablecolumns[table]
- for rowid in matching_rowids.keys():
- txn = None
- try:
- txn = self.env.txn_begin()
- for column in columns:
- # delete the data key
- try:
- self.db.delete(_data_key(table, column, rowid),
- txn)
- except DBNotFoundError:
- # XXXXXXX column may not exist, assume no error
- pass
-
- try:
- self.db.delete(_rowid_key(table, rowid), txn)
- except DBNotFoundError:
- # XXXXXXX row key somehow didn't exist, assume no error
- pass
- txn.commit()
- txn = None
- except DBError, dberror:
- if txn:
- txn.abort()
- raise
- except DBError, dberror:
- raise TableDBError, dberror[1]
-
-
- def Select(self, table, columns, conditions={}):
- """Select(table, columns, conditions) - retrieve specific row data
- Returns a list of row column->value mapping dictionaries.
-
- * columns - a list of which column data to return. If
- columns is None, all columns will be returned.
- * conditions - a dictionary keyed on column names
- containing callable conditions expecting the data string as an
- argument and returning a boolean.
- """
- try:
- if not self.__tablecolumns.has_key(table):
- self.__load_column_info(table)
- if columns is None:
- columns = self.__tablecolumns[table]
- matching_rowids = self.__Select(table, columns, conditions)
- except DBError, dberror:
- raise TableDBError, dberror[1]
- # return the matches as a list of dictionaries
- return matching_rowids.values()
-
-
- def __Select(self, table, columns, conditions):
- """__Select() - Used to implement Select and Delete (above)
- Returns a dictionary keyed on rowids containing dicts
- holding the row data for columns listed in the columns param
- that match the given conditions.
- * conditions is a dictionary keyed on column names
- containing callable conditions expecting the data string as an
- argument and returning a boolean.
- """
- # check the validity of each column name
- if not self.__tablecolumns.has_key(table):
- self.__load_column_info(table)
- if columns is None:
- columns = self.tablecolumns[table]
- for column in (columns + conditions.keys()):
- if not self.__tablecolumns[table].count(column):
- raise TableDBError, "unknown column: %r" % (column,)
-
- # keyed on rows that match so far, containings dicts keyed on
- # column names containing the data for that row and column.
- matching_rowids = {}
- # keys are rowids that do not match
- rejected_rowids = {}
-
- # attempt to sort the conditions in such a way as to minimize full
- # column lookups
- def cmp_conditions(atuple, btuple):
- a = atuple[1]
- b = btuple[1]
- if type(a) is type(b):
- if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
- # longest prefix first
- return cmp(len(b.prefix), len(a.prefix))
- if isinstance(a, LikeCond) and isinstance(b, LikeCond):
- # longest likestr first
- return cmp(len(b.likestr), len(a.likestr))
- return 0
- if isinstance(a, ExactCond):
- return -1
- if isinstance(b, ExactCond):
- return 1
- if isinstance(a, PrefixCond):
- return -1
- if isinstance(b, PrefixCond):
- return 1
- # leave all unknown condition callables alone as equals
- return 0
-
- conditionlist = conditions.items()
- conditionlist.sort(cmp_conditions)
-
- # Apply conditions to column data to find what we want
- cur = self.db.cursor()
- column_num = -1
- for column, condition in conditionlist:
- column_num = column_num + 1
- searchkey = _search_col_data_key(table, column)
- # speedup: don't linear search columns within loop
- if column in columns:
- savethiscolumndata = 1 # save the data for return
- else:
- savethiscolumndata = 0 # data only used for selection
-
- try:
- key, data = cur.set_range(searchkey)
- while key[:len(searchkey)] == searchkey:
- # extract the rowid from the key
- rowid = key[-_rowid_str_len:]
-
- if not rejected_rowids.has_key(rowid):
- # if no condition was specified or the condition
- # succeeds, add row to our match list.
- if not condition or condition(data):
- if not matching_rowids.has_key(rowid):
- matching_rowids[rowid] = {}
- if savethiscolumndata:
- matching_rowids[rowid][column] = data
- else:
- if matching_rowids.has_key(rowid):
- del matching_rowids[rowid]
- rejected_rowids[rowid] = rowid
-
- key, data = cur.next()
-
- except DBError, dberror:
- if dberror[0] != DB_NOTFOUND:
- raise
- continue
-
- cur.close()
-
- # we're done selecting rows, garbage collect the reject list
- del rejected_rowids
-
- # extract any remaining desired column data from the
- # database for the matching rows.
- if len(columns) > 0:
- for rowid, rowdata in matching_rowids.items():
- for column in columns:
- if rowdata.has_key(column):
- continue
- try:
- rowdata[column] = self.db.get(
- _data_key(table, column, rowid))
- except DBError, dberror:
- if dberror[0] != DB_NOTFOUND:
- raise
- rowdata[column] = None
-
- # return the matches
- return matching_rowids
-
-
- def Drop(self, table):
- """Remove an entire table from the database"""
- txn = None
- try:
- txn = self.env.txn_begin()
-
- # delete the column list
- self.db.delete(_columns_key(table), txn)
-
- cur = self.db.cursor(txn)
-
- # delete all keys containing this tables column and row info
- table_key = _search_all_data_key(table)
- while 1:
- try:
- key, data = cur.set_range(table_key)
- except DBNotFoundError:
- break
- # only delete items in this table
- if key[:len(table_key)] != table_key:
- break
- cur.delete()
-
- # delete all rowids used by this table
- table_key = _search_rowid_key(table)
- while 1:
- try:
- key, data = cur.set_range(table_key)
- except DBNotFoundError:
- break
- # only delete items in this table
- if key[:len(table_key)] != table_key:
- break
- cur.delete()
-
- cur.close()
-
- # delete the tablename from the table name list
- tablelist = pickle.loads(
- self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
- try:
- tablelist.remove(table)
- except ValueError:
- # hmm, it wasn't there, oh well, that's what we want.
- pass
- # delete 1st, incase we opened with DB_DUP
- self.db.delete(_table_names_key, txn)
- self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
-
- txn.commit()
- txn = None
-
- if self.__tablecolumns.has_key(table):
- del self.__tablecolumns[table]
-
- except DBError, dberror:
- if txn:
- txn.abort()
- raise TableDBError, dberror[1]
diff --git a/sys/lib/python/bsddb/dbutils.py b/sys/lib/python/bsddb/dbutils.py
deleted file mode 100644
index 6dcfdd5b5..000000000
--- a/sys/lib/python/bsddb/dbutils.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#------------------------------------------------------------------------
-#
-# Copyright (C) 2000 Autonomous Zone Industries
-#
-# License: This is free software. You may use this software for any
-# purpose including modification/redistribution, so long as
-# this header remains intact and that you do not claim any
-# rights of ownership or authorship of this software. This
-# software has been tested, but no warranty is expressed or
-# implied.
-#
-# Author: Gregory P. Smith <greg@electricrain.com>
-#
-# Note: I don't know how useful this is in reality since when a
-# DBLockDeadlockError happens the current transaction is supposed to be
-# aborted. If it doesn't then when the operation is attempted again
-# the deadlock is still happening...
-# --Robin
-#
-#------------------------------------------------------------------------
-
-
-#
-# import the time.sleep function in a namespace safe way to allow
-# "from bsddb.dbutils import *"
-#
-from time import sleep as _sleep
-
-import db
-
-# always sleep at least N seconds between retrys
-_deadlock_MinSleepTime = 1.0/128
-# never sleep more than N seconds between retrys
-_deadlock_MaxSleepTime = 3.14159
-
-# Assign a file object to this for a "sleeping" message to be written to it
-# each retry
-_deadlock_VerboseFile = None
-
-
-def DeadlockWrap(function, *_args, **_kwargs):
- """DeadlockWrap(function, *_args, **_kwargs) - automatically retries
- function in case of a database deadlock.
-
- This is a function intended to be used to wrap database calls such
- that they perform retrys with exponentially backing off sleeps in
- between when a DBLockDeadlockError exception is raised.
-
- A 'max_retries' parameter may optionally be passed to prevent it
- from retrying forever (in which case the exception will be reraised).
-
- d = DB(...)
- d.open(...)
- DeadlockWrap(d.put, "foo", data="bar") # set key "foo" to "bar"
- """
- sleeptime = _deadlock_MinSleepTime
- max_retries = _kwargs.get('max_retries', -1)
- if _kwargs.has_key('max_retries'):
- del _kwargs['max_retries']
- while True:
- try:
- return function(*_args, **_kwargs)
- except db.DBLockDeadlockError:
- if _deadlock_VerboseFile:
- _deadlock_VerboseFile.write(
- 'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
- _sleep(sleeptime)
- # exponential backoff in the sleep time
- sleeptime *= 2
- if sleeptime > _deadlock_MaxSleepTime:
- sleeptime = _deadlock_MaxSleepTime
- max_retries -= 1
- if max_retries == -1:
- raise
-
-
-#------------------------------------------------------------------------
diff --git a/sys/lib/python/cProfile.py b/sys/lib/python/cProfile.py
deleted file mode 100755
index 19d58048a..000000000
--- a/sys/lib/python/cProfile.py
+++ /dev/null
@@ -1,190 +0,0 @@
-#! /usr/bin/env python
-
-"""Python interface for the 'lsprof' profiler.
- Compatible with the 'profile' module.
-"""
-
-__all__ = ["run", "runctx", "help", "Profile"]
-
-import _lsprof
-
-# ____________________________________________________________
-# Simple interface
-
-def run(statement, filename=None, sort=-1):
- """Run statement under profiler optionally saving results in filename
-
- This function takes a single argument that can be passed to the
- "exec" statement, and an optional file name. In all cases this
- routine attempts to "exec" its first argument and gather profiling
- statistics from the execution. If no file name is present, then this
- function automatically prints a simple profiling report, sorted by the
- standard name string (file/line/function-name) that is presented in
- each line.
- """
- prof = Profile()
- result = None
- try:
- try:
- prof = prof.run(statement)
- except SystemExit:
- pass
- finally:
- if filename is not None:
- prof.dump_stats(filename)
- else:
- result = prof.print_stats(sort)
- return result
-
-def runctx(statement, globals, locals, filename=None):
- """Run statement under profiler, supplying your own globals and locals,
- optionally saving results in filename.
-
- statement and filename have the same semantics as profile.run
- """
- prof = Profile()
- result = None
- try:
- try:
- prof = prof.runctx(statement, globals, locals)
- except SystemExit:
- pass
- finally:
- if filename is not None:
- prof.dump_stats(filename)
- else:
- result = prof.print_stats()
- return result
-
-# Backwards compatibility.
-def help():
- print "Documentation for the profile/cProfile modules can be found "
- print "in the Python Library Reference, section 'The Python Profiler'."
-
-# ____________________________________________________________
-
-class Profile(_lsprof.Profiler):
- """Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)
-
- Builds a profiler object using the specified timer function.
- The default timer is a fast built-in one based on real time.
- For custom timer functions returning integers, time_unit can
- be a float specifying a scale (i.e. how long each integer unit
- is, in seconds).
- """
-
- # Most of the functionality is in the base class.
- # This subclass only adds convenient and backward-compatible methods.
-
- def print_stats(self, sort=-1):
- import pstats
- pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats()
-
- def dump_stats(self, file):
- import marshal
- f = open(file, 'wb')
- self.create_stats()
- marshal.dump(self.stats, f)
- f.close()
-
- def create_stats(self):
- self.disable()
- self.snapshot_stats()
-
- def snapshot_stats(self):
- entries = self.getstats()
- self.stats = {}
- callersdicts = {}
- # call information
- for entry in entries:
- func = label(entry.code)
- nc = entry.callcount # ncalls column of pstats (before '/')
- cc = nc - entry.reccallcount # ncalls column of pstats (after '/')
- tt = entry.inlinetime # tottime column of pstats
- ct = entry.totaltime # cumtime column of pstats
- callers = {}
- callersdicts[id(entry.code)] = callers
- self.stats[func] = cc, nc, tt, ct, callers
- # subcall information
- for entry in entries:
- if entry.calls:
- func = label(entry.code)
- for subentry in entry.calls:
- try:
- callers = callersdicts[id(subentry.code)]
- except KeyError:
- continue
- nc = subentry.callcount
- cc = nc - subentry.reccallcount
- tt = subentry.inlinetime
- ct = subentry.totaltime
- if func in callers:
- prev = callers[func]
- nc += prev[0]
- cc += prev[1]
- tt += prev[2]
- ct += prev[3]
- callers[func] = nc, cc, tt, ct
-
- # The following two methods can be called by clients to use
- # a profiler to profile a statement, given as a string.
-
- def run(self, cmd):
- import __main__
- dict = __main__.__dict__
- return self.runctx(cmd, dict, dict)
-
- def runctx(self, cmd, globals, locals):
- self.enable()
- try:
- exec cmd in globals, locals
- finally:
- self.disable()
- return self
-
- # This method is more useful to profile a single function call.
- def runcall(self, func, *args, **kw):
- self.enable()
- try:
- return func(*args, **kw)
- finally:
- self.disable()
-
-# ____________________________________________________________
-
-def label(code):
- if isinstance(code, str):
- return ('~', 0, code) # built-in functions ('~' sorts at the end)
- else:
- return (code.co_filename, code.co_firstlineno, code.co_name)
-
-# ____________________________________________________________
-
-def main():
- import os, sys
- from optparse import OptionParser
- usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
- parser = OptionParser(usage=usage)
- parser.allow_interspersed_args = False
- parser.add_option('-o', '--outfile', dest="outfile",
- help="Save stats to <outfile>", default=None)
- parser.add_option('-s', '--sort', dest="sort",
- help="Sort order when printing to stdout, based on pstats.Stats class", default=-1)
-
- if not sys.argv[1:]:
- parser.print_usage()
- sys.exit(2)
-
- (options, args) = parser.parse_args()
- sys.argv[:] = args
-
- if (len(sys.argv) > 0):
- sys.path.insert(0, os.path.dirname(sys.argv[0]))
- run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)
- else:
- parser.print_usage()
- return parser
-
-# When invoked as main program, invoke the profiler on a script
-if __name__ == '__main__':
- main()
diff --git a/sys/lib/python/calendar.py b/sys/lib/python/calendar.py
deleted file mode 100644
index 00948efe5..000000000
--- a/sys/lib/python/calendar.py
+++ /dev/null
@@ -1,701 +0,0 @@
-"""Calendar printing functions
-
-Note when comparing these calendars to the ones printed by cal(1): By
-default, these calendars have Monday as the first day of the week, and
-Sunday as the last (the European convention). Use setfirstweekday() to
-set the first day of the week (0=Monday, 6=Sunday)."""
-
-from __future__ import with_statement
-import sys, datetime, locale
-
-__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
- "firstweekday", "isleap", "leapdays", "weekday", "monthrange",
- "monthcalendar", "prmonth", "month", "prcal", "calendar",
- "timegm", "month_name", "month_abbr", "day_name", "day_abbr"]
-
-# Exception raised for bad input (with string parameter for details)
-error = ValueError
-
-# Exceptions raised for bad input
-class IllegalMonthError(ValueError):
- def __init__(self, month):
- self.month = month
- def __str__(self):
- return "bad month number %r; must be 1-12" % self.month
-
-
-class IllegalWeekdayError(ValueError):
- def __init__(self, weekday):
- self.weekday = weekday
- def __str__(self):
- return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
-
-
-# Constants for months referenced later
-January = 1
-February = 2
-
-# Number of days per month (except for February in leap years)
-mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
-
-# This module used to have hard-coded lists of day and month names, as
-# English strings. The classes following emulate a read-only version of
-# that, but supply localized names. Note that the values are computed
-# fresh on each call, in case the user changes locale between calls.
-
-class _localized_month:
-
- _months = [datetime.date(2001, i+1, 1).strftime for i in xrange(12)]
- _months.insert(0, lambda x: "")
-
- def __init__(self, format):
- self.format = format
-
- def __getitem__(self, i):
- funcs = self._months[i]
- if isinstance(i, slice):
- return [f(self.format) for f in funcs]
- else:
- return funcs(self.format)
-
- def __len__(self):
- return 13
-
-
-class _localized_day:
-
- # January 1, 2001, was a Monday.
- _days = [datetime.date(2001, 1, i+1).strftime for i in xrange(7)]
-
- def __init__(self, format):
- self.format = format
-
- def __getitem__(self, i):
- funcs = self._days[i]
- if isinstance(i, slice):
- return [f(self.format) for f in funcs]
- else:
- return funcs(self.format)
-
- def __len__(self):
- return 7
-
-
-# Full and abbreviated names of weekdays
-day_name = _localized_day('%A')
-day_abbr = _localized_day('%a')
-
-# Full and abbreviated names of months (1-based arrays!!!)
-month_name = _localized_month('%B')
-month_abbr = _localized_month('%b')
-
-# Constants for weekdays
-(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
-
-
-def isleap(year):
- """Return 1 for leap years, 0 for non-leap years."""
- return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
-
-
-def leapdays(y1, y2):
- """Return number of leap years in range [y1, y2).
- Assume y1 <= y2."""
- y1 -= 1
- y2 -= 1
- return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400)
-
-
-def weekday(year, month, day):
- """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
- day (1-31)."""
- return datetime.date(year, month, day).weekday()
-
-
-def monthrange(year, month):
- """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
- year, month."""
- if not 1 <= month <= 12:
- raise IllegalMonthError(month)
- day1 = weekday(year, month, 1)
- ndays = mdays[month] + (month == February and isleap(year))
- return day1, ndays
-
-
-class Calendar(object):
- """
- Base calendar class. This class doesn't do any formatting. It simply
- provides data to subclasses.
- """
-
- def __init__(self, firstweekday=0):
- self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
-
- def getfirstweekday(self):
- return self._firstweekday % 7
-
- def setfirstweekday(self, firstweekday):
- self._firstweekday = firstweekday
-
- firstweekday = property(getfirstweekday, setfirstweekday)
-
- def iterweekdays(self):
- """
- Return a iterator for one week of weekday numbers starting with the
- configured first one.
- """
- for i in xrange(self.firstweekday, self.firstweekday + 7):
- yield i%7
-
- def itermonthdates(self, year, month):
- """
- Return an iterator for one month. The iterator will yield datetime.date
- values and will always iterate through complete weeks, so it will yield
- dates outside the specified month.
- """
- date = datetime.date(year, month, 1)
- # Go back to the beginning of the week
- days = (date.weekday() - self.firstweekday) % 7
- date -= datetime.timedelta(days=days)
- oneday = datetime.timedelta(days=1)
- while True:
- yield date
- date += oneday
- if date.month != month and date.weekday() == self.firstweekday:
- break
-
- def itermonthdays2(self, year, month):
- """
- Like itermonthdates(), but will yield (day number, weekday number)
- tuples. For days outside the specified month the day number is 0.
- """
- for date in self.itermonthdates(year, month):
- if date.month != month:
- yield (0, date.weekday())
- else:
- yield (date.day, date.weekday())
-
- def itermonthdays(self, year, month):
- """
- Like itermonthdates(), but will yield day numbers tuples. For days
- outside the specified month the day number is 0.
- """
- for date in self.itermonthdates(year, month):
- if date.month != month:
- yield 0
- else:
- yield date.day
-
- def monthdatescalendar(self, year, month):
- """
- Return a matrix (list of lists) representing a month's calendar.
- Each row represents a week; week entries are datetime.date values.
- """
- dates = list(self.itermonthdates(year, month))
- return [ dates[i:i+7] for i in xrange(0, len(dates), 7) ]
-
- def monthdays2calendar(self, year, month):
- """
- Return a matrix representing a month's calendar.
- Each row represents a week; week entries are
- (day number, weekday number) tuples. Day numbers outside this month
- are zero.
- """
- days = list(self.itermonthdays2(year, month))
- return [ days[i:i+7] for i in xrange(0, len(days), 7) ]
-
- def monthdayscalendar(self, year, month):
- """
- Return a matrix representing a month's calendar.
- Each row represents a week; days outside this month are zero.
- """
- days = list(self.itermonthdays(year, month))
- return [ days[i:i+7] for i in xrange(0, len(days), 7) ]
-
- def yeardatescalendar(self, year, width=3):
- """
- Return the data for the specified year ready for formatting. The return
- value is a list of month rows. Each month row contains upto width months.
- Each month contains between 4 and 6 weeks and each week contains 1-7
- days. Days are datetime.date objects.
- """
- months = [
- self.monthdatescalendar(year, i)
- for i in xrange(January, January+12)
- ]
- return [months[i:i+width] for i in xrange(0, len(months), width) ]
-
- def yeardays2calendar(self, year, width=3):
- """
- Return the data for the specified year ready for formatting (similar to
- yeardatescalendar()). Entries in the week lists are
- (day number, weekday number) tuples. Day numbers outside this month are
- zero.
- """
- months = [
- self.monthdays2calendar(year, i)
- for i in xrange(January, January+12)
- ]
- return [months[i:i+width] for i in xrange(0, len(months), width) ]
-
- def yeardayscalendar(self, year, width=3):
- """
- Return the data for the specified year ready for formatting (similar to
- yeardatescalendar()). Entries in the week lists are day numbers.
- Day numbers outside this month are zero.
- """
- months = [
- self.monthdayscalendar(year, i)
- for i in xrange(January, January+12)
- ]
- return [months[i:i+width] for i in xrange(0, len(months), width) ]
-
-
-class TextCalendar(Calendar):
- """
- Subclass of Calendar that outputs a calendar as a simple plain text
- similar to the UNIX program cal.
- """
-
- def prweek(self, theweek, width):
- """
- Print a single week (no newline).
- """
- print self.week(theweek, width),
-
- def formatday(self, day, weekday, width):
- """
- Returns a formatted day.
- """
- if day == 0:
- s = ''
- else:
- s = '%2i' % day # right-align single-digit days
- return s.center(width)
-
- def formatweek(self, theweek, width):
- """
- Returns a single week in a string (no newline).
- """
- return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
-
- def formatweekday(self, day, width):
- """
- Returns a formatted week day name.
- """
- if width >= 9:
- names = day_name
- else:
- names = day_abbr
- return names[day][:width].center(width)
-
- def formatweekheader(self, width):
- """
- Return a header for a week.
- """
- return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
-
- def formatmonthname(self, theyear, themonth, width, withyear=True):
- """
- Return a formatted month name.
- """
- s = month_name[themonth]
- if withyear:
- s = "%s %r" % (s, theyear)
- return s.center(width)
-
- def prmonth(self, theyear, themonth, w=0, l=0):
- """
- Print a month's calendar.
- """
- print self.formatmonth(theyear, themonth, w, l),
-
- def formatmonth(self, theyear, themonth, w=0, l=0):
- """
- Return a month's calendar string (multi-line).
- """
- w = max(2, w)
- l = max(1, l)
- s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
- s = s.rstrip()
- s += '\n' * l
- s += self.formatweekheader(w).rstrip()
- s += '\n' * l
- for week in self.monthdays2calendar(theyear, themonth):
- s += self.formatweek(week, w).rstrip()
- s += '\n' * l
- return s
-
- def formatyear(self, theyear, w=2, l=1, c=6, m=3):
- """
- Returns a year's calendar as a multi-line string.
- """
- w = max(2, w)
- l = max(1, l)
- c = max(2, c)
- colwidth = (w + 1) * 7 - 1
- v = []
- a = v.append
- a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
- a('\n'*l)
- header = self.formatweekheader(w)
- for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
- # months in this row
- months = xrange(m*i+1, min(m*(i+1)+1, 13))
- a('\n'*l)
- names = (self.formatmonthname(theyear, k, colwidth, False)
- for k in months)
- a(formatstring(names, colwidth, c).rstrip())
- a('\n'*l)
- headers = (header for k in months)
- a(formatstring(headers, colwidth, c).rstrip())
- a('\n'*l)
- # max number of weeks for this row
- height = max(len(cal) for cal in row)
- for j in xrange(height):
- weeks = []
- for cal in row:
- if j >= len(cal):
- weeks.append('')
- else:
- weeks.append(self.formatweek(cal[j], w))
- a(formatstring(weeks, colwidth, c).rstrip())
- a('\n' * l)
- return ''.join(v)
-
- def pryear(self, theyear, w=0, l=0, c=6, m=3):
- """Print a year's calendar."""
- print self.formatyear(theyear, w, l, c, m)
-
-
-class HTMLCalendar(Calendar):
- """
- This calendar returns complete HTML pages.
- """
-
- # CSS classes for the day <td>s
- cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
-
- def formatday(self, day, weekday):
- """
- Return a day as a table cell.
- """
- if day == 0:
- return '<td class="noday">&nbsp;</td>' # day outside month
- else:
- return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
-
- def formatweek(self, theweek):
- """
- Return a complete week as a table row.
- """
- s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
- return '<tr>%s</tr>' % s
-
- def formatweekday(self, day):
- """
- Return a weekday name as a table header.
- """
- return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
-
- def formatweekheader(self):
- """
- Return a header for a week as a table row.
- """
- s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
- return '<tr>%s</tr>' % s
-
- def formatmonthname(self, theyear, themonth, withyear=True):
- """
- Return a month name as a table row.
- """
- if withyear:
- s = '%s %s' % (month_name[themonth], theyear)
- else:
- s = '%s' % month_name[themonth]
- return '<tr><th colspan="7" class="month">%s</th></tr>' % s
-
- def formatmonth(self, theyear, themonth, withyear=True):
- """
- Return a formatted month as a table.
- """
- v = []
- a = v.append
- a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
- a('\n')
- a(self.formatmonthname(theyear, themonth, withyear=withyear))
- a('\n')
- a(self.formatweekheader())
- a('\n')
- for week in self.monthdays2calendar(theyear, themonth):
- a(self.formatweek(week))
- a('\n')
- a('</table>')
- a('\n')
- return ''.join(v)
-
- def formatyear(self, theyear, width=3):
- """
- Return a formatted year as a table of tables.
- """
- v = []
- a = v.append
- width = max(width, 1)
- a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
- a('\n')
- a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
- for i in xrange(January, January+12, width):
- # months in this row
- months = xrange(i, min(i+width, 13))
- a('<tr>')
- for m in months:
- a('<td>')
- a(self.formatmonth(theyear, m, withyear=False))
- a('</td>')
- a('</tr>')
- a('</table>')
- return ''.join(v)
-
- def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
- """
- Return a formatted year as a complete HTML page.
- """
- if encoding is None:
- encoding = sys.getdefaultencoding()
- v = []
- a = v.append
- a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
- a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
- a('<html>\n')
- a('<head>\n')
- a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
- if css is not None:
- a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
- a('<title>Calendar for %d</title\n' % theyear)
- a('</head>\n')
- a('<body>\n')
- a(self.formatyear(theyear, width))
- a('</body>\n')
- a('</html>\n')
- return ''.join(v).encode(encoding, "xmlcharrefreplace")
-
-
-class TimeEncoding:
- def __init__(self, locale):
- self.locale = locale
-
- def __enter__(self):
- self.oldlocale = locale.setlocale(locale.LC_TIME, self.locale)
- return locale.getlocale(locale.LC_TIME)[1]
-
- def __exit__(self, *args):
- locale.setlocale(locale.LC_TIME, self.oldlocale)
-
-
-class LocaleTextCalendar(TextCalendar):
- """
- This class can be passed a locale name in the constructor and will return
- month and weekday names in the specified locale. If this locale includes
- an encoding all strings containing month and weekday names will be returned
- as unicode.
- """
-
- def __init__(self, firstweekday=0, locale=None):
- TextCalendar.__init__(self, firstweekday)
- if locale is None:
- locale = locale.getdefaultlocale()
- self.locale = locale
-
- def formatweekday(self, day, width):
- with TimeEncoding(self.locale) as encoding:
- if width >= 9:
- names = day_name
- else:
- names = day_abbr
- name = names[day]
- if encoding is not None:
- name = name.decode(encoding)
- return name[:width].center(width)
-
- def formatmonthname(self, theyear, themonth, width, withyear=True):
- with TimeEncoding(self.locale) as encoding:
- s = month_name[themonth]
- if encoding is not None:
- s = s.decode(encoding)
- if withyear:
- s = "%s %r" % (s, theyear)
- return s.center(width)
-
-
-class LocaleHTMLCalendar(HTMLCalendar):
- """
- This class can be passed a locale name in the constructor and will return
- month and weekday names in the specified locale. If this locale includes
- an encoding all strings containing month and weekday names will be returned
- as unicode.
- """
- def __init__(self, firstweekday=0, locale=None):
- HTMLCalendar.__init__(self, firstweekday)
- if locale is None:
- locale = locale.getdefaultlocale()
- self.locale = locale
-
- def formatweekday(self, day):
- with TimeEncoding(self.locale) as encoding:
- s = day_abbr[day]
- if encoding is not None:
- s = s.decode(encoding)
- return '<th class="%s">%s</th>' % (self.cssclasses[day], s)
-
- def formatmonthname(self, theyear, themonth, withyear=True):
- with TimeEncoding(self.locale) as encoding:
- s = month_name[themonth]
- if encoding is not None:
- s = s.decode(encoding)
- if withyear:
- s = '%s %s' % (s, theyear)
- return '<tr><th colspan="7" class="month">%s</th></tr>' % s
-
-
-# Support for old module level interface
-c = TextCalendar()
-
-firstweekday = c.getfirstweekday
-
-def setfirstweekday(firstweekday):
- if not MONDAY <= firstweekday <= SUNDAY:
- raise IllegalWeekdayError(firstweekday)
- c.firstweekday = firstweekday
-
-monthcalendar = c.monthdayscalendar
-prweek = c.prweek
-week = c.formatweek
-weekheader = c.formatweekheader
-prmonth = c.prmonth
-month = c.formatmonth
-calendar = c.formatyear
-prcal = c.pryear
-
-
-# Spacing of month columns for multi-column year calendar
-_colwidth = 7*3 - 1 # Amount printed by prweek()
-_spacing = 6 # Number of spaces between columns
-
-
-def format(cols, colwidth=_colwidth, spacing=_spacing):
- """Prints multi-column formatting for year calendars"""
- print formatstring(cols, colwidth, spacing)
-
-
-def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
- """Returns a string formatted from n strings, centered within n columns."""
- spacing *= ' '
- return spacing.join(c.center(colwidth) for c in cols)
-
-
-EPOCH = 1970
-_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
-
-
-def timegm(tuple):
- """Unrelated but handy function to calculate Unix timestamp from GMT."""
- year, month, day, hour, minute, second = tuple[:6]
- days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
- hours = days*24 + hour
- minutes = hours*60 + minute
- seconds = minutes*60 + second
- return seconds
-
-
-def main(args):
- import optparse
- parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]")
- parser.add_option(
- "-w", "--width",
- dest="width", type="int", default=2,
- help="width of date column (default 2, text only)"
- )
- parser.add_option(
- "-l", "--lines",
- dest="lines", type="int", default=1,
- help="number of lines for each week (default 1, text only)"
- )
- parser.add_option(
- "-s", "--spacing",
- dest="spacing", type="int", default=6,
- help="spacing between months (default 6, text only)"
- )
- parser.add_option(
- "-m", "--months",
- dest="months", type="int", default=3,
- help="months per row (default 3, text only)"
- )
- parser.add_option(
- "-c", "--css",
- dest="css", default="calendar.css",
- help="CSS to use for page (html only)"
- )
- parser.add_option(
- "-L", "--locale",
- dest="locale", default=None,
- help="locale to be used from month and weekday names"
- )
- parser.add_option(
- "-e", "--encoding",
- dest="encoding", default=None,
- help="Encoding to use for output"
- )
- parser.add_option(
- "-t", "--type",
- dest="type", default="text",
- choices=("text", "html"),
- help="output type (text or html)"
- )
-
- (options, args) = parser.parse_args(args)
-
- if options.locale and not options.encoding:
- parser.error("if --locale is specified --encoding is required")
- sys.exit(1)
-
- if options.type == "html":
- if options.locale:
- cal = LocaleHTMLCalendar(locale=options.locale)
- else:
- cal = HTMLCalendar()
- encoding = options.encoding
- if encoding is None:
- encoding = sys.getdefaultencoding()
- optdict = dict(encoding=encoding, css=options.css)
- if len(args) == 1:
- print cal.formatyearpage(datetime.date.today().year, **optdict)
- elif len(args) == 2:
- print cal.formatyearpage(int(args[1]), **optdict)
- else:
- parser.error("incorrect number of arguments")
- sys.exit(1)
- else:
- if options.locale:
- cal = LocaleTextCalendar(locale=options.locale)
- else:
- cal = TextCalendar()
- optdict = dict(w=options.width, l=options.lines)
- if len(args) != 3:
- optdict["c"] = options.spacing
- optdict["m"] = options.months
- if len(args) == 1:
- result = cal.formatyear(datetime.date.today().year, **optdict)
- elif len(args) == 2:
- result = cal.formatyear(int(args[1]), **optdict)
- elif len(args) == 3:
- result = cal.formatmonth(int(args[1]), int(args[2]), **optdict)
- else:
- parser.error("incorrect number of arguments")
- sys.exit(1)
- if options.encoding:
- result = result.encode(options.encoding)
- print result
-
-
-if __name__ == "__main__":
- main(sys.argv)
diff --git a/sys/lib/python/cgi.py b/sys/lib/python/cgi.py
deleted file mode 100755
index f31938b7c..000000000
--- a/sys/lib/python/cgi.py
+++ /dev/null
@@ -1,1071 +0,0 @@
-#! /usr/local/bin/python
-
-# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is
-# intentionally NOT "/usr/bin/env python". On many systems
-# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI
-# scripts, and /usr/local/bin is the default directory where Python is
-# installed, so /usr/bin/env would be unable to find python. Granted,
-# binary installations by Linux vendors often install Python in
-# /usr/bin. So let those vendors patch cgi.py to match their choice
-# of installation.
-
-"""Support module for CGI (Common Gateway Interface) scripts.
-
-This module defines a number of utilities for use by CGI scripts
-written in Python.
-"""
-
-# XXX Perhaps there should be a slimmed version that doesn't contain
-# all those backwards compatible and debugging classes and functions?
-
-# History
-# -------
-#
-# Michael McLay started this module. Steve Majewski changed the
-# interface to SvFormContentDict and FormContentDict. The multipart
-# parsing was inspired by code submitted by Andreas Paepcke. Guido van
-# Rossum rewrote, reformatted and documented the module and is currently
-# responsible for its maintenance.
-#
-
-__version__ = "2.6"
-
-
-# Imports
-# =======
-
-from operator import attrgetter
-import sys
-import os
-import urllib
-import mimetools
-import rfc822
-import UserDict
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-__all__ = ["MiniFieldStorage", "FieldStorage", "FormContentDict",
- "SvFormContentDict", "InterpFormContentDict", "FormContent",
- "parse", "parse_qs", "parse_qsl", "parse_multipart",
- "parse_header", "print_exception", "print_environ",
- "print_form", "print_directory", "print_arguments",
- "print_environ_usage", "escape"]
-
-# Logging support
-# ===============
-
-logfile = "" # Filename to log to, if not empty
-logfp = None # File object to log to, if not None
-
-def initlog(*allargs):
- """Write a log message, if there is a log file.
-
- Even though this function is called initlog(), you should always
- use log(); log is a variable that is set either to initlog
- (initially), to dolog (once the log file has been opened), or to
- nolog (when logging is disabled).
-
- The first argument is a format string; the remaining arguments (if
- any) are arguments to the % operator, so e.g.
- log("%s: %s", "a", "b")
- will write "a: b" to the log file, followed by a newline.
-
- If the global logfp is not None, it should be a file object to
- which log data is written.
-
- If the global logfp is None, the global logfile may be a string
- giving a filename to open, in append mode. This file should be
- world writable!!! If the file can't be opened, logging is
- silently disabled (since there is no safe place where we could
- send an error message).
-
- """
- global logfp, log
- if logfile and not logfp:
- try:
- logfp = open(logfile, "a")
- except IOError:
- pass
- if not logfp:
- log = nolog
- else:
- log = dolog
- log(*allargs)
-
-def dolog(fmt, *args):
- """Write a log message to the log file. See initlog() for docs."""
- logfp.write(fmt%args + "\n")
-
-def nolog(*allargs):
- """Dummy function, assigned to log when logging is disabled."""
- pass
-
-log = initlog # The current logging function
-
-
-# Parsing functions
-# =================
-
-# Maximum input we will accept when REQUEST_METHOD is POST
-# 0 ==> unlimited input
-maxlen = 0
-
-def parse(fp=None, environ=os.environ, keep_blank_values=0, strict_parsing=0):
- """Parse a query in the environment or from a file (default stdin)
-
- Arguments, all optional:
-
- fp : file pointer; default: sys.stdin
-
- environ : environment dictionary; default: os.environ
-
- keep_blank_values: flag indicating whether blank values in
- URL encoded forms should be treated as blank strings.
- A true value indicates that blanks should be retained as
- blank strings. The default false value indicates that
- blank values are to be ignored and treated as if they were
- not included.
-
- strict_parsing: flag indicating what to do with parsing errors.
- If false (the default), errors are silently ignored.
- If true, errors raise a ValueError exception.
- """
- if fp is None:
- fp = sys.stdin
- if not 'REQUEST_METHOD' in environ:
- environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone
- if environ['REQUEST_METHOD'] == 'POST':
- ctype, pdict = parse_header(environ['CONTENT_TYPE'])
- if ctype == 'multipart/form-data':
- return parse_multipart(fp, pdict)
- elif ctype == 'application/x-www-form-urlencoded':
- clength = int(environ['CONTENT_LENGTH'])
- if maxlen and clength > maxlen:
- raise ValueError, 'Maximum content length exceeded'
- qs = fp.read(clength)
- else:
- qs = '' # Unknown content-type
- if 'QUERY_STRING' in environ:
- if qs: qs = qs + '&'
- qs = qs + environ['QUERY_STRING']
- elif sys.argv[1:]:
- if qs: qs = qs + '&'
- qs = qs + sys.argv[1]
- environ['QUERY_STRING'] = qs # XXX Shouldn't, really
- elif 'QUERY_STRING' in environ:
- qs = environ['QUERY_STRING']
- else:
- if sys.argv[1:]:
- qs = sys.argv[1]
- else:
- qs = ""
- environ['QUERY_STRING'] = qs # XXX Shouldn't, really
- return parse_qs(qs, keep_blank_values, strict_parsing)
-
-
-def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
- """Parse a query given as a string argument.
-
- Arguments:
-
- qs: URL-encoded query string to be parsed
-
- keep_blank_values: flag indicating whether blank values in
- URL encoded queries should be treated as blank strings.
- A true value indicates that blanks should be retained as
- blank strings. The default false value indicates that
- blank values are to be ignored and treated as if they were
- not included.
-
- strict_parsing: flag indicating what to do with parsing errors.
- If false (the default), errors are silently ignored.
- If true, errors raise a ValueError exception.
- """
- dict = {}
- for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
- if name in dict:
- dict[name].append(value)
- else:
- dict[name] = [value]
- return dict
-
-def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
- """Parse a query given as a string argument.
-
- Arguments:
-
- qs: URL-encoded query string to be parsed
-
- keep_blank_values: flag indicating whether blank values in
- URL encoded queries should be treated as blank strings. A
- true value indicates that blanks should be retained as blank
- strings. The default false value indicates that blank values
- are to be ignored and treated as if they were not included.
-
- strict_parsing: flag indicating what to do with parsing errors. If
- false (the default), errors are silently ignored. If true,
- errors raise a ValueError exception.
-
- Returns a list, as G-d intended.
- """
- pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
- r = []
- for name_value in pairs:
- if not name_value and not strict_parsing:
- continue
- nv = name_value.split('=', 1)
- if len(nv) != 2:
- if strict_parsing:
- raise ValueError, "bad query field: %r" % (name_value,)
- # Handle case of a control-name with no equal sign
- if keep_blank_values:
- nv.append('')
- else:
- continue
- if len(nv[1]) or keep_blank_values:
- name = urllib.unquote(nv[0].replace('+', ' '))
- value = urllib.unquote(nv[1].replace('+', ' '))
- r.append((name, value))
-
- return r
-
-
-def parse_multipart(fp, pdict):
- """Parse multipart input.
-
- Arguments:
- fp : input file
- pdict: dictionary containing other parameters of content-type header
-
- Returns a dictionary just like parse_qs(): keys are the field names, each
- value is a list of values for that field. This is easy to use but not
- much good if you are expecting megabytes to be uploaded -- in that case,
- use the FieldStorage class instead which is much more flexible. Note
- that content-type is the raw, unparsed contents of the content-type
- header.
-
- XXX This does not parse nested multipart parts -- use FieldStorage for
- that.
-
- XXX This should really be subsumed by FieldStorage altogether -- no
- point in having two implementations of the same parsing algorithm.
- Also, FieldStorage protects itself better against certain DoS attacks
- by limiting the size of the data read in one chunk. The API here
- does not support that kind of protection. This also affects parse()
- since it can call parse_multipart().
-
- """
- boundary = ""
- if 'boundary' in pdict:
- boundary = pdict['boundary']
- if not valid_boundary(boundary):
- raise ValueError, ('Invalid boundary in multipart form: %r'
- % (boundary,))
-
- nextpart = "--" + boundary
- lastpart = "--" + boundary + "--"
- partdict = {}
- terminator = ""
-
- while terminator != lastpart:
- bytes = -1
- data = None
- if terminator:
- # At start of next part. Read headers first.
- headers = mimetools.Message(fp)
- clength = headers.getheader('content-length')
- if clength:
- try:
- bytes = int(clength)
- except ValueError:
- pass
- if bytes > 0:
- if maxlen and bytes > maxlen:
- raise ValueError, 'Maximum content length exceeded'
- data = fp.read(bytes)
- else:
- data = ""
- # Read lines until end of part.
- lines = []
- while 1:
- line = fp.readline()
- if not line:
- terminator = lastpart # End outer loop
- break
- if line[:2] == "--":
- terminator = line.strip()
- if terminator in (nextpart, lastpart):
- break
- lines.append(line)
- # Done with part.
- if data is None:
- continue
- if bytes < 0:
- if lines:
- # Strip final line terminator
- line = lines[-1]
- if line[-2:] == "\r\n":
- line = line[:-2]
- elif line[-1:] == "\n":
- line = line[:-1]
- lines[-1] = line
- data = "".join(lines)
- line = headers['content-disposition']
- if not line:
- continue
- key, params = parse_header(line)
- if key != 'form-data':
- continue
- if 'name' in params:
- name = params['name']
- else:
- continue
- if name in partdict:
- partdict[name].append(data)
- else:
- partdict[name] = [data]
-
- return partdict
-
-
-def parse_header(line):
- """Parse a Content-type like header.
-
- Return the main content-type and a dictionary of options.
-
- """
- plist = [x.strip() for x in line.split(';')]
- key = plist.pop(0).lower()
- pdict = {}
- for p in plist:
- i = p.find('=')
- if i >= 0:
- name = p[:i].strip().lower()
- value = p[i+1:].strip()
- if len(value) >= 2 and value[0] == value[-1] == '"':
- value = value[1:-1]
- value = value.replace('\\\\', '\\').replace('\\"', '"')
- pdict[name] = value
- return key, pdict
-
-
-# Classes for field storage
-# =========================
-
-class MiniFieldStorage:
-
- """Like FieldStorage, for use when no file uploads are possible."""
-
- # Dummy attributes
- filename = None
- list = None
- type = None
- file = None
- type_options = {}
- disposition = None
- disposition_options = {}
- headers = {}
-
- def __init__(self, name, value):
- """Constructor from field name and value."""
- self.name = name
- self.value = value
- # self.file = StringIO(value)
-
- def __repr__(self):
- """Return printable representation."""
- return "MiniFieldStorage(%r, %r)" % (self.name, self.value)
-
-
-class FieldStorage:
-
- """Store a sequence of fields, reading multipart/form-data.
-
- This class provides naming, typing, files stored on disk, and
- more. At the top level, it is accessible like a dictionary, whose
- keys are the field names. (Note: None can occur as a field name.)
- The items are either a Python list (if there's multiple values) or
- another FieldStorage or MiniFieldStorage object. If it's a single
- object, it has the following attributes:
-
- name: the field name, if specified; otherwise None
-
- filename: the filename, if specified; otherwise None; this is the
- client side filename, *not* the file name on which it is
- stored (that's a temporary file you don't deal with)
-
- value: the value as a *string*; for file uploads, this
- transparently reads the file every time you request the value
-
- file: the file(-like) object from which you can read the data;
- None if the data is stored a simple string
-
- type: the content-type, or None if not specified
-
- type_options: dictionary of options specified on the content-type
- line
-
- disposition: content-disposition, or None if not specified
-
- disposition_options: dictionary of corresponding options
-
- headers: a dictionary(-like) object (sometimes rfc822.Message or a
- subclass thereof) containing *all* headers
-
- The class is subclassable, mostly for the purpose of overriding
- the make_file() method, which is called internally to come up with
- a file open for reading and writing. This makes it possible to
- override the default choice of storing all files in a temporary
- directory and unlinking them as soon as they have been opened.
-
- """
-
- def __init__(self, fp=None, headers=None, outerboundary="",
- environ=os.environ, keep_blank_values=0, strict_parsing=0):
- """Constructor. Read multipart/* until last part.
-
- Arguments, all optional:
-
- fp : file pointer; default: sys.stdin
- (not used when the request method is GET)
-
- headers : header dictionary-like object; default:
- taken from environ as per CGI spec
-
- outerboundary : terminating multipart boundary
- (for internal use only)
-
- environ : environment dictionary; default: os.environ
-
- keep_blank_values: flag indicating whether blank values in
- URL encoded forms should be treated as blank strings.
- A true value indicates that blanks should be retained as
- blank strings. The default false value indicates that
- blank values are to be ignored and treated as if they were
- not included.
-
- strict_parsing: flag indicating what to do with parsing errors.
- If false (the default), errors are silently ignored.
- If true, errors raise a ValueError exception.
-
- """
- method = 'GET'
- self.keep_blank_values = keep_blank_values
- self.strict_parsing = strict_parsing
- if 'REQUEST_METHOD' in environ:
- method = environ['REQUEST_METHOD'].upper()
- if method == 'GET' or method == 'HEAD':
- if 'QUERY_STRING' in environ:
- qs = environ['QUERY_STRING']
- elif sys.argv[1:]:
- qs = sys.argv[1]
- else:
- qs = ""
- fp = StringIO(qs)
- if headers is None:
- headers = {'content-type':
- "application/x-www-form-urlencoded"}
- if headers is None:
- headers = {}
- if method == 'POST':
- # Set default content-type for POST to what's traditional
- headers['content-type'] = "application/x-www-form-urlencoded"
- if 'CONTENT_TYPE' in environ:
- headers['content-type'] = environ['CONTENT_TYPE']
- if 'CONTENT_LENGTH' in environ:
- headers['content-length'] = environ['CONTENT_LENGTH']
- self.fp = fp or sys.stdin
- self.headers = headers
- self.outerboundary = outerboundary
-
- # Process content-disposition header
- cdisp, pdict = "", {}
- if 'content-disposition' in self.headers:
- cdisp, pdict = parse_header(self.headers['content-disposition'])
- self.disposition = cdisp
- self.disposition_options = pdict
- self.name = None
- if 'name' in pdict:
- self.name = pdict['name']
- self.filename = None
- if 'filename' in pdict:
- self.filename = pdict['filename']
-
- # Process content-type header
- #
- # Honor any existing content-type header. But if there is no
- # content-type header, use some sensible defaults. Assume
- # outerboundary is "" at the outer level, but something non-false
- # inside a multi-part. The default for an inner part is text/plain,
- # but for an outer part it should be urlencoded. This should catch
- # bogus clients which erroneously forget to include a content-type
- # header.
- #
- # See below for what we do if there does exist a content-type header,
- # but it happens to be something we don't understand.
- if 'content-type' in self.headers:
- ctype, pdict = parse_header(self.headers['content-type'])
- elif self.outerboundary or method != 'POST':
- ctype, pdict = "text/plain", {}
- else:
- ctype, pdict = 'application/x-www-form-urlencoded', {}
- self.type = ctype
- self.type_options = pdict
- self.innerboundary = ""
- if 'boundary' in pdict:
- self.innerboundary = pdict['boundary']
- clen = -1
- if 'content-length' in self.headers:
- try:
- clen = int(self.headers['content-length'])
- except ValueError:
- pass
- if maxlen and clen > maxlen:
- raise ValueError, 'Maximum content length exceeded'
- self.length = clen
-
- self.list = self.file = None
- self.done = 0
- if ctype == 'application/x-www-form-urlencoded':
- self.read_urlencoded()
- elif ctype[:10] == 'multipart/':
- self.read_multi(environ, keep_blank_values, strict_parsing)
- else:
- self.read_single()
-
- def __repr__(self):
- """Return a printable representation."""
- return "FieldStorage(%r, %r, %r)" % (
- self.name, self.filename, self.value)
-
- def __iter__(self):
- return iter(self.keys())
-
- def __getattr__(self, name):
- if name != 'value':
- raise AttributeError, name
- if self.file:
- self.file.seek(0)
- value = self.file.read()
- self.file.seek(0)
- elif self.list is not None:
- value = self.list
- else:
- value = None
- return value
-
- def __getitem__(self, key):
- """Dictionary style indexing."""
- if self.list is None:
- raise TypeError, "not indexable"
- found = []
- for item in self.list:
- if item.name == key: found.append(item)
- if not found:
- raise KeyError, key
- if len(found) == 1:
- return found[0]
- else:
- return found
-
- def getvalue(self, key, default=None):
- """Dictionary style get() method, including 'value' lookup."""
- if key in self:
- value = self[key]
- if type(value) is type([]):
- return map(attrgetter('value'), value)
- else:
- return value.value
- else:
- return default
-
- def getfirst(self, key, default=None):
- """ Return the first value received."""
- if key in self:
- value = self[key]
- if type(value) is type([]):
- return value[0].value
- else:
- return value.value
- else:
- return default
-
- def getlist(self, key):
- """ Return list of received values."""
- if key in self:
- value = self[key]
- if type(value) is type([]):
- return map(attrgetter('value'), value)
- else:
- return [value.value]
- else:
- return []
-
- def keys(self):
- """Dictionary style keys() method."""
- if self.list is None:
- raise TypeError, "not indexable"
- keys = []
- for item in self.list:
- if item.name not in keys: keys.append(item.name)
- return keys
-
- def has_key(self, key):
- """Dictionary style has_key() method."""
- if self.list is None:
- raise TypeError, "not indexable"
- for item in self.list:
- if item.name == key: return True
- return False
-
- def __contains__(self, key):
- """Dictionary style __contains__ method."""
- if self.list is None:
- raise TypeError, "not indexable"
- for item in self.list:
- if item.name == key: return True
- return False
-
- def __len__(self):
- """Dictionary style len(x) support."""
- return len(self.keys())
-
- def read_urlencoded(self):
- """Internal: read data in query string format."""
- qs = self.fp.read(self.length)
- self.list = list = []
- for key, value in parse_qsl(qs, self.keep_blank_values,
- self.strict_parsing):
- list.append(MiniFieldStorage(key, value))
- self.skip_lines()
-
- FieldStorageClass = None
-
- def read_multi(self, environ, keep_blank_values, strict_parsing):
- """Internal: read a part that is itself multipart."""
- ib = self.innerboundary
- if not valid_boundary(ib):
- raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
- self.list = []
- klass = self.FieldStorageClass or self.__class__
- part = klass(self.fp, {}, ib,
- environ, keep_blank_values, strict_parsing)
- # Throw first part away
- while not part.done:
- headers = rfc822.Message(self.fp)
- part = klass(self.fp, headers, ib,
- environ, keep_blank_values, strict_parsing)
- self.list.append(part)
- self.skip_lines()
-
- def read_single(self):
- """Internal: read an atomic part."""
- if self.length >= 0:
- self.read_binary()
- self.skip_lines()
- else:
- self.read_lines()
- self.file.seek(0)
-
- bufsize = 8*1024 # I/O buffering size for copy to file
-
- def read_binary(self):
- """Internal: read binary data."""
- self.file = self.make_file('b')
- todo = self.length
- if todo >= 0:
- while todo > 0:
- data = self.fp.read(min(todo, self.bufsize))
- if not data:
- self.done = -1
- break
- self.file.write(data)
- todo = todo - len(data)
-
- def read_lines(self):
- """Internal: read lines until EOF or outerboundary."""
- self.file = self.__file = StringIO()
- if self.outerboundary:
- self.read_lines_to_outerboundary()
- else:
- self.read_lines_to_eof()
-
- def __write(self, line):
- if self.__file is not None:
- if self.__file.tell() + len(line) > 1000:
- self.file = self.make_file('')
- self.file.write(self.__file.getvalue())
- self.__file = None
- self.file.write(line)
-
- def read_lines_to_eof(self):
- """Internal: read lines until EOF."""
- while 1:
- line = self.fp.readline(1<<16)
- if not line:
- self.done = -1
- break
- self.__write(line)
-
- def read_lines_to_outerboundary(self):
- """Internal: read lines until outerboundary."""
- next = "--" + self.outerboundary
- last = next + "--"
- delim = ""
- last_line_lfend = True
- while 1:
- line = self.fp.readline(1<<16)
- if not line:
- self.done = -1
- break
- if line[:2] == "--" and last_line_lfend:
- strippedline = line.strip()
- if strippedline == next:
- break
- if strippedline == last:
- self.done = 1
- break
- odelim = delim
- if line[-2:] == "\r\n":
- delim = "\r\n"
- line = line[:-2]
- last_line_lfend = True
- elif line[-1] == "\n":
- delim = "\n"
- line = line[:-1]
- last_line_lfend = True
- else:
- delim = ""
- last_line_lfend = False
- self.__write(odelim + line)
-
- def skip_lines(self):
- """Internal: skip lines until outer boundary if defined."""
- if not self.outerboundary or self.done:
- return
- next = "--" + self.outerboundary
- last = next + "--"
- last_line_lfend = True
- while 1:
- line = self.fp.readline(1<<16)
- if not line:
- self.done = -1
- break
- if line[:2] == "--" and last_line_lfend:
- strippedline = line.strip()
- if strippedline == next:
- break
- if strippedline == last:
- self.done = 1
- break
- last_line_lfend = line.endswith('\n')
-
- def make_file(self, binary=None):
- """Overridable: return a readable & writable file.
-
- The file will be used as follows:
- - data is written to it
- - seek(0)
- - data is read from it
-
- The 'binary' argument is unused -- the file is always opened
- in binary mode.
-
- This version opens a temporary file for reading and writing,
- and immediately deletes (unlinks) it. The trick (on Unix!) is
- that the file can still be used, but it can't be opened by
- another process, and it will automatically be deleted when it
- is closed or when the current process terminates.
-
- If you want a more permanent file, you derive a class which
- overrides this method. If you want a visible temporary file
- that is nevertheless automatically deleted when the script
- terminates, try defining a __del__ method in a derived class
- which unlinks the temporary files you have created.
-
- """
- import tempfile
- return tempfile.TemporaryFile("w+b")
-
-
-
-# Backwards Compatibility Classes
-# ===============================
-
-class FormContentDict(UserDict.UserDict):
- """Form content as dictionary with a list of values per field.
-
- form = FormContentDict()
-
- form[key] -> [value, value, ...]
- key in form -> Boolean
- form.keys() -> [key, key, ...]
- form.values() -> [[val, val, ...], [val, val, ...], ...]
- form.items() -> [(key, [val, val, ...]), (key, [val, val, ...]), ...]
- form.dict == {key: [val, val, ...], ...}
-
- """
- def __init__(self, environ=os.environ):
- self.dict = self.data = parse(environ=environ)
- self.query_string = environ['QUERY_STRING']
-
-
-class SvFormContentDict(FormContentDict):
- """Form content as dictionary expecting a single value per field.
-
- If you only expect a single value for each field, then form[key]
- will return that single value. It will raise an IndexError if
- that expectation is not true. If you expect a field to have
- possible multiple values, than you can use form.getlist(key) to
- get all of the values. values() and items() are a compromise:
- they return single strings where there is a single value, and
- lists of strings otherwise.
-
- """
- def __getitem__(self, key):
- if len(self.dict[key]) > 1:
- raise IndexError, 'expecting a single value'
- return self.dict[key][0]
- def getlist(self, key):
- return self.dict[key]
- def values(self):
- result = []
- for value in self.dict.values():
- if len(value) == 1:
- result.append(value[0])
- else: result.append(value)
- return result
- def items(self):
- result = []
- for key, value in self.dict.items():
- if len(value) == 1:
- result.append((key, value[0]))
- else: result.append((key, value))
- return result
-
-
-class InterpFormContentDict(SvFormContentDict):
- """This class is present for backwards compatibility only."""
- def __getitem__(self, key):
- v = SvFormContentDict.__getitem__(self, key)
- if v[0] in '0123456789+-.':
- try: return int(v)
- except ValueError:
- try: return float(v)
- except ValueError: pass
- return v.strip()
- def values(self):
- result = []
- for key in self.keys():
- try:
- result.append(self[key])
- except IndexError:
- result.append(self.dict[key])
- return result
- def items(self):
- result = []
- for key in self.keys():
- try:
- result.append((key, self[key]))
- except IndexError:
- result.append((key, self.dict[key]))
- return result
-
-
-class FormContent(FormContentDict):
- """This class is present for backwards compatibility only."""
- def values(self, key):
- if key in self.dict :return self.dict[key]
- else: return None
- def indexed_value(self, key, location):
- if key in self.dict:
- if len(self.dict[key]) > location:
- return self.dict[key][location]
- else: return None
- else: return None
- def value(self, key):
- if key in self.dict: return self.dict[key][0]
- else: return None
- def length(self, key):
- return len(self.dict[key])
- def stripped(self, key):
- if key in self.dict: return self.dict[key][0].strip()
- else: return None
- def pars(self):
- return self.dict
-
-
-# Test/debug code
-# ===============
-
-def test(environ=os.environ):
- """Robust test CGI script, usable as main program.
-
- Write minimal HTTP headers and dump all information provided to
- the script in HTML form.
-
- """
- print "Content-type: text/html"
- print
- sys.stderr = sys.stdout
- try:
- form = FieldStorage() # Replace with other classes to test those
- print_directory()
- print_arguments()
- print_form(form)
- print_environ(environ)
- print_environ_usage()
- def f():
- exec "testing print_exception() -- <I>italics?</I>"
- def g(f=f):
- f()
- print "<H3>What follows is a test, not an actual exception:</H3>"
- g()
- except:
- print_exception()
-
- print "<H1>Second try with a small maxlen...</H1>"
-
- global maxlen
- maxlen = 50
- try:
- form = FieldStorage() # Replace with other classes to test those
- print_directory()
- print_arguments()
- print_form(form)
- print_environ(environ)
- except:
- print_exception()
-
-def print_exception(type=None, value=None, tb=None, limit=None):
- if type is None:
- type, value, tb = sys.exc_info()
- import traceback
- print
- print "<H3>Traceback (most recent call last):</H3>"
- list = traceback.format_tb(tb, limit) + \
- traceback.format_exception_only(type, value)
- print "<PRE>%s<B>%s</B></PRE>" % (
- escape("".join(list[:-1])),
- escape(list[-1]),
- )
- del tb
-
-def print_environ(environ=os.environ):
- """Dump the shell environment as HTML."""
- keys = environ.keys()
- keys.sort()
- print
- print "<H3>Shell Environment:</H3>"
- print "<DL>"
- for key in keys:
- print "<DT>", escape(key), "<DD>", escape(environ[key])
- print "</DL>"
- print
-
-def print_form(form):
- """Dump the contents of a form as HTML."""
- keys = form.keys()
- keys.sort()
- print
- print "<H3>Form Contents:</H3>"
- if not keys:
- print "<P>No form fields."
- print "<DL>"
- for key in keys:
- print "<DT>" + escape(key) + ":",
- value = form[key]
- print "<i>" + escape(repr(type(value))) + "</i>"
- print "<DD>" + escape(repr(value))
- print "</DL>"
- print
-
-def print_directory():
- """Dump the current directory as HTML."""
- print
- print "<H3>Current Working Directory:</H3>"
- try:
- pwd = os.getcwd()
- except os.error, msg:
- print "os.error:", escape(str(msg))
- else:
- print escape(pwd)
- print
-
-def print_arguments():
- print
- print "<H3>Command Line Arguments:</H3>"
- print
- print sys.argv
- print
-
-def print_environ_usage():
- """Dump a list of environment variables used by CGI as HTML."""
- print """
-<H3>These environment variables could have been set:</H3>
-<UL>
-<LI>AUTH_TYPE
-<LI>CONTENT_LENGTH
-<LI>CONTENT_TYPE
-<LI>DATE_GMT
-<LI>DATE_LOCAL
-<LI>DOCUMENT_NAME
-<LI>DOCUMENT_ROOT
-<LI>DOCUMENT_URI
-<LI>GATEWAY_INTERFACE
-<LI>LAST_MODIFIED
-<LI>PATH
-<LI>PATH_INFO
-<LI>PATH_TRANSLATED
-<LI>QUERY_STRING
-<LI>REMOTE_ADDR
-<LI>REMOTE_HOST
-<LI>REMOTE_IDENT
-<LI>REMOTE_USER
-<LI>REQUEST_METHOD
-<LI>SCRIPT_NAME
-<LI>SERVER_NAME
-<LI>SERVER_PORT
-<LI>SERVER_PROTOCOL
-<LI>SERVER_ROOT
-<LI>SERVER_SOFTWARE
-</UL>
-In addition, HTTP headers sent by the server may be passed in the
-environment as well. Here are some common variable names:
-<UL>
-<LI>HTTP_ACCEPT
-<LI>HTTP_CONNECTION
-<LI>HTTP_HOST
-<LI>HTTP_PRAGMA
-<LI>HTTP_REFERER
-<LI>HTTP_USER_AGENT
-</UL>
-"""
-
-
-# Utilities
-# =========
-
-def escape(s, quote=None):
- '''Replace special characters "&", "<" and ">" to HTML-safe sequences.
- If the optional flag quote is true, the quotation mark character (")
- is also translated.'''
- s = s.replace("&", "&amp;") # Must be done first!
- s = s.replace("<", "&lt;")
- s = s.replace(">", "&gt;")
- if quote:
- s = s.replace('"', "&quot;")
- return s
-
-def valid_boundary(s, _vb_pattern="^[ -~]{0,200}[!-~]$"):
- import re
- return re.match(_vb_pattern, s)
-
-# Invoke mainline
-# ===============
-
-# Call test() when this file is run as a script (not imported as a module)
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/cgitb.py b/sys/lib/python/cgitb.py
deleted file mode 100644
index 068c1da94..000000000
--- a/sys/lib/python/cgitb.py
+++ /dev/null
@@ -1,317 +0,0 @@
-"""More comprehensive traceback formatting for Python scripts.
-
-To enable this module, do:
-
- import cgitb; cgitb.enable()
-
-at the top of your script. The optional arguments to enable() are:
-
- display - if true, tracebacks are displayed in the web browser
- logdir - if set, tracebacks are written to files in this directory
- context - number of lines of source code to show for each stack frame
- format - 'text' or 'html' controls the output format
-
-By default, tracebacks are displayed but not saved, the context is 5 lines
-and the output format is 'html' (for backwards compatibility with the
-original use of this module)
-
-Alternatively, if you have caught an exception and want cgitb to display it
-for you, call cgitb.handler(). The optional argument to handler() is a
-3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
-The default handler displays output as HTML.
-"""
-
-__author__ = 'Ka-Ping Yee'
-
-__version__ = '$Revision: 39758 $'
-
-import sys
-
-def reset():
- """Return a string that resets the CGI and browser to a known state."""
- return '''<!--: spam
-Content-Type: text/html
-
-<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> -->
-<body bgcolor="#f0f0f8"><font color="#f0f0f8" size="-5"> --> -->
-</font> </font> </font> </script> </object> </blockquote> </pre>
-</table> </table> </table> </table> </table> </font> </font> </font>'''
-
-__UNDEF__ = [] # a special sentinel object
-def small(text):
- if text:
- return '<small>' + text + '</small>'
- else:
- return ''
-
-def strong(text):
- if text:
- return '<strong>' + text + '</strong>'
- else:
- return ''
-
-def grey(text):
- if text:
- return '<font color="#909090">' + text + '</font>'
- else:
- return ''
-
-def lookup(name, frame, locals):
- """Find the value for a given name in the given environment."""
- if name in locals:
- return 'local', locals[name]
- if name in frame.f_globals:
- return 'global', frame.f_globals[name]
- if '__builtins__' in frame.f_globals:
- builtins = frame.f_globals['__builtins__']
- if type(builtins) is type({}):
- if name in builtins:
- return 'builtin', builtins[name]
- else:
- if hasattr(builtins, name):
- return 'builtin', getattr(builtins, name)
- return None, __UNDEF__
-
-def scanvars(reader, frame, locals):
- """Scan one logical line of Python and look up values of variables used."""
- import tokenize, keyword
- vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
- for ttype, token, start, end, line in tokenize.generate_tokens(reader):
- if ttype == tokenize.NEWLINE: break
- if ttype == tokenize.NAME and token not in keyword.kwlist:
- if lasttoken == '.':
- if parent is not __UNDEF__:
- value = getattr(parent, token, __UNDEF__)
- vars.append((prefix + token, prefix, value))
- else:
- where, value = lookup(token, frame, locals)
- vars.append((token, where, value))
- elif token == '.':
- prefix += lasttoken + '.'
- parent = value
- else:
- parent, prefix = None, ''
- lasttoken = token
- return vars
-
-def html((etype, evalue, etb), context=5):
- """Return a nice HTML document describing a given traceback."""
- import os, types, time, traceback, linecache, inspect, pydoc
-
- if type(etype) is types.ClassType:
- etype = etype.__name__
- pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
- date = time.ctime(time.time())
- head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading(
- '<big><big>%s</big></big>' %
- strong(pydoc.html.escape(str(etype))),
- '#ffffff', '#6622aa', pyver + '<br>' + date) + '''
-<p>A problem occurred in a Python script. Here is the sequence of
-function calls leading up to the error, in the order they occurred.</p>'''
-
- indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>'
- frames = []
- records = inspect.getinnerframes(etb, context)
- for frame, file, lnum, func, lines, index in records:
- if file:
- file = os.path.abspath(file)
- link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file))
- else:
- file = link = '?'
- args, varargs, varkw, locals = inspect.getargvalues(frame)
- call = ''
- if func != '?':
- call = 'in ' + strong(func) + \
- inspect.formatargvalues(args, varargs, varkw, locals,
- formatvalue=lambda value: '=' + pydoc.html.repr(value))
-
- highlight = {}
- def reader(lnum=[lnum]):
- highlight[lnum[0]] = 1
- try: return linecache.getline(file, lnum[0])
- finally: lnum[0] += 1
- vars = scanvars(reader, frame, locals)
-
- rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' %
- ('<big>&nbsp;</big>', link, call)]
- if index is not None:
- i = lnum - index
- for line in lines:
- num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;'
- line = '<tt>%s%s</tt>' % (num, pydoc.html.preformat(line))
- if i in highlight:
- rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line)
- else:
- rows.append('<tr><td>%s</td></tr>' % grey(line))
- i += 1
-
- done, dump = {}, []
- for name, where, value in vars:
- if name in done: continue
- done[name] = 1
- if value is not __UNDEF__:
- if where in ('global', 'builtin'):
- name = ('<em>%s</em> ' % where) + strong(name)
- elif where == 'local':
- name = strong(name)
- else:
- name = where + strong(name.split('.')[-1])
- dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value)))
- else:
- dump.append(name + ' <em>undefined</em>')
-
- rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump))))
- frames.append('''
-<table width="100%%" cellspacing=0 cellpadding=0 border=0>
-%s</table>''' % '\n'.join(rows))
-
- exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))),
- pydoc.html.escape(str(evalue)))]
- if type(evalue) is types.InstanceType:
- for name in dir(evalue):
- if name[:1] == '_': continue
- value = pydoc.html.repr(getattr(evalue, name))
- exception.append('\n<br>%s%s&nbsp;=\n%s' % (indent, name, value))
-
- import traceback
- return head + ''.join(frames) + ''.join(exception) + '''
-
-
-<!-- The above is a description of an error in a Python program, formatted
- for a Web browser because the 'cgitb' module was enabled. In case you
- are not reading this in a Web browser, here is the original traceback:
-
-%s
--->
-''' % ''.join(traceback.format_exception(etype, evalue, etb))
-
-def text((etype, evalue, etb), context=5):
- """Return a plain text document describing a given traceback."""
- import os, types, time, traceback, linecache, inspect, pydoc
-
- if type(etype) is types.ClassType:
- etype = etype.__name__
- pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
- date = time.ctime(time.time())
- head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + '''
-A problem occurred in a Python script. Here is the sequence of
-function calls leading up to the error, in the order they occurred.
-'''
-
- frames = []
- records = inspect.getinnerframes(etb, context)
- for frame, file, lnum, func, lines, index in records:
- file = file and os.path.abspath(file) or '?'
- args, varargs, varkw, locals = inspect.getargvalues(frame)
- call = ''
- if func != '?':
- call = 'in ' + func + \
- inspect.formatargvalues(args, varargs, varkw, locals,
- formatvalue=lambda value: '=' + pydoc.text.repr(value))
-
- highlight = {}
- def reader(lnum=[lnum]):
- highlight[lnum[0]] = 1
- try: return linecache.getline(file, lnum[0])
- finally: lnum[0] += 1
- vars = scanvars(reader, frame, locals)
-
- rows = [' %s %s' % (file, call)]
- if index is not None:
- i = lnum - index
- for line in lines:
- num = '%5d ' % i
- rows.append(num+line.rstrip())
- i += 1
-
- done, dump = {}, []
- for name, where, value in vars:
- if name in done: continue
- done[name] = 1
- if value is not __UNDEF__:
- if where == 'global': name = 'global ' + name
- elif where != 'local': name = where + name.split('.')[-1]
- dump.append('%s = %s' % (name, pydoc.text.repr(value)))
- else:
- dump.append(name + ' undefined')
-
- rows.append('\n'.join(dump))
- frames.append('\n%s\n' % '\n'.join(rows))
-
- exception = ['%s: %s' % (str(etype), str(evalue))]
- if type(evalue) is types.InstanceType:
- for name in dir(evalue):
- value = pydoc.text.repr(getattr(evalue, name))
- exception.append('\n%s%s = %s' % (" "*4, name, value))
-
- import traceback
- return head + ''.join(frames) + ''.join(exception) + '''
-
-The above is a description of an error in a Python program. Here is
-the original traceback:
-
-%s
-''' % ''.join(traceback.format_exception(etype, evalue, etb))
-
-class Hook:
- """A hook to replace sys.excepthook that shows tracebacks in HTML."""
-
- def __init__(self, display=1, logdir=None, context=5, file=None,
- format="html"):
- self.display = display # send tracebacks to browser if true
- self.logdir = logdir # log tracebacks to files if not None
- self.context = context # number of source code lines per frame
- self.file = file or sys.stdout # place to send the output
- self.format = format
-
- def __call__(self, etype, evalue, etb):
- self.handle((etype, evalue, etb))
-
- def handle(self, info=None):
- info = info or sys.exc_info()
- if self.format == "html":
- self.file.write(reset())
-
- formatter = (self.format=="html") and html or text
- plain = False
- try:
- doc = formatter(info, self.context)
- except: # just in case something goes wrong
- import traceback
- doc = ''.join(traceback.format_exception(*info))
- plain = True
-
- if self.display:
- if plain:
- doc = doc.replace('&', '&amp;').replace('<', '&lt;')
- self.file.write('<pre>' + doc + '</pre>\n')
- else:
- self.file.write(doc + '\n')
- else:
- self.file.write('<p>A problem occurred in a Python script.\n')
-
- if self.logdir is not None:
- import os, tempfile
- suffix = ['.txt', '.html'][self.format=="html"]
- (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
- try:
- file = os.fdopen(fd, 'w')
- file.write(doc)
- file.close()
- msg = '<p> %s contains the description of this error.' % path
- except:
- msg = '<p> Tried to save traceback to %s, but failed.' % path
- self.file.write(msg + '\n')
- try:
- self.file.flush()
- except: pass
-
-handler = Hook().handle
-def enable(display=1, logdir=None, context=5, format="html"):
- """Install an exception handler that formats tracebacks as HTML.
-
- The optional argument 'display' can be set to 0 to suppress sending the
- traceback to the browser, and 'logdir' can be set to a directory to cause
- tracebacks to be written to files there."""
- sys.excepthook = Hook(display=display, logdir=logdir,
- context=context, format=format)
diff --git a/sys/lib/python/chunk.py b/sys/lib/python/chunk.py
deleted file mode 100644
index a8fbc1051..000000000
--- a/sys/lib/python/chunk.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""Simple class to read IFF chunks.
-
-An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File
-Format)) has the following structure:
-
-+----------------+
-| ID (4 bytes) |
-+----------------+
-| size (4 bytes) |
-+----------------+
-| data |
-| ... |
-+----------------+
-
-The ID is a 4-byte string which identifies the type of chunk.
-
-The size field (a 32-bit value, encoded using big-endian byte order)
-gives the size of the whole chunk, including the 8-byte header.
-
-Usually an IFF-type file consists of one or more chunks. The proposed
-usage of the Chunk class defined here is to instantiate an instance at
-the start of each chunk and read from the instance until it reaches
-the end, after which a new instance can be instantiated. At the end
-of the file, creating a new instance will fail with a EOFError
-exception.
-
-Usage:
-while True:
- try:
- chunk = Chunk(file)
- except EOFError:
- break
- chunktype = chunk.getname()
- while True:
- data = chunk.read(nbytes)
- if not data:
- pass
- # do something with data
-
-The interface is file-like. The implemented methods are:
-read, close, seek, tell, isatty.
-Extra methods are: skip() (called by close, skips to the end of the chunk),
-getname() (returns the name (ID) of the chunk)
-
-The __init__ method has one required argument, a file-like object
-(including a chunk instance), and one optional argument, a flag which
-specifies whether or not chunks are aligned on 2-byte boundaries. The
-default is 1, i.e. aligned.
-"""
-
-class Chunk:
- def __init__(self, file, align=True, bigendian=True, inclheader=False):
- import struct
- self.closed = False
- self.align = align # whether to align to word (2-byte) boundaries
- if bigendian:
- strflag = '>'
- else:
- strflag = '<'
- self.file = file
- self.chunkname = file.read(4)
- if len(self.chunkname) < 4:
- raise EOFError
- try:
- self.chunksize = struct.unpack(strflag+'L', file.read(4))[0]
- except struct.error:
- raise EOFError
- if inclheader:
- self.chunksize = self.chunksize - 8 # subtract header
- self.size_read = 0
- try:
- self.offset = self.file.tell()
- except (AttributeError, IOError):
- self.seekable = False
- else:
- self.seekable = True
-
- def getname(self):
- """Return the name (ID) of the current chunk."""
- return self.chunkname
-
- def getsize(self):
- """Return the size of the current chunk."""
- return self.chunksize
-
- def close(self):
- if not self.closed:
- self.skip()
- self.closed = True
-
- def isatty(self):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- return False
-
- def seek(self, pos, whence=0):
- """Seek to specified position into the chunk.
- Default position is 0 (start of chunk).
- If the file is not seekable, this will result in an error.
- """
-
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if not self.seekable:
- raise IOError, "cannot seek"
- if whence == 1:
- pos = pos + self.size_read
- elif whence == 2:
- pos = pos + self.chunksize
- if pos < 0 or pos > self.chunksize:
- raise RuntimeError
- self.file.seek(self.offset + pos, 0)
- self.size_read = pos
-
- def tell(self):
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- return self.size_read
-
- def read(self, size=-1):
- """Read at most size bytes from the chunk.
- If size is omitted or negative, read until the end
- of the chunk.
- """
-
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if self.size_read >= self.chunksize:
- return ''
- if size < 0:
- size = self.chunksize - self.size_read
- if size > self.chunksize - self.size_read:
- size = self.chunksize - self.size_read
- data = self.file.read(size)
- self.size_read = self.size_read + len(data)
- if self.size_read == self.chunksize and \
- self.align and \
- (self.chunksize & 1):
- dummy = self.file.read(1)
- self.size_read = self.size_read + len(dummy)
- return data
-
- def skip(self):
- """Skip the rest of the chunk.
- If you are not interested in the contents of the chunk,
- this method should be called so that the file points to
- the start of the next chunk.
- """
-
- if self.closed:
- raise ValueError, "I/O operation on closed file"
- if self.seekable:
- try:
- n = self.chunksize - self.size_read
- # maybe fix alignment
- if self.align and (self.chunksize & 1):
- n = n + 1
- self.file.seek(n, 1)
- self.size_read = self.size_read + n
- return
- except IOError:
- pass
- while self.size_read < self.chunksize:
- n = min(8192, self.chunksize - self.size_read)
- dummy = self.read(n)
- if not dummy:
- raise EOFError
diff --git a/sys/lib/python/cmd.py b/sys/lib/python/cmd.py
deleted file mode 100644
index 3f82b4871..000000000
--- a/sys/lib/python/cmd.py
+++ /dev/null
@@ -1,405 +0,0 @@
-"""A generic class to build line-oriented command interpreters.
-
-Interpreters constructed with this class obey the following conventions:
-
-1. End of file on input is processed as the command 'EOF'.
-2. A command is parsed out of each line by collecting the prefix composed
- of characters in the identchars member.
-3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
- is passed a single argument consisting of the remainder of the line.
-4. Typing an empty line repeats the last command. (Actually, it calls the
- method `emptyline', which may be overridden in a subclass.)
-5. There is a predefined `help' method. Given an argument `topic', it
- calls the command `help_topic'. With no arguments, it lists all topics
- with defined help_ functions, broken into up to three topics; documented
- commands, miscellaneous help topics, and undocumented commands.
-6. The command '?' is a synonym for `help'. The command '!' is a synonym
- for `shell', if a do_shell method exists.
-7. If completion is enabled, completing commands will be done automatically,
- and completing of commands args is done by calling complete_foo() with
- arguments text, line, begidx, endidx. text is string we are matching
- against, all returned matches must begin with it. line is the current
- input line (lstripped), begidx and endidx are the beginning and end
- indexes of the text being matched, which could be used to provide
- different completion depending upon which position the argument is in.
-
-The `default' method may be overridden to intercept commands for which there
-is no do_ method.
-
-The `completedefault' method may be overridden to intercept completions for
-commands that have no complete_ method.
-
-The data member `self.ruler' sets the character used to draw separator lines
-in the help messages. If empty, no ruler line is drawn. It defaults to "=".
-
-If the value of `self.intro' is nonempty when the cmdloop method is called,
-it is printed out on interpreter startup. This value may be overridden
-via an optional argument to the cmdloop() method.
-
-The data members `self.doc_header', `self.misc_header', and
-`self.undoc_header' set the headers used for the help function's
-listings of documented functions, miscellaneous topics, and undocumented
-functions respectively.
-
-These interpreters use raw_input; thus, if the readline module is loaded,
-they automatically support Emacs-like command history and editing features.
-"""
-
-import string
-
-__all__ = ["Cmd"]
-
-PROMPT = '(Cmd) '
-IDENTCHARS = string.ascii_letters + string.digits + '_'
-
-class Cmd:
- """A simple framework for writing line-oriented command interpreters.
-
- These are often useful for test harnesses, administrative tools, and
- prototypes that will later be wrapped in a more sophisticated interface.
-
- A Cmd instance or subclass instance is a line-oriented interpreter
- framework. There is no good reason to instantiate Cmd itself; rather,
- it's useful as a superclass of an interpreter class you define yourself
- in order to inherit Cmd's methods and encapsulate action methods.
-
- """
- prompt = PROMPT
- identchars = IDENTCHARS
- ruler = '='
- lastcmd = ''
- intro = None
- doc_leader = ""
- doc_header = "Documented commands (type help <topic>):"
- misc_header = "Miscellaneous help topics:"
- undoc_header = "Undocumented commands:"
- nohelp = "*** No help on %s"
- use_rawinput = 1
-
- def __init__(self, completekey='tab', stdin=None, stdout=None):
- """Instantiate a line-oriented interpreter framework.
-
- The optional argument 'completekey' is the readline name of a
- completion key; it defaults to the Tab key. If completekey is
- not None and the readline module is available, command completion
- is done automatically. The optional arguments stdin and stdout
- specify alternate input and output file objects; if not specified,
- sys.stdin and sys.stdout are used.
-
- """
- import sys
- if stdin is not None:
- self.stdin = stdin
- else:
- self.stdin = sys.stdin
- if stdout is not None:
- self.stdout = stdout
- else:
- self.stdout = sys.stdout
- self.cmdqueue = []
- self.completekey = completekey
-
- def cmdloop(self, intro=None):
- """Repeatedly issue a prompt, accept input, parse an initial prefix
- off the received input, and dispatch to action methods, passing them
- the remainder of the line as argument.
-
- """
-
- self.preloop()
- if self.use_rawinput and self.completekey:
- try:
- import readline
- self.old_completer = readline.get_completer()
- readline.set_completer(self.complete)
- readline.parse_and_bind(self.completekey+": complete")
- except ImportError:
- pass
- try:
- if intro is not None:
- self.intro = intro
- if self.intro:
- self.stdout.write(str(self.intro)+"\n")
- stop = None
- while not stop:
- if self.cmdqueue:
- line = self.cmdqueue.pop(0)
- else:
- if self.use_rawinput:
- try:
- line = raw_input(self.prompt)
- except EOFError:
- line = 'EOF'
- else:
- self.stdout.write(self.prompt)
- self.stdout.flush()
- line = self.stdin.readline()
- if not len(line):
- line = 'EOF'
- else:
- line = line[:-1] # chop \n
- line = self.precmd(line)
- stop = self.onecmd(line)
- stop = self.postcmd(stop, line)
- self.postloop()
- finally:
- if self.use_rawinput and self.completekey:
- try:
- import readline
- readline.set_completer(self.old_completer)
- except ImportError:
- pass
-
-
- def precmd(self, line):
- """Hook method executed just before the command line is
- interpreted, but after the input prompt is generated and issued.
-
- """
- return line
-
- def postcmd(self, stop, line):
- """Hook method executed just after a command dispatch is finished."""
- return stop
-
- def preloop(self):
- """Hook method executed once when the cmdloop() method is called."""
- pass
-
- def postloop(self):
- """Hook method executed once when the cmdloop() method is about to
- return.
-
- """
- pass
-
- def parseline(self, line):
- """Parse the line into a command name and a string containing
- the arguments. Returns a tuple containing (command, args, line).
- 'command' and 'args' may be None if the line couldn't be parsed.
- """
- line = line.strip()
- if not line:
- return None, None, line
- elif line[0] == '?':
- line = 'help ' + line[1:]
- elif line[0] == '!':
- if hasattr(self, 'do_shell'):
- line = 'shell ' + line[1:]
- else:
- return None, None, line
- i, n = 0, len(line)
- while i < n and line[i] in self.identchars: i = i+1
- cmd, arg = line[:i], line[i:].strip()
- return cmd, arg, line
-
- def onecmd(self, line):
- """Interpret the argument as though it had been typed in response
- to the prompt.
-
- This may be overridden, but should not normally need to be;
- see the precmd() and postcmd() methods for useful execution hooks.
- The return value is a flag indicating whether interpretation of
- commands by the interpreter should stop.
-
- """
- cmd, arg, line = self.parseline(line)
- if not line:
- return self.emptyline()
- if cmd is None:
- return self.default(line)
- self.lastcmd = line
- if cmd == '':
- return self.default(line)
- else:
- try:
- func = getattr(self, 'do_' + cmd)
- except AttributeError:
- return self.default(line)
- return func(arg)
-
- def emptyline(self):
- """Called when an empty line is entered in response to the prompt.
-
- If this method is not overridden, it repeats the last nonempty
- command entered.
-
- """
- if self.lastcmd:
- return self.onecmd(self.lastcmd)
-
- def default(self, line):
- """Called on an input line when the command prefix is not recognized.
-
- If this method is not overridden, it prints an error message and
- returns.
-
- """
- self.stdout.write('*** Unknown syntax: %s\n'%line)
-
- def completedefault(self, *ignored):
- """Method called to complete an input line when no command-specific
- complete_*() method is available.
-
- By default, it returns an empty list.
-
- """
- return []
-
- def completenames(self, text, *ignored):
- dotext = 'do_'+text
- return [a[3:] for a in self.get_names() if a.startswith(dotext)]
-
- def complete(self, text, state):
- """Return the next possible completion for 'text'.
-
- If a command has not been entered, then complete against command list.
- Otherwise try to call complete_<command> to get list of completions.
- """
- if state == 0:
- import readline
- origline = readline.get_line_buffer()
- line = origline.lstrip()
- stripped = len(origline) - len(line)
- begidx = readline.get_begidx() - stripped
- endidx = readline.get_endidx() - stripped
- if begidx>0:
- cmd, args, foo = self.parseline(line)
- if cmd == '':
- compfunc = self.completedefault
- else:
- try:
- compfunc = getattr(self, 'complete_' + cmd)
- except AttributeError:
- compfunc = self.completedefault
- else:
- compfunc = self.completenames
- self.completion_matches = compfunc(text, line, begidx, endidx)
- try:
- return self.completion_matches[state]
- except IndexError:
- return None
-
- def get_names(self):
- # Inheritance says we have to look in class and
- # base classes; order is not important.
- names = []
- classes = [self.__class__]
- while classes:
- aclass = classes.pop(0)
- if aclass.__bases__:
- classes = classes + list(aclass.__bases__)
- names = names + dir(aclass)
- return names
-
- def complete_help(self, *args):
- return self.completenames(*args)
-
- def do_help(self, arg):
- if arg:
- # XXX check arg syntax
- try:
- func = getattr(self, 'help_' + arg)
- except AttributeError:
- try:
- doc=getattr(self, 'do_' + arg).__doc__
- if doc:
- self.stdout.write("%s\n"%str(doc))
- return
- except AttributeError:
- pass
- self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
- return
- func()
- else:
- names = self.get_names()
- cmds_doc = []
- cmds_undoc = []
- help = {}
- for name in names:
- if name[:5] == 'help_':
- help[name[5:]]=1
- names.sort()
- # There can be duplicates if routines overridden
- prevname = ''
- for name in names:
- if name[:3] == 'do_':
- if name == prevname:
- continue
- prevname = name
- cmd=name[3:]
- if cmd in help:
- cmds_doc.append(cmd)
- del help[cmd]
- elif getattr(self, name).__doc__:
- cmds_doc.append(cmd)
- else:
- cmds_undoc.append(cmd)
- self.stdout.write("%s\n"%str(self.doc_leader))
- self.print_topics(self.doc_header, cmds_doc, 15,80)
- self.print_topics(self.misc_header, help.keys(),15,80)
- self.print_topics(self.undoc_header, cmds_undoc, 15,80)
-
- def print_topics(self, header, cmds, cmdlen, maxcol):
- if cmds:
- self.stdout.write("%s\n"%str(header))
- if self.ruler:
- self.stdout.write("%s\n"%str(self.ruler * len(header)))
- self.columnize(cmds, maxcol-1)
- self.stdout.write("\n")
-
- def columnize(self, list, displaywidth=80):
- """Display a list of strings as a compact set of columns.
-
- Each column is only as wide as necessary.
- Columns are separated by two spaces (one was not legible enough).
- """
- if not list:
- self.stdout.write("<empty>\n")
- return
- nonstrings = [i for i in range(len(list))
- if not isinstance(list[i], str)]
- if nonstrings:
- raise TypeError, ("list[i] not a string for i in %s" %
- ", ".join(map(str, nonstrings)))
- size = len(list)
- if size == 1:
- self.stdout.write('%s\n'%str(list[0]))
- return
- # Try every row count from 1 upwards
- for nrows in range(1, len(list)):
- ncols = (size+nrows-1) // nrows
- colwidths = []
- totwidth = -2
- for col in range(ncols):
- colwidth = 0
- for row in range(nrows):
- i = row + nrows*col
- if i >= size:
- break
- x = list[i]
- colwidth = max(colwidth, len(x))
- colwidths.append(colwidth)
- totwidth += colwidth + 2
- if totwidth > displaywidth:
- break
- if totwidth <= displaywidth:
- break
- else:
- nrows = len(list)
- ncols = 1
- colwidths = [0]
- for row in range(nrows):
- texts = []
- for col in range(ncols):
- i = row + nrows*col
- if i >= size:
- x = ""
- else:
- x = list[i]
- texts.append(x)
- while texts and not texts[-1]:
- del texts[-1]
- for col in range(len(texts)):
- texts[col] = texts[col].ljust(colwidths[col])
- self.stdout.write("%s\n"%str(" ".join(texts)))
diff --git a/sys/lib/python/code.py b/sys/lib/python/code.py
deleted file mode 100644
index 6bdc658ad..000000000
--- a/sys/lib/python/code.py
+++ /dev/null
@@ -1,307 +0,0 @@
-"""Utilities needed to emulate Python's interactive interpreter.
-
-"""
-
-# Inspired by similar code by Jeff Epler and Fredrik Lundh.
-
-
-import sys
-import traceback
-from codeop import CommandCompiler, compile_command
-
-__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
- "compile_command"]
-
-def softspace(file, newvalue):
- oldvalue = 0
- try:
- oldvalue = file.softspace
- except AttributeError:
- pass
- try:
- file.softspace = newvalue
- except (AttributeError, TypeError):
- # "attribute-less object" or "read-only attributes"
- pass
- return oldvalue
-
-class InteractiveInterpreter:
- """Base class for InteractiveConsole.
-
- This class deals with parsing and interpreter state (the user's
- namespace); it doesn't deal with input buffering or prompting or
- input file naming (the filename is always passed in explicitly).
-
- """
-
- def __init__(self, locals=None):
- """Constructor.
-
- The optional 'locals' argument specifies the dictionary in
- which code will be executed; it defaults to a newly created
- dictionary with key "__name__" set to "__console__" and key
- "__doc__" set to None.
-
- """
- if locals is None:
- locals = {"__name__": "__console__", "__doc__": None}
- self.locals = locals
- self.compile = CommandCompiler()
-
- def runsource(self, source, filename="<input>", symbol="single"):
- """Compile and run some source in the interpreter.
-
- Arguments are as for compile_command().
-
- One several things can happen:
-
- 1) The input is incorrect; compile_command() raised an
- exception (SyntaxError or OverflowError). A syntax traceback
- will be printed by calling the showsyntaxerror() method.
-
- 2) The input is incomplete, and more input is required;
- compile_command() returned None. Nothing happens.
-
- 3) The input is complete; compile_command() returned a code
- object. The code is executed by calling self.runcode() (which
- also handles run-time exceptions, except for SystemExit).
-
- The return value is True in case 2, False in the other cases (unless
- an exception is raised). The return value can be used to
- decide whether to use sys.ps1 or sys.ps2 to prompt the next
- line.
-
- """
- try:
- code = self.compile(source, filename, symbol)
- except (OverflowError, SyntaxError, ValueError):
- # Case 1
- self.showsyntaxerror(filename)
- return False
-
- if code is None:
- # Case 2
- return True
-
- # Case 3
- self.runcode(code)
- return False
-
- def runcode(self, code):
- """Execute a code object.
-
- When an exception occurs, self.showtraceback() is called to
- display a traceback. All exceptions are caught except
- SystemExit, which is reraised.
-
- A note about KeyboardInterrupt: this exception may occur
- elsewhere in this code, and may not always be caught. The
- caller should be prepared to deal with it.
-
- """
- try:
- exec code in self.locals
- except SystemExit:
- raise
- except:
- self.showtraceback()
- else:
- if softspace(sys.stdout, 0):
- print
-
- def showsyntaxerror(self, filename=None):
- """Display the syntax error that just occurred.
-
- This doesn't display a stack trace because there isn't one.
-
- If a filename is given, it is stuffed in the exception instead
- of what was there before (because Python's parser always uses
- "<string>" when reading from a string).
-
- The output is written by self.write(), below.
-
- """
- type, value, sys.last_traceback = sys.exc_info()
- sys.last_type = type
- sys.last_value = value
- if filename and type is SyntaxError:
- # Work hard to stuff the correct filename in the exception
- try:
- msg, (dummy_filename, lineno, offset, line) = value
- except:
- # Not the format we expect; leave it alone
- pass
- else:
- # Stuff in the right filename
- value = SyntaxError(msg, (filename, lineno, offset, line))
- sys.last_value = value
- list = traceback.format_exception_only(type, value)
- map(self.write, list)
-
- def showtraceback(self):
- """Display the exception that just occurred.
-
- We remove the first stack item because it is our own code.
-
- The output is written by self.write(), below.
-
- """
- try:
- type, value, tb = sys.exc_info()
- sys.last_type = type
- sys.last_value = value
- sys.last_traceback = tb
- tblist = traceback.extract_tb(tb)
- del tblist[:1]
- list = traceback.format_list(tblist)
- if list:
- list.insert(0, "Traceback (most recent call last):\n")
- list[len(list):] = traceback.format_exception_only(type, value)
- finally:
- tblist = tb = None
- map(self.write, list)
-
- def write(self, data):
- """Write a string.
-
- The base implementation writes to sys.stderr; a subclass may
- replace this with a different implementation.
-
- """
- sys.stderr.write(data)
-
-
-class InteractiveConsole(InteractiveInterpreter):
- """Closely emulate the behavior of the interactive Python interpreter.
-
- This class builds on InteractiveInterpreter and adds prompting
- using the familiar sys.ps1 and sys.ps2, and input buffering.
-
- """
-
- def __init__(self, locals=None, filename="<console>"):
- """Constructor.
-
- The optional locals argument will be passed to the
- InteractiveInterpreter base class.
-
- The optional filename argument should specify the (file)name
- of the input stream; it will show up in tracebacks.
-
- """
- InteractiveInterpreter.__init__(self, locals)
- self.filename = filename
- self.resetbuffer()
-
- def resetbuffer(self):
- """Reset the input buffer."""
- self.buffer = []
-
- def interact(self, banner=None):
- """Closely emulate the interactive Python console.
-
- The optional banner argument specify the banner to print
- before the first interaction; by default it prints a banner
- similar to the one printed by the real Python interpreter,
- followed by the current class name in parentheses (so as not
- to confuse this with the real interpreter -- since it's so
- close!).
-
- """
- try:
- sys.ps1
- except AttributeError:
- sys.ps1 = ">>> "
- try:
- sys.ps2
- except AttributeError:
- sys.ps2 = "... "
- cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
- if banner is None:
- self.write("Python %s on %s\n%s\n(%s)\n" %
- (sys.version, sys.platform, cprt,
- self.__class__.__name__))
- else:
- self.write("%s\n" % str(banner))
- more = 0
- while 1:
- try:
- if more:
- prompt = sys.ps2
- else:
- prompt = sys.ps1
- try:
- line = self.raw_input(prompt)
- except EOFError:
- self.write("\n")
- break
- else:
- more = self.push(line)
- except KeyboardInterrupt:
- self.write("\nKeyboardInterrupt\n")
- self.resetbuffer()
- more = 0
-
- def push(self, line):
- """Push a line to the interpreter.
-
- The line should not have a trailing newline; it may have
- internal newlines. The line is appended to a buffer and the
- interpreter's runsource() method is called with the
- concatenated contents of the buffer as source. If this
- indicates that the command was executed or invalid, the buffer
- is reset; otherwise, the command is incomplete, and the buffer
- is left as it was after the line was appended. The return
- value is 1 if more input is required, 0 if the line was dealt
- with in some way (this is the same as runsource()).
-
- """
- self.buffer.append(line)
- source = "\n".join(self.buffer)
- more = self.runsource(source, self.filename)
- if not more:
- self.resetbuffer()
- return more
-
- def raw_input(self, prompt=""):
- """Write a prompt and read a line.
-
- The returned line does not include the trailing newline.
- When the user enters the EOF key sequence, EOFError is raised.
-
- The base implementation uses the built-in function
- raw_input(); a subclass may replace this with a different
- implementation.
-
- """
- return raw_input(prompt)
-
-
-def interact(banner=None, readfunc=None, local=None):
- """Closely emulate the interactive Python interpreter.
-
- This is a backwards compatible interface to the InteractiveConsole
- class. When readfunc is not specified, it attempts to import the
- readline module to enable GNU readline if it is available.
-
- Arguments (all optional, all default to None):
-
- banner -- passed to InteractiveConsole.interact()
- readfunc -- if not None, replaces InteractiveConsole.raw_input()
- local -- passed to InteractiveInterpreter.__init__()
-
- """
- console = InteractiveConsole(local)
- if readfunc is not None:
- console.raw_input = readfunc
- else:
- try:
- import readline
- except ImportError:
- pass
- console.interact(banner)
-
-
-if __name__ == '__main__':
- import pdb
- pdb.run("interact()\n")
diff --git a/sys/lib/python/codecs.py b/sys/lib/python/codecs.py
deleted file mode 100644
index f834b8dd1..000000000
--- a/sys/lib/python/codecs.py
+++ /dev/null
@@ -1,1034 +0,0 @@
-""" codecs -- Python Codec Registry, API and helpers.
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""#"
-
-import __builtin__, sys
-
-### Registry and builtin stateless codec functions
-
-try:
- from _codecs import *
-except ImportError, why:
- raise SystemError('Failed to load the builtin codecs: %s' % why)
-
-__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
- "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
- "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
- "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
- "strict_errors", "ignore_errors", "replace_errors",
- "xmlcharrefreplace_errors",
- "register_error", "lookup_error"]
-
-### Constants
-
-#
-# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
-# and its possible byte string values
-# for UTF8/UTF16/UTF32 output and little/big endian machines
-#
-
-# UTF-8
-BOM_UTF8 = '\xef\xbb\xbf'
-
-# UTF-16, little endian
-BOM_LE = BOM_UTF16_LE = '\xff\xfe'
-
-# UTF-16, big endian
-BOM_BE = BOM_UTF16_BE = '\xfe\xff'
-
-# UTF-32, little endian
-BOM_UTF32_LE = '\xff\xfe\x00\x00'
-
-# UTF-32, big endian
-BOM_UTF32_BE = '\x00\x00\xfe\xff'
-
-if sys.byteorder == 'little':
-
- # UTF-16, native endianness
- BOM = BOM_UTF16 = BOM_UTF16_LE
-
- # UTF-32, native endianness
- BOM_UTF32 = BOM_UTF32_LE
-
-else:
-
- # UTF-16, native endianness
- BOM = BOM_UTF16 = BOM_UTF16_BE
-
- # UTF-32, native endianness
- BOM_UTF32 = BOM_UTF32_BE
-
-# Old broken names (don't use in new code)
-BOM32_LE = BOM_UTF16_LE
-BOM32_BE = BOM_UTF16_BE
-BOM64_LE = BOM_UTF32_LE
-BOM64_BE = BOM_UTF32_BE
-
-
-### Codec base classes (defining the API)
-
-class CodecInfo(tuple):
-
- def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
- incrementalencoder=None, incrementaldecoder=None, name=None):
- self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
- self.name = name
- self.encode = encode
- self.decode = decode
- self.incrementalencoder = incrementalencoder
- self.incrementaldecoder = incrementaldecoder
- self.streamwriter = streamwriter
- self.streamreader = streamreader
- return self
-
- def __repr__(self):
- return "<%s.%s object for encoding %s at 0x%x>" % (self.__class__.__module__, self.__class__.__name__, self.name, id(self))
-
-class Codec:
-
- """ Defines the interface for stateless encoders/decoders.
-
- The .encode()/.decode() methods may use different error
- handling schemes by providing the errors argument. These
- string values are predefined:
-
- 'strict' - raise a ValueError error (or a subclass)
- 'ignore' - ignore the character and continue with the next
- 'replace' - replace with a suitable replacement character;
- Python will use the official U+FFFD REPLACEMENT
- CHARACTER for the builtin Unicode codecs on
- decoding and '?' on encoding.
- 'xmlcharrefreplace' - Replace with the appropriate XML
- character reference (only for encoding).
- 'backslashreplace' - Replace with backslashed escape sequences
- (only for encoding).
-
- The set of allowed values can be extended via register_error.
-
- """
- def encode(self, input, errors='strict'):
-
- """ Encodes the object input and returns a tuple (output
- object, length consumed).
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling.
-
- The method may not store state in the Codec instance. Use
- StreamCodec for codecs which have to keep state in order to
- make encoding/decoding efficient.
-
- The encoder must be able to handle zero length input and
- return an empty object of the output object type in this
- situation.
-
- """
- raise NotImplementedError
-
- def decode(self, input, errors='strict'):
-
- """ Decodes the object input and returns a tuple (output
- object, length consumed).
-
- input must be an object which provides the bf_getreadbuf
- buffer slot. Python strings, buffer objects and memory
- mapped files are examples of objects providing this slot.
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling.
-
- The method may not store state in the Codec instance. Use
- StreamCodec for codecs which have to keep state in order to
- make encoding/decoding efficient.
-
- The decoder must be able to handle zero length input and
- return an empty object of the output object type in this
- situation.
-
- """
- raise NotImplementedError
-
-class IncrementalEncoder(object):
- """
- An IncrementalEncoder encodes an input in multiple steps. The input can be
- passed piece by piece to the encode() method. The IncrementalEncoder remembers
- the state of the Encoding process between calls to encode().
- """
- def __init__(self, errors='strict'):
- """
- Creates an IncrementalEncoder instance.
-
- The IncrementalEncoder may use different error handling schemes by
- providing the errors keyword argument. See the module docstring
- for a list of possible values.
- """
- self.errors = errors
- self.buffer = ""
-
- def encode(self, input, final=False):
- """
- Encodes input and returns the resulting object.
- """
- raise NotImplementedError
-
- def reset(self):
- """
- Resets the encoder to the initial state.
- """
-
-class BufferedIncrementalEncoder(IncrementalEncoder):
- """
- This subclass of IncrementalEncoder can be used as the baseclass for an
- incremental encoder if the encoder must keep some of the output in a
- buffer between calls to encode().
- """
- def __init__(self, errors='strict'):
- IncrementalEncoder.__init__(self, errors)
- self.buffer = "" # unencoded input that is kept between calls to encode()
-
- def _buffer_encode(self, input, errors, final):
- # Overwrite this method in subclasses: It must encode input
- # and return an (output, length consumed) tuple
- raise NotImplementedError
-
- def encode(self, input, final=False):
- # encode input (taking the buffer into account)
- data = self.buffer + input
- (result, consumed) = self._buffer_encode(data, self.errors, final)
- # keep unencoded input until the next call
- self.buffer = data[consumed:]
- return result
-
- def reset(self):
- IncrementalEncoder.reset(self)
- self.buffer = ""
-
-class IncrementalDecoder(object):
- """
- An IncrementalDecoder decodes an input in multiple steps. The input can be
- passed piece by piece to the decode() method. The IncrementalDecoder
- remembers the state of the decoding process between calls to decode().
- """
- def __init__(self, errors='strict'):
- """
- Creates a IncrementalDecoder instance.
-
- The IncrementalDecoder may use different error handling schemes by
- providing the errors keyword argument. See the module docstring
- for a list of possible values.
- """
- self.errors = errors
-
- def decode(self, input, final=False):
- """
- Decodes input and returns the resulting object.
- """
- raise NotImplementedError
-
- def reset(self):
- """
- Resets the decoder to the initial state.
- """
-
-class BufferedIncrementalDecoder(IncrementalDecoder):
- """
- This subclass of IncrementalDecoder can be used as the baseclass for an
- incremental decoder if the decoder must be able to handle incomplete byte
- sequences.
- """
- def __init__(self, errors='strict'):
- IncrementalDecoder.__init__(self, errors)
- self.buffer = "" # undecoded input that is kept between calls to decode()
-
- def _buffer_decode(self, input, errors, final):
- # Overwrite this method in subclasses: It must decode input
- # and return an (output, length consumed) tuple
- raise NotImplementedError
-
- def decode(self, input, final=False):
- # decode input (taking the buffer into account)
- data = self.buffer + input
- (result, consumed) = self._buffer_decode(data, self.errors, final)
- # keep undecoded input until the next call
- self.buffer = data[consumed:]
- return result
-
- def reset(self):
- IncrementalDecoder.reset(self)
- self.buffer = ""
-
-#
-# The StreamWriter and StreamReader class provide generic working
-# interfaces which can be used to implement new encoding submodules
-# very easily. See encodings/utf_8.py for an example on how this is
-# done.
-#
-
-class StreamWriter(Codec):
-
- def __init__(self, stream, errors='strict'):
-
- """ Creates a StreamWriter instance.
-
- stream must be a file-like object open for writing
- (binary) data.
-
- The StreamWriter may use different error handling
- schemes by providing the errors keyword argument. These
- parameters are predefined:
-
- 'strict' - raise a ValueError (or a subclass)
- 'ignore' - ignore the character and continue with the next
- 'replace'- replace with a suitable replacement character
- 'xmlcharrefreplace' - Replace with the appropriate XML
- character reference.
- 'backslashreplace' - Replace with backslashed escape
- sequences (only for encoding).
-
- The set of allowed parameter values can be extended via
- register_error.
- """
- self.stream = stream
- self.errors = errors
-
- def write(self, object):
-
- """ Writes the object's contents encoded to self.stream.
- """
- data, consumed = self.encode(object, self.errors)
- self.stream.write(data)
-
- def writelines(self, list):
-
- """ Writes the concatenated list of strings to the stream
- using .write().
- """
- self.write(''.join(list))
-
- def reset(self):
-
- """ Flushes and resets the codec buffers used for keeping state.
-
- Calling this method should ensure that the data on the
- output is put into a clean state, that allows appending
- of new fresh data without having to rescan the whole
- stream to recover state.
-
- """
- pass
-
- def __getattr__(self, name,
- getattr=getattr):
-
- """ Inherit all other methods from the underlying stream.
- """
- return getattr(self.stream, name)
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, tb):
- self.stream.close()
-
-###
-
-class StreamReader(Codec):
-
- def __init__(self, stream, errors='strict'):
-
- """ Creates a StreamReader instance.
-
- stream must be a file-like object open for reading
- (binary) data.
-
- The StreamReader may use different error handling
- schemes by providing the errors keyword argument. These
- parameters are predefined:
-
- 'strict' - raise a ValueError (or a subclass)
- 'ignore' - ignore the character and continue with the next
- 'replace'- replace with a suitable replacement character;
-
- The set of allowed parameter values can be extended via
- register_error.
- """
- self.stream = stream
- self.errors = errors
- self.bytebuffer = ""
- # For str->str decoding this will stay a str
- # For str->unicode decoding the first read will promote it to unicode
- self.charbuffer = ""
- self.linebuffer = None
-
- def decode(self, input, errors='strict'):
- raise NotImplementedError
-
- def read(self, size=-1, chars=-1, firstline=False):
-
- """ Decodes data from the stream self.stream and returns the
- resulting object.
-
- chars indicates the number of characters to read from the
- stream. read() will never return more than chars
- characters, but it might return less, if there are not enough
- characters available.
-
- size indicates the approximate maximum number of bytes to
- read from the stream for decoding purposes. The decoder
- can modify this setting as appropriate. The default value
- -1 indicates to read and decode as much as possible. size
- is intended to prevent having to decode huge files in one
- step.
-
- If firstline is true, and a UnicodeDecodeError happens
- after the first line terminator in the input only the first line
- will be returned, the rest of the input will be kept until the
- next call to read().
-
- The method should use a greedy read strategy meaning that
- it should read as much data as is allowed within the
- definition of the encoding and the given size, e.g. if
- optional encoding endings or state markers are available
- on the stream, these should be read too.
- """
- # If we have lines cached, first merge them back into characters
- if self.linebuffer:
- self.charbuffer = "".join(self.linebuffer)
- self.linebuffer = None
-
- # read until we get the required number of characters (if available)
- while True:
- # can the request can be satisfied from the character buffer?
- if chars < 0:
- if size < 0:
- if self.charbuffer:
- break
- elif len(self.charbuffer) >= size:
- break
- else:
- if len(self.charbuffer) >= chars:
- break
- # we need more data
- if size < 0:
- newdata = self.stream.read()
- else:
- newdata = self.stream.read(size)
- # decode bytes (those remaining from the last call included)
- data = self.bytebuffer + newdata
- try:
- newchars, decodedbytes = self.decode(data, self.errors)
- except UnicodeDecodeError, exc:
- if firstline:
- newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
- lines = newchars.splitlines(True)
- if len(lines)<=1:
- raise
- else:
- raise
- # keep undecoded bytes until the next call
- self.bytebuffer = data[decodedbytes:]
- # put new characters in the character buffer
- self.charbuffer += newchars
- # there was no data available
- if not newdata:
- break
- if chars < 0:
- # Return everything we've got
- result = self.charbuffer
- self.charbuffer = ""
- else:
- # Return the first chars characters
- result = self.charbuffer[:chars]
- self.charbuffer = self.charbuffer[chars:]
- return result
-
- def readline(self, size=None, keepends=True):
-
- """ Read one line from the input stream and return the
- decoded data.
-
- size, if given, is passed as size argument to the
- read() method.
-
- """
- # If we have lines cached from an earlier read, return
- # them unconditionally
- if self.linebuffer:
- line = self.linebuffer[0]
- del self.linebuffer[0]
- if len(self.linebuffer) == 1:
- # revert to charbuffer mode; we might need more data
- # next time
- self.charbuffer = self.linebuffer[0]
- self.linebuffer = None
- if not keepends:
- line = line.splitlines(False)[0]
- return line
-
- readsize = size or 72
- line = ""
- # If size is given, we call read() only once
- while True:
- data = self.read(readsize, firstline=True)
- if data:
- # If we're at a "\r" read one extra character (which might
- # be a "\n") to get a proper line ending. If the stream is
- # temporarily exhausted we return the wrong line ending.
- if data.endswith("\r"):
- data += self.read(size=1, chars=1)
-
- line += data
- lines = line.splitlines(True)
- if lines:
- if len(lines) > 1:
- # More than one line result; the first line is a full line
- # to return
- line = lines[0]
- del lines[0]
- if len(lines) > 1:
- # cache the remaining lines
- lines[-1] += self.charbuffer
- self.linebuffer = lines
- self.charbuffer = None
- else:
- # only one remaining line, put it back into charbuffer
- self.charbuffer = lines[0] + self.charbuffer
- if not keepends:
- line = line.splitlines(False)[0]
- break
- line0withend = lines[0]
- line0withoutend = lines[0].splitlines(False)[0]
- if line0withend != line0withoutend: # We really have a line end
- # Put the rest back together and keep it until the next call
- self.charbuffer = "".join(lines[1:]) + self.charbuffer
- if keepends:
- line = line0withend
- else:
- line = line0withoutend
- break
- # we didn't get anything or this was our only try
- if not data or size is not None:
- if line and not keepends:
- line = line.splitlines(False)[0]
- break
- if readsize<8000:
- readsize *= 2
- return line
-
- def readlines(self, sizehint=None, keepends=True):
-
- """ Read all lines available on the input stream
- and return them as list of lines.
-
- Line breaks are implemented using the codec's decoder
- method and are included in the list entries.
-
- sizehint, if given, is ignored since there is no efficient
- way to finding the true end-of-line.
-
- """
- data = self.read()
- return data.splitlines(keepends)
-
- def reset(self):
-
- """ Resets the codec buffers used for keeping state.
-
- Note that no stream repositioning should take place.
- This method is primarily intended to be able to recover
- from decoding errors.
-
- """
- self.bytebuffer = ""
- self.charbuffer = u""
- self.linebuffer = None
-
- def seek(self, offset, whence=0):
- """ Set the input stream's current position.
-
- Resets the codec buffers used for keeping state.
- """
- self.reset()
- self.stream.seek(offset, whence)
-
- def next(self):
-
- """ Return the next decoded line from the input stream."""
- line = self.readline()
- if line:
- return line
- raise StopIteration
-
- def __iter__(self):
- return self
-
- def __getattr__(self, name,
- getattr=getattr):
-
- """ Inherit all other methods from the underlying stream.
- """
- return getattr(self.stream, name)
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, tb):
- self.stream.close()
-
-###
-
-class StreamReaderWriter:
-
- """ StreamReaderWriter instances allow wrapping streams which
- work in both read and write modes.
-
- The design is such that one can use the factory functions
- returned by the codec.lookup() function to construct the
- instance.
-
- """
- # Optional attributes set by the file wrappers below
- encoding = 'unknown'
-
- def __init__(self, stream, Reader, Writer, errors='strict'):
-
- """ Creates a StreamReaderWriter instance.
-
- stream must be a Stream-like object.
-
- Reader, Writer must be factory functions or classes
- providing the StreamReader, StreamWriter interface resp.
-
- Error handling is done in the same way as defined for the
- StreamWriter/Readers.
-
- """
- self.stream = stream
- self.reader = Reader(stream, errors)
- self.writer = Writer(stream, errors)
- self.errors = errors
-
- def read(self, size=-1):
-
- return self.reader.read(size)
-
- def readline(self, size=None):
-
- return self.reader.readline(size)
-
- def readlines(self, sizehint=None):
-
- return self.reader.readlines(sizehint)
-
- def next(self):
-
- """ Return the next decoded line from the input stream."""
- return self.reader.next()
-
- def __iter__(self):
- return self
-
- def write(self, data):
-
- return self.writer.write(data)
-
- def writelines(self, list):
-
- return self.writer.writelines(list)
-
- def reset(self):
-
- self.reader.reset()
- self.writer.reset()
-
- def __getattr__(self, name,
- getattr=getattr):
-
- """ Inherit all other methods from the underlying stream.
- """
- return getattr(self.stream, name)
-
- # these are needed to make "with codecs.open(...)" work properly
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, tb):
- self.stream.close()
-
-###
-
-class StreamRecoder:
-
- """ StreamRecoder instances provide a frontend - backend
- view of encoding data.
-
- They use the complete set of APIs returned by the
- codecs.lookup() function to implement their task.
-
- Data written to the stream is first decoded into an
- intermediate format (which is dependent on the given codec
- combination) and then written to the stream using an instance
- of the provided Writer class.
-
- In the other direction, data is read from the stream using a
- Reader instance and then return encoded data to the caller.
-
- """
- # Optional attributes set by the file wrappers below
- data_encoding = 'unknown'
- file_encoding = 'unknown'
-
- def __init__(self, stream, encode, decode, Reader, Writer,
- errors='strict'):
-
- """ Creates a StreamRecoder instance which implements a two-way
- conversion: encode and decode work on the frontend (the
- input to .read() and output of .write()) while
- Reader and Writer work on the backend (reading and
- writing to the stream).
-
- You can use these objects to do transparent direct
- recodings from e.g. latin-1 to utf-8 and back.
-
- stream must be a file-like object.
-
- encode, decode must adhere to the Codec interface, Reader,
- Writer must be factory functions or classes providing the
- StreamReader, StreamWriter interface resp.
-
- encode and decode are needed for the frontend translation,
- Reader and Writer for the backend translation. Unicode is
- used as intermediate encoding.
-
- Error handling is done in the same way as defined for the
- StreamWriter/Readers.
-
- """
- self.stream = stream
- self.encode = encode
- self.decode = decode
- self.reader = Reader(stream, errors)
- self.writer = Writer(stream, errors)
- self.errors = errors
-
- def read(self, size=-1):
-
- data = self.reader.read(size)
- data, bytesencoded = self.encode(data, self.errors)
- return data
-
- def readline(self, size=None):
-
- if size is None:
- data = self.reader.readline()
- else:
- data = self.reader.readline(size)
- data, bytesencoded = self.encode(data, self.errors)
- return data
-
- def readlines(self, sizehint=None):
-
- data = self.reader.read()
- data, bytesencoded = self.encode(data, self.errors)
- return data.splitlines(1)
-
- def next(self):
-
- """ Return the next decoded line from the input stream."""
- data = self.reader.next()
- data, bytesencoded = self.encode(data, self.errors)
- return data
-
- def __iter__(self):
- return self
-
- def write(self, data):
-
- data, bytesdecoded = self.decode(data, self.errors)
- return self.writer.write(data)
-
- def writelines(self, list):
-
- data = ''.join(list)
- data, bytesdecoded = self.decode(data, self.errors)
- return self.writer.write(data)
-
- def reset(self):
-
- self.reader.reset()
- self.writer.reset()
-
- def __getattr__(self, name,
- getattr=getattr):
-
- """ Inherit all other methods from the underlying stream.
- """
- return getattr(self.stream, name)
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, tb):
- self.stream.close()
-
-### Shortcuts
-
-def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
-
- """ Open an encoded file using the given mode and return
- a wrapped version providing transparent encoding/decoding.
-
- Note: The wrapped version will only accept the object format
- defined by the codecs, i.e. Unicode objects for most builtin
- codecs. Output is also codec dependent and will usually be
- Unicode as well.
-
- Files are always opened in binary mode, even if no binary mode
- was specified. This is done to avoid data loss due to encodings
- using 8-bit values. The default file mode is 'rb' meaning to
- open the file in binary read mode.
-
- encoding specifies the encoding which is to be used for the
- file.
-
- errors may be given to define the error handling. It defaults
- to 'strict' which causes ValueErrors to be raised in case an
- encoding error occurs.
-
- buffering has the same meaning as for the builtin open() API.
- It defaults to line buffered.
-
- The returned wrapped file object provides an extra attribute
- .encoding which allows querying the used encoding. This
- attribute is only available if an encoding was specified as
- parameter.
-
- """
- if encoding is not None and \
- 'b' not in mode:
- # Force opening of the file in binary mode
- mode = mode + 'b'
- file = __builtin__.open(filename, mode, buffering)
- if encoding is None:
- return file
- info = lookup(encoding)
- srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
- # Add attributes to simplify introspection
- srw.encoding = encoding
- return srw
-
-def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
-
- """ Return a wrapped version of file which provides transparent
- encoding translation.
-
- Strings written to the wrapped file are interpreted according
- to the given data_encoding and then written to the original
- file as string using file_encoding. The intermediate encoding
- will usually be Unicode but depends on the specified codecs.
-
- Strings are read from the file using file_encoding and then
- passed back to the caller as string using data_encoding.
-
- If file_encoding is not given, it defaults to data_encoding.
-
- errors may be given to define the error handling. It defaults
- to 'strict' which causes ValueErrors to be raised in case an
- encoding error occurs.
-
- The returned wrapped file object provides two extra attributes
- .data_encoding and .file_encoding which reflect the given
- parameters of the same name. The attributes can be used for
- introspection by Python programs.
-
- """
- if file_encoding is None:
- file_encoding = data_encoding
- data_info = lookup(data_encoding)
- file_info = lookup(file_encoding)
- sr = StreamRecoder(file, data_info.encode, data_info.decode,
- file_info.streamreader, file_info.streamwriter, errors)
- # Add attributes to simplify introspection
- sr.data_encoding = data_encoding
- sr.file_encoding = file_encoding
- return sr
-
-### Helpers for codec lookup
-
-def getencoder(encoding):
-
- """ Lookup up the codec for the given encoding and return
- its encoder function.
-
- Raises a LookupError in case the encoding cannot be found.
-
- """
- return lookup(encoding).encode
-
-def getdecoder(encoding):
-
- """ Lookup up the codec for the given encoding and return
- its decoder function.
-
- Raises a LookupError in case the encoding cannot be found.
-
- """
- return lookup(encoding).decode
-
-def getincrementalencoder(encoding):
-
- """ Lookup up the codec for the given encoding and return
- its IncrementalEncoder class or factory function.
-
- Raises a LookupError in case the encoding cannot be found
- or the codecs doesn't provide an incremental encoder.
-
- """
- encoder = lookup(encoding).incrementalencoder
- if encoder is None:
- raise LookupError(encoding)
- return encoder
-
-def getincrementaldecoder(encoding):
-
- """ Lookup up the codec for the given encoding and return
- its IncrementalDecoder class or factory function.
-
- Raises a LookupError in case the encoding cannot be found
- or the codecs doesn't provide an incremental decoder.
-
- """
- decoder = lookup(encoding).incrementaldecoder
- if decoder is None:
- raise LookupError(encoding)
- return decoder
-
-def getreader(encoding):
-
- """ Lookup up the codec for the given encoding and return
- its StreamReader class or factory function.
-
- Raises a LookupError in case the encoding cannot be found.
-
- """
- return lookup(encoding).streamreader
-
-def getwriter(encoding):
-
- """ Lookup up the codec for the given encoding and return
- its StreamWriter class or factory function.
-
- Raises a LookupError in case the encoding cannot be found.
-
- """
- return lookup(encoding).streamwriter
-
-def iterencode(iterator, encoding, errors='strict', **kwargs):
- """
- Encoding iterator.
-
- Encodes the input strings from the iterator using a IncrementalEncoder.
-
- errors and kwargs are passed through to the IncrementalEncoder
- constructor.
- """
- encoder = getincrementalencoder(encoding)(errors, **kwargs)
- for input in iterator:
- output = encoder.encode(input)
- if output:
- yield output
- output = encoder.encode("", True)
- if output:
- yield output
-
-def iterdecode(iterator, encoding, errors='strict', **kwargs):
- """
- Decoding iterator.
-
- Decodes the input strings from the iterator using a IncrementalDecoder.
-
- errors and kwargs are passed through to the IncrementalDecoder
- constructor.
- """
- decoder = getincrementaldecoder(encoding)(errors, **kwargs)
- for input in iterator:
- output = decoder.decode(input)
- if output:
- yield output
- output = decoder.decode("", True)
- if output:
- yield output
-
-### Helpers for charmap-based codecs
-
-def make_identity_dict(rng):
-
- """ make_identity_dict(rng) -> dict
-
- Return a dictionary where elements of the rng sequence are
- mapped to themselves.
-
- """
- res = {}
- for i in rng:
- res[i]=i
- return res
-
-def make_encoding_map(decoding_map):
-
- """ Creates an encoding map from a decoding map.
-
- If a target mapping in the decoding map occurs multiple
- times, then that target is mapped to None (undefined mapping),
- causing an exception when encountered by the charmap codec
- during translation.
-
- One example where this happens is cp875.py which decodes
- multiple character to \u001a.
-
- """
- m = {}
- for k,v in decoding_map.items():
- if not v in m:
- m[v] = k
- else:
- m[v] = None
- return m
-
-### error handlers
-
-try:
- strict_errors = lookup_error("strict")
- ignore_errors = lookup_error("ignore")
- replace_errors = lookup_error("replace")
- xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
- backslashreplace_errors = lookup_error("backslashreplace")
-except LookupError:
- # In --disable-unicode builds, these error handler are missing
- strict_errors = None
- ignore_errors = None
- replace_errors = None
- xmlcharrefreplace_errors = None
- backslashreplace_errors = None
-
-# Tell modulefinder that using codecs probably needs the encodings
-# package
-_false = 0
-if _false:
- import encodings
-
-### Tests
-
-if __name__ == '__main__':
-
- # Make stdout translate Latin-1 output into UTF-8 output
- sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
-
- # Have stdin translate Latin-1 input into UTF-8 input
- sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
diff --git a/sys/lib/python/codeop.py b/sys/lib/python/codeop.py
deleted file mode 100644
index 5616d92a8..000000000
--- a/sys/lib/python/codeop.py
+++ /dev/null
@@ -1,168 +0,0 @@
-r"""Utilities to compile possibly incomplete Python source code.
-
-This module provides two interfaces, broadly similar to the builtin
-function compile(), which take program text, a filename and a 'mode'
-and:
-
-- Return code object if the command is complete and valid
-- Return None if the command is incomplete
-- Raise SyntaxError, ValueError or OverflowError if the command is a
- syntax error (OverflowError and ValueError can be produced by
- malformed literals).
-
-Approach:
-
-First, check if the source consists entirely of blank lines and
-comments; if so, replace it with 'pass', because the built-in
-parser doesn't always do the right thing for these.
-
-Compile three times: as is, with \n, and with \n\n appended. If it
-compiles as is, it's complete. If it compiles with one \n appended,
-we expect more. If it doesn't compile either way, we compare the
-error we get when compiling with \n or \n\n appended. If the errors
-are the same, the code is broken. But if the errors are different, we
-expect more. Not intuitive; not even guaranteed to hold in future
-releases; but this matches the compiler's behavior from Python 1.4
-through 2.2, at least.
-
-Caveat:
-
-It is possible (but not likely) that the parser stops parsing with a
-successful outcome before reaching the end of the source; in this
-case, trailing symbols may be ignored instead of causing an error.
-For example, a backslash followed by two newlines may be followed by
-arbitrary garbage. This will be fixed once the API for the parser is
-better.
-
-The two interfaces are:
-
-compile_command(source, filename, symbol):
-
- Compiles a single command in the manner described above.
-
-CommandCompiler():
-
- Instances of this class have __call__ methods identical in
- signature to compile_command; the difference is that if the
- instance compiles program text containing a __future__ statement,
- the instance 'remembers' and compiles all subsequent program texts
- with the statement in force.
-
-The module also provides another class:
-
-Compile():
-
- Instances of this class act like the built-in function compile,
- but with 'memory' in the sense described above.
-"""
-
-import __future__
-
-_features = [getattr(__future__, fname)
- for fname in __future__.all_feature_names]
-
-__all__ = ["compile_command", "Compile", "CommandCompiler"]
-
-PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
-
-def _maybe_compile(compiler, source, filename, symbol):
- # Check for source consisting of only blank lines and comments
- for line in source.split("\n"):
- line = line.strip()
- if line and line[0] != '#':
- break # Leave it alone
- else:
- if symbol != "eval":
- source = "pass" # Replace it with a 'pass' statement
-
- err = err1 = err2 = None
- code = code1 = code2 = None
-
- try:
- code = compiler(source, filename, symbol)
- except SyntaxError, err:
- pass
-
- try:
- code1 = compiler(source + "\n", filename, symbol)
- except SyntaxError, err1:
- pass
-
- try:
- code2 = compiler(source + "\n\n", filename, symbol)
- except SyntaxError, err2:
- pass
-
- if code:
- return code
- if not code1 and repr(err1) == repr(err2):
- raise SyntaxError, err1
-
-def _compile(source, filename, symbol):
- return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
-
-def compile_command(source, filename="<input>", symbol="single"):
- r"""Compile a command and determine whether it is incomplete.
-
- Arguments:
-
- source -- the source string; may contain \n characters
- filename -- optional filename from which source was read; default
- "<input>"
- symbol -- optional grammar start symbol; "single" (default) or "eval"
-
- Return value / exceptions raised:
-
- - Return a code object if the command is complete and valid
- - Return None if the command is incomplete
- - Raise SyntaxError, ValueError or OverflowError if the command is a
- syntax error (OverflowError and ValueError can be produced by
- malformed literals).
- """
- return _maybe_compile(_compile, source, filename, symbol)
-
-class Compile:
- """Instances of this class behave much like the built-in compile
- function, but if one is used to compile text containing a future
- statement, it "remembers" and compiles all subsequent program texts
- with the statement in force."""
- def __init__(self):
- self.flags = PyCF_DONT_IMPLY_DEDENT
-
- def __call__(self, source, filename, symbol):
- codeob = compile(source, filename, symbol, self.flags, 1)
- for feature in _features:
- if codeob.co_flags & feature.compiler_flag:
- self.flags |= feature.compiler_flag
- return codeob
-
-class CommandCompiler:
- """Instances of this class have __call__ methods identical in
- signature to compile_command; the difference is that if the
- instance compiles program text containing a __future__ statement,
- the instance 'remembers' and compiles all subsequent program texts
- with the statement in force."""
-
- def __init__(self,):
- self.compiler = Compile()
-
- def __call__(self, source, filename="<input>", symbol="single"):
- r"""Compile a command and determine whether it is incomplete.
-
- Arguments:
-
- source -- the source string; may contain \n characters
- filename -- optional filename from which source was read;
- default "<input>"
- symbol -- optional grammar start symbol; "single" (default) or
- "eval"
-
- Return value / exceptions raised:
-
- - Return a code object if the command is complete and valid
- - Return None if the command is incomplete
- - Raise SyntaxError, ValueError or OverflowError if the command is a
- syntax error (OverflowError and ValueError can be produced by
- malformed literals).
- """
- return _maybe_compile(self.compiler, source, filename, symbol)
diff --git a/sys/lib/python/colorsys.py b/sys/lib/python/colorsys.py
deleted file mode 100644
index 39b4b165c..000000000
--- a/sys/lib/python/colorsys.py
+++ /dev/null
@@ -1,126 +0,0 @@
-"""Conversion functions between RGB and other color systems.
-
-This modules provides two functions for each color system ABC:
-
- rgb_to_abc(r, g, b) --> a, b, c
- abc_to_rgb(a, b, c) --> r, g, b
-
-All inputs and outputs are triples of floats in the range [0.0...1.0]
-(with the exception of I and Q, which covers a slightly larger range).
-Inputs outside the valid range may cause exceptions or invalid outputs.
-
-Supported color systems:
-RGB: Red, Green, Blue components
-YIQ: Luminance, Chrominance (used by composite video signals)
-HLS: Hue, Luminance, Saturation
-HSV: Hue, Saturation, Value
-"""
-
-# References:
-# http://en.wikipedia.org/wiki/YIQ
-# http://en.wikipedia.org/wiki/HLS_color_space
-# http://en.wikipedia.org/wiki/HSV_color_space
-
-__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
- "rgb_to_hsv","hsv_to_rgb"]
-
-# Some floating point constants
-
-ONE_THIRD = 1.0/3.0
-ONE_SIXTH = 1.0/6.0
-TWO_THIRD = 2.0/3.0
-
-# YIQ: used by composite video signals (linear combinations of RGB)
-# Y: perceived grey level (0.0 == black, 1.0 == white)
-# I, Q: color components
-
-def rgb_to_yiq(r, g, b):
- y = 0.30*r + 0.59*g + 0.11*b
- i = 0.60*r - 0.28*g - 0.32*b
- q = 0.21*r - 0.52*g + 0.31*b
- return (y, i, q)
-
-def yiq_to_rgb(y, i, q):
- r = y + 0.948262*i + 0.624013*q
- g = y - 0.276066*i - 0.639810*q
- b = y - 1.105450*i + 1.729860*q
- if r < 0.0: r = 0.0
- if g < 0.0: g = 0.0
- if b < 0.0: b = 0.0
- if r > 1.0: r = 1.0
- if g > 1.0: g = 1.0
- if b > 1.0: b = 1.0
- return (r, g, b)
-
-
-# HLS: Hue, Luminance, Saturation
-# H: position in the spectrum
-# L: color lightness
-# S: color saturation
-
-def rgb_to_hls(r, g, b):
- maxc = max(r, g, b)
- minc = min(r, g, b)
- # XXX Can optimize (maxc+minc) and (maxc-minc)
- l = (minc+maxc)/2.0
- if minc == maxc: return 0.0, l, 0.0
- if l <= 0.5: s = (maxc-minc) / (maxc+minc)
- else: s = (maxc-minc) / (2.0-maxc-minc)
- rc = (maxc-r) / (maxc-minc)
- gc = (maxc-g) / (maxc-minc)
- bc = (maxc-b) / (maxc-minc)
- if r == maxc: h = bc-gc
- elif g == maxc: h = 2.0+rc-bc
- else: h = 4.0+gc-rc
- h = (h/6.0) % 1.0
- return h, l, s
-
-def hls_to_rgb(h, l, s):
- if s == 0.0: return l, l, l
- if l <= 0.5: m2 = l * (1.0+s)
- else: m2 = l+s-(l*s)
- m1 = 2.0*l - m2
- return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
-
-def _v(m1, m2, hue):
- hue = hue % 1.0
- if hue < ONE_SIXTH: return m1 + (m2-m1)*hue*6.0
- if hue < 0.5: return m2
- if hue < TWO_THIRD: return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
- return m1
-
-
-# HSV: Hue, Saturation, Value
-# H: position in the spectrum
-# S: color saturation ("purity")
-# V: color brightness
-
-def rgb_to_hsv(r, g, b):
- maxc = max(r, g, b)
- minc = min(r, g, b)
- v = maxc
- if minc == maxc: return 0.0, 0.0, v
- s = (maxc-minc) / maxc
- rc = (maxc-r) / (maxc-minc)
- gc = (maxc-g) / (maxc-minc)
- bc = (maxc-b) / (maxc-minc)
- if r == maxc: h = bc-gc
- elif g == maxc: h = 2.0+rc-bc
- else: h = 4.0+gc-rc
- h = (h/6.0) % 1.0
- return h, s, v
-
-def hsv_to_rgb(h, s, v):
- if s == 0.0: return v, v, v
- i = int(h*6.0) # XXX assume int() truncates!
- f = (h*6.0) - i
- p = v*(1.0 - s)
- q = v*(1.0 - s*f)
- t = v*(1.0 - s*(1.0-f))
- if i%6 == 0: return v, t, p
- if i == 1: return q, v, p
- if i == 2: return p, v, t
- if i == 3: return p, q, v
- if i == 4: return t, p, v
- if i == 5: return v, p, q
- # Cannot get here
diff --git a/sys/lib/python/commands.py b/sys/lib/python/commands.py
deleted file mode 100644
index cfbb541cf..000000000
--- a/sys/lib/python/commands.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""Execute shell commands via os.popen() and return status, output.
-
-Interface summary:
-
- import commands
-
- outtext = commands.getoutput(cmd)
- (exitstatus, outtext) = commands.getstatusoutput(cmd)
- outtext = commands.getstatus(file) # returns output of "ls -ld file"
-
-A trailing newline is removed from the output string.
-
-Encapsulates the basic operation:
-
- pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
- text = pipe.read()
- sts = pipe.close()
-
- [Note: it would be nice to add functions to interpret the exit status.]
-"""
-
-__all__ = ["getstatusoutput","getoutput","getstatus"]
-
-# Module 'commands'
-#
-# Various tools for executing commands and looking at their output and status.
-#
-# NB This only works (and is only relevant) for UNIX.
-
-
-# Get 'ls -l' status for an object into a string
-#
-def getstatus(file):
- """Return output of "ls -ld <file>" in a string."""
- return getoutput('ls -ld' + mkarg(file))
-
-
-# Get the output from a shell command into a string.
-# The exit status is ignored; a trailing newline is stripped.
-# Assume the command will work with '{ ... ; } 2>&1' around it..
-#
-def getoutput(cmd):
- """Return output (stdout or stderr) of executing cmd in a shell."""
- return getstatusoutput(cmd)[1]
-
-
-# Ditto but preserving the exit status.
-# Returns a pair (sts, output)
-#
-def getstatusoutput(cmd):
- """Return (status, output) of executing cmd in a shell."""
- import os
- pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
- text = pipe.read()
- sts = pipe.close()
- if sts is None: sts = 0
- if text[-1:] == '\n': text = text[:-1]
- return sts, text
-
-
-# Make command argument from directory and pathname (prefix space, add quotes).
-#
-def mk2arg(head, x):
- import os
- return mkarg(os.path.join(head, x))
-
-
-# Make a shell command argument from a string.
-# Return a string beginning with a space followed by a shell-quoted
-# version of the argument.
-# Two strategies: enclose in single quotes if it contains none;
-# otherwise, enclose in double quotes and prefix quotable characters
-# with backslash.
-#
-def mkarg(x):
- if '\'' not in x:
- return ' \'' + x + '\''
- s = ' "'
- for c in x:
- if c in '\\$"`':
- s = s + '\\'
- s = s + c
- s = s + '"'
- return s
diff --git a/sys/lib/python/compileall.py b/sys/lib/python/compileall.py
deleted file mode 100644
index b21d95f82..000000000
--- a/sys/lib/python/compileall.py
+++ /dev/null
@@ -1,157 +0,0 @@
-"""Module/script to "compile" all .py files to .pyc (or .pyo) file.
-
-When called as a script with arguments, this compiles the directories
-given as arguments recursively; the -l option prevents it from
-recursing into directories.
-
-Without arguments, if compiles all modules on sys.path, without
-recursing into subdirectories. (Even though it should do so for
-packages -- for now, you'll have to deal with packages separately.)
-
-See module py_compile for details of the actual byte-compilation.
-
-"""
-
-import os
-import sys
-import py_compile
-
-__all__ = ["compile_dir","compile_path"]
-
-def compile_dir(dir, maxlevels=10, ddir=None,
- force=0, rx=None, quiet=0):
- """Byte-compile all modules in the given directory tree.
-
- Arguments (only dir is required):
-
- dir: the directory to byte-compile
- maxlevels: maximum recursion level (default 10)
- ddir: if given, purported directory name (this is the
- directory name that will show up in error messages)
- force: if 1, force compilation, even if timestamps are up-to-date
- quiet: if 1, be quiet during compilation
-
- """
- if not quiet:
- print 'Listing', dir, '...'
- try:
- names = os.listdir(dir)
- except os.error:
- print "Can't list", dir
- names = []
- names.sort()
- success = 1
- for name in names:
- fullname = os.path.join(dir, name)
- if ddir is not None:
- dfile = os.path.join(ddir, name)
- else:
- dfile = None
- if rx is not None:
- mo = rx.search(fullname)
- if mo:
- continue
- if os.path.isfile(fullname):
- head, tail = name[:-3], name[-3:]
- if tail == '.py':
- cfile = fullname + (__debug__ and 'c' or 'o')
- ftime = os.stat(fullname).st_mtime
- try: ctime = os.stat(cfile).st_mtime
- except os.error: ctime = 0
- if (ctime > ftime) and not force: continue
- if not quiet:
- print 'Compiling', fullname, '...'
- try:
- ok = py_compile.compile(fullname, None, dfile, True)
- except KeyboardInterrupt:
- raise KeyboardInterrupt
- except py_compile.PyCompileError,err:
- if quiet:
- print 'Compiling', fullname, '...'
- print err.msg
- success = 0
- except IOError, e:
- print "Sorry", e
- success = 0
- else:
- if ok == 0:
- success = 0
- elif maxlevels > 0 and \
- name != os.curdir and name != os.pardir and \
- os.path.isdir(fullname) and \
- not os.path.islink(fullname):
- if not compile_dir(fullname, maxlevels - 1, dfile, force, rx, quiet):
- success = 0
- return success
-
-def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
- """Byte-compile all module on sys.path.
-
- Arguments (all optional):
-
- skip_curdir: if true, skip current directory (default true)
- maxlevels: max recursion level (default 0)
- force: as for compile_dir() (default 0)
- quiet: as for compile_dir() (default 0)
-
- """
- success = 1
- for dir in sys.path:
- if (not dir or dir == os.curdir) and skip_curdir:
- print 'Skipping current directory'
- else:
- success = success and compile_dir(dir, maxlevels, None,
- force, quiet=quiet)
- return success
-
-def main():
- """Script main program."""
- import getopt
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:')
- except getopt.error, msg:
- print msg
- print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
- "[-x regexp] [directory ...]"
- print "-l: don't recurse down"
- print "-f: force rebuild even if timestamps are up-to-date"
- print "-q: quiet operation"
- print "-d destdir: purported directory name for error messages"
- print " if no directory arguments, -l sys.path is assumed"
- print "-x regexp: skip files matching the regular expression regexp"
- print " the regexp is search for in the full path of the file"
- sys.exit(2)
- maxlevels = 10
- ddir = None
- force = 0
- quiet = 0
- rx = None
- for o, a in opts:
- if o == '-l': maxlevels = 0
- if o == '-d': ddir = a
- if o == '-f': force = 1
- if o == '-q': quiet = 1
- if o == '-x':
- import re
- rx = re.compile(a)
- if ddir:
- if len(args) != 1:
- print "-d destdir require exactly one directory argument"
- sys.exit(2)
- success = 1
- try:
- if args:
- for dir in args:
- if not compile_dir(dir, maxlevels, ddir,
- force, rx, quiet):
- success = 0
- else:
- success = compile_path()
- except KeyboardInterrupt:
- print "\n[interrupt]"
- success = 0
- return success
-
-if __name__ == '__main__':
- exit_status = int(not main())
- sys.exit(exit_status)
diff --git a/sys/lib/python/compiler/__init__.py b/sys/lib/python/compiler/__init__.py
deleted file mode 100644
index ce89144b7..000000000
--- a/sys/lib/python/compiler/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Package for parsing and compiling Python source code
-
-There are several functions defined at the top level that are imported
-from modules contained in the package.
-
-parse(buf, mode="exec") -> AST
- Converts a string containing Python source code to an abstract
- syntax tree (AST). The AST is defined in compiler.ast.
-
-parseFile(path) -> AST
- The same as parse(open(path))
-
-walk(ast, visitor, verbose=None)
- Does a pre-order walk over the ast using the visitor instance.
- See compiler.visitor for details.
-
-compile(source, filename, mode, flags=None, dont_inherit=None)
- Returns a code object. A replacement for the builtin compile() function.
-
-compileFile(filename)
- Generates a .pyc file by compiling filename.
-"""
-
-from compiler.transformer import parse, parseFile
-from compiler.visitor import walk
-from compiler.pycodegen import compile, compileFile
diff --git a/sys/lib/python/compiler/ast.py b/sys/lib/python/compiler/ast.py
deleted file mode 100644
index 93437d6ce..000000000
--- a/sys/lib/python/compiler/ast.py
+++ /dev/null
@@ -1,1356 +0,0 @@
-"""Python abstract syntax node definitions
-
-This file is automatically generated by Tools/compiler/astgen.py
-"""
-from compiler.consts import CO_VARARGS, CO_VARKEYWORDS
-
-def flatten(seq):
- l = []
- for elt in seq:
- t = type(elt)
- if t is tuple or t is list:
- for elt2 in flatten(elt):
- l.append(elt2)
- else:
- l.append(elt)
- return l
-
-def flatten_nodes(seq):
- return [n for n in flatten(seq) if isinstance(n, Node)]
-
-nodes = {}
-
-class Node:
- """Abstract base class for ast nodes."""
- def getChildren(self):
- pass # implemented by subclasses
- def __iter__(self):
- for n in self.getChildren():
- yield n
- def asList(self): # for backwards compatibility
- return self.getChildren()
- def getChildNodes(self):
- pass # implemented by subclasses
-
-class EmptyNode(Node):
- pass
-
-class Expression(Node):
- # Expression is an artificial node class to support "eval"
- nodes["expression"] = "Expression"
- def __init__(self, node):
- self.node = node
-
- def getChildren(self):
- return self.node,
-
- def getChildNodes(self):
- return self.node,
-
- def __repr__(self):
- return "Expression(%s)" % (repr(self.node))
-
-class Add(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "Add((%s, %s))" % (repr(self.left), repr(self.right))
-
-class And(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "And(%s)" % (repr(self.nodes),)
-
-class AssAttr(Node):
- def __init__(self, expr, attrname, flags, lineno=None):
- self.expr = expr
- self.attrname = attrname
- self.flags = flags
- self.lineno = lineno
-
- def getChildren(self):
- return self.expr, self.attrname, self.flags
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "AssAttr(%s, %s, %s)" % (repr(self.expr), repr(self.attrname), repr(self.flags))
-
-class AssList(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "AssList(%s)" % (repr(self.nodes),)
-
-class AssName(Node):
- def __init__(self, name, flags, lineno=None):
- self.name = name
- self.flags = flags
- self.lineno = lineno
-
- def getChildren(self):
- return self.name, self.flags
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "AssName(%s, %s)" % (repr(self.name), repr(self.flags))
-
-class AssTuple(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "AssTuple(%s)" % (repr(self.nodes),)
-
-class Assert(Node):
- def __init__(self, test, fail, lineno=None):
- self.test = test
- self.fail = fail
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.test)
- children.append(self.fail)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.test)
- if self.fail is not None:
- nodelist.append(self.fail)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Assert(%s, %s)" % (repr(self.test), repr(self.fail))
-
-class Assign(Node):
- def __init__(self, nodes, expr, lineno=None):
- self.nodes = nodes
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.extend(flatten(self.nodes))
- children.append(self.expr)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- nodelist.append(self.expr)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Assign(%s, %s)" % (repr(self.nodes), repr(self.expr))
-
-class AugAssign(Node):
- def __init__(self, node, op, expr, lineno=None):
- self.node = node
- self.op = op
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- return self.node, self.op, self.expr
-
- def getChildNodes(self):
- return self.node, self.expr
-
- def __repr__(self):
- return "AugAssign(%s, %s, %s)" % (repr(self.node), repr(self.op), repr(self.expr))
-
-class Backquote(Node):
- def __init__(self, expr, lineno=None):
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- return self.expr,
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "Backquote(%s)" % (repr(self.expr),)
-
-class Bitand(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Bitand(%s)" % (repr(self.nodes),)
-
-class Bitor(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Bitor(%s)" % (repr(self.nodes),)
-
-class Bitxor(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Bitxor(%s)" % (repr(self.nodes),)
-
-class Break(Node):
- def __init__(self, lineno=None):
- self.lineno = lineno
-
- def getChildren(self):
- return ()
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "Break()"
-
-class CallFunc(Node):
- def __init__(self, node, args, star_args = None, dstar_args = None, lineno=None):
- self.node = node
- self.args = args
- self.star_args = star_args
- self.dstar_args = dstar_args
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.node)
- children.extend(flatten(self.args))
- children.append(self.star_args)
- children.append(self.dstar_args)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.node)
- nodelist.extend(flatten_nodes(self.args))
- if self.star_args is not None:
- nodelist.append(self.star_args)
- if self.dstar_args is not None:
- nodelist.append(self.dstar_args)
- return tuple(nodelist)
-
- def __repr__(self):
- return "CallFunc(%s, %s, %s, %s)" % (repr(self.node), repr(self.args), repr(self.star_args), repr(self.dstar_args))
-
-class Class(Node):
- def __init__(self, name, bases, doc, code, lineno=None):
- self.name = name
- self.bases = bases
- self.doc = doc
- self.code = code
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.name)
- children.extend(flatten(self.bases))
- children.append(self.doc)
- children.append(self.code)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.bases))
- nodelist.append(self.code)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Class(%s, %s, %s, %s)" % (repr(self.name), repr(self.bases), repr(self.doc), repr(self.code))
-
-class Compare(Node):
- def __init__(self, expr, ops, lineno=None):
- self.expr = expr
- self.ops = ops
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.expr)
- children.extend(flatten(self.ops))
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.expr)
- nodelist.extend(flatten_nodes(self.ops))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Compare(%s, %s)" % (repr(self.expr), repr(self.ops))
-
-class Const(Node):
- def __init__(self, value, lineno=None):
- self.value = value
- self.lineno = lineno
-
- def getChildren(self):
- return self.value,
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "Const(%s)" % (repr(self.value),)
-
-class Continue(Node):
- def __init__(self, lineno=None):
- self.lineno = lineno
-
- def getChildren(self):
- return ()
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "Continue()"
-
-class Decorators(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Decorators(%s)" % (repr(self.nodes),)
-
-class Dict(Node):
- def __init__(self, items, lineno=None):
- self.items = items
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.items))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.items))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Dict(%s)" % (repr(self.items),)
-
-class Discard(Node):
- def __init__(self, expr, lineno=None):
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- return self.expr,
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "Discard(%s)" % (repr(self.expr),)
-
-class Div(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "Div((%s, %s))" % (repr(self.left), repr(self.right))
-
-class Ellipsis(Node):
- def __init__(self, lineno=None):
- self.lineno = lineno
-
- def getChildren(self):
- return ()
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "Ellipsis()"
-
-class Exec(Node):
- def __init__(self, expr, locals, globals, lineno=None):
- self.expr = expr
- self.locals = locals
- self.globals = globals
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.expr)
- children.append(self.locals)
- children.append(self.globals)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.expr)
- if self.locals is not None:
- nodelist.append(self.locals)
- if self.globals is not None:
- nodelist.append(self.globals)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Exec(%s, %s, %s)" % (repr(self.expr), repr(self.locals), repr(self.globals))
-
-class FloorDiv(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "FloorDiv((%s, %s))" % (repr(self.left), repr(self.right))
-
-class For(Node):
- def __init__(self, assign, list, body, else_, lineno=None):
- self.assign = assign
- self.list = list
- self.body = body
- self.else_ = else_
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.assign)
- children.append(self.list)
- children.append(self.body)
- children.append(self.else_)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.assign)
- nodelist.append(self.list)
- nodelist.append(self.body)
- if self.else_ is not None:
- nodelist.append(self.else_)
- return tuple(nodelist)
-
- def __repr__(self):
- return "For(%s, %s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.body), repr(self.else_))
-
-class From(Node):
- def __init__(self, modname, names, level, lineno=None):
- self.modname = modname
- self.names = names
- self.level = level
- self.lineno = lineno
-
- def getChildren(self):
- return self.modname, self.names, self.level
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "From(%s, %s, %s)" % (repr(self.modname), repr(self.names), repr(self.level))
-
-class Function(Node):
- def __init__(self, decorators, name, argnames, defaults, flags, doc, code, lineno=None):
- self.decorators = decorators
- self.name = name
- self.argnames = argnames
- self.defaults = defaults
- self.flags = flags
- self.doc = doc
- self.code = code
- self.lineno = lineno
- self.varargs = self.kwargs = None
- if flags & CO_VARARGS:
- self.varargs = 1
- if flags & CO_VARKEYWORDS:
- self.kwargs = 1
-
-
-
- def getChildren(self):
- children = []
- children.append(self.decorators)
- children.append(self.name)
- children.append(self.argnames)
- children.extend(flatten(self.defaults))
- children.append(self.flags)
- children.append(self.doc)
- children.append(self.code)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- if self.decorators is not None:
- nodelist.append(self.decorators)
- nodelist.extend(flatten_nodes(self.defaults))
- nodelist.append(self.code)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Function(%s, %s, %s, %s, %s, %s, %s)" % (repr(self.decorators), repr(self.name), repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.doc), repr(self.code))
-
-class GenExpr(Node):
- def __init__(self, code, lineno=None):
- self.code = code
- self.lineno = lineno
- self.argnames = ['.0']
- self.varargs = self.kwargs = None
-
- def getChildren(self):
- return self.code,
-
- def getChildNodes(self):
- return self.code,
-
- def __repr__(self):
- return "GenExpr(%s)" % (repr(self.code),)
-
-class GenExprFor(Node):
- def __init__(self, assign, iter, ifs, lineno=None):
- self.assign = assign
- self.iter = iter
- self.ifs = ifs
- self.lineno = lineno
- self.is_outmost = False
-
-
- def getChildren(self):
- children = []
- children.append(self.assign)
- children.append(self.iter)
- children.extend(flatten(self.ifs))
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.assign)
- nodelist.append(self.iter)
- nodelist.extend(flatten_nodes(self.ifs))
- return tuple(nodelist)
-
- def __repr__(self):
- return "GenExprFor(%s, %s, %s)" % (repr(self.assign), repr(self.iter), repr(self.ifs))
-
-class GenExprIf(Node):
- def __init__(self, test, lineno=None):
- self.test = test
- self.lineno = lineno
-
- def getChildren(self):
- return self.test,
-
- def getChildNodes(self):
- return self.test,
-
- def __repr__(self):
- return "GenExprIf(%s)" % (repr(self.test),)
-
-class GenExprInner(Node):
- def __init__(self, expr, quals, lineno=None):
- self.expr = expr
- self.quals = quals
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.expr)
- children.extend(flatten(self.quals))
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.expr)
- nodelist.extend(flatten_nodes(self.quals))
- return tuple(nodelist)
-
- def __repr__(self):
- return "GenExprInner(%s, %s)" % (repr(self.expr), repr(self.quals))
-
-class Getattr(Node):
- def __init__(self, expr, attrname, lineno=None):
- self.expr = expr
- self.attrname = attrname
- self.lineno = lineno
-
- def getChildren(self):
- return self.expr, self.attrname
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "Getattr(%s, %s)" % (repr(self.expr), repr(self.attrname))
-
-class Global(Node):
- def __init__(self, names, lineno=None):
- self.names = names
- self.lineno = lineno
-
- def getChildren(self):
- return self.names,
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "Global(%s)" % (repr(self.names),)
-
-class If(Node):
- def __init__(self, tests, else_, lineno=None):
- self.tests = tests
- self.else_ = else_
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.extend(flatten(self.tests))
- children.append(self.else_)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.tests))
- if self.else_ is not None:
- nodelist.append(self.else_)
- return tuple(nodelist)
-
- def __repr__(self):
- return "If(%s, %s)" % (repr(self.tests), repr(self.else_))
-
-class IfExp(Node):
- def __init__(self, test, then, else_, lineno=None):
- self.test = test
- self.then = then
- self.else_ = else_
- self.lineno = lineno
-
- def getChildren(self):
- return self.test, self.then, self.else_
-
- def getChildNodes(self):
- return self.test, self.then, self.else_
-
- def __repr__(self):
- return "IfExp(%s, %s, %s)" % (repr(self.test), repr(self.then), repr(self.else_))
-
-class Import(Node):
- def __init__(self, names, lineno=None):
- self.names = names
- self.lineno = lineno
-
- def getChildren(self):
- return self.names,
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "Import(%s)" % (repr(self.names),)
-
-class Invert(Node):
- def __init__(self, expr, lineno=None):
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- return self.expr,
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "Invert(%s)" % (repr(self.expr),)
-
-class Keyword(Node):
- def __init__(self, name, expr, lineno=None):
- self.name = name
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- return self.name, self.expr
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "Keyword(%s, %s)" % (repr(self.name), repr(self.expr))
-
-class Lambda(Node):
- def __init__(self, argnames, defaults, flags, code, lineno=None):
- self.argnames = argnames
- self.defaults = defaults
- self.flags = flags
- self.code = code
- self.lineno = lineno
- self.varargs = self.kwargs = None
- if flags & CO_VARARGS:
- self.varargs = 1
- if flags & CO_VARKEYWORDS:
- self.kwargs = 1
-
-
-
- def getChildren(self):
- children = []
- children.append(self.argnames)
- children.extend(flatten(self.defaults))
- children.append(self.flags)
- children.append(self.code)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.defaults))
- nodelist.append(self.code)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Lambda(%s, %s, %s, %s)" % (repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.code))
-
-class LeftShift(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "LeftShift((%s, %s))" % (repr(self.left), repr(self.right))
-
-class List(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "List(%s)" % (repr(self.nodes),)
-
-class ListComp(Node):
- def __init__(self, expr, quals, lineno=None):
- self.expr = expr
- self.quals = quals
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.expr)
- children.extend(flatten(self.quals))
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.expr)
- nodelist.extend(flatten_nodes(self.quals))
- return tuple(nodelist)
-
- def __repr__(self):
- return "ListComp(%s, %s)" % (repr(self.expr), repr(self.quals))
-
-class ListCompFor(Node):
- def __init__(self, assign, list, ifs, lineno=None):
- self.assign = assign
- self.list = list
- self.ifs = ifs
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.assign)
- children.append(self.list)
- children.extend(flatten(self.ifs))
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.assign)
- nodelist.append(self.list)
- nodelist.extend(flatten_nodes(self.ifs))
- return tuple(nodelist)
-
- def __repr__(self):
- return "ListCompFor(%s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.ifs))
-
-class ListCompIf(Node):
- def __init__(self, test, lineno=None):
- self.test = test
- self.lineno = lineno
-
- def getChildren(self):
- return self.test,
-
- def getChildNodes(self):
- return self.test,
-
- def __repr__(self):
- return "ListCompIf(%s)" % (repr(self.test),)
-
-class Mod(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "Mod((%s, %s))" % (repr(self.left), repr(self.right))
-
-class Module(Node):
- def __init__(self, doc, node, lineno=None):
- self.doc = doc
- self.node = node
- self.lineno = lineno
-
- def getChildren(self):
- return self.doc, self.node
-
- def getChildNodes(self):
- return self.node,
-
- def __repr__(self):
- return "Module(%s, %s)" % (repr(self.doc), repr(self.node))
-
-class Mul(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "Mul((%s, %s))" % (repr(self.left), repr(self.right))
-
-class Name(Node):
- def __init__(self, name, lineno=None):
- self.name = name
- self.lineno = lineno
-
- def getChildren(self):
- return self.name,
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "Name(%s)" % (repr(self.name),)
-
-class Not(Node):
- def __init__(self, expr, lineno=None):
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- return self.expr,
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "Not(%s)" % (repr(self.expr),)
-
-class Or(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Or(%s)" % (repr(self.nodes),)
-
-class Pass(Node):
- def __init__(self, lineno=None):
- self.lineno = lineno
-
- def getChildren(self):
- return ()
-
- def getChildNodes(self):
- return ()
-
- def __repr__(self):
- return "Pass()"
-
-class Power(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "Power((%s, %s))" % (repr(self.left), repr(self.right))
-
-class Print(Node):
- def __init__(self, nodes, dest, lineno=None):
- self.nodes = nodes
- self.dest = dest
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.extend(flatten(self.nodes))
- children.append(self.dest)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- if self.dest is not None:
- nodelist.append(self.dest)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Print(%s, %s)" % (repr(self.nodes), repr(self.dest))
-
-class Printnl(Node):
- def __init__(self, nodes, dest, lineno=None):
- self.nodes = nodes
- self.dest = dest
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.extend(flatten(self.nodes))
- children.append(self.dest)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- if self.dest is not None:
- nodelist.append(self.dest)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Printnl(%s, %s)" % (repr(self.nodes), repr(self.dest))
-
-class Raise(Node):
- def __init__(self, expr1, expr2, expr3, lineno=None):
- self.expr1 = expr1
- self.expr2 = expr2
- self.expr3 = expr3
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.expr1)
- children.append(self.expr2)
- children.append(self.expr3)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- if self.expr1 is not None:
- nodelist.append(self.expr1)
- if self.expr2 is not None:
- nodelist.append(self.expr2)
- if self.expr3 is not None:
- nodelist.append(self.expr3)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Raise(%s, %s, %s)" % (repr(self.expr1), repr(self.expr2), repr(self.expr3))
-
-class Return(Node):
- def __init__(self, value, lineno=None):
- self.value = value
- self.lineno = lineno
-
- def getChildren(self):
- return self.value,
-
- def getChildNodes(self):
- return self.value,
-
- def __repr__(self):
- return "Return(%s)" % (repr(self.value),)
-
-class RightShift(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "RightShift((%s, %s))" % (repr(self.left), repr(self.right))
-
-class Slice(Node):
- def __init__(self, expr, flags, lower, upper, lineno=None):
- self.expr = expr
- self.flags = flags
- self.lower = lower
- self.upper = upper
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.expr)
- children.append(self.flags)
- children.append(self.lower)
- children.append(self.upper)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.expr)
- if self.lower is not None:
- nodelist.append(self.lower)
- if self.upper is not None:
- nodelist.append(self.upper)
- return tuple(nodelist)
-
- def __repr__(self):
- return "Slice(%s, %s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.lower), repr(self.upper))
-
-class Sliceobj(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Sliceobj(%s)" % (repr(self.nodes),)
-
-class Stmt(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Stmt(%s)" % (repr(self.nodes),)
-
-class Sub(Node):
- def __init__(self, (left, right), lineno=None):
- self.left = left
- self.right = right
- self.lineno = lineno
-
- def getChildren(self):
- return self.left, self.right
-
- def getChildNodes(self):
- return self.left, self.right
-
- def __repr__(self):
- return "Sub((%s, %s))" % (repr(self.left), repr(self.right))
-
-class Subscript(Node):
- def __init__(self, expr, flags, subs, lineno=None):
- self.expr = expr
- self.flags = flags
- self.subs = subs
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.expr)
- children.append(self.flags)
- children.extend(flatten(self.subs))
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.expr)
- nodelist.extend(flatten_nodes(self.subs))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Subscript(%s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.subs))
-
-class TryExcept(Node):
- def __init__(self, body, handlers, else_, lineno=None):
- self.body = body
- self.handlers = handlers
- self.else_ = else_
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.body)
- children.extend(flatten(self.handlers))
- children.append(self.else_)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.body)
- nodelist.extend(flatten_nodes(self.handlers))
- if self.else_ is not None:
- nodelist.append(self.else_)
- return tuple(nodelist)
-
- def __repr__(self):
- return "TryExcept(%s, %s, %s)" % (repr(self.body), repr(self.handlers), repr(self.else_))
-
-class TryFinally(Node):
- def __init__(self, body, final, lineno=None):
- self.body = body
- self.final = final
- self.lineno = lineno
-
- def getChildren(self):
- return self.body, self.final
-
- def getChildNodes(self):
- return self.body, self.final
-
- def __repr__(self):
- return "TryFinally(%s, %s)" % (repr(self.body), repr(self.final))
-
-class Tuple(Node):
- def __init__(self, nodes, lineno=None):
- self.nodes = nodes
- self.lineno = lineno
-
- def getChildren(self):
- return tuple(flatten(self.nodes))
-
- def getChildNodes(self):
- nodelist = []
- nodelist.extend(flatten_nodes(self.nodes))
- return tuple(nodelist)
-
- def __repr__(self):
- return "Tuple(%s)" % (repr(self.nodes),)
-
-class UnaryAdd(Node):
- def __init__(self, expr, lineno=None):
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- return self.expr,
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "UnaryAdd(%s)" % (repr(self.expr),)
-
-class UnarySub(Node):
- def __init__(self, expr, lineno=None):
- self.expr = expr
- self.lineno = lineno
-
- def getChildren(self):
- return self.expr,
-
- def getChildNodes(self):
- return self.expr,
-
- def __repr__(self):
- return "UnarySub(%s)" % (repr(self.expr),)
-
-class While(Node):
- def __init__(self, test, body, else_, lineno=None):
- self.test = test
- self.body = body
- self.else_ = else_
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.test)
- children.append(self.body)
- children.append(self.else_)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.test)
- nodelist.append(self.body)
- if self.else_ is not None:
- nodelist.append(self.else_)
- return tuple(nodelist)
-
- def __repr__(self):
- return "While(%s, %s, %s)" % (repr(self.test), repr(self.body), repr(self.else_))
-
-class With(Node):
- def __init__(self, expr, vars, body, lineno=None):
- self.expr = expr
- self.vars = vars
- self.body = body
- self.lineno = lineno
-
- def getChildren(self):
- children = []
- children.append(self.expr)
- children.append(self.vars)
- children.append(self.body)
- return tuple(children)
-
- def getChildNodes(self):
- nodelist = []
- nodelist.append(self.expr)
- if self.vars is not None:
- nodelist.append(self.vars)
- nodelist.append(self.body)
- return tuple(nodelist)
-
- def __repr__(self):
- return "With(%s, %s, %s)" % (repr(self.expr), repr(self.vars), repr(self.body))
-
-class Yield(Node):
- def __init__(self, value, lineno=None):
- self.value = value
- self.lineno = lineno
-
- def getChildren(self):
- return self.value,
-
- def getChildNodes(self):
- return self.value,
-
- def __repr__(self):
- return "Yield(%s)" % (repr(self.value),)
-
-for name, obj in globals().items():
- if isinstance(obj, type) and issubclass(obj, Node):
- nodes[name.lower()] = obj
diff --git a/sys/lib/python/compiler/consts.py b/sys/lib/python/compiler/consts.py
deleted file mode 100644
index c79e814be..000000000
--- a/sys/lib/python/compiler/consts.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# operation flags
-OP_ASSIGN = 'OP_ASSIGN'
-OP_DELETE = 'OP_DELETE'
-OP_APPLY = 'OP_APPLY'
-
-SC_LOCAL = 1
-SC_GLOBAL = 2
-SC_FREE = 3
-SC_CELL = 4
-SC_UNKNOWN = 5
-
-CO_OPTIMIZED = 0x0001
-CO_NEWLOCALS = 0x0002
-CO_VARARGS = 0x0004
-CO_VARKEYWORDS = 0x0008
-CO_NESTED = 0x0010
-CO_GENERATOR = 0x0020
-CO_GENERATOR_ALLOWED = 0
-CO_FUTURE_DIVISION = 0x2000
-CO_FUTURE_ABSIMPORT = 0x4000
-CO_FUTURE_WITH_STATEMENT = 0x8000
diff --git a/sys/lib/python/compiler/future.py b/sys/lib/python/compiler/future.py
deleted file mode 100644
index fef189e9e..000000000
--- a/sys/lib/python/compiler/future.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""Parser for future statements
-
-"""
-
-from compiler import ast, walk
-
-def is_future(stmt):
- """Return true if statement is a well-formed future statement"""
- if not isinstance(stmt, ast.From):
- return 0
- if stmt.modname == "__future__":
- return 1
- else:
- return 0
-
-class FutureParser:
-
- features = ("nested_scopes", "generators", "division",
- "absolute_import", "with_statement")
-
- def __init__(self):
- self.found = {} # set
-
- def visitModule(self, node):
- stmt = node.node
- for s in stmt.nodes:
- if not self.check_stmt(s):
- break
-
- def check_stmt(self, stmt):
- if is_future(stmt):
- for name, asname in stmt.names:
- if name in self.features:
- self.found[name] = 1
- else:
- raise SyntaxError, \
- "future feature %s is not defined" % name
- stmt.valid_future = 1
- return 1
- return 0
-
- def get_features(self):
- """Return list of features enabled by future statements"""
- return self.found.keys()
-
-class BadFutureParser:
- """Check for invalid future statements"""
-
- def visitFrom(self, node):
- if hasattr(node, 'valid_future'):
- return
- if node.modname != "__future__":
- return
- raise SyntaxError, "invalid future statement " + repr(node)
-
-def find_futures(node):
- p1 = FutureParser()
- p2 = BadFutureParser()
- walk(node, p1)
- walk(node, p2)
- return p1.get_features()
-
-if __name__ == "__main__":
- import sys
- from compiler import parseFile, walk
-
- for file in sys.argv[1:]:
- print file
- tree = parseFile(file)
- v = FutureParser()
- walk(tree, v)
- print v.found
- print
diff --git a/sys/lib/python/compiler/misc.py b/sys/lib/python/compiler/misc.py
deleted file mode 100644
index 8d9177092..000000000
--- a/sys/lib/python/compiler/misc.py
+++ /dev/null
@@ -1,73 +0,0 @@
-
-def flatten(tup):
- elts = []
- for elt in tup:
- if isinstance(elt, tuple):
- elts = elts + flatten(elt)
- else:
- elts.append(elt)
- return elts
-
-class Set:
- def __init__(self):
- self.elts = {}
- def __len__(self):
- return len(self.elts)
- def __contains__(self, elt):
- return self.elts.has_key(elt)
- def add(self, elt):
- self.elts[elt] = elt
- def elements(self):
- return self.elts.keys()
- def has_elt(self, elt):
- return self.elts.has_key(elt)
- def remove(self, elt):
- del self.elts[elt]
- def copy(self):
- c = Set()
- c.elts.update(self.elts)
- return c
-
-class Stack:
- def __init__(self):
- self.stack = []
- self.pop = self.stack.pop
- def __len__(self):
- return len(self.stack)
- def push(self, elt):
- self.stack.append(elt)
- def top(self):
- return self.stack[-1]
- def __getitem__(self, index): # needed by visitContinue()
- return self.stack[index]
-
-MANGLE_LEN = 256 # magic constant from compile.c
-
-def mangle(name, klass):
- if not name.startswith('__'):
- return name
- if len(name) + 2 >= MANGLE_LEN:
- return name
- if name.endswith('__'):
- return name
- try:
- i = 0
- while klass[i] == '_':
- i = i + 1
- except IndexError:
- return name
- klass = klass[i:]
-
- tlen = len(klass) + len(name)
- if tlen > MANGLE_LEN:
- klass = klass[:MANGLE_LEN-tlen]
-
- return "_%s%s" % (klass, name)
-
-def set_filename(filename, tree):
- """Set the filename attribute to filename on every node in tree"""
- worklist = [tree]
- while worklist:
- node = worklist.pop(0)
- node.filename = filename
- worklist.extend(node.getChildNodes())
diff --git a/sys/lib/python/compiler/pyassem.py b/sys/lib/python/compiler/pyassem.py
deleted file mode 100644
index 82ff39664..000000000
--- a/sys/lib/python/compiler/pyassem.py
+++ /dev/null
@@ -1,818 +0,0 @@
-"""A flow graph representation for Python bytecode"""
-
-import dis
-import new
-import sys
-
-from compiler import misc
-from compiler.consts \
- import CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS
-
-class FlowGraph:
- def __init__(self):
- self.current = self.entry = Block()
- self.exit = Block("exit")
- self.blocks = misc.Set()
- self.blocks.add(self.entry)
- self.blocks.add(self.exit)
-
- def startBlock(self, block):
- if self._debug:
- if self.current:
- print "end", repr(self.current)
- print " next", self.current.next
- print " ", self.current.get_children()
- print repr(block)
- self.current = block
-
- def nextBlock(self, block=None):
- # XXX think we need to specify when there is implicit transfer
- # from one block to the next. might be better to represent this
- # with explicit JUMP_ABSOLUTE instructions that are optimized
- # out when they are unnecessary.
- #
- # I think this strategy works: each block has a child
- # designated as "next" which is returned as the last of the
- # children. because the nodes in a graph are emitted in
- # reverse post order, the "next" block will always be emitted
- # immediately after its parent.
- # Worry: maintaining this invariant could be tricky
- if block is None:
- block = self.newBlock()
-
- # Note: If the current block ends with an unconditional
- # control transfer, then it is incorrect to add an implicit
- # transfer to the block graph. The current code requires
- # these edges to get the blocks emitted in the right order,
- # however. :-( If a client needs to remove these edges, call
- # pruneEdges().
-
- self.current.addNext(block)
- self.startBlock(block)
-
- def newBlock(self):
- b = Block()
- self.blocks.add(b)
- return b
-
- def startExitBlock(self):
- self.startBlock(self.exit)
-
- _debug = 0
-
- def _enable_debug(self):
- self._debug = 1
-
- def _disable_debug(self):
- self._debug = 0
-
- def emit(self, *inst):
- if self._debug:
- print "\t", inst
- if inst[0] in ['RETURN_VALUE', 'YIELD_VALUE']:
- self.current.addOutEdge(self.exit)
- if len(inst) == 2 and isinstance(inst[1], Block):
- self.current.addOutEdge(inst[1])
- self.current.emit(inst)
-
- def getBlocksInOrder(self):
- """Return the blocks in reverse postorder
-
- i.e. each node appears before all of its successors
- """
- # XXX make sure every node that doesn't have an explicit next
- # is set so that next points to exit
- for b in self.blocks.elements():
- if b is self.exit:
- continue
- if not b.next:
- b.addNext(self.exit)
- order = dfs_postorder(self.entry, {})
- order.reverse()
- self.fixupOrder(order, self.exit)
- # hack alert
- if not self.exit in order:
- order.append(self.exit)
-
- return order
-
- def fixupOrder(self, blocks, default_next):
- """Fixup bad order introduced by DFS."""
-
- # XXX This is a total mess. There must be a better way to get
- # the code blocks in the right order.
-
- self.fixupOrderHonorNext(blocks, default_next)
- self.fixupOrderForward(blocks, default_next)
-
- def fixupOrderHonorNext(self, blocks, default_next):
- """Fix one problem with DFS.
-
- The DFS uses child block, but doesn't know about the special
- "next" block. As a result, the DFS can order blocks so that a
- block isn't next to the right block for implicit control
- transfers.
- """
- index = {}
- for i in range(len(blocks)):
- index[blocks[i]] = i
-
- for i in range(0, len(blocks) - 1):
- b = blocks[i]
- n = blocks[i + 1]
- if not b.next or b.next[0] == default_next or b.next[0] == n:
- continue
- # The blocks are in the wrong order. Find the chain of
- # blocks to insert where they belong.
- cur = b
- chain = []
- elt = cur
- while elt.next and elt.next[0] != default_next:
- chain.append(elt.next[0])
- elt = elt.next[0]
- # Now remove the blocks in the chain from the current
- # block list, so that they can be re-inserted.
- l = []
- for b in chain:
- assert index[b] > i
- l.append((index[b], b))
- l.sort()
- l.reverse()
- for j, b in l:
- del blocks[index[b]]
- # Insert the chain in the proper location
- blocks[i:i + 1] = [cur] + chain
- # Finally, re-compute the block indexes
- for i in range(len(blocks)):
- index[blocks[i]] = i
-
- def fixupOrderForward(self, blocks, default_next):
- """Make sure all JUMP_FORWARDs jump forward"""
- index = {}
- chains = []
- cur = []
- for b in blocks:
- index[b] = len(chains)
- cur.append(b)
- if b.next and b.next[0] == default_next:
- chains.append(cur)
- cur = []
- chains.append(cur)
-
- while 1:
- constraints = []
-
- for i in range(len(chains)):
- l = chains[i]
- for b in l:
- for c in b.get_children():
- if index[c] < i:
- forward_p = 0
- for inst in b.insts:
- if inst[0] == 'JUMP_FORWARD':
- if inst[1] == c:
- forward_p = 1
- if not forward_p:
- continue
- constraints.append((index[c], i))
-
- if not constraints:
- break
-
- # XXX just do one for now
- # do swaps to get things in the right order
- goes_before, a_chain = constraints[0]
- assert a_chain > goes_before
- c = chains[a_chain]
- chains.remove(c)
- chains.insert(goes_before, c)
-
- del blocks[:]
- for c in chains:
- for b in c:
- blocks.append(b)
-
- def getBlocks(self):
- return self.blocks.elements()
-
- def getRoot(self):
- """Return nodes appropriate for use with dominator"""
- return self.entry
-
- def getContainedGraphs(self):
- l = []
- for b in self.getBlocks():
- l.extend(b.getContainedGraphs())
- return l
-
-def dfs_postorder(b, seen):
- """Depth-first search of tree rooted at b, return in postorder"""
- order = []
- seen[b] = b
- for c in b.get_children():
- if seen.has_key(c):
- continue
- order = order + dfs_postorder(c, seen)
- order.append(b)
- return order
-
-class Block:
- _count = 0
-
- def __init__(self, label=''):
- self.insts = []
- self.inEdges = misc.Set()
- self.outEdges = misc.Set()
- self.label = label
- self.bid = Block._count
- self.next = []
- Block._count = Block._count + 1
-
- def __repr__(self):
- if self.label:
- return "<block %s id=%d>" % (self.label, self.bid)
- else:
- return "<block id=%d>" % (self.bid)
-
- def __str__(self):
- insts = map(str, self.insts)
- return "<block %s %d:\n%s>" % (self.label, self.bid,
- '\n'.join(insts))
-
- def emit(self, inst):
- op = inst[0]
- if op[:4] == 'JUMP':
- self.outEdges.add(inst[1])
- self.insts.append(inst)
-
- def getInstructions(self):
- return self.insts
-
- def addInEdge(self, block):
- self.inEdges.add(block)
-
- def addOutEdge(self, block):
- self.outEdges.add(block)
-
- def addNext(self, block):
- self.next.append(block)
- assert len(self.next) == 1, map(str, self.next)
-
- _uncond_transfer = ('RETURN_VALUE', 'RAISE_VARARGS', 'YIELD_VALUE',
- 'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'CONTINUE_LOOP')
-
- def pruneNext(self):
- """Remove bogus edge for unconditional transfers
-
- Each block has a next edge that accounts for implicit control
- transfers, e.g. from a JUMP_IF_FALSE to the block that will be
- executed if the test is true.
-
- These edges must remain for the current assembler code to
- work. If they are removed, the dfs_postorder gets things in
- weird orders. However, they shouldn't be there for other
- purposes, e.g. conversion to SSA form. This method will
- remove the next edge when it follows an unconditional control
- transfer.
- """
- try:
- op, arg = self.insts[-1]
- except (IndexError, ValueError):
- return
- if op in self._uncond_transfer:
- self.next = []
-
- def get_children(self):
- if self.next and self.next[0] in self.outEdges:
- self.outEdges.remove(self.next[0])
- return self.outEdges.elements() + self.next
-
- def getContainedGraphs(self):
- """Return all graphs contained within this block.
-
- For example, a MAKE_FUNCTION block will contain a reference to
- the graph for the function body.
- """
- contained = []
- for inst in self.insts:
- if len(inst) == 1:
- continue
- op = inst[1]
- if hasattr(op, 'graph'):
- contained.append(op.graph)
- return contained
-
-# flags for code objects
-
-# the FlowGraph is transformed in place; it exists in one of these states
-RAW = "RAW"
-FLAT = "FLAT"
-CONV = "CONV"
-DONE = "DONE"
-
-class PyFlowGraph(FlowGraph):
- super_init = FlowGraph.__init__
-
- def __init__(self, name, filename, args=(), optimized=0, klass=None):
- self.super_init()
- self.name = name
- self.filename = filename
- self.docstring = None
- self.args = args # XXX
- self.argcount = getArgCount(args)
- self.klass = klass
- if optimized:
- self.flags = CO_OPTIMIZED | CO_NEWLOCALS
- else:
- self.flags = 0
- self.consts = []
- self.names = []
- # Free variables found by the symbol table scan, including
- # variables used only in nested scopes, are included here.
- self.freevars = []
- self.cellvars = []
- # The closure list is used to track the order of cell
- # variables and free variables in the resulting code object.
- # The offsets used by LOAD_CLOSURE/LOAD_DEREF refer to both
- # kinds of variables.
- self.closure = []
- self.varnames = list(args) or []
- for i in range(len(self.varnames)):
- var = self.varnames[i]
- if isinstance(var, TupleArg):
- self.varnames[i] = var.getName()
- self.stage = RAW
-
- def setDocstring(self, doc):
- self.docstring = doc
-
- def setFlag(self, flag):
- self.flags = self.flags | flag
- if flag == CO_VARARGS:
- self.argcount = self.argcount - 1
-
- def checkFlag(self, flag):
- if self.flags & flag:
- return 1
-
- def setFreeVars(self, names):
- self.freevars = list(names)
-
- def setCellVars(self, names):
- self.cellvars = names
-
- def getCode(self):
- """Get a Python code object"""
- assert self.stage == RAW
- self.computeStackDepth()
- self.flattenGraph()
- assert self.stage == FLAT
- self.convertArgs()
- assert self.stage == CONV
- self.makeByteCode()
- assert self.stage == DONE
- return self.newCodeObject()
-
- def dump(self, io=None):
- if io:
- save = sys.stdout
- sys.stdout = io
- pc = 0
- for t in self.insts:
- opname = t[0]
- if opname == "SET_LINENO":
- print
- if len(t) == 1:
- print "\t", "%3d" % pc, opname
- pc = pc + 1
- else:
- print "\t", "%3d" % pc, opname, t[1]
- pc = pc + 3
- if io:
- sys.stdout = save
-
- def computeStackDepth(self):
- """Compute the max stack depth.
-
- Approach is to compute the stack effect of each basic block.
- Then find the path through the code with the largest total
- effect.
- """
- depth = {}
- exit = None
- for b in self.getBlocks():
- depth[b] = findDepth(b.getInstructions())
-
- seen = {}
-
- def max_depth(b, d):
- if seen.has_key(b):
- return d
- seen[b] = 1
- d = d + depth[b]
- children = b.get_children()
- if children:
- return max([max_depth(c, d) for c in children])
- else:
- if not b.label == "exit":
- return max_depth(self.exit, d)
- else:
- return d
-
- self.stacksize = max_depth(self.entry, 0)
-
- def flattenGraph(self):
- """Arrange the blocks in order and resolve jumps"""
- assert self.stage == RAW
- self.insts = insts = []
- pc = 0
- begin = {}
- end = {}
- for b in self.getBlocksInOrder():
- begin[b] = pc
- for inst in b.getInstructions():
- insts.append(inst)
- if len(inst) == 1:
- pc = pc + 1
- elif inst[0] != "SET_LINENO":
- # arg takes 2 bytes
- pc = pc + 3
- end[b] = pc
- pc = 0
- for i in range(len(insts)):
- inst = insts[i]
- if len(inst) == 1:
- pc = pc + 1
- elif inst[0] != "SET_LINENO":
- pc = pc + 3
- opname = inst[0]
- if self.hasjrel.has_elt(opname):
- oparg = inst[1]
- offset = begin[oparg] - pc
- insts[i] = opname, offset
- elif self.hasjabs.has_elt(opname):
- insts[i] = opname, begin[inst[1]]
- self.stage = FLAT
-
- hasjrel = misc.Set()
- for i in dis.hasjrel:
- hasjrel.add(dis.opname[i])
- hasjabs = misc.Set()
- for i in dis.hasjabs:
- hasjabs.add(dis.opname[i])
-
- def convertArgs(self):
- """Convert arguments from symbolic to concrete form"""
- assert self.stage == FLAT
- self.consts.insert(0, self.docstring)
- self.sort_cellvars()
- for i in range(len(self.insts)):
- t = self.insts[i]
- if len(t) == 2:
- opname, oparg = t
- conv = self._converters.get(opname, None)
- if conv:
- self.insts[i] = opname, conv(self, oparg)
- self.stage = CONV
-
- def sort_cellvars(self):
- """Sort cellvars in the order of varnames and prune from freevars.
- """
- cells = {}
- for name in self.cellvars:
- cells[name] = 1
- self.cellvars = [name for name in self.varnames
- if cells.has_key(name)]
- for name in self.cellvars:
- del cells[name]
- self.cellvars = self.cellvars + cells.keys()
- self.closure = self.cellvars + self.freevars
-
- def _lookupName(self, name, list):
- """Return index of name in list, appending if necessary
-
- This routine uses a list instead of a dictionary, because a
- dictionary can't store two different keys if the keys have the
- same value but different types, e.g. 2 and 2L. The compiler
- must treat these two separately, so it does an explicit type
- comparison before comparing the values.
- """
- t = type(name)
- for i in range(len(list)):
- if t == type(list[i]) and list[i] == name:
- return i
- end = len(list)
- list.append(name)
- return end
-
- _converters = {}
- def _convert_LOAD_CONST(self, arg):
- if hasattr(arg, 'getCode'):
- arg = arg.getCode()
- return self._lookupName(arg, self.consts)
-
- def _convert_LOAD_FAST(self, arg):
- self._lookupName(arg, self.names)
- return self._lookupName(arg, self.varnames)
- _convert_STORE_FAST = _convert_LOAD_FAST
- _convert_DELETE_FAST = _convert_LOAD_FAST
-
- def _convert_LOAD_NAME(self, arg):
- if self.klass is None:
- self._lookupName(arg, self.varnames)
- return self._lookupName(arg, self.names)
-
- def _convert_NAME(self, arg):
- if self.klass is None:
- self._lookupName(arg, self.varnames)
- return self._lookupName(arg, self.names)
- _convert_STORE_NAME = _convert_NAME
- _convert_DELETE_NAME = _convert_NAME
- _convert_IMPORT_NAME = _convert_NAME
- _convert_IMPORT_FROM = _convert_NAME
- _convert_STORE_ATTR = _convert_NAME
- _convert_LOAD_ATTR = _convert_NAME
- _convert_DELETE_ATTR = _convert_NAME
- _convert_LOAD_GLOBAL = _convert_NAME
- _convert_STORE_GLOBAL = _convert_NAME
- _convert_DELETE_GLOBAL = _convert_NAME
-
- def _convert_DEREF(self, arg):
- self._lookupName(arg, self.names)
- self._lookupName(arg, self.varnames)
- return self._lookupName(arg, self.closure)
- _convert_LOAD_DEREF = _convert_DEREF
- _convert_STORE_DEREF = _convert_DEREF
-
- def _convert_LOAD_CLOSURE(self, arg):
- self._lookupName(arg, self.varnames)
- return self._lookupName(arg, self.closure)
-
- _cmp = list(dis.cmp_op)
- def _convert_COMPARE_OP(self, arg):
- return self._cmp.index(arg)
-
- # similarly for other opcodes...
-
- for name, obj in locals().items():
- if name[:9] == "_convert_":
- opname = name[9:]
- _converters[opname] = obj
- del name, obj, opname
-
- def makeByteCode(self):
- assert self.stage == CONV
- self.lnotab = lnotab = LineAddrTable()
- for t in self.insts:
- opname = t[0]
- if len(t) == 1:
- lnotab.addCode(self.opnum[opname])
- else:
- oparg = t[1]
- if opname == "SET_LINENO":
- lnotab.nextLine(oparg)
- continue
- hi, lo = twobyte(oparg)
- try:
- lnotab.addCode(self.opnum[opname], lo, hi)
- except ValueError:
- print opname, oparg
- print self.opnum[opname], lo, hi
- raise
- self.stage = DONE
-
- opnum = {}
- for num in range(len(dis.opname)):
- opnum[dis.opname[num]] = num
- del num
-
- def newCodeObject(self):
- assert self.stage == DONE
- if (self.flags & CO_NEWLOCALS) == 0:
- nlocals = 0
- else:
- nlocals = len(self.varnames)
- argcount = self.argcount
- if self.flags & CO_VARKEYWORDS:
- argcount = argcount - 1
- return new.code(argcount, nlocals, self.stacksize, self.flags,
- self.lnotab.getCode(), self.getConsts(),
- tuple(self.names), tuple(self.varnames),
- self.filename, self.name, self.lnotab.firstline,
- self.lnotab.getTable(), tuple(self.freevars),
- tuple(self.cellvars))
-
- def getConsts(self):
- """Return a tuple for the const slot of the code object
-
- Must convert references to code (MAKE_FUNCTION) to code
- objects recursively.
- """
- l = []
- for elt in self.consts:
- if isinstance(elt, PyFlowGraph):
- elt = elt.getCode()
- l.append(elt)
- return tuple(l)
-
-def isJump(opname):
- if opname[:4] == 'JUMP':
- return 1
-
-class TupleArg:
- """Helper for marking func defs with nested tuples in arglist"""
- def __init__(self, count, names):
- self.count = count
- self.names = names
- def __repr__(self):
- return "TupleArg(%s, %s)" % (self.count, self.names)
- def getName(self):
- return ".%d" % self.count
-
-def getArgCount(args):
- argcount = len(args)
- if args:
- for arg in args:
- if isinstance(arg, TupleArg):
- numNames = len(misc.flatten(arg.names))
- argcount = argcount - numNames
- return argcount
-
-def twobyte(val):
- """Convert an int argument into high and low bytes"""
- assert isinstance(val, int)
- return divmod(val, 256)
-
-class LineAddrTable:
- """lnotab
-
- This class builds the lnotab, which is documented in compile.c.
- Here's a brief recap:
-
- For each SET_LINENO instruction after the first one, two bytes are
- added to lnotab. (In some cases, multiple two-byte entries are
- added.) The first byte is the distance in bytes between the
- instruction for the last SET_LINENO and the current SET_LINENO.
- The second byte is offset in line numbers. If either offset is
- greater than 255, multiple two-byte entries are added -- see
- compile.c for the delicate details.
- """
-
- def __init__(self):
- self.code = []
- self.codeOffset = 0
- self.firstline = 0
- self.lastline = 0
- self.lastoff = 0
- self.lnotab = []
-
- def addCode(self, *args):
- for arg in args:
- self.code.append(chr(arg))
- self.codeOffset = self.codeOffset + len(args)
-
- def nextLine(self, lineno):
- if self.firstline == 0:
- self.firstline = lineno
- self.lastline = lineno
- else:
- # compute deltas
- addr = self.codeOffset - self.lastoff
- line = lineno - self.lastline
- # Python assumes that lineno always increases with
- # increasing bytecode address (lnotab is unsigned char).
- # Depending on when SET_LINENO instructions are emitted
- # this is not always true. Consider the code:
- # a = (1,
- # b)
- # In the bytecode stream, the assignment to "a" occurs
- # after the loading of "b". This works with the C Python
- # compiler because it only generates a SET_LINENO instruction
- # for the assignment.
- if line >= 0:
- push = self.lnotab.append
- while addr > 255:
- push(255); push(0)
- addr -= 255
- while line > 255:
- push(addr); push(255)
- line -= 255
- addr = 0
- if addr > 0 or line > 0:
- push(addr); push(line)
- self.lastline = lineno
- self.lastoff = self.codeOffset
-
- def getCode(self):
- return ''.join(self.code)
-
- def getTable(self):
- return ''.join(map(chr, self.lnotab))
-
-class StackDepthTracker:
- # XXX 1. need to keep track of stack depth on jumps
- # XXX 2. at least partly as a result, this code is broken
-
- def findDepth(self, insts, debug=0):
- depth = 0
- maxDepth = 0
- for i in insts:
- opname = i[0]
- if debug:
- print i,
- delta = self.effect.get(opname, None)
- if delta is not None:
- depth = depth + delta
- else:
- # now check patterns
- for pat, pat_delta in self.patterns:
- if opname[:len(pat)] == pat:
- delta = pat_delta
- depth = depth + delta
- break
- # if we still haven't found a match
- if delta is None:
- meth = getattr(self, opname, None)
- if meth is not None:
- depth = depth + meth(i[1])
- if depth > maxDepth:
- maxDepth = depth
- if debug:
- print depth, maxDepth
- return maxDepth
-
- effect = {
- 'POP_TOP': -1,
- 'DUP_TOP': 1,
- 'LIST_APPEND': -2,
- 'SLICE+1': -1,
- 'SLICE+2': -1,
- 'SLICE+3': -2,
- 'STORE_SLICE+0': -1,
- 'STORE_SLICE+1': -2,
- 'STORE_SLICE+2': -2,
- 'STORE_SLICE+3': -3,
- 'DELETE_SLICE+0': -1,
- 'DELETE_SLICE+1': -2,
- 'DELETE_SLICE+2': -2,
- 'DELETE_SLICE+3': -3,
- 'STORE_SUBSCR': -3,
- 'DELETE_SUBSCR': -2,
- # PRINT_EXPR?
- 'PRINT_ITEM': -1,
- 'RETURN_VALUE': -1,
- 'YIELD_VALUE': -1,
- 'EXEC_STMT': -3,
- 'BUILD_CLASS': -2,
- 'STORE_NAME': -1,
- 'STORE_ATTR': -2,
- 'DELETE_ATTR': -1,
- 'STORE_GLOBAL': -1,
- 'BUILD_MAP': 1,
- 'COMPARE_OP': -1,
- 'STORE_FAST': -1,
- 'IMPORT_STAR': -1,
- 'IMPORT_NAME': -1,
- 'IMPORT_FROM': 1,
- 'LOAD_ATTR': 0, # unlike other loads
- # close enough...
- 'SETUP_EXCEPT': 3,
- 'SETUP_FINALLY': 3,
- 'FOR_ITER': 1,
- 'WITH_CLEANUP': -1,
- }
- # use pattern match
- patterns = [
- ('BINARY_', -1),
- ('LOAD_', 1),
- ]
-
- def UNPACK_SEQUENCE(self, count):
- return count-1
- def BUILD_TUPLE(self, count):
- return -count+1
- def BUILD_LIST(self, count):
- return -count+1
- def CALL_FUNCTION(self, argc):
- hi, lo = divmod(argc, 256)
- return -(lo + hi * 2)
- def CALL_FUNCTION_VAR(self, argc):
- return self.CALL_FUNCTION(argc)-1
- def CALL_FUNCTION_KW(self, argc):
- return self.CALL_FUNCTION(argc)-1
- def CALL_FUNCTION_VAR_KW(self, argc):
- return self.CALL_FUNCTION(argc)-2
- def MAKE_FUNCTION(self, argc):
- return -argc
- def MAKE_CLOSURE(self, argc):
- # XXX need to account for free variables too!
- return -argc
- def BUILD_SLICE(self, argc):
- if argc == 2:
- return -1
- elif argc == 3:
- return -2
- def DUP_TOPX(self, argc):
- return argc
-
-findDepth = StackDepthTracker().findDepth
diff --git a/sys/lib/python/compiler/pycodegen.py b/sys/lib/python/compiler/pycodegen.py
deleted file mode 100644
index 2af03a89e..000000000
--- a/sys/lib/python/compiler/pycodegen.py
+++ /dev/null
@@ -1,1533 +0,0 @@
-import imp
-import os
-import marshal
-import struct
-import sys
-from cStringIO import StringIO
-
-from compiler import ast, parse, walk, syntax
-from compiler import pyassem, misc, future, symbols
-from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL
-from compiler.consts import (CO_VARARGS, CO_VARKEYWORDS, CO_NEWLOCALS,
- CO_NESTED, CO_GENERATOR, CO_FUTURE_DIVISION,
- CO_FUTURE_ABSIMPORT, CO_FUTURE_WITH_STATEMENT)
-from compiler.pyassem import TupleArg
-
-# XXX The version-specific code can go, since this code only works with 2.x.
-# Do we have Python 1.x or Python 2.x?
-try:
- VERSION = sys.version_info[0]
-except AttributeError:
- VERSION = 1
-
-callfunc_opcode_info = {
- # (Have *args, Have **args) : opcode
- (0,0) : "CALL_FUNCTION",
- (1,0) : "CALL_FUNCTION_VAR",
- (0,1) : "CALL_FUNCTION_KW",
- (1,1) : "CALL_FUNCTION_VAR_KW",
-}
-
-LOOP = 1
-EXCEPT = 2
-TRY_FINALLY = 3
-END_FINALLY = 4
-
-def compileFile(filename, display=0):
- f = open(filename, 'U')
- buf = f.read()
- f.close()
- mod = Module(buf, filename)
- try:
- mod.compile(display)
- except SyntaxError:
- raise
- else:
- f = open(filename + "c", "wb")
- mod.dump(f)
- f.close()
-
-def compile(source, filename, mode, flags=None, dont_inherit=None):
- """Replacement for builtin compile() function"""
- if flags is not None or dont_inherit is not None:
- raise RuntimeError, "not implemented yet"
-
- if mode == "single":
- gen = Interactive(source, filename)
- elif mode == "exec":
- gen = Module(source, filename)
- elif mode == "eval":
- gen = Expression(source, filename)
- else:
- raise ValueError("compile() 3rd arg must be 'exec' or "
- "'eval' or 'single'")
- gen.compile()
- return gen.code
-
-class AbstractCompileMode:
-
- mode = None # defined by subclass
-
- def __init__(self, source, filename):
- self.source = source
- self.filename = filename
- self.code = None
-
- def _get_tree(self):
- tree = parse(self.source, self.mode)
- misc.set_filename(self.filename, tree)
- syntax.check(tree)
- return tree
-
- def compile(self):
- pass # implemented by subclass
-
- def getCode(self):
- return self.code
-
-class Expression(AbstractCompileMode):
-
- mode = "eval"
-
- def compile(self):
- tree = self._get_tree()
- gen = ExpressionCodeGenerator(tree)
- self.code = gen.getCode()
-
-class Interactive(AbstractCompileMode):
-
- mode = "single"
-
- def compile(self):
- tree = self._get_tree()
- gen = InteractiveCodeGenerator(tree)
- self.code = gen.getCode()
-
-class Module(AbstractCompileMode):
-
- mode = "exec"
-
- def compile(self, display=0):
- tree = self._get_tree()
- gen = ModuleCodeGenerator(tree)
- if display:
- import pprint
- print pprint.pprint(tree)
- self.code = gen.getCode()
-
- def dump(self, f):
- f.write(self.getPycHeader())
- marshal.dump(self.code, f)
-
- MAGIC = imp.get_magic()
-
- def getPycHeader(self):
- # compile.c uses marshal to write a long directly, with
- # calling the interface that would also generate a 1-byte code
- # to indicate the type of the value. simplest way to get the
- # same effect is to call marshal and then skip the code.
- mtime = os.path.getmtime(self.filename)
- mtime = struct.pack('<i', mtime)
- return self.MAGIC + mtime
-
-class LocalNameFinder:
- """Find local names in scope"""
- def __init__(self, names=()):
- self.names = misc.Set()
- self.globals = misc.Set()
- for name in names:
- self.names.add(name)
-
- # XXX list comprehensions and for loops
-
- def getLocals(self):
- for elt in self.globals.elements():
- if self.names.has_elt(elt):
- self.names.remove(elt)
- return self.names
-
- def visitDict(self, node):
- pass
-
- def visitGlobal(self, node):
- for name in node.names:
- self.globals.add(name)
-
- def visitFunction(self, node):
- self.names.add(node.name)
-
- def visitLambda(self, node):
- pass
-
- def visitImport(self, node):
- for name, alias in node.names:
- self.names.add(alias or name)
-
- def visitFrom(self, node):
- for name, alias in node.names:
- self.names.add(alias or name)
-
- def visitClass(self, node):
- self.names.add(node.name)
-
- def visitAssName(self, node):
- self.names.add(node.name)
-
-def is_constant_false(node):
- if isinstance(node, ast.Const):
- if not node.value:
- return 1
- return 0
-
-class CodeGenerator:
- """Defines basic code generator for Python bytecode
-
- This class is an abstract base class. Concrete subclasses must
- define an __init__() that defines self.graph and then calls the
- __init__() defined in this class.
-
- The concrete class must also define the class attributes
- NameFinder, FunctionGen, and ClassGen. These attributes can be
- defined in the initClass() method, which is a hook for
- initializing these methods after all the classes have been
- defined.
- """
-
- optimized = 0 # is namespace access optimized?
- __initialized = None
- class_name = None # provide default for instance variable
-
- def __init__(self):
- if self.__initialized is None:
- self.initClass()
- self.__class__.__initialized = 1
- self.checkClass()
- self.locals = misc.Stack()
- self.setups = misc.Stack()
- self.last_lineno = None
- self._setupGraphDelegation()
- self._div_op = "BINARY_DIVIDE"
-
- # XXX set flags based on future features
- futures = self.get_module().futures
- for feature in futures:
- if feature == "division":
- self.graph.setFlag(CO_FUTURE_DIVISION)
- self._div_op = "BINARY_TRUE_DIVIDE"
- elif feature == "absolute_import":
- self.graph.setFlag(CO_FUTURE_ABSIMPORT)
- elif feature == "with_statement":
- self.graph.setFlag(CO_FUTURE_WITH_STATEMENT)
-
- def initClass(self):
- """This method is called once for each class"""
-
- def checkClass(self):
- """Verify that class is constructed correctly"""
- try:
- assert hasattr(self, 'graph')
- assert getattr(self, 'NameFinder')
- assert getattr(self, 'FunctionGen')
- assert getattr(self, 'ClassGen')
- except AssertionError, msg:
- intro = "Bad class construction for %s" % self.__class__.__name__
- raise AssertionError, intro
-
- def _setupGraphDelegation(self):
- self.emit = self.graph.emit
- self.newBlock = self.graph.newBlock
- self.startBlock = self.graph.startBlock
- self.nextBlock = self.graph.nextBlock
- self.setDocstring = self.graph.setDocstring
-
- def getCode(self):
- """Return a code object"""
- return self.graph.getCode()
-
- def mangle(self, name):
- if self.class_name is not None:
- return misc.mangle(name, self.class_name)
- else:
- return name
-
- def parseSymbols(self, tree):
- s = symbols.SymbolVisitor()
- walk(tree, s)
- return s.scopes
-
- def get_module(self):
- raise RuntimeError, "should be implemented by subclasses"
-
- # Next five methods handle name access
-
- def isLocalName(self, name):
- return self.locals.top().has_elt(name)
-
- def storeName(self, name):
- self._nameOp('STORE', name)
-
- def loadName(self, name):
- self._nameOp('LOAD', name)
-
- def delName(self, name):
- self._nameOp('DELETE', name)
-
- def _nameOp(self, prefix, name):
- name = self.mangle(name)
- scope = self.scope.check_name(name)
- if scope == SC_LOCAL:
- if not self.optimized:
- self.emit(prefix + '_NAME', name)
- else:
- self.emit(prefix + '_FAST', name)
- elif scope == SC_GLOBAL:
- if not self.optimized:
- self.emit(prefix + '_NAME', name)
- else:
- self.emit(prefix + '_GLOBAL', name)
- elif scope == SC_FREE or scope == SC_CELL:
- self.emit(prefix + '_DEREF', name)
- else:
- raise RuntimeError, "unsupported scope for var %s: %d" % \
- (name, scope)
-
- def _implicitNameOp(self, prefix, name):
- """Emit name ops for names generated implicitly by for loops
-
- The interpreter generates names that start with a period or
- dollar sign. The symbol table ignores these names because
- they aren't present in the program text.
- """
- if self.optimized:
- self.emit(prefix + '_FAST', name)
- else:
- self.emit(prefix + '_NAME', name)
-
- # The set_lineno() function and the explicit emit() calls for
- # SET_LINENO below are only used to generate the line number table.
- # As of Python 2.3, the interpreter does not have a SET_LINENO
- # instruction. pyassem treats SET_LINENO opcodes as a special case.
-
- def set_lineno(self, node, force=False):
- """Emit SET_LINENO if necessary.
-
- The instruction is considered necessary if the node has a
- lineno attribute and it is different than the last lineno
- emitted.
-
- Returns true if SET_LINENO was emitted.
-
- There are no rules for when an AST node should have a lineno
- attribute. The transformer and AST code need to be reviewed
- and a consistent policy implemented and documented. Until
- then, this method works around missing line numbers.
- """
- lineno = getattr(node, 'lineno', None)
- if lineno is not None and (lineno != self.last_lineno
- or force):
- self.emit('SET_LINENO', lineno)
- self.last_lineno = lineno
- return True
- return False
-
- # The first few visitor methods handle nodes that generator new
- # code objects. They use class attributes to determine what
- # specialized code generators to use.
-
- NameFinder = LocalNameFinder
- FunctionGen = None
- ClassGen = None
-
- def visitModule(self, node):
- self.scopes = self.parseSymbols(node)
- self.scope = self.scopes[node]
- self.emit('SET_LINENO', 0)
- if node.doc:
- self.emit('LOAD_CONST', node.doc)
- self.storeName('__doc__')
- lnf = walk(node.node, self.NameFinder(), verbose=0)
- self.locals.push(lnf.getLocals())
- self.visit(node.node)
- self.emit('LOAD_CONST', None)
- self.emit('RETURN_VALUE')
-
- def visitExpression(self, node):
- self.set_lineno(node)
- self.scopes = self.parseSymbols(node)
- self.scope = self.scopes[node]
- self.visit(node.node)
- self.emit('RETURN_VALUE')
-
- def visitFunction(self, node):
- self._visitFuncOrLambda(node, isLambda=0)
- if node.doc:
- self.setDocstring(node.doc)
- self.storeName(node.name)
-
- def visitLambda(self, node):
- self._visitFuncOrLambda(node, isLambda=1)
-
- def _visitFuncOrLambda(self, node, isLambda=0):
- if not isLambda and node.decorators:
- for decorator in node.decorators.nodes:
- self.visit(decorator)
- ndecorators = len(node.decorators.nodes)
- else:
- ndecorators = 0
-
- gen = self.FunctionGen(node, self.scopes, isLambda,
- self.class_name, self.get_module())
- walk(node.code, gen)
- gen.finish()
- self.set_lineno(node)
- for default in node.defaults:
- self.visit(default)
- self._makeClosure(gen, len(node.defaults))
- for i in range(ndecorators):
- self.emit('CALL_FUNCTION', 1)
-
- def visitClass(self, node):
- gen = self.ClassGen(node, self.scopes,
- self.get_module())
- walk(node.code, gen)
- gen.finish()
- self.set_lineno(node)
- self.emit('LOAD_CONST', node.name)
- for base in node.bases:
- self.visit(base)
- self.emit('BUILD_TUPLE', len(node.bases))
- self._makeClosure(gen, 0)
- self.emit('CALL_FUNCTION', 0)
- self.emit('BUILD_CLASS')
- self.storeName(node.name)
-
- # The rest are standard visitor methods
-
- # The next few implement control-flow statements
-
- def visitIf(self, node):
- end = self.newBlock()
- numtests = len(node.tests)
- for i in range(numtests):
- test, suite = node.tests[i]
- if is_constant_false(test):
- # XXX will need to check generator stuff here
- continue
- self.set_lineno(test)
- self.visit(test)
- nextTest = self.newBlock()
- self.emit('JUMP_IF_FALSE', nextTest)
- self.nextBlock()
- self.emit('POP_TOP')
- self.visit(suite)
- self.emit('JUMP_FORWARD', end)
- self.startBlock(nextTest)
- self.emit('POP_TOP')
- if node.else_:
- self.visit(node.else_)
- self.nextBlock(end)
-
- def visitWhile(self, node):
- self.set_lineno(node)
-
- loop = self.newBlock()
- else_ = self.newBlock()
-
- after = self.newBlock()
- self.emit('SETUP_LOOP', after)
-
- self.nextBlock(loop)
- self.setups.push((LOOP, loop))
-
- self.set_lineno(node, force=True)
- self.visit(node.test)
- self.emit('JUMP_IF_FALSE', else_ or after)
-
- self.nextBlock()
- self.emit('POP_TOP')
- self.visit(node.body)
- self.emit('JUMP_ABSOLUTE', loop)
-
- self.startBlock(else_) # or just the POPs if not else clause
- self.emit('POP_TOP')
- self.emit('POP_BLOCK')
- self.setups.pop()
- if node.else_:
- self.visit(node.else_)
- self.nextBlock(after)
-
- def visitFor(self, node):
- start = self.newBlock()
- anchor = self.newBlock()
- after = self.newBlock()
- self.setups.push((LOOP, start))
-
- self.set_lineno(node)
- self.emit('SETUP_LOOP', after)
- self.visit(node.list)
- self.emit('GET_ITER')
-
- self.nextBlock(start)
- self.set_lineno(node, force=1)
- self.emit('FOR_ITER', anchor)
- self.visit(node.assign)
- self.visit(node.body)
- self.emit('JUMP_ABSOLUTE', start)
- self.nextBlock(anchor)
- self.emit('POP_BLOCK')
- self.setups.pop()
- if node.else_:
- self.visit(node.else_)
- self.nextBlock(after)
-
- def visitBreak(self, node):
- if not self.setups:
- raise SyntaxError, "'break' outside loop (%s, %d)" % \
- (node.filename, node.lineno)
- self.set_lineno(node)
- self.emit('BREAK_LOOP')
-
- def visitContinue(self, node):
- if not self.setups:
- raise SyntaxError, "'continue' outside loop (%s, %d)" % \
- (node.filename, node.lineno)
- kind, block = self.setups.top()
- if kind == LOOP:
- self.set_lineno(node)
- self.emit('JUMP_ABSOLUTE', block)
- self.nextBlock()
- elif kind == EXCEPT or kind == TRY_FINALLY:
- self.set_lineno(node)
- # find the block that starts the loop
- top = len(self.setups)
- while top > 0:
- top = top - 1
- kind, loop_block = self.setups[top]
- if kind == LOOP:
- break
- if kind != LOOP:
- raise SyntaxError, "'continue' outside loop (%s, %d)" % \
- (node.filename, node.lineno)
- self.emit('CONTINUE_LOOP', loop_block)
- self.nextBlock()
- elif kind == END_FINALLY:
- msg = "'continue' not allowed inside 'finally' clause (%s, %d)"
- raise SyntaxError, msg % (node.filename, node.lineno)
-
- def visitTest(self, node, jump):
- end = self.newBlock()
- for child in node.nodes[:-1]:
- self.visit(child)
- self.emit(jump, end)
- self.nextBlock()
- self.emit('POP_TOP')
- self.visit(node.nodes[-1])
- self.nextBlock(end)
-
- def visitAnd(self, node):
- self.visitTest(node, 'JUMP_IF_FALSE')
-
- def visitOr(self, node):
- self.visitTest(node, 'JUMP_IF_TRUE')
-
- def visitIfExp(self, node):
- endblock = self.newBlock()
- elseblock = self.newBlock()
- self.visit(node.test)
- self.emit('JUMP_IF_FALSE', elseblock)
- self.emit('POP_TOP')
- self.visit(node.then)
- self.emit('JUMP_FORWARD', endblock)
- self.nextBlock(elseblock)
- self.emit('POP_TOP')
- self.visit(node.else_)
- self.nextBlock(endblock)
-
- def visitCompare(self, node):
- self.visit(node.expr)
- cleanup = self.newBlock()
- for op, code in node.ops[:-1]:
- self.visit(code)
- self.emit('DUP_TOP')
- self.emit('ROT_THREE')
- self.emit('COMPARE_OP', op)
- self.emit('JUMP_IF_FALSE', cleanup)
- self.nextBlock()
- self.emit('POP_TOP')
- # now do the last comparison
- if node.ops:
- op, code = node.ops[-1]
- self.visit(code)
- self.emit('COMPARE_OP', op)
- if len(node.ops) > 1:
- end = self.newBlock()
- self.emit('JUMP_FORWARD', end)
- self.startBlock(cleanup)
- self.emit('ROT_TWO')
- self.emit('POP_TOP')
- self.nextBlock(end)
-
- # list comprehensions
- __list_count = 0
-
- def visitListComp(self, node):
- self.set_lineno(node)
- # setup list
- append = "$append%d" % self.__list_count
- self.__list_count = self.__list_count + 1
- self.emit('BUILD_LIST', 0)
- self.emit('DUP_TOP')
- self.emit('LOAD_ATTR', 'append')
- self._implicitNameOp('STORE', append)
-
- stack = []
- for i, for_ in zip(range(len(node.quals)), node.quals):
- start, anchor = self.visit(for_)
- cont = None
- for if_ in for_.ifs:
- if cont is None:
- cont = self.newBlock()
- self.visit(if_, cont)
- stack.insert(0, (start, cont, anchor))
-
- self._implicitNameOp('LOAD', append)
- self.visit(node.expr)
- self.emit('CALL_FUNCTION', 1)
- self.emit('POP_TOP')
-
- for start, cont, anchor in stack:
- if cont:
- skip_one = self.newBlock()
- self.emit('JUMP_FORWARD', skip_one)
- self.startBlock(cont)
- self.emit('POP_TOP')
- self.nextBlock(skip_one)
- self.emit('JUMP_ABSOLUTE', start)
- self.startBlock(anchor)
- self._implicitNameOp('DELETE', append)
-
- self.__list_count = self.__list_count - 1
-
- def visitListCompFor(self, node):
- start = self.newBlock()
- anchor = self.newBlock()
-
- self.visit(node.list)
- self.emit('GET_ITER')
- self.nextBlock(start)
- self.set_lineno(node, force=True)
- self.emit('FOR_ITER', anchor)
- self.nextBlock()
- self.visit(node.assign)
- return start, anchor
-
- def visitListCompIf(self, node, branch):
- self.set_lineno(node, force=True)
- self.visit(node.test)
- self.emit('JUMP_IF_FALSE', branch)
- self.newBlock()
- self.emit('POP_TOP')
-
- def _makeClosure(self, gen, args):
- frees = gen.scope.get_free_vars()
- if frees:
- for name in frees:
- self.emit('LOAD_CLOSURE', name)
- self.emit('BUILD_TUPLE', len(frees))
- self.emit('LOAD_CONST', gen)
- self.emit('MAKE_CLOSURE', args)
- else:
- self.emit('LOAD_CONST', gen)
- self.emit('MAKE_FUNCTION', args)
-
- def visitGenExpr(self, node):
- gen = GenExprCodeGenerator(node, self.scopes, self.class_name,
- self.get_module())
- walk(node.code, gen)
- gen.finish()
- self.set_lineno(node)
- self._makeClosure(gen, 0)
- # precomputation of outmost iterable
- self.visit(node.code.quals[0].iter)
- self.emit('GET_ITER')
- self.emit('CALL_FUNCTION', 1)
-
- def visitGenExprInner(self, node):
- self.set_lineno(node)
- # setup list
-
- stack = []
- for i, for_ in zip(range(len(node.quals)), node.quals):
- start, anchor, end = self.visit(for_)
- cont = None
- for if_ in for_.ifs:
- if cont is None:
- cont = self.newBlock()
- self.visit(if_, cont)
- stack.insert(0, (start, cont, anchor, end))
-
- self.visit(node.expr)
- self.emit('YIELD_VALUE')
- self.emit('POP_TOP')
-
- for start, cont, anchor, end in stack:
- if cont:
- skip_one = self.newBlock()
- self.emit('JUMP_FORWARD', skip_one)
- self.startBlock(cont)
- self.emit('POP_TOP')
- self.nextBlock(skip_one)
- self.emit('JUMP_ABSOLUTE', start)
- self.startBlock(anchor)
- self.emit('POP_BLOCK')
- self.setups.pop()
- self.startBlock(end)
-
- self.emit('LOAD_CONST', None)
-
- def visitGenExprFor(self, node):
- start = self.newBlock()
- anchor = self.newBlock()
- end = self.newBlock()
-
- self.setups.push((LOOP, start))
- self.emit('SETUP_LOOP', end)
-
- if node.is_outmost:
- self.loadName('.0')
- else:
- self.visit(node.iter)
- self.emit('GET_ITER')
-
- self.nextBlock(start)
- self.set_lineno(node, force=True)
- self.emit('FOR_ITER', anchor)
- self.nextBlock()
- self.visit(node.assign)
- return start, anchor, end
-
- def visitGenExprIf(self, node, branch):
- self.set_lineno(node, force=True)
- self.visit(node.test)
- self.emit('JUMP_IF_FALSE', branch)
- self.newBlock()
- self.emit('POP_TOP')
-
- # exception related
-
- def visitAssert(self, node):
- # XXX would be interesting to implement this via a
- # transformation of the AST before this stage
- if __debug__:
- end = self.newBlock()
- self.set_lineno(node)
- # XXX AssertionError appears to be special case -- it is always
- # loaded as a global even if there is a local name. I guess this
- # is a sort of renaming op.
- self.nextBlock()
- self.visit(node.test)
- self.emit('JUMP_IF_TRUE', end)
- self.nextBlock()
- self.emit('POP_TOP')
- self.emit('LOAD_GLOBAL', 'AssertionError')
- if node.fail:
- self.visit(node.fail)
- self.emit('RAISE_VARARGS', 2)
- else:
- self.emit('RAISE_VARARGS', 1)
- self.nextBlock(end)
- self.emit('POP_TOP')
-
- def visitRaise(self, node):
- self.set_lineno(node)
- n = 0
- if node.expr1:
- self.visit(node.expr1)
- n = n + 1
- if node.expr2:
- self.visit(node.expr2)
- n = n + 1
- if node.expr3:
- self.visit(node.expr3)
- n = n + 1
- self.emit('RAISE_VARARGS', n)
-
- def visitTryExcept(self, node):
- body = self.newBlock()
- handlers = self.newBlock()
- end = self.newBlock()
- if node.else_:
- lElse = self.newBlock()
- else:
- lElse = end
- self.set_lineno(node)
- self.emit('SETUP_EXCEPT', handlers)
- self.nextBlock(body)
- self.setups.push((EXCEPT, body))
- self.visit(node.body)
- self.emit('POP_BLOCK')
- self.setups.pop()
- self.emit('JUMP_FORWARD', lElse)
- self.startBlock(handlers)
-
- last = len(node.handlers) - 1
- for i in range(len(node.handlers)):
- expr, target, body = node.handlers[i]
- self.set_lineno(expr)
- if expr:
- self.emit('DUP_TOP')
- self.visit(expr)
- self.emit('COMPARE_OP', 'exception match')
- next = self.newBlock()
- self.emit('JUMP_IF_FALSE', next)
- self.nextBlock()
- self.emit('POP_TOP')
- self.emit('POP_TOP')
- if target:
- self.visit(target)
- else:
- self.emit('POP_TOP')
- self.emit('POP_TOP')
- self.visit(body)
- self.emit('JUMP_FORWARD', end)
- if expr:
- self.nextBlock(next)
- else:
- self.nextBlock()
- if expr: # XXX
- self.emit('POP_TOP')
- self.emit('END_FINALLY')
- if node.else_:
- self.nextBlock(lElse)
- self.visit(node.else_)
- self.nextBlock(end)
-
- def visitTryFinally(self, node):
- body = self.newBlock()
- final = self.newBlock()
- self.set_lineno(node)
- self.emit('SETUP_FINALLY', final)
- self.nextBlock(body)
- self.setups.push((TRY_FINALLY, body))
- self.visit(node.body)
- self.emit('POP_BLOCK')
- self.setups.pop()
- self.emit('LOAD_CONST', None)
- self.nextBlock(final)
- self.setups.push((END_FINALLY, final))
- self.visit(node.final)
- self.emit('END_FINALLY')
- self.setups.pop()
-
- __with_count = 0
-
- def visitWith(self, node):
- body = self.newBlock()
- final = self.newBlock()
- exitvar = "$exit%d" % self.__with_count
- valuevar = "$value%d" % self.__with_count
- self.__with_count += 1
- self.set_lineno(node)
- self.visit(node.expr)
- self.emit('DUP_TOP')
- self.emit('LOAD_ATTR', '__exit__')
- self._implicitNameOp('STORE', exitvar)
- self.emit('LOAD_ATTR', '__enter__')
- self.emit('CALL_FUNCTION', 0)
- if node.vars is None:
- self.emit('POP_TOP')
- else:
- self._implicitNameOp('STORE', valuevar)
- self.emit('SETUP_FINALLY', final)
- self.nextBlock(body)
- self.setups.push((TRY_FINALLY, body))
- if node.vars is not None:
- self._implicitNameOp('LOAD', valuevar)
- self._implicitNameOp('DELETE', valuevar)
- self.visit(node.vars)
- self.visit(node.body)
- self.emit('POP_BLOCK')
- self.setups.pop()
- self.emit('LOAD_CONST', None)
- self.nextBlock(final)
- self.setups.push((END_FINALLY, final))
- self._implicitNameOp('LOAD', exitvar)
- self._implicitNameOp('DELETE', exitvar)
- self.emit('WITH_CLEANUP')
- self.emit('END_FINALLY')
- self.setups.pop()
- self.__with_count -= 1
-
- # misc
-
- def visitDiscard(self, node):
- self.set_lineno(node)
- self.visit(node.expr)
- self.emit('POP_TOP')
-
- def visitConst(self, node):
- self.emit('LOAD_CONST', node.value)
-
- def visitKeyword(self, node):
- self.emit('LOAD_CONST', node.name)
- self.visit(node.expr)
-
- def visitGlobal(self, node):
- # no code to generate
- pass
-
- def visitName(self, node):
- self.set_lineno(node)
- self.loadName(node.name)
-
- def visitPass(self, node):
- self.set_lineno(node)
-
- def visitImport(self, node):
- self.set_lineno(node)
- level = 0 if self.graph.checkFlag(CO_FUTURE_ABSIMPORT) else -1
- for name, alias in node.names:
- if VERSION > 1:
- self.emit('LOAD_CONST', level)
- self.emit('LOAD_CONST', None)
- self.emit('IMPORT_NAME', name)
- mod = name.split(".")[0]
- if alias:
- self._resolveDots(name)
- self.storeName(alias)
- else:
- self.storeName(mod)
-
- def visitFrom(self, node):
- self.set_lineno(node)
- level = node.level
- if level == 0 and not self.graph.checkFlag(CO_FUTURE_ABSIMPORT):
- level = -1
- fromlist = map(lambda (name, alias): name, node.names)
- if VERSION > 1:
- self.emit('LOAD_CONST', level)
- self.emit('LOAD_CONST', tuple(fromlist))
- self.emit('IMPORT_NAME', node.modname)
- for name, alias in node.names:
- if VERSION > 1:
- if name == '*':
- self.namespace = 0
- self.emit('IMPORT_STAR')
- # There can only be one name w/ from ... import *
- assert len(node.names) == 1
- return
- else:
- self.emit('IMPORT_FROM', name)
- self._resolveDots(name)
- self.storeName(alias or name)
- else:
- self.emit('IMPORT_FROM', name)
- self.emit('POP_TOP')
-
- def _resolveDots(self, name):
- elts = name.split(".")
- if len(elts) == 1:
- return
- for elt in elts[1:]:
- self.emit('LOAD_ATTR', elt)
-
- def visitGetattr(self, node):
- self.visit(node.expr)
- self.emit('LOAD_ATTR', self.mangle(node.attrname))
-
- # next five implement assignments
-
- def visitAssign(self, node):
- self.set_lineno(node)
- self.visit(node.expr)
- dups = len(node.nodes) - 1
- for i in range(len(node.nodes)):
- elt = node.nodes[i]
- if i < dups:
- self.emit('DUP_TOP')
- if isinstance(elt, ast.Node):
- self.visit(elt)
-
- def visitAssName(self, node):
- if node.flags == 'OP_ASSIGN':
- self.storeName(node.name)
- elif node.flags == 'OP_DELETE':
- self.set_lineno(node)
- self.delName(node.name)
- else:
- print "oops", node.flags
-
- def visitAssAttr(self, node):
- self.visit(node.expr)
- if node.flags == 'OP_ASSIGN':
- self.emit('STORE_ATTR', self.mangle(node.attrname))
- elif node.flags == 'OP_DELETE':
- self.emit('DELETE_ATTR', self.mangle(node.attrname))
- else:
- print "warning: unexpected flags:", node.flags
- print node
-
- def _visitAssSequence(self, node, op='UNPACK_SEQUENCE'):
- if findOp(node) != 'OP_DELETE':
- self.emit(op, len(node.nodes))
- for child in node.nodes:
- self.visit(child)
-
- if VERSION > 1:
- visitAssTuple = _visitAssSequence
- visitAssList = _visitAssSequence
- else:
- def visitAssTuple(self, node):
- self._visitAssSequence(node, 'UNPACK_TUPLE')
-
- def visitAssList(self, node):
- self._visitAssSequence(node, 'UNPACK_LIST')
-
- # augmented assignment
-
- def visitAugAssign(self, node):
- self.set_lineno(node)
- aug_node = wrap_aug(node.node)
- self.visit(aug_node, "load")
- self.visit(node.expr)
- self.emit(self._augmented_opcode[node.op])
- self.visit(aug_node, "store")
-
- _augmented_opcode = {
- '+=' : 'INPLACE_ADD',
- '-=' : 'INPLACE_SUBTRACT',
- '*=' : 'INPLACE_MULTIPLY',
- '/=' : 'INPLACE_DIVIDE',
- '//=': 'INPLACE_FLOOR_DIVIDE',
- '%=' : 'INPLACE_MODULO',
- '**=': 'INPLACE_POWER',
- '>>=': 'INPLACE_RSHIFT',
- '<<=': 'INPLACE_LSHIFT',
- '&=' : 'INPLACE_AND',
- '^=' : 'INPLACE_XOR',
- '|=' : 'INPLACE_OR',
- }
-
- def visitAugName(self, node, mode):
- if mode == "load":
- self.loadName(node.name)
- elif mode == "store":
- self.storeName(node.name)
-
- def visitAugGetattr(self, node, mode):
- if mode == "load":
- self.visit(node.expr)
- self.emit('DUP_TOP')
- self.emit('LOAD_ATTR', self.mangle(node.attrname))
- elif mode == "store":
- self.emit('ROT_TWO')
- self.emit('STORE_ATTR', self.mangle(node.attrname))
-
- def visitAugSlice(self, node, mode):
- if mode == "load":
- self.visitSlice(node, 1)
- elif mode == "store":
- slice = 0
- if node.lower:
- slice = slice | 1
- if node.upper:
- slice = slice | 2
- if slice == 0:
- self.emit('ROT_TWO')
- elif slice == 3:
- self.emit('ROT_FOUR')
- else:
- self.emit('ROT_THREE')
- self.emit('STORE_SLICE+%d' % slice)
-
- def visitAugSubscript(self, node, mode):
- if mode == "load":
- self.visitSubscript(node, 1)
- elif mode == "store":
- self.emit('ROT_THREE')
- self.emit('STORE_SUBSCR')
-
- def visitExec(self, node):
- self.visit(node.expr)
- if node.locals is None:
- self.emit('LOAD_CONST', None)
- else:
- self.visit(node.locals)
- if node.globals is None:
- self.emit('DUP_TOP')
- else:
- self.visit(node.globals)
- self.emit('EXEC_STMT')
-
- def visitCallFunc(self, node):
- pos = 0
- kw = 0
- self.set_lineno(node)
- self.visit(node.node)
- for arg in node.args:
- self.visit(arg)
- if isinstance(arg, ast.Keyword):
- kw = kw + 1
- else:
- pos = pos + 1
- if node.star_args is not None:
- self.visit(node.star_args)
- if node.dstar_args is not None:
- self.visit(node.dstar_args)
- have_star = node.star_args is not None
- have_dstar = node.dstar_args is not None
- opcode = callfunc_opcode_info[have_star, have_dstar]
- self.emit(opcode, kw << 8 | pos)
-
- def visitPrint(self, node, newline=0):
- self.set_lineno(node)
- if node.dest:
- self.visit(node.dest)
- for child in node.nodes:
- if node.dest:
- self.emit('DUP_TOP')
- self.visit(child)
- if node.dest:
- self.emit('ROT_TWO')
- self.emit('PRINT_ITEM_TO')
- else:
- self.emit('PRINT_ITEM')
- if node.dest and not newline:
- self.emit('POP_TOP')
-
- def visitPrintnl(self, node):
- self.visitPrint(node, newline=1)
- if node.dest:
- self.emit('PRINT_NEWLINE_TO')
- else:
- self.emit('PRINT_NEWLINE')
-
- def visitReturn(self, node):
- self.set_lineno(node)
- self.visit(node.value)
- self.emit('RETURN_VALUE')
-
- def visitYield(self, node):
- self.set_lineno(node)
- self.visit(node.value)
- self.emit('YIELD_VALUE')
-
- # slice and subscript stuff
-
- def visitSlice(self, node, aug_flag=None):
- # aug_flag is used by visitAugSlice
- self.visit(node.expr)
- slice = 0
- if node.lower:
- self.visit(node.lower)
- slice = slice | 1
- if node.upper:
- self.visit(node.upper)
- slice = slice | 2
- if aug_flag:
- if slice == 0:
- self.emit('DUP_TOP')
- elif slice == 3:
- self.emit('DUP_TOPX', 3)
- else:
- self.emit('DUP_TOPX', 2)
- if node.flags == 'OP_APPLY':
- self.emit('SLICE+%d' % slice)
- elif node.flags == 'OP_ASSIGN':
- self.emit('STORE_SLICE+%d' % slice)
- elif node.flags == 'OP_DELETE':
- self.emit('DELETE_SLICE+%d' % slice)
- else:
- print "weird slice", node.flags
- raise
-
- def visitSubscript(self, node, aug_flag=None):
- self.visit(node.expr)
- for sub in node.subs:
- self.visit(sub)
- if len(node.subs) > 1:
- self.emit('BUILD_TUPLE', len(node.subs))
- if aug_flag:
- self.emit('DUP_TOPX', 2)
- if node.flags == 'OP_APPLY':
- self.emit('BINARY_SUBSCR')
- elif node.flags == 'OP_ASSIGN':
- self.emit('STORE_SUBSCR')
- elif node.flags == 'OP_DELETE':
- self.emit('DELETE_SUBSCR')
-
- # binary ops
-
- def binaryOp(self, node, op):
- self.visit(node.left)
- self.visit(node.right)
- self.emit(op)
-
- def visitAdd(self, node):
- return self.binaryOp(node, 'BINARY_ADD')
-
- def visitSub(self, node):
- return self.binaryOp(node, 'BINARY_SUBTRACT')
-
- def visitMul(self, node):
- return self.binaryOp(node, 'BINARY_MULTIPLY')
-
- def visitDiv(self, node):
- return self.binaryOp(node, self._div_op)
-
- def visitFloorDiv(self, node):
- return self.binaryOp(node, 'BINARY_FLOOR_DIVIDE')
-
- def visitMod(self, node):
- return self.binaryOp(node, 'BINARY_MODULO')
-
- def visitPower(self, node):
- return self.binaryOp(node, 'BINARY_POWER')
-
- def visitLeftShift(self, node):
- return self.binaryOp(node, 'BINARY_LSHIFT')
-
- def visitRightShift(self, node):
- return self.binaryOp(node, 'BINARY_RSHIFT')
-
- # unary ops
-
- def unaryOp(self, node, op):
- self.visit(node.expr)
- self.emit(op)
-
- def visitInvert(self, node):
- return self.unaryOp(node, 'UNARY_INVERT')
-
- def visitUnarySub(self, node):
- return self.unaryOp(node, 'UNARY_NEGATIVE')
-
- def visitUnaryAdd(self, node):
- return self.unaryOp(node, 'UNARY_POSITIVE')
-
- def visitUnaryInvert(self, node):
- return self.unaryOp(node, 'UNARY_INVERT')
-
- def visitNot(self, node):
- return self.unaryOp(node, 'UNARY_NOT')
-
- def visitBackquote(self, node):
- return self.unaryOp(node, 'UNARY_CONVERT')
-
- # bit ops
-
- def bitOp(self, nodes, op):
- self.visit(nodes[0])
- for node in nodes[1:]:
- self.visit(node)
- self.emit(op)
-
- def visitBitand(self, node):
- return self.bitOp(node.nodes, 'BINARY_AND')
-
- def visitBitor(self, node):
- return self.bitOp(node.nodes, 'BINARY_OR')
-
- def visitBitxor(self, node):
- return self.bitOp(node.nodes, 'BINARY_XOR')
-
- # object constructors
-
- def visitEllipsis(self, node):
- self.emit('LOAD_CONST', Ellipsis)
-
- def visitTuple(self, node):
- self.set_lineno(node)
- for elt in node.nodes:
- self.visit(elt)
- self.emit('BUILD_TUPLE', len(node.nodes))
-
- def visitList(self, node):
- self.set_lineno(node)
- for elt in node.nodes:
- self.visit(elt)
- self.emit('BUILD_LIST', len(node.nodes))
-
- def visitSliceobj(self, node):
- for child in node.nodes:
- self.visit(child)
- self.emit('BUILD_SLICE', len(node.nodes))
-
- def visitDict(self, node):
- self.set_lineno(node)
- self.emit('BUILD_MAP', 0)
- for k, v in node.items:
- self.emit('DUP_TOP')
- self.visit(k)
- self.visit(v)
- self.emit('ROT_THREE')
- self.emit('STORE_SUBSCR')
-
-class NestedScopeMixin:
- """Defines initClass() for nested scoping (Python 2.2-compatible)"""
- def initClass(self):
- self.__class__.NameFinder = LocalNameFinder
- self.__class__.FunctionGen = FunctionCodeGenerator
- self.__class__.ClassGen = ClassCodeGenerator
-
-class ModuleCodeGenerator(NestedScopeMixin, CodeGenerator):
- __super_init = CodeGenerator.__init__
-
- scopes = None
-
- def __init__(self, tree):
- self.graph = pyassem.PyFlowGraph("<module>", tree.filename)
- self.futures = future.find_futures(tree)
- self.__super_init()
- walk(tree, self)
-
- def get_module(self):
- return self
-
-class ExpressionCodeGenerator(NestedScopeMixin, CodeGenerator):
- __super_init = CodeGenerator.__init__
-
- scopes = None
- futures = ()
-
- def __init__(self, tree):
- self.graph = pyassem.PyFlowGraph("<expression>", tree.filename)
- self.__super_init()
- walk(tree, self)
-
- def get_module(self):
- return self
-
-class InteractiveCodeGenerator(NestedScopeMixin, CodeGenerator):
-
- __super_init = CodeGenerator.__init__
-
- scopes = None
- futures = ()
-
- def __init__(self, tree):
- self.graph = pyassem.PyFlowGraph("<interactive>", tree.filename)
- self.__super_init()
- self.set_lineno(tree)
- walk(tree, self)
- self.emit('RETURN_VALUE')
-
- def get_module(self):
- return self
-
- def visitDiscard(self, node):
- # XXX Discard means it's an expression. Perhaps this is a bad
- # name.
- self.visit(node.expr)
- self.emit('PRINT_EXPR')
-
-class AbstractFunctionCode:
- optimized = 1
- lambdaCount = 0
-
- def __init__(self, func, scopes, isLambda, class_name, mod):
- self.class_name = class_name
- self.module = mod
- if isLambda:
- klass = FunctionCodeGenerator
- name = "<lambda.%d>" % klass.lambdaCount
- klass.lambdaCount = klass.lambdaCount + 1
- else:
- name = func.name
-
- args, hasTupleArg = generateArgList(func.argnames)
- self.graph = pyassem.PyFlowGraph(name, func.filename, args,
- optimized=1)
- self.isLambda = isLambda
- self.super_init()
-
- if not isLambda and func.doc:
- self.setDocstring(func.doc)
-
- lnf = walk(func.code, self.NameFinder(args), verbose=0)
- self.locals.push(lnf.getLocals())
- if func.varargs:
- self.graph.setFlag(CO_VARARGS)
- if func.kwargs:
- self.graph.setFlag(CO_VARKEYWORDS)
- self.set_lineno(func)
- if hasTupleArg:
- self.generateArgUnpack(func.argnames)
-
- def get_module(self):
- return self.module
-
- def finish(self):
- self.graph.startExitBlock()
- if not self.isLambda:
- self.emit('LOAD_CONST', None)
- self.emit('RETURN_VALUE')
-
- def generateArgUnpack(self, args):
- for i in range(len(args)):
- arg = args[i]
- if isinstance(arg, tuple):
- self.emit('LOAD_FAST', '.%d' % (i * 2))
- self.unpackSequence(arg)
-
- def unpackSequence(self, tup):
- if VERSION > 1:
- self.emit('UNPACK_SEQUENCE', len(tup))
- else:
- self.emit('UNPACK_TUPLE', len(tup))
- for elt in tup:
- if isinstance(elt, tuple):
- self.unpackSequence(elt)
- else:
- self._nameOp('STORE', elt)
-
- unpackTuple = unpackSequence
-
-class FunctionCodeGenerator(NestedScopeMixin, AbstractFunctionCode,
- CodeGenerator):
- super_init = CodeGenerator.__init__ # call be other init
- scopes = None
-
- __super_init = AbstractFunctionCode.__init__
-
- def __init__(self, func, scopes, isLambda, class_name, mod):
- self.scopes = scopes
- self.scope = scopes[func]
- self.__super_init(func, scopes, isLambda, class_name, mod)
- self.graph.setFreeVars(self.scope.get_free_vars())
- self.graph.setCellVars(self.scope.get_cell_vars())
- if self.scope.generator is not None:
- self.graph.setFlag(CO_GENERATOR)
-
-class GenExprCodeGenerator(NestedScopeMixin, AbstractFunctionCode,
- CodeGenerator):
- super_init = CodeGenerator.__init__ # call be other init
- scopes = None
-
- __super_init = AbstractFunctionCode.__init__
-
- def __init__(self, gexp, scopes, class_name, mod):
- self.scopes = scopes
- self.scope = scopes[gexp]
- self.__super_init(gexp, scopes, 1, class_name, mod)
- self.graph.setFreeVars(self.scope.get_free_vars())
- self.graph.setCellVars(self.scope.get_cell_vars())
- self.graph.setFlag(CO_GENERATOR)
-
-class AbstractClassCode:
-
- def __init__(self, klass, scopes, module):
- self.class_name = klass.name
- self.module = module
- self.graph = pyassem.PyFlowGraph(klass.name, klass.filename,
- optimized=0, klass=1)
- self.super_init()
- lnf = walk(klass.code, self.NameFinder(), verbose=0)
- self.locals.push(lnf.getLocals())
- self.graph.setFlag(CO_NEWLOCALS)
- if klass.doc:
- self.setDocstring(klass.doc)
-
- def get_module(self):
- return self.module
-
- def finish(self):
- self.graph.startExitBlock()
- self.emit('LOAD_LOCALS')
- self.emit('RETURN_VALUE')
-
-class ClassCodeGenerator(NestedScopeMixin, AbstractClassCode, CodeGenerator):
- super_init = CodeGenerator.__init__
- scopes = None
-
- __super_init = AbstractClassCode.__init__
-
- def __init__(self, klass, scopes, module):
- self.scopes = scopes
- self.scope = scopes[klass]
- self.__super_init(klass, scopes, module)
- self.graph.setFreeVars(self.scope.get_free_vars())
- self.graph.setCellVars(self.scope.get_cell_vars())
- self.set_lineno(klass)
- self.emit("LOAD_GLOBAL", "__name__")
- self.storeName("__module__")
- if klass.doc:
- self.emit("LOAD_CONST", klass.doc)
- self.storeName('__doc__')
-
-def generateArgList(arglist):
- """Generate an arg list marking TupleArgs"""
- args = []
- extra = []
- count = 0
- for i in range(len(arglist)):
- elt = arglist[i]
- if isinstance(elt, str):
- args.append(elt)
- elif isinstance(elt, tuple):
- args.append(TupleArg(i * 2, elt))
- extra.extend(misc.flatten(elt))
- count = count + 1
- else:
- raise ValueError, "unexpect argument type:", elt
- return args + extra, count
-
-def findOp(node):
- """Find the op (DELETE, LOAD, STORE) in an AssTuple tree"""
- v = OpFinder()
- walk(node, v, verbose=0)
- return v.op
-
-class OpFinder:
- def __init__(self):
- self.op = None
- def visitAssName(self, node):
- if self.op is None:
- self.op = node.flags
- elif self.op != node.flags:
- raise ValueError, "mixed ops in stmt"
- visitAssAttr = visitAssName
- visitSubscript = visitAssName
-
-class Delegator:
- """Base class to support delegation for augmented assignment nodes
-
- To generator code for augmented assignments, we use the following
- wrapper classes. In visitAugAssign, the left-hand expression node
- is visited twice. The first time the visit uses the normal method
- for that node . The second time the visit uses a different method
- that generates the appropriate code to perform the assignment.
- These delegator classes wrap the original AST nodes in order to
- support the variant visit methods.
- """
- def __init__(self, obj):
- self.obj = obj
-
- def __getattr__(self, attr):
- return getattr(self.obj, attr)
-
-class AugGetattr(Delegator):
- pass
-
-class AugName(Delegator):
- pass
-
-class AugSlice(Delegator):
- pass
-
-class AugSubscript(Delegator):
- pass
-
-wrapper = {
- ast.Getattr: AugGetattr,
- ast.Name: AugName,
- ast.Slice: AugSlice,
- ast.Subscript: AugSubscript,
- }
-
-def wrap_aug(node):
- return wrapper[node.__class__](node)
-
-if __name__ == "__main__":
- for file in sys.argv[1:]:
- compileFile(file)
diff --git a/sys/lib/python/compiler/symbols.py b/sys/lib/python/compiler/symbols.py
deleted file mode 100644
index 8f6298060..000000000
--- a/sys/lib/python/compiler/symbols.py
+++ /dev/null
@@ -1,463 +0,0 @@
-"""Module symbol-table generator"""
-
-from compiler import ast
-from compiler.consts import SC_LOCAL, SC_GLOBAL, SC_FREE, SC_CELL, SC_UNKNOWN
-from compiler.misc import mangle
-import types
-
-
-import sys
-
-MANGLE_LEN = 256
-
-class Scope:
- # XXX how much information do I need about each name?
- def __init__(self, name, module, klass=None):
- self.name = name
- self.module = module
- self.defs = {}
- self.uses = {}
- self.globals = {}
- self.params = {}
- self.frees = {}
- self.cells = {}
- self.children = []
- # nested is true if the class could contain free variables,
- # i.e. if it is nested within another function.
- self.nested = None
- self.generator = None
- self.klass = None
- if klass is not None:
- for i in range(len(klass)):
- if klass[i] != '_':
- self.klass = klass[i:]
- break
-
- def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.name)
-
- def mangle(self, name):
- if self.klass is None:
- return name
- return mangle(name, self.klass)
-
- def add_def(self, name):
- self.defs[self.mangle(name)] = 1
-
- def add_use(self, name):
- self.uses[self.mangle(name)] = 1
-
- def add_global(self, name):
- name = self.mangle(name)
- if self.uses.has_key(name) or self.defs.has_key(name):
- pass # XXX warn about global following def/use
- if self.params.has_key(name):
- raise SyntaxError, "%s in %s is global and parameter" % \
- (name, self.name)
- self.globals[name] = 1
- self.module.add_def(name)
-
- def add_param(self, name):
- name = self.mangle(name)
- self.defs[name] = 1
- self.params[name] = 1
-
- def get_names(self):
- d = {}
- d.update(self.defs)
- d.update(self.uses)
- d.update(self.globals)
- return d.keys()
-
- def add_child(self, child):
- self.children.append(child)
-
- def get_children(self):
- return self.children
-
- def DEBUG(self):
- print >> sys.stderr, self.name, self.nested and "nested" or ""
- print >> sys.stderr, "\tglobals: ", self.globals
- print >> sys.stderr, "\tcells: ", self.cells
- print >> sys.stderr, "\tdefs: ", self.defs
- print >> sys.stderr, "\tuses: ", self.uses
- print >> sys.stderr, "\tfrees:", self.frees
-
- def check_name(self, name):
- """Return scope of name.
-
- The scope of a name could be LOCAL, GLOBAL, FREE, or CELL.
- """
- if self.globals.has_key(name):
- return SC_GLOBAL
- if self.cells.has_key(name):
- return SC_CELL
- if self.defs.has_key(name):
- return SC_LOCAL
- if self.nested and (self.frees.has_key(name) or
- self.uses.has_key(name)):
- return SC_FREE
- if self.nested:
- return SC_UNKNOWN
- else:
- return SC_GLOBAL
-
- def get_free_vars(self):
- if not self.nested:
- return ()
- free = {}
- free.update(self.frees)
- for name in self.uses.keys():
- if not (self.defs.has_key(name) or
- self.globals.has_key(name)):
- free[name] = 1
- return free.keys()
-
- def handle_children(self):
- for child in self.children:
- frees = child.get_free_vars()
- globals = self.add_frees(frees)
- for name in globals:
- child.force_global(name)
-
- def force_global(self, name):
- """Force name to be global in scope.
-
- Some child of the current node had a free reference to name.
- When the child was processed, it was labelled a free
- variable. Now that all its enclosing scope have been
- processed, the name is known to be a global or builtin. So
- walk back down the child chain and set the name to be global
- rather than free.
-
- Be careful to stop if a child does not think the name is
- free.
- """
- self.globals[name] = 1
- if self.frees.has_key(name):
- del self.frees[name]
- for child in self.children:
- if child.check_name(name) == SC_FREE:
- child.force_global(name)
-
- def add_frees(self, names):
- """Process list of free vars from nested scope.
-
- Returns a list of names that are either 1) declared global in the
- parent or 2) undefined in a top-level parent. In either case,
- the nested scope should treat them as globals.
- """
- child_globals = []
- for name in names:
- sc = self.check_name(name)
- if self.nested:
- if sc == SC_UNKNOWN or sc == SC_FREE \
- or isinstance(self, ClassScope):
- self.frees[name] = 1
- elif sc == SC_GLOBAL:
- child_globals.append(name)
- elif isinstance(self, FunctionScope) and sc == SC_LOCAL:
- self.cells[name] = 1
- elif sc != SC_CELL:
- child_globals.append(name)
- else:
- if sc == SC_LOCAL:
- self.cells[name] = 1
- elif sc != SC_CELL:
- child_globals.append(name)
- return child_globals
-
- def get_cell_vars(self):
- return self.cells.keys()
-
-class ModuleScope(Scope):
- __super_init = Scope.__init__
-
- def __init__(self):
- self.__super_init("global", self)
-
-class FunctionScope(Scope):
- pass
-
-class GenExprScope(Scope):
- __super_init = Scope.__init__
-
- __counter = 1
-
- def __init__(self, module, klass=None):
- i = self.__counter
- self.__counter += 1
- self.__super_init("generator expression<%d>"%i, module, klass)
- self.add_param('.0')
-
- def get_names(self):
- keys = Scope.get_names(self)
- return keys
-
-class LambdaScope(FunctionScope):
- __super_init = Scope.__init__
-
- __counter = 1
-
- def __init__(self, module, klass=None):
- i = self.__counter
- self.__counter += 1
- self.__super_init("lambda.%d" % i, module, klass)
-
-class ClassScope(Scope):
- __super_init = Scope.__init__
-
- def __init__(self, name, module):
- self.__super_init(name, module, name)
-
-class SymbolVisitor:
- def __init__(self):
- self.scopes = {}
- self.klass = None
-
- # node that define new scopes
-
- def visitModule(self, node):
- scope = self.module = self.scopes[node] = ModuleScope()
- self.visit(node.node, scope)
-
- visitExpression = visitModule
-
- def visitFunction(self, node, parent):
- if node.decorators:
- self.visit(node.decorators, parent)
- parent.add_def(node.name)
- for n in node.defaults:
- self.visit(n, parent)
- scope = FunctionScope(node.name, self.module, self.klass)
- if parent.nested or isinstance(parent, FunctionScope):
- scope.nested = 1
- self.scopes[node] = scope
- self._do_args(scope, node.argnames)
- self.visit(node.code, scope)
- self.handle_free_vars(scope, parent)
-
- def visitGenExpr(self, node, parent):
- scope = GenExprScope(self.module, self.klass);
- if parent.nested or isinstance(parent, FunctionScope) \
- or isinstance(parent, GenExprScope):
- scope.nested = 1
-
- self.scopes[node] = scope
- self.visit(node.code, scope)
-
- self.handle_free_vars(scope, parent)
-
- def visitGenExprInner(self, node, scope):
- for genfor in node.quals:
- self.visit(genfor, scope)
-
- self.visit(node.expr, scope)
-
- def visitGenExprFor(self, node, scope):
- self.visit(node.assign, scope, 1)
- self.visit(node.iter, scope)
- for if_ in node.ifs:
- self.visit(if_, scope)
-
- def visitGenExprIf(self, node, scope):
- self.visit(node.test, scope)
-
- def visitLambda(self, node, parent, assign=0):
- # Lambda is an expression, so it could appear in an expression
- # context where assign is passed. The transformer should catch
- # any code that has a lambda on the left-hand side.
- assert not assign
-
- for n in node.defaults:
- self.visit(n, parent)
- scope = LambdaScope(self.module, self.klass)
- if parent.nested or isinstance(parent, FunctionScope):
- scope.nested = 1
- self.scopes[node] = scope
- self._do_args(scope, node.argnames)
- self.visit(node.code, scope)
- self.handle_free_vars(scope, parent)
-
- def _do_args(self, scope, args):
- for name in args:
- if type(name) == types.TupleType:
- self._do_args(scope, name)
- else:
- scope.add_param(name)
-
- def handle_free_vars(self, scope, parent):
- parent.add_child(scope)
- scope.handle_children()
-
- def visitClass(self, node, parent):
- parent.add_def(node.name)
- for n in node.bases:
- self.visit(n, parent)
- scope = ClassScope(node.name, self.module)
- if parent.nested or isinstance(parent, FunctionScope):
- scope.nested = 1
- if node.doc is not None:
- scope.add_def('__doc__')
- scope.add_def('__module__')
- self.scopes[node] = scope
- prev = self.klass
- self.klass = node.name
- self.visit(node.code, scope)
- self.klass = prev
- self.handle_free_vars(scope, parent)
-
- # name can be a def or a use
-
- # XXX a few calls and nodes expect a third "assign" arg that is
- # true if the name is being used as an assignment. only
- # expressions contained within statements may have the assign arg.
-
- def visitName(self, node, scope, assign=0):
- if assign:
- scope.add_def(node.name)
- else:
- scope.add_use(node.name)
-
- # operations that bind new names
-
- def visitFor(self, node, scope):
- self.visit(node.assign, scope, 1)
- self.visit(node.list, scope)
- self.visit(node.body, scope)
- if node.else_:
- self.visit(node.else_, scope)
-
- def visitFrom(self, node, scope):
- for name, asname in node.names:
- if name == "*":
- continue
- scope.add_def(asname or name)
-
- def visitImport(self, node, scope):
- for name, asname in node.names:
- i = name.find(".")
- if i > -1:
- name = name[:i]
- scope.add_def(asname or name)
-
- def visitGlobal(self, node, scope):
- for name in node.names:
- scope.add_global(name)
-
- def visitAssign(self, node, scope):
- """Propagate assignment flag down to child nodes.
-
- The Assign node doesn't itself contains the variables being
- assigned to. Instead, the children in node.nodes are visited
- with the assign flag set to true. When the names occur in
- those nodes, they are marked as defs.
-
- Some names that occur in an assignment target are not bound by
- the assignment, e.g. a name occurring inside a slice. The
- visitor handles these nodes specially; they do not propagate
- the assign flag to their children.
- """
- for n in node.nodes:
- self.visit(n, scope, 1)
- self.visit(node.expr, scope)
-
- def visitAssName(self, node, scope, assign=1):
- scope.add_def(node.name)
-
- def visitAssAttr(self, node, scope, assign=0):
- self.visit(node.expr, scope, 0)
-
- def visitSubscript(self, node, scope, assign=0):
- self.visit(node.expr, scope, 0)
- for n in node.subs:
- self.visit(n, scope, 0)
-
- def visitSlice(self, node, scope, assign=0):
- self.visit(node.expr, scope, 0)
- if node.lower:
- self.visit(node.lower, scope, 0)
- if node.upper:
- self.visit(node.upper, scope, 0)
-
- def visitAugAssign(self, node, scope):
- # If the LHS is a name, then this counts as assignment.
- # Otherwise, it's just use.
- self.visit(node.node, scope)
- if isinstance(node.node, ast.Name):
- self.visit(node.node, scope, 1) # XXX worry about this
- self.visit(node.expr, scope)
-
- # prune if statements if tests are false
-
- _const_types = types.StringType, types.IntType, types.FloatType
-
- def visitIf(self, node, scope):
- for test, body in node.tests:
- if isinstance(test, ast.Const):
- if type(test.value) in self._const_types:
- if not test.value:
- continue
- self.visit(test, scope)
- self.visit(body, scope)
- if node.else_:
- self.visit(node.else_, scope)
-
- # a yield statement signals a generator
-
- def visitYield(self, node, scope):
- scope.generator = 1
- self.visit(node.value, scope)
-
-def list_eq(l1, l2):
- return sorted(l1) == sorted(l2)
-
-if __name__ == "__main__":
- import sys
- from compiler import parseFile, walk
- import symtable
-
- def get_names(syms):
- return [s for s in [s.get_name() for s in syms.get_symbols()]
- if not (s.startswith('_[') or s.startswith('.'))]
-
- for file in sys.argv[1:]:
- print file
- f = open(file)
- buf = f.read()
- f.close()
- syms = symtable.symtable(buf, file, "exec")
- mod_names = get_names(syms)
- tree = parseFile(file)
- s = SymbolVisitor()
- walk(tree, s)
-
- # compare module-level symbols
- names2 = s.scopes[tree].get_names()
-
- if not list_eq(mod_names, names2):
- print
- print "oops", file
- print sorted(mod_names)
- print sorted(names2)
- sys.exit(-1)
-
- d = {}
- d.update(s.scopes)
- del d[tree]
- scopes = d.values()
- del d
-
- for s in syms.get_symbols():
- if s.is_namespace():
- l = [sc for sc in scopes
- if sc.name == s.get_name()]
- if len(l) > 1:
- print "skipping", s.get_name()
- else:
- if not list_eq(get_names(s.get_namespace()),
- l[0].get_names()):
- print s.get_name()
- print sorted(get_names(s.get_namespace()))
- print sorted(l[0].get_names())
- sys.exit(-1)
diff --git a/sys/lib/python/compiler/syntax.py b/sys/lib/python/compiler/syntax.py
deleted file mode 100644
index a45d9c2cf..000000000
--- a/sys/lib/python/compiler/syntax.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""Check for errs in the AST.
-
-The Python parser does not catch all syntax errors. Others, like
-assignments with invalid targets, are caught in the code generation
-phase.
-
-The compiler package catches some errors in the transformer module.
-But it seems clearer to write checkers that use the AST to detect
-errors.
-"""
-
-from compiler import ast, walk
-
-def check(tree, multi=None):
- v = SyntaxErrorChecker(multi)
- walk(tree, v)
- return v.errors
-
-class SyntaxErrorChecker:
- """A visitor to find syntax errors in the AST."""
-
- def __init__(self, multi=None):
- """Create new visitor object.
-
- If optional argument multi is not None, then print messages
- for each error rather than raising a SyntaxError for the
- first.
- """
- self.multi = multi
- self.errors = 0
-
- def error(self, node, msg):
- self.errors = self.errors + 1
- if self.multi is not None:
- print "%s:%s: %s" % (node.filename, node.lineno, msg)
- else:
- raise SyntaxError, "%s (%s:%s)" % (msg, node.filename, node.lineno)
-
- def visitAssign(self, node):
- # the transformer module handles many of these
- pass
-## for target in node.nodes:
-## if isinstance(target, ast.AssList):
-## if target.lineno is None:
-## target.lineno = node.lineno
-## self.error(target, "can't assign to list comprehension")
diff --git a/sys/lib/python/compiler/transformer.py b/sys/lib/python/compiler/transformer.py
deleted file mode 100644
index ac23ad18a..000000000
--- a/sys/lib/python/compiler/transformer.py
+++ /dev/null
@@ -1,1490 +0,0 @@
-"""Parse tree transformation module.
-
-Transforms Python source code into an abstract syntax tree (AST)
-defined in the ast module.
-
-The simplest ways to invoke this module are via parse and parseFile.
-parse(buf) -> AST
-parseFile(path) -> AST
-"""
-
-# Original version written by Greg Stein (gstein@lyra.org)
-# and Bill Tutt (rassilon@lima.mudlib.org)
-# February 1997.
-#
-# Modifications and improvements for Python 2.0 by Jeremy Hylton and
-# Mark Hammond
-#
-# Some fixes to try to have correct line number on almost all nodes
-# (except Module, Discard and Stmt) added by Sylvain Thenault
-#
-# Portions of this file are:
-# Copyright (C) 1997-1998 Greg Stein. All Rights Reserved.
-#
-# This module is provided under a BSD-ish license. See
-# http://www.opensource.org/licenses/bsd-license.html
-# and replace OWNER, ORGANIZATION, and YEAR as appropriate.
-
-from compiler.ast import *
-import parser
-import symbol
-import token
-import sys
-
-class WalkerError(StandardError):
- pass
-
-from compiler.consts import CO_VARARGS, CO_VARKEYWORDS
-from compiler.consts import OP_ASSIGN, OP_DELETE, OP_APPLY
-
-def parseFile(path):
- f = open(path, "U")
- # XXX The parser API tolerates files without a trailing newline,
- # but not strings without a trailing newline. Always add an extra
- # newline to the file contents, since we're going through the string
- # version of the API.
- src = f.read() + "\n"
- f.close()
- return parse(src)
-
-def parse(buf, mode="exec"):
- if mode == "exec" or mode == "single":
- return Transformer().parsesuite(buf)
- elif mode == "eval":
- return Transformer().parseexpr(buf)
- else:
- raise ValueError("compile() arg 3 must be"
- " 'exec' or 'eval' or 'single'")
-
-def asList(nodes):
- l = []
- for item in nodes:
- if hasattr(item, "asList"):
- l.append(item.asList())
- else:
- if type(item) is type( (None, None) ):
- l.append(tuple(asList(item)))
- elif type(item) is type( [] ):
- l.append(asList(item))
- else:
- l.append(item)
- return l
-
-def extractLineNo(ast):
- if not isinstance(ast[1], tuple):
- # get a terminal node
- return ast[2]
- for child in ast[1:]:
- if isinstance(child, tuple):
- lineno = extractLineNo(child)
- if lineno is not None:
- return lineno
-
-def Node(*args):
- kind = args[0]
- if nodes.has_key(kind):
- try:
- return nodes[kind](*args[1:])
- except TypeError:
- print nodes[kind], len(args), args
- raise
- else:
- raise WalkerError, "Can't find appropriate Node type: %s" % str(args)
- #return apply(ast.Node, args)
-
-class Transformer:
- """Utility object for transforming Python parse trees.
-
- Exposes the following methods:
- tree = transform(ast_tree)
- tree = parsesuite(text)
- tree = parseexpr(text)
- tree = parsefile(fileob | filename)
- """
-
- def __init__(self):
- self._dispatch = {}
- for value, name in symbol.sym_name.items():
- if hasattr(self, name):
- self._dispatch[value] = getattr(self, name)
- self._dispatch[token.NEWLINE] = self.com_NEWLINE
- self._atom_dispatch = {token.LPAR: self.atom_lpar,
- token.LSQB: self.atom_lsqb,
- token.LBRACE: self.atom_lbrace,
- token.BACKQUOTE: self.atom_backquote,
- token.NUMBER: self.atom_number,
- token.STRING: self.atom_string,
- token.NAME: self.atom_name,
- }
- self.encoding = None
-
- def transform(self, tree):
- """Transform an AST into a modified parse tree."""
- if not (isinstance(tree, tuple) or isinstance(tree, list)):
- tree = parser.ast2tuple(tree, line_info=1)
- return self.compile_node(tree)
-
- def parsesuite(self, text):
- """Return a modified parse tree for the given suite text."""
- return self.transform(parser.suite(text))
-
- def parseexpr(self, text):
- """Return a modified parse tree for the given expression text."""
- return self.transform(parser.expr(text))
-
- def parsefile(self, file):
- """Return a modified parse tree for the contents of the given file."""
- if type(file) == type(''):
- file = open(file)
- return self.parsesuite(file.read())
-
- # --------------------------------------------------------------
- #
- # PRIVATE METHODS
- #
-
- def compile_node(self, node):
- ### emit a line-number node?
- n = node[0]
-
- if n == symbol.encoding_decl:
- self.encoding = node[2]
- node = node[1]
- n = node[0]
-
- if n == symbol.single_input:
- return self.single_input(node[1:])
- if n == symbol.file_input:
- return self.file_input(node[1:])
- if n == symbol.eval_input:
- return self.eval_input(node[1:])
- if n == symbol.lambdef:
- return self.lambdef(node[1:])
- if n == symbol.funcdef:
- return self.funcdef(node[1:])
- if n == symbol.classdef:
- return self.classdef(node[1:])
-
- raise WalkerError, ('unexpected node type', n)
-
- def single_input(self, node):
- ### do we want to do anything about being "interactive" ?
-
- # NEWLINE | simple_stmt | compound_stmt NEWLINE
- n = node[0][0]
- if n != token.NEWLINE:
- return self.com_stmt(node[0])
-
- return Pass()
-
- def file_input(self, nodelist):
- doc = self.get_docstring(nodelist, symbol.file_input)
- if doc is not None:
- i = 1
- else:
- i = 0
- stmts = []
- for node in nodelist[i:]:
- if node[0] != token.ENDMARKER and node[0] != token.NEWLINE:
- self.com_append_stmt(stmts, node)
- return Module(doc, Stmt(stmts))
-
- def eval_input(self, nodelist):
- # from the built-in function input()
- ### is this sufficient?
- return Expression(self.com_node(nodelist[0]))
-
- def decorator_name(self, nodelist):
- listlen = len(nodelist)
- assert listlen >= 1 and listlen % 2 == 1
-
- item = self.atom_name(nodelist)
- i = 1
- while i < listlen:
- assert nodelist[i][0] == token.DOT
- assert nodelist[i + 1][0] == token.NAME
- item = Getattr(item, nodelist[i + 1][1])
- i += 2
-
- return item
-
- def decorator(self, nodelist):
- # '@' dotted_name [ '(' [arglist] ')' ]
- assert len(nodelist) in (3, 5, 6)
- assert nodelist[0][0] == token.AT
- assert nodelist[-1][0] == token.NEWLINE
-
- assert nodelist[1][0] == symbol.dotted_name
- funcname = self.decorator_name(nodelist[1][1:])
-
- if len(nodelist) > 3:
- assert nodelist[2][0] == token.LPAR
- expr = self.com_call_function(funcname, nodelist[3])
- else:
- expr = funcname
-
- return expr
-
- def decorators(self, nodelist):
- # decorators: decorator ([NEWLINE] decorator)* NEWLINE
- items = []
- for dec_nodelist in nodelist:
- assert dec_nodelist[0] == symbol.decorator
- items.append(self.decorator(dec_nodelist[1:]))
- return Decorators(items)
-
- def funcdef(self, nodelist):
- # -6 -5 -4 -3 -2 -1
- # funcdef: [decorators] 'def' NAME parameters ':' suite
- # parameters: '(' [varargslist] ')'
-
- if len(nodelist) == 6:
- assert nodelist[0][0] == symbol.decorators
- decorators = self.decorators(nodelist[0][1:])
- else:
- assert len(nodelist) == 5
- decorators = None
-
- lineno = nodelist[-4][2]
- name = nodelist[-4][1]
- args = nodelist[-3][2]
-
- if args[0] == symbol.varargslist:
- names, defaults, flags = self.com_arglist(args[1:])
- else:
- names = defaults = ()
- flags = 0
- doc = self.get_docstring(nodelist[-1])
-
- # code for function
- code = self.com_node(nodelist[-1])
-
- if doc is not None:
- assert isinstance(code, Stmt)
- assert isinstance(code.nodes[0], Discard)
- del code.nodes[0]
- return Function(decorators, name, names, defaults, flags, doc, code,
- lineno=lineno)
-
- def lambdef(self, nodelist):
- # lambdef: 'lambda' [varargslist] ':' test
- if nodelist[2][0] == symbol.varargslist:
- names, defaults, flags = self.com_arglist(nodelist[2][1:])
- else:
- names = defaults = ()
- flags = 0
-
- # code for lambda
- code = self.com_node(nodelist[-1])
-
- return Lambda(names, defaults, flags, code, lineno=nodelist[1][2])
- old_lambdef = lambdef
-
- def classdef(self, nodelist):
- # classdef: 'class' NAME ['(' [testlist] ')'] ':' suite
-
- name = nodelist[1][1]
- doc = self.get_docstring(nodelist[-1])
- if nodelist[2][0] == token.COLON:
- bases = []
- elif nodelist[3][0] == token.RPAR:
- bases = []
- else:
- bases = self.com_bases(nodelist[3])
-
- # code for class
- code = self.com_node(nodelist[-1])
-
- if doc is not None:
- assert isinstance(code, Stmt)
- assert isinstance(code.nodes[0], Discard)
- del code.nodes[0]
-
- return Class(name, bases, doc, code, lineno=nodelist[1][2])
-
- def stmt(self, nodelist):
- return self.com_stmt(nodelist[0])
-
- small_stmt = stmt
- flow_stmt = stmt
- compound_stmt = stmt
-
- def simple_stmt(self, nodelist):
- # small_stmt (';' small_stmt)* [';'] NEWLINE
- stmts = []
- for i in range(0, len(nodelist), 2):
- self.com_append_stmt(stmts, nodelist[i])
- return Stmt(stmts)
-
- def parameters(self, nodelist):
- raise WalkerError
-
- def varargslist(self, nodelist):
- raise WalkerError
-
- def fpdef(self, nodelist):
- raise WalkerError
-
- def fplist(self, nodelist):
- raise WalkerError
-
- def dotted_name(self, nodelist):
- raise WalkerError
-
- def comp_op(self, nodelist):
- raise WalkerError
-
- def trailer(self, nodelist):
- raise WalkerError
-
- def sliceop(self, nodelist):
- raise WalkerError
-
- def argument(self, nodelist):
- raise WalkerError
-
- # --------------------------------------------------------------
- #
- # STATEMENT NODES (invoked by com_node())
- #
-
- def expr_stmt(self, nodelist):
- # augassign testlist | testlist ('=' testlist)*
- en = nodelist[-1]
- exprNode = self.lookup_node(en)(en[1:])
- if len(nodelist) == 1:
- return Discard(exprNode, lineno=exprNode.lineno)
- if nodelist[1][0] == token.EQUAL:
- nodesl = []
- for i in range(0, len(nodelist) - 2, 2):
- nodesl.append(self.com_assign(nodelist[i], OP_ASSIGN))
- return Assign(nodesl, exprNode, lineno=nodelist[1][2])
- else:
- lval = self.com_augassign(nodelist[0])
- op = self.com_augassign_op(nodelist[1])
- return AugAssign(lval, op[1], exprNode, lineno=op[2])
- raise WalkerError, "can't get here"
-
- def print_stmt(self, nodelist):
- # print ([ test (',' test)* [','] ] | '>>' test [ (',' test)+ [','] ])
- items = []
- if len(nodelist) == 1:
- start = 1
- dest = None
- elif nodelist[1][0] == token.RIGHTSHIFT:
- assert len(nodelist) == 3 \
- or nodelist[3][0] == token.COMMA
- dest = self.com_node(nodelist[2])
- start = 4
- else:
- dest = None
- start = 1
- for i in range(start, len(nodelist), 2):
- items.append(self.com_node(nodelist[i]))
- if nodelist[-1][0] == token.COMMA:
- return Print(items, dest, lineno=nodelist[0][2])
- return Printnl(items, dest, lineno=nodelist[0][2])
-
- def del_stmt(self, nodelist):
- return self.com_assign(nodelist[1], OP_DELETE)
-
- def pass_stmt(self, nodelist):
- return Pass(lineno=nodelist[0][2])
-
- def break_stmt(self, nodelist):
- return Break(lineno=nodelist[0][2])
-
- def continue_stmt(self, nodelist):
- return Continue(lineno=nodelist[0][2])
-
- def return_stmt(self, nodelist):
- # return: [testlist]
- if len(nodelist) < 2:
- return Return(Const(None), lineno=nodelist[0][2])
- return Return(self.com_node(nodelist[1]), lineno=nodelist[0][2])
-
- def yield_stmt(self, nodelist):
- expr = self.com_node(nodelist[0])
- return Discard(expr, lineno=expr.lineno)
-
- def yield_expr(self, nodelist):
- if len(nodelist) > 1:
- value = self.com_node(nodelist[1])
- else:
- value = Const(None)
- return Yield(value, lineno=nodelist[0][2])
-
- def raise_stmt(self, nodelist):
- # raise: [test [',' test [',' test]]]
- if len(nodelist) > 5:
- expr3 = self.com_node(nodelist[5])
- else:
- expr3 = None
- if len(nodelist) > 3:
- expr2 = self.com_node(nodelist[3])
- else:
- expr2 = None
- if len(nodelist) > 1:
- expr1 = self.com_node(nodelist[1])
- else:
- expr1 = None
- return Raise(expr1, expr2, expr3, lineno=nodelist[0][2])
-
- def import_stmt(self, nodelist):
- # import_stmt: import_name | import_from
- assert len(nodelist) == 1
- return self.com_node(nodelist[0])
-
- def import_name(self, nodelist):
- # import_name: 'import' dotted_as_names
- return Import(self.com_dotted_as_names(nodelist[1]),
- lineno=nodelist[0][2])
-
- def import_from(self, nodelist):
- # import_from: 'from' ('.'* dotted_name | '.') 'import' ('*' |
- # '(' import_as_names ')' | import_as_names)
- assert nodelist[0][1] == 'from'
- idx = 1
- while nodelist[idx][1] == '.':
- idx += 1
- level = idx - 1
- if nodelist[idx][0] == symbol.dotted_name:
- fromname = self.com_dotted_name(nodelist[idx])
- idx += 1
- else:
- fromname = ""
- assert nodelist[idx][1] == 'import'
- if nodelist[idx + 1][0] == token.STAR:
- return From(fromname, [('*', None)], level,
- lineno=nodelist[0][2])
- else:
- node = nodelist[idx + 1 + (nodelist[idx + 1][0] == token.LPAR)]
- return From(fromname, self.com_import_as_names(node), level,
- lineno=nodelist[0][2])
-
- def global_stmt(self, nodelist):
- # global: NAME (',' NAME)*
- names = []
- for i in range(1, len(nodelist), 2):
- names.append(nodelist[i][1])
- return Global(names, lineno=nodelist[0][2])
-
- def exec_stmt(self, nodelist):
- # exec_stmt: 'exec' expr ['in' expr [',' expr]]
- expr1 = self.com_node(nodelist[1])
- if len(nodelist) >= 4:
- expr2 = self.com_node(nodelist[3])
- if len(nodelist) >= 6:
- expr3 = self.com_node(nodelist[5])
- else:
- expr3 = None
- else:
- expr2 = expr3 = None
-
- return Exec(expr1, expr2, expr3, lineno=nodelist[0][2])
-
- def assert_stmt(self, nodelist):
- # 'assert': test, [',' test]
- expr1 = self.com_node(nodelist[1])
- if (len(nodelist) == 4):
- expr2 = self.com_node(nodelist[3])
- else:
- expr2 = None
- return Assert(expr1, expr2, lineno=nodelist[0][2])
-
- def if_stmt(self, nodelist):
- # if: test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
- tests = []
- for i in range(0, len(nodelist) - 3, 4):
- testNode = self.com_node(nodelist[i + 1])
- suiteNode = self.com_node(nodelist[i + 3])
- tests.append((testNode, suiteNode))
-
- if len(nodelist) % 4 == 3:
- elseNode = self.com_node(nodelist[-1])
-## elseNode.lineno = nodelist[-1][1][2]
- else:
- elseNode = None
- return If(tests, elseNode, lineno=nodelist[0][2])
-
- def while_stmt(self, nodelist):
- # 'while' test ':' suite ['else' ':' suite]
-
- testNode = self.com_node(nodelist[1])
- bodyNode = self.com_node(nodelist[3])
-
- if len(nodelist) > 4:
- elseNode = self.com_node(nodelist[6])
- else:
- elseNode = None
-
- return While(testNode, bodyNode, elseNode, lineno=nodelist[0][2])
-
- def for_stmt(self, nodelist):
- # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
-
- assignNode = self.com_assign(nodelist[1], OP_ASSIGN)
- listNode = self.com_node(nodelist[3])
- bodyNode = self.com_node(nodelist[5])
-
- if len(nodelist) > 8:
- elseNode = self.com_node(nodelist[8])
- else:
- elseNode = None
-
- return For(assignNode, listNode, bodyNode, elseNode,
- lineno=nodelist[0][2])
-
- def try_stmt(self, nodelist):
- return self.com_try_except_finally(nodelist)
-
- def with_stmt(self, nodelist):
- return self.com_with(nodelist)
-
- def with_var(self, nodelist):
- return self.com_with_var(nodelist)
-
- def suite(self, nodelist):
- # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
- if len(nodelist) == 1:
- return self.com_stmt(nodelist[0])
-
- stmts = []
- for node in nodelist:
- if node[0] == symbol.stmt:
- self.com_append_stmt(stmts, node)
- return Stmt(stmts)
-
- # --------------------------------------------------------------
- #
- # EXPRESSION NODES (invoked by com_node())
- #
-
- def testlist(self, nodelist):
- # testlist: expr (',' expr)* [',']
- # testlist_safe: test [(',' test)+ [',']]
- # exprlist: expr (',' expr)* [',']
- return self.com_binary(Tuple, nodelist)
-
- testlist_safe = testlist # XXX
- testlist1 = testlist
- exprlist = testlist
-
- def testlist_gexp(self, nodelist):
- if len(nodelist) == 2 and nodelist[1][0] == symbol.gen_for:
- test = self.com_node(nodelist[0])
- return self.com_generator_expression(test, nodelist[1])
- return self.testlist(nodelist)
-
- def test(self, nodelist):
- # or_test ['if' or_test 'else' test] | lambdef
- if len(nodelist) == 1 and nodelist[0][0] == symbol.lambdef:
- return self.lambdef(nodelist[0])
- then = self.com_node(nodelist[0])
- if len(nodelist) > 1:
- assert len(nodelist) == 5
- assert nodelist[1][1] == 'if'
- assert nodelist[3][1] == 'else'
- test = self.com_node(nodelist[2])
- else_ = self.com_node(nodelist[4])
- return IfExp(test, then, else_, lineno=nodelist[1][2])
- return then
-
- def or_test(self, nodelist):
- # and_test ('or' and_test)* | lambdef
- if len(nodelist) == 1 and nodelist[0][0] == symbol.lambdef:
- return self.lambdef(nodelist[0])
- return self.com_binary(Or, nodelist)
- old_test = or_test
-
- def and_test(self, nodelist):
- # not_test ('and' not_test)*
- return self.com_binary(And, nodelist)
-
- def not_test(self, nodelist):
- # 'not' not_test | comparison
- result = self.com_node(nodelist[-1])
- if len(nodelist) == 2:
- return Not(result, lineno=nodelist[0][2])
- return result
-
- def comparison(self, nodelist):
- # comparison: expr (comp_op expr)*
- node = self.com_node(nodelist[0])
- if len(nodelist) == 1:
- return node
-
- results = []
- for i in range(2, len(nodelist), 2):
- nl = nodelist[i-1]
-
- # comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
- # | 'in' | 'not' 'in' | 'is' | 'is' 'not'
- n = nl[1]
- if n[0] == token.NAME:
- type = n[1]
- if len(nl) == 3:
- if type == 'not':
- type = 'not in'
- else:
- type = 'is not'
- else:
- type = _cmp_types[n[0]]
-
- lineno = nl[1][2]
- results.append((type, self.com_node(nodelist[i])))
-
- # we need a special "compare" node so that we can distinguish
- # 3 < x < 5 from (3 < x) < 5
- # the two have very different semantics and results (note that the
- # latter form is always true)
-
- return Compare(node, results, lineno=lineno)
-
- def expr(self, nodelist):
- # xor_expr ('|' xor_expr)*
- return self.com_binary(Bitor, nodelist)
-
- def xor_expr(self, nodelist):
- # xor_expr ('^' xor_expr)*
- return self.com_binary(Bitxor, nodelist)
-
- def and_expr(self, nodelist):
- # xor_expr ('&' xor_expr)*
- return self.com_binary(Bitand, nodelist)
-
- def shift_expr(self, nodelist):
- # shift_expr ('<<'|'>>' shift_expr)*
- node = self.com_node(nodelist[0])
- for i in range(2, len(nodelist), 2):
- right = self.com_node(nodelist[i])
- if nodelist[i-1][0] == token.LEFTSHIFT:
- node = LeftShift([node, right], lineno=nodelist[1][2])
- elif nodelist[i-1][0] == token.RIGHTSHIFT:
- node = RightShift([node, right], lineno=nodelist[1][2])
- else:
- raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
- return node
-
- def arith_expr(self, nodelist):
- node = self.com_node(nodelist[0])
- for i in range(2, len(nodelist), 2):
- right = self.com_node(nodelist[i])
- if nodelist[i-1][0] == token.PLUS:
- node = Add([node, right], lineno=nodelist[1][2])
- elif nodelist[i-1][0] == token.MINUS:
- node = Sub([node, right], lineno=nodelist[1][2])
- else:
- raise ValueError, "unexpected token: %s" % nodelist[i-1][0]
- return node
-
- def term(self, nodelist):
- node = self.com_node(nodelist[0])
- for i in range(2, len(nodelist), 2):
- right = self.com_node(nodelist[i])
- t = nodelist[i-1][0]
- if t == token.STAR:
- node = Mul([node, right])
- elif t == token.SLASH:
- node = Div([node, right])
- elif t == token.PERCENT:
- node = Mod([node, right])
- elif t == token.DOUBLESLASH:
- node = FloorDiv([node, right])
- else:
- raise ValueError, "unexpected token: %s" % t
- node.lineno = nodelist[1][2]
- return node
-
- def factor(self, nodelist):
- elt = nodelist[0]
- t = elt[0]
- node = self.lookup_node(nodelist[-1])(nodelist[-1][1:])
- # need to handle (unary op)constant here...
- if t == token.PLUS:
- return UnaryAdd(node, lineno=elt[2])
- elif t == token.MINUS:
- return UnarySub(node, lineno=elt[2])
- elif t == token.TILDE:
- node = Invert(node, lineno=elt[2])
- return node
-
- def power(self, nodelist):
- # power: atom trailer* ('**' factor)*
- node = self.com_node(nodelist[0])
- for i in range(1, len(nodelist)):
- elt = nodelist[i]
- if elt[0] == token.DOUBLESTAR:
- return Power([node, self.com_node(nodelist[i+1])],
- lineno=elt[2])
-
- node = self.com_apply_trailer(node, elt)
-
- return node
-
- def atom(self, nodelist):
- return self._atom_dispatch[nodelist[0][0]](nodelist)
-
- def atom_lpar(self, nodelist):
- if nodelist[1][0] == token.RPAR:
- return Tuple((), lineno=nodelist[0][2])
- return self.com_node(nodelist[1])
-
- def atom_lsqb(self, nodelist):
- if nodelist[1][0] == token.RSQB:
- return List((), lineno=nodelist[0][2])
- return self.com_list_constructor(nodelist[1])
-
- def atom_lbrace(self, nodelist):
- if nodelist[1][0] == token.RBRACE:
- return Dict((), lineno=nodelist[0][2])
- return self.com_dictmaker(nodelist[1])
-
- def atom_backquote(self, nodelist):
- return Backquote(self.com_node(nodelist[1]))
-
- def atom_number(self, nodelist):
- ### need to verify this matches compile.c
- k = eval(nodelist[0][1])
- return Const(k, lineno=nodelist[0][2])
-
- def decode_literal(self, lit):
- if self.encoding:
- # this is particularly fragile & a bit of a
- # hack... changes in compile.c:parsestr and
- # tokenizer.c must be reflected here.
- if self.encoding not in ['utf-8', 'iso-8859-1']:
- lit = unicode(lit, 'utf-8').encode(self.encoding)
- return eval("# coding: %s\n%s" % (self.encoding, lit))
- else:
- return eval(lit)
-
- def atom_string(self, nodelist):
- k = ''
- for node in nodelist:
- k += self.decode_literal(node[1])
- return Const(k, lineno=nodelist[0][2])
-
- def atom_name(self, nodelist):
- return Name(nodelist[0][1], lineno=nodelist[0][2])
-
- # --------------------------------------------------------------
- #
- # INTERNAL PARSING UTILITIES
- #
-
- # The use of com_node() introduces a lot of extra stack frames,
- # enough to cause a stack overflow compiling test.test_parser with
- # the standard interpreter recursionlimit. The com_node() is a
- # convenience function that hides the dispatch details, but comes
- # at a very high cost. It is more efficient to dispatch directly
- # in the callers. In these cases, use lookup_node() and call the
- # dispatched node directly.
-
- def lookup_node(self, node):
- return self._dispatch[node[0]]
-
- def com_node(self, node):
- # Note: compile.c has handling in com_node for del_stmt, pass_stmt,
- # break_stmt, stmt, small_stmt, flow_stmt, simple_stmt,
- # and compound_stmt.
- # We'll just dispatch them.
- return self._dispatch[node[0]](node[1:])
-
- def com_NEWLINE(self, *args):
- # A ';' at the end of a line can make a NEWLINE token appear
- # here, Render it harmless. (genc discards ('discard',
- # ('const', xxxx)) Nodes)
- return Discard(Const(None))
-
- def com_arglist(self, nodelist):
- # varargslist:
- # (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME)
- # | fpdef ['=' test] (',' fpdef ['=' test])* [',']
- # fpdef: NAME | '(' fplist ')'
- # fplist: fpdef (',' fpdef)* [',']
- names = []
- defaults = []
- flags = 0
-
- i = 0
- while i < len(nodelist):
- node = nodelist[i]
- if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
- if node[0] == token.STAR:
- node = nodelist[i+1]
- if node[0] == token.NAME:
- names.append(node[1])
- flags = flags | CO_VARARGS
- i = i + 3
-
- if i < len(nodelist):
- # should be DOUBLESTAR
- t = nodelist[i][0]
- if t == token.DOUBLESTAR:
- node = nodelist[i+1]
- else:
- raise ValueError, "unexpected token: %s" % t
- names.append(node[1])
- flags = flags | CO_VARKEYWORDS
-
- break
-
- # fpdef: NAME | '(' fplist ')'
- names.append(self.com_fpdef(node))
-
- i = i + 1
- if i < len(nodelist) and nodelist[i][0] == token.EQUAL:
- defaults.append(self.com_node(nodelist[i + 1]))
- i = i + 2
- elif len(defaults):
- # we have already seen an argument with default, but here
- # came one without
- raise SyntaxError, "non-default argument follows default argument"
-
- # skip the comma
- i = i + 1
-
- return names, defaults, flags
-
- def com_fpdef(self, node):
- # fpdef: NAME | '(' fplist ')'
- if node[1][0] == token.LPAR:
- return self.com_fplist(node[2])
- return node[1][1]
-
- def com_fplist(self, node):
- # fplist: fpdef (',' fpdef)* [',']
- if len(node) == 2:
- return self.com_fpdef(node[1])
- list = []
- for i in range(1, len(node), 2):
- list.append(self.com_fpdef(node[i]))
- return tuple(list)
-
- def com_dotted_name(self, node):
- # String together the dotted names and return the string
- name = ""
- for n in node:
- if type(n) == type(()) and n[0] == 1:
- name = name + n[1] + '.'
- return name[:-1]
-
- def com_dotted_as_name(self, node):
- assert node[0] == symbol.dotted_as_name
- node = node[1:]
- dot = self.com_dotted_name(node[0][1:])
- if len(node) == 1:
- return dot, None
- assert node[1][1] == 'as'
- assert node[2][0] == token.NAME
- return dot, node[2][1]
-
- def com_dotted_as_names(self, node):
- assert node[0] == symbol.dotted_as_names
- node = node[1:]
- names = [self.com_dotted_as_name(node[0])]
- for i in range(2, len(node), 2):
- names.append(self.com_dotted_as_name(node[i]))
- return names
-
- def com_import_as_name(self, node):
- assert node[0] == symbol.import_as_name
- node = node[1:]
- assert node[0][0] == token.NAME
- if len(node) == 1:
- return node[0][1], None
- assert node[1][1] == 'as', node
- assert node[2][0] == token.NAME
- return node[0][1], node[2][1]
-
- def com_import_as_names(self, node):
- assert node[0] == symbol.import_as_names
- node = node[1:]
- names = [self.com_import_as_name(node[0])]
- for i in range(2, len(node), 2):
- names.append(self.com_import_as_name(node[i]))
- return names
-
- def com_bases(self, node):
- bases = []
- for i in range(1, len(node), 2):
- bases.append(self.com_node(node[i]))
- return bases
-
- def com_try_except_finally(self, nodelist):
- # ('try' ':' suite
- # ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite]
- # | 'finally' ':' suite))
-
- if nodelist[3][0] == token.NAME:
- # first clause is a finally clause: only try-finally
- return TryFinally(self.com_node(nodelist[2]),
- self.com_node(nodelist[5]),
- lineno=nodelist[0][2])
-
- #tryexcept: [TryNode, [except_clauses], elseNode)]
- clauses = []
- elseNode = None
- finallyNode = None
- for i in range(3, len(nodelist), 3):
- node = nodelist[i]
- if node[0] == symbol.except_clause:
- # except_clause: 'except' [expr [',' expr]] */
- if len(node) > 2:
- expr1 = self.com_node(node[2])
- if len(node) > 4:
- expr2 = self.com_assign(node[4], OP_ASSIGN)
- else:
- expr2 = None
- else:
- expr1 = expr2 = None
- clauses.append((expr1, expr2, self.com_node(nodelist[i+2])))
-
- if node[0] == token.NAME:
- if node[1] == 'else':
- elseNode = self.com_node(nodelist[i+2])
- elif node[1] == 'finally':
- finallyNode = self.com_node(nodelist[i+2])
- try_except = TryExcept(self.com_node(nodelist[2]), clauses, elseNode,
- lineno=nodelist[0][2])
- if finallyNode:
- return TryFinally(try_except, finallyNode, lineno=nodelist[0][2])
- else:
- return try_except
-
- def com_with(self, nodelist):
- # with_stmt: 'with' expr [with_var] ':' suite
- expr = self.com_node(nodelist[1])
- body = self.com_node(nodelist[-1])
- if nodelist[2][0] == token.COLON:
- var = None
- else:
- var = self.com_assign(nodelist[2][2], OP_ASSIGN)
- return With(expr, var, body, lineno=nodelist[0][2])
-
- def com_with_var(self, nodelist):
- # with_var: 'as' expr
- return self.com_node(nodelist[1])
-
- def com_augassign_op(self, node):
- assert node[0] == symbol.augassign
- return node[1]
-
- def com_augassign(self, node):
- """Return node suitable for lvalue of augmented assignment
-
- Names, slices, and attributes are the only allowable nodes.
- """
- l = self.com_node(node)
- if l.__class__ in (Name, Slice, Subscript, Getattr):
- return l
- raise SyntaxError, "can't assign to %s" % l.__class__.__name__
-
- def com_assign(self, node, assigning):
- # return a node suitable for use as an "lvalue"
- # loop to avoid trivial recursion
- while 1:
- t = node[0]
- if t in (symbol.exprlist, symbol.testlist, symbol.testlist_safe, symbol.testlist_gexp):
- if len(node) > 2:
- return self.com_assign_tuple(node, assigning)
- node = node[1]
- elif t in _assign_types:
- if len(node) > 2:
- raise SyntaxError, "can't assign to operator"
- node = node[1]
- elif t == symbol.power:
- if node[1][0] != symbol.atom:
- raise SyntaxError, "can't assign to operator"
- if len(node) > 2:
- primary = self.com_node(node[1])
- for i in range(2, len(node)-1):
- ch = node[i]
- if ch[0] == token.DOUBLESTAR:
- raise SyntaxError, "can't assign to operator"
- primary = self.com_apply_trailer(primary, ch)
- return self.com_assign_trailer(primary, node[-1],
- assigning)
- node = node[1]
- elif t == symbol.atom:
- t = node[1][0]
- if t == token.LPAR:
- node = node[2]
- if node[0] == token.RPAR:
- raise SyntaxError, "can't assign to ()"
- elif t == token.LSQB:
- node = node[2]
- if node[0] == token.RSQB:
- raise SyntaxError, "can't assign to []"
- return self.com_assign_list(node, assigning)
- elif t == token.NAME:
- return self.com_assign_name(node[1], assigning)
- else:
- raise SyntaxError, "can't assign to literal"
- else:
- raise SyntaxError, "bad assignment (%s)" % t
-
- def com_assign_tuple(self, node, assigning):
- assigns = []
- for i in range(1, len(node), 2):
- assigns.append(self.com_assign(node[i], assigning))
- return AssTuple(assigns, lineno=extractLineNo(node))
-
- def com_assign_list(self, node, assigning):
- assigns = []
- for i in range(1, len(node), 2):
- if i + 1 < len(node):
- if node[i + 1][0] == symbol.list_for:
- raise SyntaxError, "can't assign to list comprehension"
- assert node[i + 1][0] == token.COMMA, node[i + 1]
- assigns.append(self.com_assign(node[i], assigning))
- return AssList(assigns, lineno=extractLineNo(node))
-
- def com_assign_name(self, node, assigning):
- return AssName(node[1], assigning, lineno=node[2])
-
- def com_assign_trailer(self, primary, node, assigning):
- t = node[1][0]
- if t == token.DOT:
- return self.com_assign_attr(primary, node[2], assigning)
- if t == token.LSQB:
- return self.com_subscriptlist(primary, node[2], assigning)
- if t == token.LPAR:
- raise SyntaxError, "can't assign to function call"
- raise SyntaxError, "unknown trailer type: %s" % t
-
- def com_assign_attr(self, primary, node, assigning):
- return AssAttr(primary, node[1], assigning, lineno=node[-1])
-
- def com_binary(self, constructor, nodelist):
- "Compile 'NODE (OP NODE)*' into (type, [ node1, ..., nodeN ])."
- l = len(nodelist)
- if l == 1:
- n = nodelist[0]
- return self.lookup_node(n)(n[1:])
- items = []
- for i in range(0, l, 2):
- n = nodelist[i]
- items.append(self.lookup_node(n)(n[1:]))
- return constructor(items, lineno=extractLineNo(nodelist))
-
- def com_stmt(self, node):
- result = self.lookup_node(node)(node[1:])
- assert result is not None
- if isinstance(result, Stmt):
- return result
- return Stmt([result])
-
- def com_append_stmt(self, stmts, node):
- result = self.lookup_node(node)(node[1:])
- assert result is not None
- if isinstance(result, Stmt):
- stmts.extend(result.nodes)
- else:
- stmts.append(result)
-
- if hasattr(symbol, 'list_for'):
- def com_list_constructor(self, nodelist):
- # listmaker: test ( list_for | (',' test)* [','] )
- values = []
- for i in range(1, len(nodelist)):
- if nodelist[i][0] == symbol.list_for:
- assert len(nodelist[i:]) == 1
- return self.com_list_comprehension(values[0],
- nodelist[i])
- elif nodelist[i][0] == token.COMMA:
- continue
- values.append(self.com_node(nodelist[i]))
- return List(values, lineno=values[0].lineno)
-
- def com_list_comprehension(self, expr, node):
- # list_iter: list_for | list_if
- # list_for: 'for' exprlist 'in' testlist [list_iter]
- # list_if: 'if' test [list_iter]
-
- # XXX should raise SyntaxError for assignment
-
- lineno = node[1][2]
- fors = []
- while node:
- t = node[1][1]
- if t == 'for':
- assignNode = self.com_assign(node[2], OP_ASSIGN)
- listNode = self.com_node(node[4])
- newfor = ListCompFor(assignNode, listNode, [])
- newfor.lineno = node[1][2]
- fors.append(newfor)
- if len(node) == 5:
- node = None
- else:
- node = self.com_list_iter(node[5])
- elif t == 'if':
- test = self.com_node(node[2])
- newif = ListCompIf(test, lineno=node[1][2])
- newfor.ifs.append(newif)
- if len(node) == 3:
- node = None
- else:
- node = self.com_list_iter(node[3])
- else:
- raise SyntaxError, \
- ("unexpected list comprehension element: %s %d"
- % (node, lineno))
- return ListComp(expr, fors, lineno=lineno)
-
- def com_list_iter(self, node):
- assert node[0] == symbol.list_iter
- return node[1]
- else:
- def com_list_constructor(self, nodelist):
- values = []
- for i in range(1, len(nodelist), 2):
- values.append(self.com_node(nodelist[i]))
- return List(values, lineno=values[0].lineno)
-
- if hasattr(symbol, 'gen_for'):
- def com_generator_expression(self, expr, node):
- # gen_iter: gen_for | gen_if
- # gen_for: 'for' exprlist 'in' test [gen_iter]
- # gen_if: 'if' test [gen_iter]
-
- lineno = node[1][2]
- fors = []
- while node:
- t = node[1][1]
- if t == 'for':
- assignNode = self.com_assign(node[2], OP_ASSIGN)
- genNode = self.com_node(node[4])
- newfor = GenExprFor(assignNode, genNode, [],
- lineno=node[1][2])
- fors.append(newfor)
- if (len(node)) == 5:
- node = None
- else:
- node = self.com_gen_iter(node[5])
- elif t == 'if':
- test = self.com_node(node[2])
- newif = GenExprIf(test, lineno=node[1][2])
- newfor.ifs.append(newif)
- if len(node) == 3:
- node = None
- else:
- node = self.com_gen_iter(node[3])
- else:
- raise SyntaxError, \
- ("unexpected generator expression element: %s %d"
- % (node, lineno))
- fors[0].is_outmost = True
- return GenExpr(GenExprInner(expr, fors), lineno=lineno)
-
- def com_gen_iter(self, node):
- assert node[0] == symbol.gen_iter
- return node[1]
-
- def com_dictmaker(self, nodelist):
- # dictmaker: test ':' test (',' test ':' value)* [',']
- items = []
- for i in range(1, len(nodelist), 4):
- items.append((self.com_node(nodelist[i]),
- self.com_node(nodelist[i+2])))
- return Dict(items, lineno=items[0][0].lineno)
-
- def com_apply_trailer(self, primaryNode, nodelist):
- t = nodelist[1][0]
- if t == token.LPAR:
- return self.com_call_function(primaryNode, nodelist[2])
- if t == token.DOT:
- return self.com_select_member(primaryNode, nodelist[2])
- if t == token.LSQB:
- return self.com_subscriptlist(primaryNode, nodelist[2], OP_APPLY)
-
- raise SyntaxError, 'unknown node type: %s' % t
-
- def com_select_member(self, primaryNode, nodelist):
- if nodelist[0] != token.NAME:
- raise SyntaxError, "member must be a name"
- return Getattr(primaryNode, nodelist[1], lineno=nodelist[2])
-
- def com_call_function(self, primaryNode, nodelist):
- if nodelist[0] == token.RPAR:
- return CallFunc(primaryNode, [], lineno=extractLineNo(nodelist))
- args = []
- kw = 0
- len_nodelist = len(nodelist)
- for i in range(1, len_nodelist, 2):
- node = nodelist[i]
- if node[0] == token.STAR or node[0] == token.DOUBLESTAR:
- break
- kw, result = self.com_argument(node, kw)
-
- if len_nodelist != 2 and isinstance(result, GenExpr) \
- and len(node) == 3 and node[2][0] == symbol.gen_for:
- # allow f(x for x in y), but reject f(x for x in y, 1)
- # should use f((x for x in y), 1) instead of f(x for x in y, 1)
- raise SyntaxError, 'generator expression needs parenthesis'
-
- args.append(result)
- else:
- # No broken by star arg, so skip the last one we processed.
- i = i + 1
- if i < len_nodelist and nodelist[i][0] == token.COMMA:
- # need to accept an application that looks like "f(a, b,)"
- i = i + 1
- star_node = dstar_node = None
- while i < len_nodelist:
- tok = nodelist[i]
- ch = nodelist[i+1]
- i = i + 3
- if tok[0]==token.STAR:
- if star_node is not None:
- raise SyntaxError, 'already have the varargs indentifier'
- star_node = self.com_node(ch)
- elif tok[0]==token.DOUBLESTAR:
- if dstar_node is not None:
- raise SyntaxError, 'already have the kwargs indentifier'
- dstar_node = self.com_node(ch)
- else:
- raise SyntaxError, 'unknown node type: %s' % tok
- return CallFunc(primaryNode, args, star_node, dstar_node,
- lineno=extractLineNo(nodelist))
-
- def com_argument(self, nodelist, kw):
- if len(nodelist) == 3 and nodelist[2][0] == symbol.gen_for:
- test = self.com_node(nodelist[1])
- return 0, self.com_generator_expression(test, nodelist[2])
- if len(nodelist) == 2:
- if kw:
- raise SyntaxError, "non-keyword arg after keyword arg"
- return 0, self.com_node(nodelist[1])
- result = self.com_node(nodelist[3])
- n = nodelist[1]
- while len(n) == 2 and n[0] != token.NAME:
- n = n[1]
- if n[0] != token.NAME:
- raise SyntaxError, "keyword can't be an expression (%s)"%n[0]
- node = Keyword(n[1], result, lineno=n[2])
- return 1, node
-
- def com_subscriptlist(self, primary, nodelist, assigning):
- # slicing: simple_slicing | extended_slicing
- # simple_slicing: primary "[" short_slice "]"
- # extended_slicing: primary "[" slice_list "]"
- # slice_list: slice_item ("," slice_item)* [","]
-
- # backwards compat slice for '[i:j]'
- if len(nodelist) == 2:
- sub = nodelist[1]
- if (sub[1][0] == token.COLON or \
- (len(sub) > 2 and sub[2][0] == token.COLON)) and \
- sub[-1][0] != symbol.sliceop:
- return self.com_slice(primary, sub, assigning)
-
- subscripts = []
- for i in range(1, len(nodelist), 2):
- subscripts.append(self.com_subscript(nodelist[i]))
- return Subscript(primary, assigning, subscripts,
- lineno=extractLineNo(nodelist))
-
- def com_subscript(self, node):
- # slice_item: expression | proper_slice | ellipsis
- ch = node[1]
- t = ch[0]
- if t == token.DOT and node[2][0] == token.DOT:
- return Ellipsis()
- if t == token.COLON or len(node) > 2:
- return self.com_sliceobj(node)
- return self.com_node(ch)
-
- def com_sliceobj(self, node):
- # proper_slice: short_slice | long_slice
- # short_slice: [lower_bound] ":" [upper_bound]
- # long_slice: short_slice ":" [stride]
- # lower_bound: expression
- # upper_bound: expression
- # stride: expression
- #
- # Note: a stride may be further slicing...
-
- items = []
-
- if node[1][0] == token.COLON:
- items.append(Const(None))
- i = 2
- else:
- items.append(self.com_node(node[1]))
- # i == 2 is a COLON
- i = 3
-
- if i < len(node) and node[i][0] == symbol.test:
- items.append(self.com_node(node[i]))
- i = i + 1
- else:
- items.append(Const(None))
-
- # a short_slice has been built. look for long_slice now by looking
- # for strides...
- for j in range(i, len(node)):
- ch = node[j]
- if len(ch) == 2:
- items.append(Const(None))
- else:
- items.append(self.com_node(ch[2]))
- return Sliceobj(items, lineno=extractLineNo(node))
-
- def com_slice(self, primary, node, assigning):
- # short_slice: [lower_bound] ":" [upper_bound]
- lower = upper = None
- if len(node) == 3:
- if node[1][0] == token.COLON:
- upper = self.com_node(node[2])
- else:
- lower = self.com_node(node[1])
- elif len(node) == 4:
- lower = self.com_node(node[1])
- upper = self.com_node(node[3])
- return Slice(primary, assigning, lower, upper,
- lineno=extractLineNo(node))
-
- def get_docstring(self, node, n=None):
- if n is None:
- n = node[0]
- node = node[1:]
- if n == symbol.suite:
- if len(node) == 1:
- return self.get_docstring(node[0])
- for sub in node:
- if sub[0] == symbol.stmt:
- return self.get_docstring(sub)
- return None
- if n == symbol.file_input:
- for sub in node:
- if sub[0] == symbol.stmt:
- return self.get_docstring(sub)
- return None
- if n == symbol.atom:
- if node[0][0] == token.STRING:
- s = ''
- for t in node:
- s = s + eval(t[1])
- return s
- return None
- if n == symbol.stmt or n == symbol.simple_stmt \
- or n == symbol.small_stmt:
- return self.get_docstring(node[0])
- if n in _doc_nodes and len(node) == 1:
- return self.get_docstring(node[0])
- return None
-
-
-_doc_nodes = [
- symbol.expr_stmt,
- symbol.testlist,
- symbol.testlist_safe,
- symbol.test,
- symbol.or_test,
- symbol.and_test,
- symbol.not_test,
- symbol.comparison,
- symbol.expr,
- symbol.xor_expr,
- symbol.and_expr,
- symbol.shift_expr,
- symbol.arith_expr,
- symbol.term,
- symbol.factor,
- symbol.power,
- ]
-
-# comp_op: '<' | '>' | '=' | '>=' | '<=' | '<>' | '!=' | '=='
-# | 'in' | 'not' 'in' | 'is' | 'is' 'not'
-_cmp_types = {
- token.LESS : '<',
- token.GREATER : '>',
- token.EQEQUAL : '==',
- token.EQUAL : '==',
- token.LESSEQUAL : '<=',
- token.GREATEREQUAL : '>=',
- token.NOTEQUAL : '!=',
- }
-
-_legal_node_types = [
- symbol.funcdef,
- symbol.classdef,
- symbol.stmt,
- symbol.small_stmt,
- symbol.flow_stmt,
- symbol.simple_stmt,
- symbol.compound_stmt,
- symbol.expr_stmt,
- symbol.print_stmt,
- symbol.del_stmt,
- symbol.pass_stmt,
- symbol.break_stmt,
- symbol.continue_stmt,
- symbol.return_stmt,
- symbol.raise_stmt,
- symbol.import_stmt,
- symbol.global_stmt,
- symbol.exec_stmt,
- symbol.assert_stmt,
- symbol.if_stmt,
- symbol.while_stmt,
- symbol.for_stmt,
- symbol.try_stmt,
- symbol.with_stmt,
- symbol.suite,
- symbol.testlist,
- symbol.testlist_safe,
- symbol.test,
- symbol.and_test,
- symbol.not_test,
- symbol.comparison,
- symbol.exprlist,
- symbol.expr,
- symbol.xor_expr,
- symbol.and_expr,
- symbol.shift_expr,
- symbol.arith_expr,
- symbol.term,
- symbol.factor,
- symbol.power,
- symbol.atom,
- ]
-
-if hasattr(symbol, 'yield_stmt'):
- _legal_node_types.append(symbol.yield_stmt)
-if hasattr(symbol, 'yield_expr'):
- _legal_node_types.append(symbol.yield_expr)
-
-_assign_types = [
- symbol.test,
- symbol.or_test,
- symbol.and_test,
- symbol.not_test,
- symbol.comparison,
- symbol.expr,
- symbol.xor_expr,
- symbol.and_expr,
- symbol.shift_expr,
- symbol.arith_expr,
- symbol.term,
- symbol.factor,
- ]
-
-_names = {}
-for k, v in symbol.sym_name.items():
- _names[k] = v
-for k, v in token.tok_name.items():
- _names[k] = v
-
-def debug_tree(tree):
- l = []
- for elt in tree:
- if isinstance(elt, int):
- l.append(_names.get(elt, elt))
- elif isinstance(elt, str):
- l.append(elt)
- else:
- l.append(debug_tree(elt))
- return l
diff --git a/sys/lib/python/compiler/visitor.py b/sys/lib/python/compiler/visitor.py
deleted file mode 100644
index 9e39d3664..000000000
--- a/sys/lib/python/compiler/visitor.py
+++ /dev/null
@@ -1,113 +0,0 @@
-from compiler import ast
-
-# XXX should probably rename ASTVisitor to ASTWalker
-# XXX can it be made even more generic?
-
-class ASTVisitor:
- """Performs a depth-first walk of the AST
-
- The ASTVisitor will walk the AST, performing either a preorder or
- postorder traversal depending on which method is called.
-
- methods:
- preorder(tree, visitor)
- postorder(tree, visitor)
- tree: an instance of ast.Node
- visitor: an instance with visitXXX methods
-
- The ASTVisitor is responsible for walking over the tree in the
- correct order. For each node, it checks the visitor argument for
- a method named 'visitNodeType' where NodeType is the name of the
- node's class, e.g. Class. If the method exists, it is called
- with the node as its sole argument.
-
- The visitor method for a particular node type can control how
- child nodes are visited during a preorder walk. (It can't control
- the order during a postorder walk, because it is called _after_
- the walk has occurred.) The ASTVisitor modifies the visitor
- argument by adding a visit method to the visitor; this method can
- be used to visit a child node of arbitrary type.
- """
-
- VERBOSE = 0
-
- def __init__(self):
- self.node = None
- self._cache = {}
-
- def default(self, node, *args):
- for child in node.getChildNodes():
- self.dispatch(child, *args)
-
- def dispatch(self, node, *args):
- self.node = node
- klass = node.__class__
- meth = self._cache.get(klass, None)
- if meth is None:
- className = klass.__name__
- meth = getattr(self.visitor, 'visit' + className, self.default)
- self._cache[klass] = meth
-## if self.VERBOSE > 0:
-## className = klass.__name__
-## if self.VERBOSE == 1:
-## if meth == 0:
-## print "dispatch", className
-## else:
-## print "dispatch", className, (meth and meth.__name__ or '')
- return meth(node, *args)
-
- def preorder(self, tree, visitor, *args):
- """Do preorder walk of tree using visitor"""
- self.visitor = visitor
- visitor.visit = self.dispatch
- self.dispatch(tree, *args) # XXX *args make sense?
-
-class ExampleASTVisitor(ASTVisitor):
- """Prints examples of the nodes that aren't visited
-
- This visitor-driver is only useful for development, when it's
- helpful to develop a visitor incrementally, and get feedback on what
- you still have to do.
- """
- examples = {}
-
- def dispatch(self, node, *args):
- self.node = node
- meth = self._cache.get(node.__class__, None)
- className = node.__class__.__name__
- if meth is None:
- meth = getattr(self.visitor, 'visit' + className, 0)
- self._cache[node.__class__] = meth
- if self.VERBOSE > 1:
- print "dispatch", className, (meth and meth.__name__ or '')
- if meth:
- meth(node, *args)
- elif self.VERBOSE > 0:
- klass = node.__class__
- if not self.examples.has_key(klass):
- self.examples[klass] = klass
- print
- print self.visitor
- print klass
- for attr in dir(node):
- if attr[0] != '_':
- print "\t", "%-12.12s" % attr, getattr(node, attr)
- print
- return self.default(node, *args)
-
-# XXX this is an API change
-
-_walker = ASTVisitor
-def walk(tree, visitor, walker=None, verbose=None):
- if walker is None:
- walker = _walker()
- if verbose is not None:
- walker.VERBOSE = verbose
- walker.preorder(tree, visitor)
- return walker.visitor
-
-def dumpNode(node):
- print node.__class__
- for attr in dir(node):
- if attr[0] != '_':
- print "\t", "%-10.10s" % attr, getattr(node, attr)
diff --git a/sys/lib/python/config/Makefile b/sys/lib/python/config/Makefile
deleted file mode 100644
index 2ee077d0d..000000000
--- a/sys/lib/python/config/Makefile
+++ /dev/null
@@ -1,1088 +0,0 @@
-# Top-level Makefile for Python
-#
-# As distributed, this file is called Makefile.pre.in; it is processed
-# into the real Makefile by running the script ./configure, which
-# replaces things like @spam@ with values appropriate for your system.
-# This means that if you edit Makefile, your changes get lost the next
-# time you run the configure script. Ideally, you can do:
-#
-# ./configure
-# make
-# make test
-# make install
-#
-# If you have a previous version of Python installed that you don't
-# want to overwrite, you can use "make altinstall" instead of "make
-# install". Refer to the "Installing" section in the README file for
-# additional details.
-#
-# See also the section "Build instructions" in the README file.
-
-# === Variables set by makesetup ===
-
-MODOBJS= _MODOBJS_
-MODLIBS= _MODLIBS_
-
-# === Variables set by configure
-VERSION= @VERSION@
-srcdir= @srcdir@
-VPATH= @srcdir@
-
-CC= @CC@
-CXX= @CXX@
-MAINCC= @MAINCC@
-LINKCC= @LINKCC@
-AR= @AR@
-RANLIB= @RANLIB@
-SVNVERSION= @SVNVERSION@
-
-# Shell used by make (some versions default to the login shell, which is bad)
-SHELL= /bin/sh
-
-# Use this to make a link between python$(VERSION) and python in $(BINDIR)
-LN= @LN@
-
-# Portable install script (configure doesn't always guess right)
-INSTALL= @INSTALL@
-INSTALL_PROGRAM=@INSTALL_PROGRAM@
-INSTALL_SCRIPT= @INSTALL_SCRIPT@
-INSTALL_DATA= @INSTALL_DATA@
-# Shared libraries must be installed with executable mode on some systems;
-# rather than figuring out exactly which, we always give them executable mode.
-# Also, making them read-only seems to be a good idea...
-INSTALL_SHARED= ${INSTALL} -m 555
-
-MAKESETUP= $(srcdir)/Modules/makesetup
-
-# Compiler options
-OPT= @OPT@
-BASECFLAGS= @BASECFLAGS@
-CFLAGS= $(BASECFLAGS) $(OPT) $(EXTRA_CFLAGS)
-# Both CPPFLAGS and LDFLAGS need to contain the shell's value for setup.py to
-# be able to build extension modules using the directories specified in the
-# environment variables
-CPPFLAGS= -I. -I$(srcdir)/Include @CPPFLAGS@
-LDFLAGS= @LDFLAGS@
-LDLAST= @LDLAST@
-SGI_ABI= @SGI_ABI@
-CCSHARED= @CCSHARED@
-LINKFORSHARED= @LINKFORSHARED@
-# Extra C flags added for building the interpreter object files.
-CFLAGSFORSHARED=@CFLAGSFORSHARED@
-# C flags used for building the interpreter object files
-PY_CFLAGS= $(CFLAGS) $(CPPFLAGS) $(CFLAGSFORSHARED) -DPy_BUILD_CORE
-
-
-# Machine-dependent subdirectories
-MACHDEP= @MACHDEP@
-
-# Install prefix for architecture-independent files
-prefix= @prefix@
-
-# Install prefix for architecture-dependent files
-exec_prefix= @exec_prefix@
-
-# Expanded directories
-BINDIR= $(exec_prefix)/bin
-LIBDIR= $(exec_prefix)/lib
-MANDIR= @mandir@
-INCLUDEDIR= @includedir@
-CONFINCLUDEDIR= $(exec_prefix)/include
-SCRIPTDIR= $(prefix)/lib
-
-# Detailed destination directories
-BINLIBDEST= $(LIBDIR)/python$(VERSION)
-LIBDEST= $(SCRIPTDIR)/python$(VERSION)
-INCLUDEPY= $(INCLUDEDIR)/python$(VERSION)
-CONFINCLUDEPY= $(CONFINCLUDEDIR)/python$(VERSION)
-LIBP= $(LIBDIR)/python$(VERSION)
-
-# Symbols used for using shared libraries
-SO= @SO@
-LDSHARED= @LDSHARED@
-BLDSHARED= @BLDSHARED@
-DESTSHARED= $(BINLIBDEST)/lib-dynload
-
-# Executable suffix (.exe on Windows and Mac OS X)
-EXE= @EXEEXT@
-BUILDEXE= @BUILDEXEEXT@
-
-# Short name and location for Mac OS X Python framework
-UNIVERSALSDK=@UNIVERSALSDK@
-PYTHONFRAMEWORK= @PYTHONFRAMEWORK@
-PYTHONFRAMEWORKDIR= @PYTHONFRAMEWORKDIR@
-PYTHONFRAMEWORKPREFIX= @PYTHONFRAMEWORKPREFIX@
-PYTHONFRAMEWORKINSTALLDIR= @PYTHONFRAMEWORKINSTALLDIR@
-# Deployment target selected during configure, to be checked
-# by distutils. The export statement is needed to ensure that the
-# deployment target is active during build.
-MACOSX_DEPLOYMENT_TARGET=@CONFIGURE_MACOSX_DEPLOYMENT_TARGET@
-@EXPORT_MACOSX_DEPLOYMENT_TARGET@export MACOSX_DEPLOYMENT_TARGET
-
-# Options to enable prebinding (for fast startup prior to Mac OS X 10.3)
-OTHER_LIBTOOL_OPT=@OTHER_LIBTOOL_OPT@
-
-# Environment to run shared python without installed libraries
-RUNSHARED= @RUNSHARED@
-
-# Modes for directories, executables and data files created by the
-# install process. Default to user-only-writable for all file types.
-DIRMODE= 755
-EXEMODE= 755
-FILEMODE= 644
-
-# configure script arguments
-CONFIG_ARGS= @CONFIG_ARGS@
-
-
-# Subdirectories with code
-SRCDIRS= @SRCDIRS@
-
-# Other subdirectories
-SUBDIRSTOO= Include Lib Misc Demo
-
-# Files and directories to be distributed
-CONFIGFILES= configure configure.in acconfig.h pyconfig.h.in Makefile.pre.in
-DISTFILES= README ChangeLog $(CONFIGFILES)
-DISTDIRS= $(SUBDIRS) $(SUBDIRSTOO) Ext-dummy
-DIST= $(DISTFILES) $(DISTDIRS)
-
-
-LIBRARY= @LIBRARY@
-LDLIBRARY= @LDLIBRARY@
-BLDLIBRARY= @BLDLIBRARY@
-DLLLIBRARY= @DLLLIBRARY@
-LDLIBRARYDIR= @LDLIBRARYDIR@
-INSTSONAME= @INSTSONAME@
-
-
-LIBS= @LIBS@
-LIBM= @LIBM@
-LIBC= @LIBC@
-SYSLIBS= $(LIBM) $(LIBC)
-SHLIBS= @SHLIBS@
-
-THREADOBJ= @THREADOBJ@
-DLINCLDIR= @DLINCLDIR@
-DYNLOADFILE= @DYNLOADFILE@
-MACHDEP_OBJS= @MACHDEP_OBJS@
-UNICODE_OBJS= @UNICODE_OBJS@
-
-PYTHON= python$(EXE)
-BUILDPYTHON= python$(BUILDEXE)
-
-# === Definitions added by makesetup ===
-
-
-##########################################################################
-# Modules
-MODULE_OBJS= \
- Modules/config.o \
- Modules/getpath.o \
- Modules/main.o \
- Modules/gcmodule.o
-
-# Used of signalmodule.o is not available
-SIGNAL_OBJS= @SIGNAL_OBJS@
-
-
-##########################################################################
-# Grammar
-GRAMMAR_H= $(srcdir)/Include/graminit.h
-GRAMMAR_C= $(srcdir)/Python/graminit.c
-GRAMMAR_INPUT= $(srcdir)/Grammar/Grammar
-
-
-##########################################################################
-# Parser
-PGEN= Parser/pgen$(EXE)
-
-POBJS= \
- Parser/acceler.o \
- Parser/grammar1.o \
- Parser/listnode.o \
- Parser/node.o \
- Parser/parser.o \
- Parser/parsetok.o \
- Parser/bitset.o \
- Parser/metagrammar.o \
- Parser/firstsets.o \
- Parser/grammar.o \
- Parser/pgen.o
-
-PARSER_OBJS= $(POBJS) Parser/myreadline.o Parser/tokenizer.o
-
-PGOBJS= \
- Objects/obmalloc.o \
- Python/mysnprintf.o \
- Parser/tokenizer_pgen.o \
- Parser/printgrammar.o \
- Parser/pgenmain.o
-
-PGENOBJS= $(PGENMAIN) $(POBJS) $(PGOBJS)
-
-##########################################################################
-# AST
-AST_H_DIR= $(srcdir)/Include
-AST_H= $(AST_H_DIR)/Python-ast.h
-AST_C_DIR= $(srcdir)/Python
-AST_C= $(AST_C_DIR)/Python-ast.c
-AST_ASDL= $(srcdir)/Parser/Python.asdl
-
-ASDLGEN_FILES= $(srcdir)/Parser/asdl.py $(srcdir)/Parser/asdl_c.py
-# XXX Note that a build now requires Python exist before the build starts
-ASDLGEN= $(srcdir)/Parser/asdl_c.py
-
-##########################################################################
-# Python
-PYTHON_OBJS= \
- Python/Python-ast.o \
- Python/asdl.o \
- Python/ast.o \
- Python/bltinmodule.o \
- Python/ceval.o \
- Python/compile.o \
- Python/codecs.o \
- Python/errors.o \
- Python/frozen.o \
- Python/frozenmain.o \
- Python/future.o \
- Python/getargs.o \
- Python/getcompiler.o \
- Python/getcopyright.o \
- Python/getmtime.o \
- Python/getplatform.o \
- Python/getversion.o \
- Python/graminit.o \
- Python/import.o \
- Python/importdl.o \
- Python/marshal.o \
- Python/modsupport.o \
- Python/mystrtoul.o \
- Python/mysnprintf.o \
- Python/pyarena.o \
- Python/pyfpe.o \
- Python/pystate.o \
- Python/pythonrun.o \
- Python/structmember.o \
- Python/symtable.o \
- Python/sysmodule.o \
- Python/traceback.o \
- Python/getopt.o \
- Python/pystrtod.o \
- Python/$(DYNLOADFILE) \
- $(MACHDEP_OBJS) \
- $(THREADOBJ)
-
-
-##########################################################################
-# Objects
-OBJECT_OBJS= \
- Objects/abstract.o \
- Objects/boolobject.o \
- Objects/bufferobject.o \
- Objects/cellobject.o \
- Objects/classobject.o \
- Objects/cobject.o \
- Objects/codeobject.o \
- Objects/complexobject.o \
- Objects/descrobject.o \
- Objects/enumobject.o \
- Objects/exceptions.o \
- Objects/genobject.o \
- Objects/fileobject.o \
- Objects/floatobject.o \
- Objects/frameobject.o \
- Objects/funcobject.o \
- Objects/intobject.o \
- Objects/iterobject.o \
- Objects/listobject.o \
- Objects/longobject.o \
- Objects/dictobject.o \
- Objects/methodobject.o \
- Objects/moduleobject.o \
- Objects/object.o \
- Objects/obmalloc.o \
- Objects/rangeobject.o \
- Objects/setobject.o \
- Objects/sliceobject.o \
- Objects/stringobject.o \
- Objects/structseq.o \
- Objects/tupleobject.o \
- Objects/typeobject.o \
- Objects/weakrefobject.o \
- $(UNICODE_OBJS)
-
-
-##########################################################################
-# objects that get linked into the Python library
-LIBRARY_OBJS= \
- Modules/_typesmodule.o \
- Modules/getbuildinfo.o \
- $(PARSER_OBJS) \
- $(OBJECT_OBJS) \
- $(PYTHON_OBJS) \
- $(MODULE_OBJS) \
- $(SIGNAL_OBJS) \
- $(MODOBJS)
-
-#########################################################################
-# Rules
-
-# Default target
-all: $(BUILDPYTHON) oldsharedmods sharedmods
-
-# Build the interpreter
-$(BUILDPYTHON): Modules/python.o $(LIBRARY) $(LDLIBRARY)
- $(LINKCC) $(LDFLAGS) $(LINKFORSHARED) -o $@ \
- Modules/python.o \
- $(BLDLIBRARY) $(LIBS) $(MODLIBS) $(SYSLIBS) $(LDLAST)
-
-platform: $(BUILDPYTHON)
- $(RUNSHARED) ./$(BUILDPYTHON) -E -c 'import sys ; from distutils.util import get_platform ; print get_platform()+"-"+sys.version[0:3]' >platform
-
-
-# Build the shared modules
-sharedmods: $(BUILDPYTHON)
- case $$MAKEFLAGS in \
- *-s*) $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' ./$(BUILDPYTHON) -E $(srcdir)/setup.py -q build;; \
- *) $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' ./$(BUILDPYTHON) -E $(srcdir)/setup.py build;; \
- esac
-
-# Build static library
-# avoid long command lines, same as LIBRARY_OBJS
-$(LIBRARY): $(LIBRARY_OBJS)
- -rm -f $@
- $(AR) cr $@ Modules/getbuildinfo.o
- $(AR) cr $@ Modules/_typesmodule.o
- $(AR) cr $@ $(PARSER_OBJS)
- $(AR) cr $@ $(OBJECT_OBJS)
- $(AR) cr $@ $(PYTHON_OBJS)
- $(AR) cr $@ $(MODULE_OBJS) $(SIGNAL_OBJS)
- $(AR) cr $@ $(MODOBJS)
- $(RANLIB) $@
-
-libpython$(VERSION).so: $(LIBRARY_OBJS)
- if test $(INSTSONAME) != $(LDLIBRARY); then \
- $(LDSHARED) -Wl,-h$(INSTSONAME) -o $(INSTSONAME) $(LIBRARY_OBJS) $(SHLIBS) $(LIBC) $(LIBM); \
- $(LN) -f $(INSTSONAME) $@; \
- else\
- $(LDSHARED) -o $@ $(LIBRARY_OBJS) $(SHLIBS) $(LIBC) $(LIBM); \
- fi
-
-libpython$(VERSION).sl: $(LIBRARY_OBJS)
- $(LDSHARED) -o $@ $(LIBRARY_OBJS) $(SHLIBS) $(LIBC) $(LIBM)
-
-# This rule is here for OPENSTEP/Rhapsody/MacOSX. It builds a temporary
-# minimal framework (not including the Lib directory and such) in the current
-# directory.
-RESSRCDIR=$(srcdir)/Mac/Resources/framework
-$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK): \
- $(LIBRARY) \
- $(RESSRCDIR)/Info.plist \
- $(RESSRCDIR)/version.plist \
- $(RESSRCDIR)/English.lproj/InfoPlist.strings
- $(INSTALL) -d -m $(DIRMODE) $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)
- if test "${UNIVERSALSDK}"; then \
- $(CC) -o $(LDLIBRARY) -arch i386 -arch ppc -dynamiclib \
- -isysroot "${UNIVERSALSDK}" \
- -all_load $(LIBRARY) -Wl,-single_module \
- -install_name $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Versions/$(VERSION)/Python \
- -compatibility_version $(VERSION) \
- -current_version $(VERSION); \
- else \
- libtool -o $(LDLIBRARY) -dynamic $(OTHER_LIBTOOL_OPT) $(LIBRARY) \
- @LIBTOOL_CRUFT@ ;\
- fi
- $(INSTALL) -d -m $(DIRMODE) \
- $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/Resources/English.lproj
- $(INSTALL_DATA) $(RESSRCDIR)/Info.plist \
- $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/Resources/Info.plist
- $(INSTALL_DATA) $(RESSRCDIR)/version.plist \
- $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/Resources/version.plist
- $(INSTALL_DATA) $(RESSRCDIR)/English.lproj/InfoPlist.strings \
- $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/Resources/English.lproj/InfoPlist.strings
- $(LN) -fsn $(VERSION) $(PYTHONFRAMEWORKDIR)/Versions/Current
- $(LN) -fsn Versions/Current/$(PYTHONFRAMEWORK) $(PYTHONFRAMEWORKDIR)/$(PYTHONFRAMEWORK)
- $(LN) -fsn Versions/Current/Headers $(PYTHONFRAMEWORKDIR)/Headers
- $(LN) -fsn Versions/Current/Resources $(PYTHONFRAMEWORKDIR)/Resources
-
-# This rule builds the Cygwin Python DLL and import library if configured
-# for a shared core library; otherwise, this rule is a noop.
-$(DLLLIBRARY) libpython$(VERSION).dll.a: $(LIBRARY_OBJS)
- if test -n "$(DLLLIBRARY)"; then \
- $(LDSHARED) -Wl,--out-implib=$@ -o $(DLLLIBRARY) $^ \
- $(LIBS) $(MODLIBS) $(SYSLIBS); \
- else true; \
- fi
-
-
-oldsharedmods: $(SHAREDMODS)
-
-
-Makefile Modules/config.c: Makefile.pre \
- $(srcdir)/Modules/config.c.in \
- $(MAKESETUP) \
- Modules/Setup.config \
- Modules/Setup \
- Modules/Setup.local
- $(SHELL) $(MAKESETUP) -c $(srcdir)/Modules/config.c.in \
- -s Modules \
- Modules/Setup.config \
- Modules/Setup.local \
- Modules/Setup
- @mv config.c Modules
- @echo "The Makefile was updated, you may need to re-run make."
-
-
-Modules/Setup: $(srcdir)/Modules/Setup.dist
- @if test -f Modules/Setup; then \
- echo "-----------------------------------------------"; \
- echo "Modules/Setup.dist is newer than Modules/Setup;"; \
- echo "check to make sure you have all the updates you"; \
- echo "need in your Modules/Setup file."; \
- echo "Usually, copying Setup.dist to Setup will work."; \
- echo "-----------------------------------------------"; \
- fi
-
-############################################################################
-# Special rules for object files
-
-Modules/getbuildinfo.o: $(PARSER_OBJS) \
- $(OBJECT_OBJS) \
- $(PYTHON_OBJS) \
- $(MODULE_OBJS) \
- $(SIGNAL_OBJS) \
- $(MODOBJS) \
- $(srcdir)/Modules/getbuildinfo.c
- $(CC) -c $(PY_CFLAGS) -DSVNVERSION=\"`LC_ALL=C $(SVNVERSION)`\" -o $@ $(srcdir)/Modules/getbuildinfo.c
-
-Modules/getpath.o: $(srcdir)/Modules/getpath.c Makefile
- $(CC) -c $(PY_CFLAGS) -DPYTHONPATH='"$(PYTHONPATH)"' \
- -DPREFIX='"$(prefix)"' \
- -DEXEC_PREFIX='"$(exec_prefix)"' \
- -DVERSION='"$(VERSION)"' \
- -DVPATH='"$(VPATH)"' \
- -o $@ $(srcdir)/Modules/getpath.c
-
-Modules/python.o: $(srcdir)/Modules/python.c
- $(MAINCC) -c $(PY_CFLAGS) -o $@ $(srcdir)/Modules/python.c
-
-
-$(GRAMMAR_H) $(GRAMMAR_C): $(PGEN) $(GRAMMAR_INPUT)
- -$(PGEN) $(GRAMMAR_INPUT) $(GRAMMAR_H) $(GRAMMAR_C)
-
-$(PGEN): $(PGENOBJS)
- $(CC) $(OPT) $(LDFLAGS) $(PGENOBJS) $(LIBS) -o $(PGEN)
-
-Parser/grammar.o: $(srcdir)/Parser/grammar.c \
- $(srcdir)/Include/token.h \
- $(srcdir)/Include/grammar.h
-Parser/metagrammar.o: $(srcdir)/Parser/metagrammar.c
-
-Parser/tokenizer_pgen.o: $(srcdir)/Parser/tokenizer.c
-
-$(AST_H): $(AST_ASDL) $(ASDLGEN_FILES)
- $(ASDLGEN) -h $(AST_H_DIR) $(AST_ASDL)
-
-$(AST_C): $(AST_ASDL) $(ASDLGEN_FILES)
- $(ASDLGEN) -c $(AST_C_DIR) $(AST_ASDL)
-
-Python/compile.o Python/symtable.o: $(GRAMMAR_H) $(AST_H)
-
-Python/getplatform.o: $(srcdir)/Python/getplatform.c
- $(CC) -c $(PY_CFLAGS) -DPLATFORM='"$(MACHDEP)"' -o $@ $(srcdir)/Python/getplatform.c
-
-Python/importdl.o: $(srcdir)/Python/importdl.c
- $(CC) -c $(PY_CFLAGS) -I$(DLINCLDIR) -o $@ $(srcdir)/Python/importdl.c
-
-Objects/unicodectype.o: $(srcdir)/Objects/unicodectype.c \
- $(srcdir)/Objects/unicodetype_db.h
-
-############################################################################
-# Header files
-
-PYTHON_HEADERS= \
- Include/Python.h \
- Include/Python-ast.h \
- Include/asdl.h \
- Include/abstract.h \
- Include/boolobject.h \
- Include/bufferobject.h \
- Include/ceval.h \
- Include/classobject.h \
- Include/cobject.h \
- Include/code.h \
- Include/codecs.h \
- Include/compile.h \
- Include/complexobject.h \
- Include/descrobject.h \
- Include/dictobject.h \
- Include/enumobject.h \
- Include/genobject.h \
- Include/fileobject.h \
- Include/floatobject.h \
- Include/funcobject.h \
- Include/import.h \
- Include/intobject.h \
- Include/intrcheck.h \
- Include/iterobject.h \
- Include/listobject.h \
- Include/longobject.h \
- Include/methodobject.h \
- Include/modsupport.h \
- Include/moduleobject.h \
- Include/object.h \
- Include/objimpl.h \
- Include/patchlevel.h \
- Include/pyarena.h \
- Include/pydebug.h \
- Include/pyerrors.h \
- Include/pyfpe.h \
- Include/pymem.h \
- Include/pyport.h \
- Include/pystate.h \
- Include/pythonrun.h \
- Include/rangeobject.h \
- Include/setobject.h \
- Include/sliceobject.h \
- Include/stringobject.h \
- Include/structseq.h \
- Include/structmember.h \
- Include/symtable.h \
- Include/sysmodule.h \
- Include/traceback.h \
- Include/tupleobject.h \
- Include/unicodeobject.h \
- Include/weakrefobject.h \
- pyconfig.h
-
-$(LIBRARY_OBJS) $(MODOBJS) Modules/python.o: $(PYTHON_HEADERS)
-
-
-######################################################################
-
-# Test the interpreter (twice, once without .pyc files, once with)
-# In the past, we've had problems where bugs in the marshalling or
-# elsewhere caused bytecode read from .pyc files to behave differently
-# than bytecode generated directly from a .py source file. Sometimes
-# the bytecode read from a .pyc file had the bug, somtimes the directly
-# generated bytecode. This is sometimes a very shy bug needing a lot of
-# sample data.
-
-TESTOPTS= -l $(EXTRATESTOPTS)
-TESTPROG= $(srcdir)/Lib/test/regrtest.py
-TESTPYTHON= $(RUNSHARED) ./$(BUILDPYTHON) -E -tt
-test: all platform
- -find $(srcdir)/Lib -name '*.py[co]' -print | xargs rm -f
- -$(TESTPYTHON) $(TESTPROG) $(TESTOPTS)
- $(TESTPYTHON) $(TESTPROG) $(TESTOPTS)
-
-testall: all platform
- -find $(srcdir)/Lib -name '*.py[co]' -print | xargs rm -f
- -$(TESTPYTHON) $(TESTPROG) $(TESTOPTS) -uall
- $(TESTPYTHON) $(TESTPROG) $(TESTOPTS) -uall
-
-# Run the unitests for both architectures in a Universal build on OSX
-# Must be run on an Intel box.
-testuniversal: all platform
- if [ `arch` != 'i386' ];then \
- echo "This can only be used on OSX/i386" ;\
- exit 1 ;\
- fi
- -find $(srcdir)/Lib -name '*.py[co]' -print | xargs rm -f
- -$(TESTPYTHON) $(TESTPROG) $(TESTOPTS) -uall
- $(TESTPYTHON) $(TESTPROG) $(TESTOPTS) -uall
- $(RUNSHARED) /usr/libexec/oah/translate ./$(BUILDPYTHON) -E -tt $(TESTPROG) $(TESTOPTS) -uall
-
-
-# Like testall, but with a single pass only
-buildbottest: all platform
- $(TESTPYTHON) $(TESTPROG) $(TESTOPTS) -uall -rw
-
-QUICKTESTOPTS= $(TESTOPTS) -x test_thread test_signal test_strftime \
- test_unicodedata test_re test_sre test_select test_poll \
- test_linuxaudiodev test_struct test_sunaudiodev test_zlib
-quicktest: all platform
- -find $(srcdir)/Lib -name '*.py[co]' -print | xargs rm -f
- -$(TESTPYTHON) $(TESTPROG) $(QUICKTESTOPTS)
- $(TESTPYTHON) $(TESTPROG) $(QUICKTESTOPTS)
-
-MEMTESTOPTS= $(QUICKTESTOPTS) -x test_dl test___all__ test_fork1 \
- test_longexp
-memtest: all platform
- -rm -f $(srcdir)/Lib/test/*.py[co]
- -$(TESTPYTHON) $(TESTPROG) $(MEMTESTOPTS)
- $(TESTPYTHON) $(TESTPROG) $(MEMTESTOPTS)
-
-# Install everything
-install: @FRAMEWORKINSTALLFIRST@ altinstall bininstall maninstall @FRAMEWORKINSTALLLAST@
-
-# Install almost everything without disturbing previous versions
-altinstall: @FRAMEWORKALTINSTALLFIRST@ altbininstall libinstall inclinstall libainstall \
- sharedinstall oldsharedinstall @FRAMEWORKALTINSTALLLAST@
-
-# Install shared libraries enabled by Setup
-DESTDIRS= $(exec_prefix) $(LIBDIR) $(BINLIBDEST) $(DESTSHARED)
-
-oldsharedinstall: $(DESTSHARED) $(SHAREDMODS)
- @for i in X $(SHAREDMODS); do \
- if test $$i != X; then \
- echo $(INSTALL_SHARED) $$i $(DESTSHARED)/`basename $$i`; \
- $(INSTALL_SHARED) $$i $(DESTDIR)$(DESTSHARED)/`basename $$i`; \
- fi; \
- done
-
-$(DESTSHARED):
- @for i in $(DESTDIRS); \
- do \
- if test ! -d $(DESTDIR)$$i; then \
- echo "Creating directory $$i"; \
- $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \
- else true; \
- fi; \
- done
-
-
-# Install the interpreter (by creating a hard link to python$(VERSION))
-bininstall: altbininstall
- -if test -f $(DESTDIR)$(BINDIR)/$(PYTHON) -o -h $(DESTDIR)$(BINDIR)/$(PYTHON); \
- then rm -f $(DESTDIR)$(BINDIR)/$(PYTHON); \
- else true; \
- fi
- (cd $(DESTDIR)$(BINDIR); $(LN) python$(VERSION)$(EXE) $(PYTHON))
- (cd $(DESTDIR)$(BINDIR); $(LN) -sf python$(VERSION)-config python-config)
-
-# Install the interpreter with $(VERSION) affixed
-# This goes into $(exec_prefix)
-altbininstall: $(BUILDPYTHON)
- @for i in $(BINDIR) $(LIBDIR); \
- do \
- if test ! -d $(DESTDIR)$$i; then \
- echo "Creating directory $$i"; \
- $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \
- else true; \
- fi; \
- done
- $(INSTALL_PROGRAM) $(BUILDPYTHON) $(DESTDIR)$(BINDIR)/python$(VERSION)$(EXE)
- if test -f libpython$(VERSION)$(SO); then \
- if test "$(SO)" = .dll; then \
- $(INSTALL_SHARED) libpython$(VERSION)$(SO) $(DESTDIR)$(BINDIR); \
- else \
- $(INSTALL_SHARED) libpython$(VERSION)$(SO) $(DESTDIR)$(LIBDIR)/$(INSTSONAME); \
- if test libpython$(VERSION)$(SO) != $(INSTSONAME); then \
- (cd $(DESTDIR)$(LIBDIR); $(LN) -sf $(INSTSONAME) libpython$(VERSION)$(SO)); \
- fi \
- fi; \
- else true; \
- fi
-
-# Install the manual page
-maninstall:
- @for i in $(MANDIR) $(MANDIR)/man1; \
- do \
- if test ! -d $(DESTDIR)$$i; then \
- echo "Creating directory $$i"; \
- $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \
- else true; \
- fi; \
- done
- $(INSTALL_DATA) $(srcdir)/Misc/python.man \
- $(DESTDIR)$(MANDIR)/man1/python.1
-
-# Install the library
-PLATDIR= plat-$(MACHDEP)
-EXTRAPLATDIR= @EXTRAPLATDIR@
-EXTRAMACHDEPPATH=@EXTRAMACHDEPPATH@
-MACHDEPS= $(PLATDIR) $(EXTRAPLATDIR)
-XMLLIBSUBDIRS= xml xml/dom xml/etree xml/parsers xml/sax
-PLATMACDIRS= plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \
- plat-mac/lib-scriptpackages/_builtinSuites \
- plat-mac/lib-scriptpackages/CodeWarrior \
- plat-mac/lib-scriptpackages/Explorer \
- plat-mac/lib-scriptpackages/Finder \
- plat-mac/lib-scriptpackages/Netscape \
- plat-mac/lib-scriptpackages/StdSuites \
- plat-mac/lib-scriptpackages/SystemEvents \
- plat-mac/lib-scriptpackages/Terminal
-PLATMACPATH=:plat-mac:plat-mac/lib-scriptpackages
-LIBSUBDIRS= lib-tk site-packages test test/output test/data \
- test/decimaltestdata \
- encodings compiler hotshot \
- email email/mime email/test email/test/data \
- sqlite3 sqlite3/test \
- logging bsddb bsddb/test csv wsgiref \
- ctypes ctypes/test ctypes/macholib idlelib idlelib/Icons \
- distutils distutils/command distutils/tests $(XMLLIBSUBDIRS) \
- setuptools setuptools/command setuptools/tests setuptools.egg-info \
- curses $(MACHDEPS)
-libinstall: $(BUILDPYTHON) $(srcdir)/Lib/$(PLATDIR)
- @for i in $(SCRIPTDIR) $(LIBDEST); \
- do \
- if test ! -d $(DESTDIR)$$i; then \
- echo "Creating directory $$i"; \
- $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \
- else true; \
- fi; \
- done
- @for d in $(LIBSUBDIRS); \
- do \
- a=$(srcdir)/Lib/$$d; \
- if test ! -d $$a; then continue; else true; fi; \
- b=$(LIBDEST)/$$d; \
- if test ! -d $(DESTDIR)$$b; then \
- echo "Creating directory $$b"; \
- $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$b; \
- else true; \
- fi; \
- done
- @for i in $(srcdir)/Lib/*.py $(srcdir)/Lib/*.doc $(srcdir)/Lib/*.egg-info ; \
- do \
- if test -x $$i; then \
- $(INSTALL_SCRIPT) $$i $(DESTDIR)$(LIBDEST); \
- echo $(INSTALL_SCRIPT) $$i $(LIBDEST); \
- else \
- $(INSTALL_DATA) $$i $(DESTDIR)$(LIBDEST); \
- echo $(INSTALL_DATA) $$i $(LIBDEST); \
- fi; \
- done
- @for d in $(LIBSUBDIRS); \
- do \
- a=$(srcdir)/Lib/$$d; \
- if test ! -d $$a; then continue; else true; fi; \
- if test `ls $$a | wc -l` -lt 1; then continue; fi; \
- b=$(LIBDEST)/$$d; \
- for i in $$a/*; \
- do \
- case $$i in \
- *CVS) ;; \
- *.py[co]) ;; \
- *.orig) ;; \
- *~) ;; \
- *) \
- if test -d $$i; then continue; fi; \
- if test -x $$i; then \
- echo $(INSTALL_SCRIPT) $$i $$b; \
- $(INSTALL_SCRIPT) $$i $(DESTDIR)$$b; \
- else \
- echo $(INSTALL_DATA) $$i $$b; \
- $(INSTALL_DATA) $$i $(DESTDIR)$$b; \
- fi;; \
- esac; \
- done; \
- done
- $(INSTALL_DATA) $(srcdir)/LICENSE $(DESTDIR)$(LIBDEST)/LICENSE.txt
- PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -tt $(DESTDIR)$(LIBDEST)/compileall.py \
- -d $(LIBDEST) -f \
- -x 'bad_coding|badsyntax|site-packages' $(DESTDIR)$(LIBDEST)
- PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -tt -O $(DESTDIR)$(LIBDEST)/compileall.py \
- -d $(LIBDEST) -f \
- -x 'bad_coding|badsyntax|site-packages' $(DESTDIR)$(LIBDEST)
- -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -t $(DESTDIR)$(LIBDEST)/compileall.py \
- -d $(LIBDEST)/site-packages -f \
- -x badsyntax $(DESTDIR)$(LIBDEST)/site-packages
- -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \
- ./$(BUILDPYTHON) -Wi -t -O $(DESTDIR)$(LIBDEST)/compileall.py \
- -d $(LIBDEST)/site-packages -f \
- -x badsyntax $(DESTDIR)$(LIBDEST)/site-packages
-
-# Create the PLATDIR source directory, if one wasn't distributed..
-$(srcdir)/Lib/$(PLATDIR):
- mkdir $(srcdir)/Lib/$(PLATDIR)
- cp $(srcdir)/Lib/plat-generic/regen $(srcdir)/Lib/$(PLATDIR)/regen
- export PATH; PATH="`pwd`:$$PATH"; \
- export PYTHONPATH; PYTHONPATH="`pwd`/Lib"; \
- export DYLD_FRAMEWORK_PATH; DYLD_FRAMEWORK_PATH="`pwd`"; \
- export EXE; EXE="$(BUILDEXE)"; \
- cd $(srcdir)/Lib/$(PLATDIR); ./regen
-
-# Install the include files
-INCLDIRSTOMAKE=$(INCLUDEDIR) $(CONFINCLUDEDIR) $(INCLUDEPY) $(CONFINCLUDEPY)
-inclinstall:
- @for i in $(INCLDIRSTOMAKE); \
- do \
- if test ! -d $(DESTDIR)$$i; then \
- echo "Creating directory $$i"; \
- $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \
- else true; \
- fi; \
- done
- @for i in $(srcdir)/Include/*.h; \
- do \
- echo $(INSTALL_DATA) $$i $(INCLUDEPY); \
- $(INSTALL_DATA) $$i $(DESTDIR)$(INCLUDEPY); \
- done
- $(INSTALL_DATA) pyconfig.h $(DESTDIR)$(CONFINCLUDEPY)/pyconfig.h
-
-# Install the library and miscellaneous stuff needed for extending/embedding
-# This goes into $(exec_prefix)
-LIBPL= $(LIBP)/config
-libainstall: all
- @for i in $(LIBDIR) $(LIBP) $(LIBPL); \
- do \
- if test ! -d $(DESTDIR)$$i; then \
- echo "Creating directory $$i"; \
- $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \
- else true; \
- fi; \
- done
- @if test -d $(LIBRARY); then :; else \
- if test "$(PYTHONFRAMEWORKDIR)" = no-framework; then \
- if test "$(SO)" = .dll; then \
- $(INSTALL_DATA) $(LDLIBRARY) $(DESTDIR)$(LIBPL) ; \
- else \
- $(INSTALL_DATA) $(LIBRARY) $(DESTDIR)$(LIBPL)/$(LIBRARY) ; \
- $(RANLIB) $(DESTDIR)$(LIBPL)/$(LIBRARY) ; \
- fi; \
- else \
- echo Skip install of $(LIBRARY) - use make frameworkinstall; \
- fi; \
- fi
- $(INSTALL_DATA) Modules/config.c $(DESTDIR)$(LIBPL)/config.c
- $(INSTALL_DATA) Modules/python.o $(DESTDIR)$(LIBPL)/python.o
- $(INSTALL_DATA) $(srcdir)/Modules/config.c.in $(DESTDIR)$(LIBPL)/config.c.in
- $(INSTALL_DATA) Makefile $(DESTDIR)$(LIBPL)/Makefile
- $(INSTALL_DATA) Modules/Setup $(DESTDIR)$(LIBPL)/Setup
- $(INSTALL_DATA) Modules/Setup.local $(DESTDIR)$(LIBPL)/Setup.local
- $(INSTALL_DATA) Modules/Setup.config $(DESTDIR)$(LIBPL)/Setup.config
- $(INSTALL_SCRIPT) $(srcdir)/Modules/makesetup $(DESTDIR)$(LIBPL)/makesetup
- $(INSTALL_SCRIPT) $(srcdir)/install-sh $(DESTDIR)$(LIBPL)/install-sh
- # Substitution happens here, as the completely-expanded BINDIR
- # is not available in configure
- sed -e "s,@EXENAME@,$(BINDIR)/python$(VERSION)$(EXE)," < $(srcdir)/Misc/python-config.in >python-config
- $(INSTALL_SCRIPT) python-config $(DESTDIR)$(BINDIR)/python$(VERSION)-config
- rm python-config
- @if [ -s Modules/python.exp -a \
- "`echo $(MACHDEP) | sed 's/^\(...\).*/\1/'`" = "aix" ]; then \
- echo; echo "Installing support files for building shared extension modules on AIX:"; \
- $(INSTALL_DATA) Modules/python.exp \
- $(DESTDIR)$(LIBPL)/python.exp; \
- echo; echo "$(LIBPL)/python.exp"; \
- $(INSTALL_SCRIPT) $(srcdir)/Modules/makexp_aix \
- $(DESTDIR)$(LIBPL)/makexp_aix; \
- echo "$(LIBPL)/makexp_aix"; \
- $(INSTALL_SCRIPT) $(srcdir)/Modules/ld_so_aix \
- $(DESTDIR)$(LIBPL)/ld_so_aix; \
- echo "$(LIBPL)/ld_so_aix"; \
- echo; echo "See Misc/AIX-NOTES for details."; \
- else true; \
- fi
- @case "$(MACHDEP)" in beos*) \
- echo; echo "Installing support files for building shared extension modules on BeOS:"; \
- $(INSTALL_DATA) Misc/BeOS-NOTES $(DESTDIR)$(LIBPL)/README; \
- echo; echo "$(LIBPL)/README"; \
- $(INSTALL_SCRIPT) Modules/ar_beos $(DESTDIR)$(LIBPL)/ar_beos; \
- echo "$(LIBPL)/ar_beos"; \
- $(INSTALL_SCRIPT) Modules/ld_so_beos $(DESTDIR)$(LIBPL)/ld_so_beos; \
- echo "$(LIBPL)/ld_so_beos"; \
- echo; echo "See Misc/BeOS-NOTES for details."; \
- ;; \
- esac
-
-# Install the dynamically loadable modules
-# This goes into $(exec_prefix)
-sharedinstall:
- $(RUNSHARED) ./$(BUILDPYTHON) -E $(srcdir)/setup.py install \
- --prefix=$(prefix) \
- --install-scripts=$(BINDIR) \
- --install-platlib=$(DESTSHARED) \
- --root=/$(DESTDIR)
-
-# Here are a couple of targets for MacOSX again, to install a full
-# framework-based Python. frameworkinstall installs everything, the
-# subtargets install specific parts. Much of the actual work is offloaded to
-# the Makefile in Mac
-#
-#
-# This target is here for backward compatiblity, previous versions of Python
-# hadn't integrated framework installation in the normal install process.
-frameworkinstall: install
-
-# On install, we re-make the framework
-# structure in the install location, /Library/Frameworks/ or the argument to
-# --enable-framework. If --enable-framework has been specified then we have
-# automatically set prefix to the location deep down in the framework, so we
-# only have to cater for the structural bits of the framework.
-
-frameworkinstallframework: frameworkinstallstructure install frameworkinstallmaclib
-
-frameworkinstallstructure: $(LDLIBRARY)
- @if test "$(PYTHONFRAMEWORKDIR)" = no-framework; then \
- echo Not configured with --enable-framework; \
- exit 1; \
- else true; \
- fi
- @for i in $(prefix)/Resources/English.lproj $(prefix)/lib; do\
- if test ! -d $(DESTDIR)$$i; then \
- echo "Creating directory $(DESTDIR)$$i"; \
- $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \
- else true; \
- fi; \
- done
- $(LN) -fsn include/python$(VERSION) $(DESTDIR)$(prefix)/Headers
- $(INSTALL_DATA) $(RESSRCDIR)/Info.plist $(DESTDIR)$(prefix)/Resources/Info.plist
- $(INSTALL_DATA) $(RESSRCDIR)/version.plist $(DESTDIR)$(prefix)/Resources/version.plist
- $(INSTALL_DATA) $(RESSRCDIR)/English.lproj/InfoPlist.strings \
- $(DESTDIR)$(prefix)/Resources/English.lproj/InfoPlist.strings
- $(LN) -fsn $(VERSION) $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Versions/Current
- $(LN) -fsn Versions/Current/Python $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Python
- $(LN) -fsn Versions/Current/Headers $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Headers
- $(LN) -fsn Versions/Current/Resources $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Resources
- $(INSTALL_SHARED) $(LDLIBRARY) $(DESTDIR)$(PYTHONFRAMEWORKPREFIX)/$(LDLIBRARY)
-
-# This installs Mac/Lib into the framework
-# Install a number of symlinks to keep software that expects a normal unix
-# install (which includes python-config) happy.
-frameworkinstallmaclib:
- ln -fs "../../../Python" "$(DESTDIR)$(prefix)/lib/python$(VERSION)/config/libpython$(VERSION).a"
- cd Mac && $(MAKE) installmacsubtree DESTDIR="$(DESTDIR)"
-
-# This installs the IDE, the Launcher and other apps into /Applications
-frameworkinstallapps:
- cd Mac && $(MAKE) installapps DESTDIR="$(DESTDIR)"
-
-# This install the unix python and pythonw tools in /usr/local/bin
-frameworkinstallunixtools:
- cd Mac && $(MAKE) installunixtools DESTDIR="$(DESTDIR)"
-
-frameworkaltinstallunixtools:
- cd Mac && $(MAKE) altinstallunixtools DESTDIR="$(DESTDIR)"
-
-# This installs the Demos and Tools into the applications directory.
-# It is not part of a normal frameworkinstall
-frameworkinstallextras:
- cd Mac && Make installextras DESTDIR="$(DESTDIR)"
-
-# This installs a few of the useful scripts in Tools/scripts
-scriptsinstall:
- SRCDIR=$(srcdir) $(RUNSHARED) \
- ./$(BUILDPYTHON) $(srcdir)/Tools/scripts/setup.py install \
- --prefix=$(prefix) \
- --install-scripts=$(BINDIR) \
- --root=/$(DESTDIR)
-
-# Build the toplevel Makefile
-Makefile.pre: Makefile.pre.in config.status
- CONFIG_FILES=Makefile.pre CONFIG_HEADERS= $(SHELL) config.status
- $(MAKE) -f Makefile.pre Makefile
-
-# Run the configure script.
-config.status: $(srcdir)/configure
- $(SHELL) $(srcdir)/configure $(CONFIG_ARGS)
-
-.PRECIOUS: config.status $(BUILDPYTHON) Makefile Makefile.pre
-
-# Some make's put the object file in the current directory
-.c.o:
- $(CC) -c $(PY_CFLAGS) -o $@ $<
-
-# Run reindent on the library
-reindent:
- ./python$(EXEEXT) $(srcdir)/Tools/scripts/reindent.py -r $(srcdir)/Lib
-
-# Rerun configure with the same options as it was run last time,
-# provided the config.status script exists
-recheck:
- $(SHELL) config.status --recheck
- $(SHELL) config.status
-
-# Rebuild the configure script from configure.in; also rebuild pyconfig.h.in
-autoconf:
- (cd $(srcdir); autoconf)
- (cd $(srcdir); autoheader)
-
-# Create a tags file for vi
-tags::
- cd $(srcdir); \
- ctags -w -t Include/*.h; \
- for i in $(SRCDIRS); do ctags -w -t -a $$i/*.[ch]; \
- done; \
- sort -o tags tags
-
-# Create a tags file for GNU Emacs
-TAGS::
- cd $(srcdir); \
- etags Include/*.h; \
- for i in $(SRCDIRS); do etags -a $$i/*.[ch]; done
-
-# Sanitation targets -- clean leaves libraries, executables and tags
-# files, which clobber removes those as well
-pycremoval:
- find $(srcdir) -name '*.py[co]' -exec rm -f {} ';'
-
-clean: pycremoval
- find . -name '*.o' -exec rm -f {} ';'
- find . -name '*.s[ol]' -exec rm -f {} ';'
- find $(srcdir)/build -name 'fficonfig.h' -exec rm -f {} ';' || true
- find $(srcdir)/build -name 'fficonfig.py' -exec rm -f {} ';' || true
-
-clobber: clean
- -rm -f $(BUILDPYTHON) $(PGEN) $(LIBRARY) $(LDLIBRARY) $(DLLLIBRARY) \
- tags TAGS \
- config.cache config.log pyconfig.h Modules/config.c
- -rm -rf build platform
- -rm -rf $(PYTHONFRAMEWORKDIR)
-
-# Make things extra clean, before making a distribution:
-# remove all generated files, even Makefile[.pre]
-# Keep configure and Python-ast.[ch], it's possible they can't be generated
-distclean: clobber
- -rm -f core Makefile Makefile.pre config.status \
- Modules/Setup Modules/Setup.local Modules/Setup.config
- find $(srcdir) '(' -name '*.fdc' -o -name '*~' \
- -o -name '[@,#]*' -o -name '*.old' \
- -o -name '*.orig' -o -name '*.rej' \
- -o -name '*.bak' ')' \
- -exec rm -f {} ';'
-
-# Check for smelly exported symbols (not starting with Py/_Py)
-smelly: all
- nm -p $(LIBRARY) | \
- sed -n "/ [TDB] /s/.* //p" | grep -v "^_*Py" | sort -u; \
-
-# Find files with funny names
-funny:
- find $(DISTDIRS) -type d \
- -o -name '*.[chs]' \
- -o -name '*.py' \
- -o -name '*.doc' \
- -o -name '*.sty' \
- -o -name '*.bib' \
- -o -name '*.dat' \
- -o -name '*.el' \
- -o -name '*.fd' \
- -o -name '*.in' \
- -o -name '*.tex' \
- -o -name '*,[vpt]' \
- -o -name 'Setup' \
- -o -name 'Setup.*' \
- -o -name README \
- -o -name Makefile \
- -o -name ChangeLog \
- -o -name Repository \
- -o -name Root \
- -o -name Entries \
- -o -name Tag \
- -o -name tags \
- -o -name TAGS \
- -o -name .cvsignore \
- -o -name MANIFEST \
- -o -print
-
-# Dependencies
-
-Python/thread.o: @THREADHEADERS@
-
-# Declare targets that aren't real files
-.PHONY: all sharedmods oldsharedmods test quicktest memtest
-.PHONY: install altinstall oldsharedinstall bininstall altbininstall
-.PHONY: maninstall libinstall inclinstall libainstall sharedinstall
-.PHONY: frameworkinstall frameworkinstallframework frameworkinstallstructure
-.PHONY: frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools
-.PHONY: frameworkaltinstallunixtools recheck autoconf clean clobber distclean
-.PHONY: smelly funny
-
-# IF YOU PUT ANYTHING HERE IT WILL GO AWAY
diff --git a/sys/lib/python/contextlib.py b/sys/lib/python/contextlib.py
deleted file mode 100644
index a807c42ce..000000000
--- a/sys/lib/python/contextlib.py
+++ /dev/null
@@ -1,154 +0,0 @@
-"""Utilities for with-statement contexts. See PEP 343."""
-
-import sys
-
-__all__ = ["contextmanager", "nested", "closing"]
-
-class GeneratorContextManager(object):
- """Helper for @contextmanager decorator."""
-
- def __init__(self, gen):
- self.gen = gen
-
- def __enter__(self):
- try:
- return self.gen.next()
- except StopIteration:
- raise RuntimeError("generator didn't yield")
-
- def __exit__(self, type, value, traceback):
- if type is None:
- try:
- self.gen.next()
- except StopIteration:
- return
- else:
- raise RuntimeError("generator didn't stop")
- else:
- try:
- self.gen.throw(type, value, traceback)
- raise RuntimeError("generator didn't stop after throw()")
- except StopIteration, exc:
- # Suppress the exception *unless* it's the same exception that
- # was passed to throw(). This prevents a StopIteration
- # raised inside the "with" statement from being suppressed
- return exc is not value
- except:
- # only re-raise if it's *not* the exception that was
- # passed to throw(), because __exit__() must not raise
- # an exception unless __exit__() itself failed. But throw()
- # has to raise the exception to signal propagation, so this
- # fixes the impedance mismatch between the throw() protocol
- # and the __exit__() protocol.
- #
- if sys.exc_info()[1] is not value:
- raise
-
-
-def contextmanager(func):
- """@contextmanager decorator.
-
- Typical usage:
-
- @contextmanager
- def some_generator(<arguments>):
- <setup>
- try:
- yield <value>
- finally:
- <cleanup>
-
- This makes this:
-
- with some_generator(<arguments>) as <variable>:
- <body>
-
- equivalent to this:
-
- <setup>
- try:
- <variable> = <value>
- <body>
- finally:
- <cleanup>
-
- """
- def helper(*args, **kwds):
- return GeneratorContextManager(func(*args, **kwds))
- try:
- helper.__name__ = func.__name__
- helper.__doc__ = func.__doc__
- helper.__dict__ = func.__dict__
- except:
- pass
- return helper
-
-
-@contextmanager
-def nested(*managers):
- """Support multiple context managers in a single with-statement.
-
- Code like this:
-
- with nested(A, B, C) as (X, Y, Z):
- <body>
-
- is equivalent to this:
-
- with A as X:
- with B as Y:
- with C as Z:
- <body>
-
- """
- exits = []
- vars = []
- exc = (None, None, None)
- try:
- try:
- for mgr in managers:
- exit = mgr.__exit__
- enter = mgr.__enter__
- vars.append(enter())
- exits.append(exit)
- yield vars
- except:
- exc = sys.exc_info()
- finally:
- while exits:
- exit = exits.pop()
- try:
- if exit(*exc):
- exc = (None, None, None)
- except:
- exc = sys.exc_info()
- if exc != (None, None, None):
- # Don't rely on sys.exc_info() still containing
- # the right information. Another exception may
- # have been raised and caught by an exit method
- raise exc[0], exc[1], exc[2]
-
-
-class closing(object):
- """Context to automatically close something at the end of a block.
-
- Code like this:
-
- with closing(<module>.open(<arguments>)) as f:
- <block>
-
- is equivalent to this:
-
- f = <module>.open(<arguments>)
- try:
- <block>
- finally:
- f.close()
-
- """
- def __init__(self, thing):
- self.thing = thing
- def __enter__(self):
- return self.thing
- def __exit__(self, *exc_info):
- self.thing.close()
diff --git a/sys/lib/python/cookielib.py b/sys/lib/python/cookielib.py
deleted file mode 100644
index e8fee0ee6..000000000
--- a/sys/lib/python/cookielib.py
+++ /dev/null
@@ -1,1776 +0,0 @@
-"""HTTP cookie handling for web clients.
-
-This module has (now fairly distant) origins in Gisle Aas' Perl module
-HTTP::Cookies, from the libwww-perl library.
-
-Docstrings, comments and debug strings in this code refer to the
-attributes of the HTTP cookie system as cookie-attributes, to distinguish
-them clearly from Python attributes.
-
-Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
-distributed with the Python standard library, but are available from
-http://wwwsearch.sf.net/):
-
- CookieJar____
- / \ \
- FileCookieJar \ \
- / | \ \ \
- MozillaCookieJar | LWPCookieJar \ \
- | | \
- | ---MSIEBase | \
- | / | | \
- | / MSIEDBCookieJar BSDDBCookieJar
- |/
- MSIECookieJar
-
-"""
-
-__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
- 'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
-
-import re, urlparse, copy, time, urllib
-try:
- import threading as _threading
-except ImportError:
- import dummy_threading as _threading
-import httplib # only for the default HTTP port
-from calendar import timegm
-
-debug = False # set to True to enable debugging via the logging module
-logger = None
-
-def _debug(*args):
- if not debug:
- return
- global logger
- if not logger:
- import logging
- logger = logging.getLogger("cookielib")
- return logger.debug(*args)
-
-
-DEFAULT_HTTP_PORT = str(httplib.HTTP_PORT)
-MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
- "instance initialised with one)")
-
-def _warn_unhandled_exception():
- # There are a few catch-all except: statements in this module, for
- # catching input that's bad in unexpected ways. Warn if any
- # exceptions are caught there.
- import warnings, traceback, StringIO
- f = StringIO.StringIO()
- traceback.print_exc(None, f)
- msg = f.getvalue()
- warnings.warn("cookielib bug!\n%s" % msg, stacklevel=2)
-
-
-# Date/time conversion
-# -----------------------------------------------------------------------------
-
-EPOCH_YEAR = 1970
-def _timegm(tt):
- year, month, mday, hour, min, sec = tt[:6]
- if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
- (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
- return timegm(tt)
- else:
- return None
-
-DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
-MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
- "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
-MONTHS_LOWER = []
-for month in MONTHS: MONTHS_LOWER.append(month.lower())
-
-def time2isoz(t=None):
- """Return a string representing time in seconds since epoch, t.
-
- If the function is called without an argument, it will use the current
- time.
-
- The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
- representing Universal Time (UTC, aka GMT). An example of this format is:
-
- 1994-11-24 08:49:37Z
-
- """
- if t is None: t = time.time()
- year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
- return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
- year, mon, mday, hour, min, sec)
-
-def time2netscape(t=None):
- """Return a string representing time in seconds since epoch, t.
-
- If the function is called without an argument, it will use the current
- time.
-
- The format of the returned string is like this:
-
- Wed, DD-Mon-YYYY HH:MM:SS GMT
-
- """
- if t is None: t = time.time()
- year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
- return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
- DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec)
-
-
-UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
-
-TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
-def offset_from_tz_string(tz):
- offset = None
- if tz in UTC_ZONES:
- offset = 0
- else:
- m = TIMEZONE_RE.search(tz)
- if m:
- offset = 3600 * int(m.group(2))
- if m.group(3):
- offset = offset + 60 * int(m.group(3))
- if m.group(1) == '-':
- offset = -offset
- return offset
-
-def _str2time(day, mon, yr, hr, min, sec, tz):
- # translate month name to number
- # month numbers start with 1 (January)
- try:
- mon = MONTHS_LOWER.index(mon.lower())+1
- except ValueError:
- # maybe it's already a number
- try:
- imon = int(mon)
- except ValueError:
- return None
- if 1 <= imon <= 12:
- mon = imon
- else:
- return None
-
- # make sure clock elements are defined
- if hr is None: hr = 0
- if min is None: min = 0
- if sec is None: sec = 0
-
- yr = int(yr)
- day = int(day)
- hr = int(hr)
- min = int(min)
- sec = int(sec)
-
- if yr < 1000:
- # find "obvious" year
- cur_yr = time.localtime(time.time())[0]
- m = cur_yr % 100
- tmp = yr
- yr = yr + cur_yr - m
- m = m - tmp
- if abs(m) > 50:
- if m > 0: yr = yr + 100
- else: yr = yr - 100
-
- # convert UTC time tuple to seconds since epoch (not timezone-adjusted)
- t = _timegm((yr, mon, day, hr, min, sec, tz))
-
- if t is not None:
- # adjust time using timezone string, to get absolute time since epoch
- if tz is None:
- tz = "UTC"
- tz = tz.upper()
- offset = offset_from_tz_string(tz)
- if offset is None:
- return None
- t = t - offset
-
- return t
-
-STRICT_DATE_RE = re.compile(
- r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
- "(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
-WEEKDAY_RE = re.compile(
- r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
-LOOSE_HTTP_DATE_RE = re.compile(
- r"""^
- (\d\d?) # day
- (?:\s+|[-\/])
- (\w+) # month
- (?:\s+|[-\/])
- (\d+) # year
- (?:
- (?:\s+|:) # separator before clock
- (\d\d?):(\d\d) # hour:min
- (?::(\d\d))? # optional seconds
- )? # optional clock
- \s*
- ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
- \s*
- (?:\(\w+\))? # ASCII representation of timezone in parens.
- \s*$""", re.X)
-def http2time(text):
- """Returns time in seconds since epoch of time represented by a string.
-
- Return value is an integer.
-
- None is returned if the format of str is unrecognized, the time is outside
- the representable range, or the timezone string is not recognized. If the
- string contains no timezone, UTC is assumed.
-
- The timezone in the string may be numerical (like "-0800" or "+0100") or a
- string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
- timezone strings equivalent to UTC (zero offset) are known to the function.
-
- The function loosely parses the following formats:
-
- Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
- Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
- Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
- 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
- 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
- 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
-
- The parser ignores leading and trailing whitespace. The time may be
- absent.
-
- If the year is given with only 2 digits, the function will select the
- century that makes the year closest to the current date.
-
- """
- # fast exit for strictly conforming string
- m = STRICT_DATE_RE.search(text)
- if m:
- g = m.groups()
- mon = MONTHS_LOWER.index(g[1].lower()) + 1
- tt = (int(g[2]), mon, int(g[0]),
- int(g[3]), int(g[4]), float(g[5]))
- return _timegm(tt)
-
- # No, we need some messy parsing...
-
- # clean up
- text = text.lstrip()
- text = WEEKDAY_RE.sub("", text, 1) # Useless weekday
-
- # tz is time zone specifier string
- day, mon, yr, hr, min, sec, tz = [None]*7
-
- # loose regexp parse
- m = LOOSE_HTTP_DATE_RE.search(text)
- if m is not None:
- day, mon, yr, hr, min, sec, tz = m.groups()
- else:
- return None # bad format
-
- return _str2time(day, mon, yr, hr, min, sec, tz)
-
-ISO_DATE_RE = re.compile(
- """^
- (\d{4}) # year
- [-\/]?
- (\d\d?) # numerical month
- [-\/]?
- (\d\d?) # day
- (?:
- (?:\s+|[-:Tt]) # separator before clock
- (\d\d?):?(\d\d) # hour:min
- (?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
- )? # optional clock
- \s*
- ([-+]?\d\d?:?(:?\d\d)?
- |Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
- \s*$""", re.X)
-def iso2time(text):
- """
- As for http2time, but parses the ISO 8601 formats:
-
- 1994-02-03 14:15:29 -0100 -- ISO 8601 format
- 1994-02-03 14:15:29 -- zone is optional
- 1994-02-03 -- only date
- 1994-02-03T14:15:29 -- Use T as separator
- 19940203T141529Z -- ISO 8601 compact format
- 19940203 -- only date
-
- """
- # clean up
- text = text.lstrip()
-
- # tz is time zone specifier string
- day, mon, yr, hr, min, sec, tz = [None]*7
-
- # loose regexp parse
- m = ISO_DATE_RE.search(text)
- if m is not None:
- # XXX there's an extra bit of the timezone I'm ignoring here: is
- # this the right thing to do?
- yr, mon, day, hr, min, sec, tz, _ = m.groups()
- else:
- return None # bad format
-
- return _str2time(day, mon, yr, hr, min, sec, tz)
-
-
-# Header parsing
-# -----------------------------------------------------------------------------
-
-def unmatched(match):
- """Return unmatched part of re.Match object."""
- start, end = match.span(0)
- return match.string[:start]+match.string[end:]
-
-HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)")
-HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
-HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)")
-HEADER_ESCAPE_RE = re.compile(r"\\(.)")
-def split_header_words(header_values):
- r"""Parse header values into a list of lists containing key,value pairs.
-
- The function knows how to deal with ",", ";" and "=" as well as quoted
- values after "=". A list of space separated tokens are parsed as if they
- were separated by ";".
-
- If the header_values passed as argument contains multiple values, then they
- are treated as if they were a single value separated by comma ",".
-
- This means that this function is useful for parsing header fields that
- follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
- the requirement for tokens).
-
- headers = #header
- header = (token | parameter) *( [";"] (token | parameter))
-
- token = 1*<any CHAR except CTLs or separators>
- separators = "(" | ")" | "<" | ">" | "@"
- | "," | ";" | ":" | "\" | <">
- | "/" | "[" | "]" | "?" | "="
- | "{" | "}" | SP | HT
-
- quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
- qdtext = <any TEXT except <">>
- quoted-pair = "\" CHAR
-
- parameter = attribute "=" value
- attribute = token
- value = token | quoted-string
-
- Each header is represented by a list of key/value pairs. The value for a
- simple token (not part of a parameter) is None. Syntactically incorrect
- headers will not necessarily be parsed as you would want.
-
- This is easier to describe with some examples:
-
- >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
- [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
- >>> split_header_words(['text/html; charset="iso-8859-1"'])
- [[('text/html', None), ('charset', 'iso-8859-1')]]
- >>> split_header_words([r'Basic realm="\"foo\bar\""'])
- [[('Basic', None), ('realm', '"foobar"')]]
-
- """
- assert not isinstance(header_values, basestring)
- result = []
- for text in header_values:
- orig_text = text
- pairs = []
- while text:
- m = HEADER_TOKEN_RE.search(text)
- if m:
- text = unmatched(m)
- name = m.group(1)
- m = HEADER_QUOTED_VALUE_RE.search(text)
- if m: # quoted value
- text = unmatched(m)
- value = m.group(1)
- value = HEADER_ESCAPE_RE.sub(r"\1", value)
- else:
- m = HEADER_VALUE_RE.search(text)
- if m: # unquoted value
- text = unmatched(m)
- value = m.group(1)
- value = value.rstrip()
- else:
- # no value, a lone token
- value = None
- pairs.append((name, value))
- elif text.lstrip().startswith(","):
- # concatenated headers, as per RFC 2616 section 4.2
- text = text.lstrip()[1:]
- if pairs: result.append(pairs)
- pairs = []
- else:
- # skip junk
- non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
- assert nr_junk_chars > 0, (
- "split_header_words bug: '%s', '%s', %s" %
- (orig_text, text, pairs))
- text = non_junk
- if pairs: result.append(pairs)
- return result
-
-HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
-def join_header_words(lists):
- """Do the inverse (almost) of the conversion done by split_header_words.
-
- Takes a list of lists of (key, value) pairs and produces a single header
- value. Attribute values are quoted if needed.
-
- >>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
- 'text/plain; charset="iso-8859/1"'
- >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
- 'text/plain, charset="iso-8859/1"'
-
- """
- headers = []
- for pairs in lists:
- attr = []
- for k, v in pairs:
- if v is not None:
- if not re.search(r"^\w+$", v):
- v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v) # escape " and \
- v = '"%s"' % v
- k = "%s=%s" % (k, v)
- attr.append(k)
- if attr: headers.append("; ".join(attr))
- return ", ".join(headers)
-
-def parse_ns_headers(ns_headers):
- """Ad-hoc parser for Netscape protocol cookie-attributes.
-
- The old Netscape cookie format for Set-Cookie can for instance contain
- an unquoted "," in the expires field, so we have to use this ad-hoc
- parser instead of split_header_words.
-
- XXX This may not make the best possible effort to parse all the crap
- that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
- parser is probably better, so could do worse than following that if
- this ever gives any trouble.
-
- Currently, this is also used for parsing RFC 2109 cookies.
-
- """
- known_attrs = ("expires", "domain", "path", "secure",
- # RFC 2109 attrs (may turn up in Netscape cookies, too)
- "port", "max-age")
-
- result = []
- for ns_header in ns_headers:
- pairs = []
- version_set = False
- for ii, param in enumerate(re.split(r";\s*", ns_header)):
- param = param.rstrip()
- if param == "": continue
- if "=" not in param:
- k, v = param, None
- else:
- k, v = re.split(r"\s*=\s*", param, 1)
- k = k.lstrip()
- if ii != 0:
- lc = k.lower()
- if lc in known_attrs:
- k = lc
- if k == "version":
- # This is an RFC 2109 cookie.
- version_set = True
- if k == "expires":
- # convert expires date to seconds since epoch
- if v.startswith('"'): v = v[1:]
- if v.endswith('"'): v = v[:-1]
- v = http2time(v) # None if invalid
- pairs.append((k, v))
-
- if pairs:
- if not version_set:
- pairs.append(("version", "0"))
- result.append(pairs)
-
- return result
-
-
-IPV4_RE = re.compile(r"\.\d+$")
-def is_HDN(text):
- """Return True if text is a host domain name."""
- # XXX
- # This may well be wrong. Which RFC is HDN defined in, if any (for
- # the purposes of RFC 2965)?
- # For the current implementation, what about IPv6? Remember to look
- # at other uses of IPV4_RE also, if change this.
- if IPV4_RE.search(text):
- return False
- if text == "":
- return False
- if text[0] == "." or text[-1] == ".":
- return False
- return True
-
-def domain_match(A, B):
- """Return True if domain A domain-matches domain B, according to RFC 2965.
-
- A and B may be host domain names or IP addresses.
-
- RFC 2965, section 1:
-
- Host names can be specified either as an IP address or a HDN string.
- Sometimes we compare one host name with another. (Such comparisons SHALL
- be case-insensitive.) Host A's name domain-matches host B's if
-
- * their host name strings string-compare equal; or
-
- * A is a HDN string and has the form NB, where N is a non-empty
- name string, B has the form .B', and B' is a HDN string. (So,
- x.y.com domain-matches .Y.com but not Y.com.)
-
- Note that domain-match is not a commutative operation: a.b.c.com
- domain-matches .c.com, but not the reverse.
-
- """
- # Note that, if A or B are IP addresses, the only relevant part of the
- # definition of the domain-match algorithm is the direct string-compare.
- A = A.lower()
- B = B.lower()
- if A == B:
- return True
- if not is_HDN(A):
- return False
- i = A.rfind(B)
- if i == -1 or i == 0:
- # A does not have form NB, or N is the empty string
- return False
- if not B.startswith("."):
- return False
- if not is_HDN(B[1:]):
- return False
- return True
-
-def liberal_is_HDN(text):
- """Return True if text is a sort-of-like a host domain name.
-
- For accepting/blocking domains.
-
- """
- if IPV4_RE.search(text):
- return False
- return True
-
-def user_domain_match(A, B):
- """For blocking/accepting domains.
-
- A and B may be host domain names or IP addresses.
-
- """
- A = A.lower()
- B = B.lower()
- if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
- if A == B:
- # equal IP addresses
- return True
- return False
- initial_dot = B.startswith(".")
- if initial_dot and A.endswith(B):
- return True
- if not initial_dot and A == B:
- return True
- return False
-
-cut_port_re = re.compile(r":\d+$")
-def request_host(request):
- """Return request-host, as defined by RFC 2965.
-
- Variation from RFC: returned value is lowercased, for convenient
- comparison.
-
- """
- url = request.get_full_url()
- host = urlparse.urlparse(url)[1]
- if host == "":
- host = request.get_header("Host", "")
-
- # remove port, if present
- host = cut_port_re.sub("", host, 1)
- return host.lower()
-
-def eff_request_host(request):
- """Return a tuple (request-host, effective request-host name).
-
- As defined by RFC 2965, except both are lowercased.
-
- """
- erhn = req_host = request_host(request)
- if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
- erhn = req_host + ".local"
- return req_host, erhn
-
-def request_path(request):
- """request-URI, as defined by RFC 2965."""
- url = request.get_full_url()
- #scheme, netloc, path, parameters, query, frag = urlparse.urlparse(url)
- #req_path = escape_path("".join(urlparse.urlparse(url)[2:]))
- path, parameters, query, frag = urlparse.urlparse(url)[2:]
- if parameters:
- path = "%s;%s" % (path, parameters)
- path = escape_path(path)
- req_path = urlparse.urlunparse(("", "", path, "", query, frag))
- if not req_path.startswith("/"):
- # fix bad RFC 2396 absoluteURI
- req_path = "/"+req_path
- return req_path
-
-def request_port(request):
- host = request.get_host()
- i = host.find(':')
- if i >= 0:
- port = host[i+1:]
- try:
- int(port)
- except ValueError:
- _debug("nonnumeric port: '%s'", port)
- return None
- else:
- port = DEFAULT_HTTP_PORT
- return port
-
-# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
-# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
-HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
-ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
-def uppercase_escaped_char(match):
- return "%%%s" % match.group(1).upper()
-def escape_path(path):
- """Escape any invalid characters in HTTP URL, and uppercase all escapes."""
- # There's no knowing what character encoding was used to create URLs
- # containing %-escapes, but since we have to pick one to escape invalid
- # path characters, we pick UTF-8, as recommended in the HTML 4.0
- # specification:
- # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
- # And here, kind of: draft-fielding-uri-rfc2396bis-03
- # (And in draft IRI specification: draft-duerst-iri-05)
- # (And here, for new URI schemes: RFC 2718)
- if isinstance(path, unicode):
- path = path.encode("utf-8")
- path = urllib.quote(path, HTTP_PATH_SAFE)
- path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
- return path
-
-def reach(h):
- """Return reach of host h, as defined by RFC 2965, section 1.
-
- The reach R of a host name H is defined as follows:
-
- * If
-
- - H is the host domain name of a host; and,
-
- - H has the form A.B; and
-
- - A has no embedded (that is, interior) dots; and
-
- - B has at least one embedded dot, or B is the string "local".
- then the reach of H is .B.
-
- * Otherwise, the reach of H is H.
-
- >>> reach("www.acme.com")
- '.acme.com'
- >>> reach("acme.com")
- 'acme.com'
- >>> reach("acme.local")
- '.local'
-
- """
- i = h.find(".")
- if i >= 0:
- #a = h[:i] # this line is only here to show what a is
- b = h[i+1:]
- i = b.find(".")
- if is_HDN(h) and (i >= 0 or b == "local"):
- return "."+b
- return h
-
-def is_third_party(request):
- """
-
- RFC 2965, section 3.3.6:
-
- An unverifiable transaction is to a third-party host if its request-
- host U does not domain-match the reach R of the request-host O in the
- origin transaction.
-
- """
- req_host = request_host(request)
- if not domain_match(req_host, reach(request.get_origin_req_host())):
- return True
- else:
- return False
-
-
-class Cookie:
- """HTTP Cookie.
-
- This class represents both Netscape and RFC 2965 cookies.
-
- This is deliberately a very simple class. It just holds attributes. It's
- possible to construct Cookie instances that don't comply with the cookie
- standards. CookieJar.make_cookies is the factory function for Cookie
- objects -- it deals with cookie parsing, supplying defaults, and
- normalising to the representation used in this class. CookiePolicy is
- responsible for checking them to see whether they should be accepted from
- and returned to the server.
-
- Note that the port may be present in the headers, but unspecified ("Port"
- rather than"Port=80", for example); if this is the case, port is None.
-
- """
-
- def __init__(self, version, name, value,
- port, port_specified,
- domain, domain_specified, domain_initial_dot,
- path, path_specified,
- secure,
- expires,
- discard,
- comment,
- comment_url,
- rest,
- rfc2109=False,
- ):
-
- if version is not None: version = int(version)
- if expires is not None: expires = int(expires)
- if port is None and port_specified is True:
- raise ValueError("if port is None, port_specified must be false")
-
- self.version = version
- self.name = name
- self.value = value
- self.port = port
- self.port_specified = port_specified
- # normalise case, as per RFC 2965 section 3.3.3
- self.domain = domain.lower()
- self.domain_specified = domain_specified
- # Sigh. We need to know whether the domain given in the
- # cookie-attribute had an initial dot, in order to follow RFC 2965
- # (as clarified in draft errata). Needed for the returned $Domain
- # value.
- self.domain_initial_dot = domain_initial_dot
- self.path = path
- self.path_specified = path_specified
- self.secure = secure
- self.expires = expires
- self.discard = discard
- self.comment = comment
- self.comment_url = comment_url
- self.rfc2109 = rfc2109
-
- self._rest = copy.copy(rest)
-
- def has_nonstandard_attr(self, name):
- return name in self._rest
- def get_nonstandard_attr(self, name, default=None):
- return self._rest.get(name, default)
- def set_nonstandard_attr(self, name, value):
- self._rest[name] = value
-
- def is_expired(self, now=None):
- if now is None: now = time.time()
- if (self.expires is not None) and (self.expires <= now):
- return True
- return False
-
- def __str__(self):
- if self.port is None: p = ""
- else: p = ":"+self.port
- limit = self.domain + p + self.path
- if self.value is not None:
- namevalue = "%s=%s" % (self.name, self.value)
- else:
- namevalue = self.name
- return "<Cookie %s for %s>" % (namevalue, limit)
-
- def __repr__(self):
- args = []
- for name in ("version", "name", "value",
- "port", "port_specified",
- "domain", "domain_specified", "domain_initial_dot",
- "path", "path_specified",
- "secure", "expires", "discard", "comment", "comment_url",
- ):
- attr = getattr(self, name)
- args.append("%s=%s" % (name, repr(attr)))
- args.append("rest=%s" % repr(self._rest))
- args.append("rfc2109=%s" % repr(self.rfc2109))
- return "Cookie(%s)" % ", ".join(args)
-
-
-class CookiePolicy:
- """Defines which cookies get accepted from and returned to server.
-
- May also modify cookies, though this is probably a bad idea.
-
- The subclass DefaultCookiePolicy defines the standard rules for Netscape
- and RFC 2965 cookies -- override that if you want a customised policy.
-
- """
- def set_ok(self, cookie, request):
- """Return true if (and only if) cookie should be accepted from server.
-
- Currently, pre-expired cookies never get this far -- the CookieJar
- class deletes such cookies itself.
-
- """
- raise NotImplementedError()
-
- def return_ok(self, cookie, request):
- """Return true if (and only if) cookie should be returned to server."""
- raise NotImplementedError()
-
- def domain_return_ok(self, domain, request):
- """Return false if cookies should not be returned, given cookie domain.
- """
- return True
-
- def path_return_ok(self, path, request):
- """Return false if cookies should not be returned, given cookie path.
- """
- return True
-
-
-class DefaultCookiePolicy(CookiePolicy):
- """Implements the standard rules for accepting and returning cookies."""
-
- DomainStrictNoDots = 1
- DomainStrictNonDomain = 2
- DomainRFC2965Match = 4
-
- DomainLiberal = 0
- DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
-
- def __init__(self,
- blocked_domains=None, allowed_domains=None,
- netscape=True, rfc2965=False,
- rfc2109_as_netscape=None,
- hide_cookie2=False,
- strict_domain=False,
- strict_rfc2965_unverifiable=True,
- strict_ns_unverifiable=False,
- strict_ns_domain=DomainLiberal,
- strict_ns_set_initial_dollar=False,
- strict_ns_set_path=False,
- ):
- """Constructor arguments should be passed as keyword arguments only."""
- self.netscape = netscape
- self.rfc2965 = rfc2965
- self.rfc2109_as_netscape = rfc2109_as_netscape
- self.hide_cookie2 = hide_cookie2
- self.strict_domain = strict_domain
- self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
- self.strict_ns_unverifiable = strict_ns_unverifiable
- self.strict_ns_domain = strict_ns_domain
- self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
- self.strict_ns_set_path = strict_ns_set_path
-
- if blocked_domains is not None:
- self._blocked_domains = tuple(blocked_domains)
- else:
- self._blocked_domains = ()
-
- if allowed_domains is not None:
- allowed_domains = tuple(allowed_domains)
- self._allowed_domains = allowed_domains
-
- def blocked_domains(self):
- """Return the sequence of blocked domains (as a tuple)."""
- return self._blocked_domains
- def set_blocked_domains(self, blocked_domains):
- """Set the sequence of blocked domains."""
- self._blocked_domains = tuple(blocked_domains)
-
- def is_blocked(self, domain):
- for blocked_domain in self._blocked_domains:
- if user_domain_match(domain, blocked_domain):
- return True
- return False
-
- def allowed_domains(self):
- """Return None, or the sequence of allowed domains (as a tuple)."""
- return self._allowed_domains
- def set_allowed_domains(self, allowed_domains):
- """Set the sequence of allowed domains, or None."""
- if allowed_domains is not None:
- allowed_domains = tuple(allowed_domains)
- self._allowed_domains = allowed_domains
-
- def is_not_allowed(self, domain):
- if self._allowed_domains is None:
- return False
- for allowed_domain in self._allowed_domains:
- if user_domain_match(domain, allowed_domain):
- return False
- return True
-
- def set_ok(self, cookie, request):
- """
- If you override .set_ok(), be sure to call this method. If it returns
- false, so should your subclass (assuming your subclass wants to be more
- strict about which cookies to accept).
-
- """
- _debug(" - checking cookie %s=%s", cookie.name, cookie.value)
-
- assert cookie.name is not None
-
- for n in "version", "verifiability", "name", "path", "domain", "port":
- fn_name = "set_ok_"+n
- fn = getattr(self, fn_name)
- if not fn(cookie, request):
- return False
-
- return True
-
- def set_ok_version(self, cookie, request):
- if cookie.version is None:
- # Version is always set to 0 by parse_ns_headers if it's a Netscape
- # cookie, so this must be an invalid RFC 2965 cookie.
- _debug(" Set-Cookie2 without version attribute (%s=%s)",
- cookie.name, cookie.value)
- return False
- if cookie.version > 0 and not self.rfc2965:
- _debug(" RFC 2965 cookies are switched off")
- return False
- elif cookie.version == 0 and not self.netscape:
- _debug(" Netscape cookies are switched off")
- return False
- return True
-
- def set_ok_verifiability(self, cookie, request):
- if request.is_unverifiable() and is_third_party(request):
- if cookie.version > 0 and self.strict_rfc2965_unverifiable:
- _debug(" third-party RFC 2965 cookie during "
- "unverifiable transaction")
- return False
- elif cookie.version == 0 and self.strict_ns_unverifiable:
- _debug(" third-party Netscape cookie during "
- "unverifiable transaction")
- return False
- return True
-
- def set_ok_name(self, cookie, request):
- # Try and stop servers setting V0 cookies designed to hack other
- # servers that know both V0 and V1 protocols.
- if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
- cookie.name.startswith("$")):
- _debug(" illegal name (starts with '$'): '%s'", cookie.name)
- return False
- return True
-
- def set_ok_path(self, cookie, request):
- if cookie.path_specified:
- req_path = request_path(request)
- if ((cookie.version > 0 or
- (cookie.version == 0 and self.strict_ns_set_path)) and
- not req_path.startswith(cookie.path)):
- _debug(" path attribute %s is not a prefix of request "
- "path %s", cookie.path, req_path)
- return False
- return True
-
- def set_ok_domain(self, cookie, request):
- if self.is_blocked(cookie.domain):
- _debug(" domain %s is in user block-list", cookie.domain)
- return False
- if self.is_not_allowed(cookie.domain):
- _debug(" domain %s is not in user allow-list", cookie.domain)
- return False
- if cookie.domain_specified:
- req_host, erhn = eff_request_host(request)
- domain = cookie.domain
- if self.strict_domain and (domain.count(".") >= 2):
- # XXX This should probably be compared with the Konqueror
- # (kcookiejar.cpp) and Mozilla implementations, but it's a
- # losing battle.
- i = domain.rfind(".")
- j = domain.rfind(".", 0, i)
- if j == 0: # domain like .foo.bar
- tld = domain[i+1:]
- sld = domain[j+1:i]
- if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
- "gov", "mil", "int", "aero", "biz", "cat", "coop",
- "info", "jobs", "mobi", "museum", "name", "pro",
- "travel", "eu") and len(tld) == 2:
- # domain like .co.uk
- _debug(" country-code second level domain %s", domain)
- return False
- if domain.startswith("."):
- undotted_domain = domain[1:]
- else:
- undotted_domain = domain
- embedded_dots = (undotted_domain.find(".") >= 0)
- if not embedded_dots and domain != ".local":
- _debug(" non-local domain %s contains no embedded dot",
- domain)
- return False
- if cookie.version == 0:
- if (not erhn.endswith(domain) and
- (not erhn.startswith(".") and
- not ("."+erhn).endswith(domain))):
- _debug(" effective request-host %s (even with added "
- "initial dot) does not end end with %s",
- erhn, domain)
- return False
- if (cookie.version > 0 or
- (self.strict_ns_domain & self.DomainRFC2965Match)):
- if not domain_match(erhn, domain):
- _debug(" effective request-host %s does not domain-match "
- "%s", erhn, domain)
- return False
- if (cookie.version > 0 or
- (self.strict_ns_domain & self.DomainStrictNoDots)):
- host_prefix = req_host[:-len(domain)]
- if (host_prefix.find(".") >= 0 and
- not IPV4_RE.search(req_host)):
- _debug(" host prefix %s for domain %s contains a dot",
- host_prefix, domain)
- return False
- return True
-
- def set_ok_port(self, cookie, request):
- if cookie.port_specified:
- req_port = request_port(request)
- if req_port is None:
- req_port = "80"
- else:
- req_port = str(req_port)
- for p in cookie.port.split(","):
- try:
- int(p)
- except ValueError:
- _debug(" bad port %s (not numeric)", p)
- return False
- if p == req_port:
- break
- else:
- _debug(" request port (%s) not found in %s",
- req_port, cookie.port)
- return False
- return True
-
- def return_ok(self, cookie, request):
- """
- If you override .return_ok(), be sure to call this method. If it
- returns false, so should your subclass (assuming your subclass wants to
- be more strict about which cookies to return).
-
- """
- # Path has already been checked by .path_return_ok(), and domain
- # blocking done by .domain_return_ok().
- _debug(" - checking cookie %s=%s", cookie.name, cookie.value)
-
- for n in "version", "verifiability", "secure", "expires", "port", "domain":
- fn_name = "return_ok_"+n
- fn = getattr(self, fn_name)
- if not fn(cookie, request):
- return False
- return True
-
- def return_ok_version(self, cookie, request):
- if cookie.version > 0 and not self.rfc2965:
- _debug(" RFC 2965 cookies are switched off")
- return False
- elif cookie.version == 0 and not self.netscape:
- _debug(" Netscape cookies are switched off")
- return False
- return True
-
- def return_ok_verifiability(self, cookie, request):
- if request.is_unverifiable() and is_third_party(request):
- if cookie.version > 0 and self.strict_rfc2965_unverifiable:
- _debug(" third-party RFC 2965 cookie during unverifiable "
- "transaction")
- return False
- elif cookie.version == 0 and self.strict_ns_unverifiable:
- _debug(" third-party Netscape cookie during unverifiable "
- "transaction")
- return False
- return True
-
- def return_ok_secure(self, cookie, request):
- if cookie.secure and request.get_type() != "https":
- _debug(" secure cookie with non-secure request")
- return False
- return True
-
- def return_ok_expires(self, cookie, request):
- if cookie.is_expired(self._now):
- _debug(" cookie expired")
- return False
- return True
-
- def return_ok_port(self, cookie, request):
- if cookie.port:
- req_port = request_port(request)
- if req_port is None:
- req_port = "80"
- for p in cookie.port.split(","):
- if p == req_port:
- break
- else:
- _debug(" request port %s does not match cookie port %s",
- req_port, cookie.port)
- return False
- return True
-
- def return_ok_domain(self, cookie, request):
- req_host, erhn = eff_request_host(request)
- domain = cookie.domain
-
- # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
- if (cookie.version == 0 and
- (self.strict_ns_domain & self.DomainStrictNonDomain) and
- not cookie.domain_specified and domain != erhn):
- _debug(" cookie with unspecified domain does not string-compare "
- "equal to request domain")
- return False
-
- if cookie.version > 0 and not domain_match(erhn, domain):
- _debug(" effective request-host name %s does not domain-match "
- "RFC 2965 cookie domain %s", erhn, domain)
- return False
- if cookie.version == 0 and not ("."+erhn).endswith(domain):
- _debug(" request-host %s does not match Netscape cookie domain "
- "%s", req_host, domain)
- return False
- return True
-
- def domain_return_ok(self, domain, request):
- # Liberal check of. This is here as an optimization to avoid
- # having to load lots of MSIE cookie files unless necessary.
- req_host, erhn = eff_request_host(request)
- if not req_host.startswith("."):
- req_host = "."+req_host
- if not erhn.startswith("."):
- erhn = "."+erhn
- if not (req_host.endswith(domain) or erhn.endswith(domain)):
- #_debug(" request domain %s does not match cookie domain %s",
- # req_host, domain)
- return False
-
- if self.is_blocked(domain):
- _debug(" domain %s is in user block-list", domain)
- return False
- if self.is_not_allowed(domain):
- _debug(" domain %s is not in user allow-list", domain)
- return False
-
- return True
-
- def path_return_ok(self, path, request):
- _debug("- checking cookie path=%s", path)
- req_path = request_path(request)
- if not req_path.startswith(path):
- _debug(" %s does not path-match %s", req_path, path)
- return False
- return True
-
-
-def vals_sorted_by_key(adict):
- keys = adict.keys()
- keys.sort()
- return map(adict.get, keys)
-
-def deepvalues(mapping):
- """Iterates over nested mapping, depth-first, in sorted order by key."""
- values = vals_sorted_by_key(mapping)
- for obj in values:
- mapping = False
- try:
- obj.items
- except AttributeError:
- pass
- else:
- mapping = True
- for subobj in deepvalues(obj):
- yield subobj
- if not mapping:
- yield obj
-
-
-# Used as second parameter to dict.get() method, to distinguish absent
-# dict key from one with a None value.
-class Absent: pass
-
-class CookieJar:
- """Collection of HTTP cookies.
-
- You may not need to know about this class: try
- urllib2.build_opener(HTTPCookieProcessor).open(url).
-
- """
-
- non_word_re = re.compile(r"\W")
- quote_re = re.compile(r"([\"\\])")
- strict_domain_re = re.compile(r"\.?[^.]*")
- domain_re = re.compile(r"[^.]*")
- dots_re = re.compile(r"^\.+")
-
- magic_re = r"^\#LWP-Cookies-(\d+\.\d+)"
-
- def __init__(self, policy=None):
- if policy is None:
- policy = DefaultCookiePolicy()
- self._policy = policy
-
- self._cookies_lock = _threading.RLock()
- self._cookies = {}
-
- def set_policy(self, policy):
- self._policy = policy
-
- def _cookies_for_domain(self, domain, request):
- cookies = []
- if not self._policy.domain_return_ok(domain, request):
- return []
- _debug("Checking %s for cookies to return", domain)
- cookies_by_path = self._cookies[domain]
- for path in cookies_by_path.keys():
- if not self._policy.path_return_ok(path, request):
- continue
- cookies_by_name = cookies_by_path[path]
- for cookie in cookies_by_name.values():
- if not self._policy.return_ok(cookie, request):
- _debug(" not returning cookie")
- continue
- _debug(" it's a match")
- cookies.append(cookie)
- return cookies
-
- def _cookies_for_request(self, request):
- """Return a list of cookies to be returned to server."""
- cookies = []
- for domain in self._cookies.keys():
- cookies.extend(self._cookies_for_domain(domain, request))
- return cookies
-
- def _cookie_attrs(self, cookies):
- """Return a list of cookie-attributes to be returned to server.
-
- like ['foo="bar"; $Path="/"', ...]
-
- The $Version attribute is also added when appropriate (currently only
- once per request).
-
- """
- # add cookies in order of most specific (ie. longest) path first
- def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
- cookies.sort(decreasing_size)
-
- version_set = False
-
- attrs = []
- for cookie in cookies:
- # set version of Cookie header
- # XXX
- # What should it be if multiple matching Set-Cookie headers have
- # different versions themselves?
- # Answer: there is no answer; was supposed to be settled by
- # RFC 2965 errata, but that may never appear...
- version = cookie.version
- if not version_set:
- version_set = True
- if version > 0:
- attrs.append("$Version=%s" % version)
-
- # quote cookie value if necessary
- # (not for Netscape protocol, which already has any quotes
- # intact, due to the poorly-specified Netscape Cookie: syntax)
- if ((cookie.value is not None) and
- self.non_word_re.search(cookie.value) and version > 0):
- value = self.quote_re.sub(r"\\\1", cookie.value)
- else:
- value = cookie.value
-
- # add cookie-attributes to be returned in Cookie header
- if cookie.value is None:
- attrs.append(cookie.name)
- else:
- attrs.append("%s=%s" % (cookie.name, value))
- if version > 0:
- if cookie.path_specified:
- attrs.append('$Path="%s"' % cookie.path)
- if cookie.domain.startswith("."):
- domain = cookie.domain
- if (not cookie.domain_initial_dot and
- domain.startswith(".")):
- domain = domain[1:]
- attrs.append('$Domain="%s"' % domain)
- if cookie.port is not None:
- p = "$Port"
- if cookie.port_specified:
- p = p + ('="%s"' % cookie.port)
- attrs.append(p)
-
- return attrs
-
- def add_cookie_header(self, request):
- """Add correct Cookie: header to request (urllib2.Request object).
-
- The Cookie2 header is also added unless policy.hide_cookie2 is true.
-
- """
- _debug("add_cookie_header")
- self._cookies_lock.acquire()
-
- self._policy._now = self._now = int(time.time())
-
- cookies = self._cookies_for_request(request)
-
- attrs = self._cookie_attrs(cookies)
- if attrs:
- if not request.has_header("Cookie"):
- request.add_unredirected_header(
- "Cookie", "; ".join(attrs))
-
- # if necessary, advertise that we know RFC 2965
- if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
- not request.has_header("Cookie2")):
- for cookie in cookies:
- if cookie.version != 1:
- request.add_unredirected_header("Cookie2", '$Version="1"')
- break
-
- self._cookies_lock.release()
-
- self.clear_expired_cookies()
-
- def _normalized_cookie_tuples(self, attrs_set):
- """Return list of tuples containing normalised cookie information.
-
- attrs_set is the list of lists of key,value pairs extracted from
- the Set-Cookie or Set-Cookie2 headers.
-
- Tuples are name, value, standard, rest, where name and value are the
- cookie name and value, standard is a dictionary containing the standard
- cookie-attributes (discard, secure, version, expires or max-age,
- domain, path and port) and rest is a dictionary containing the rest of
- the cookie-attributes.
-
- """
- cookie_tuples = []
-
- boolean_attrs = "discard", "secure"
- value_attrs = ("version",
- "expires", "max-age",
- "domain", "path", "port",
- "comment", "commenturl")
-
- for cookie_attrs in attrs_set:
- name, value = cookie_attrs[0]
-
- # Build dictionary of standard cookie-attributes (standard) and
- # dictionary of other cookie-attributes (rest).
-
- # Note: expiry time is normalised to seconds since epoch. V0
- # cookies should have the Expires cookie-attribute, and V1 cookies
- # should have Max-Age, but since V1 includes RFC 2109 cookies (and
- # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
- # accept either (but prefer Max-Age).
- max_age_set = False
-
- bad_cookie = False
-
- standard = {}
- rest = {}
- for k, v in cookie_attrs[1:]:
- lc = k.lower()
- # don't lose case distinction for unknown fields
- if lc in value_attrs or lc in boolean_attrs:
- k = lc
- if k in boolean_attrs and v is None:
- # boolean cookie-attribute is present, but has no value
- # (like "discard", rather than "port=80")
- v = True
- if k in standard:
- # only first value is significant
- continue
- if k == "domain":
- if v is None:
- _debug(" missing value for domain attribute")
- bad_cookie = True
- break
- # RFC 2965 section 3.3.3
- v = v.lower()
- if k == "expires":
- if max_age_set:
- # Prefer max-age to expires (like Mozilla)
- continue
- if v is None:
- _debug(" missing or invalid value for expires "
- "attribute: treating as session cookie")
- continue
- if k == "max-age":
- max_age_set = True
- try:
- v = int(v)
- except ValueError:
- _debug(" missing or invalid (non-numeric) value for "
- "max-age attribute")
- bad_cookie = True
- break
- # convert RFC 2965 Max-Age to seconds since epoch
- # XXX Strictly you're supposed to follow RFC 2616
- # age-calculation rules. Remember that zero Max-Age is a
- # is a request to discard (old and new) cookie, though.
- k = "expires"
- v = self._now + v
- if (k in value_attrs) or (k in boolean_attrs):
- if (v is None and
- k not in ("port", "comment", "commenturl")):
- _debug(" missing value for %s attribute" % k)
- bad_cookie = True
- break
- standard[k] = v
- else:
- rest[k] = v
-
- if bad_cookie:
- continue
-
- cookie_tuples.append((name, value, standard, rest))
-
- return cookie_tuples
-
- def _cookie_from_cookie_tuple(self, tup, request):
- # standard is dict of standard cookie-attributes, rest is dict of the
- # rest of them
- name, value, standard, rest = tup
-
- domain = standard.get("domain", Absent)
- path = standard.get("path", Absent)
- port = standard.get("port", Absent)
- expires = standard.get("expires", Absent)
-
- # set the easy defaults
- version = standard.get("version", None)
- if version is not None: version = int(version)
- secure = standard.get("secure", False)
- # (discard is also set if expires is Absent)
- discard = standard.get("discard", False)
- comment = standard.get("comment", None)
- comment_url = standard.get("commenturl", None)
-
- # set default path
- if path is not Absent and path != "":
- path_specified = True
- path = escape_path(path)
- else:
- path_specified = False
- path = request_path(request)
- i = path.rfind("/")
- if i != -1:
- if version == 0:
- # Netscape spec parts company from reality here
- path = path[:i]
- else:
- path = path[:i+1]
- if len(path) == 0: path = "/"
-
- # set default domain
- domain_specified = domain is not Absent
- # but first we have to remember whether it starts with a dot
- domain_initial_dot = False
- if domain_specified:
- domain_initial_dot = bool(domain.startswith("."))
- if domain is Absent:
- req_host, erhn = eff_request_host(request)
- domain = erhn
- elif not domain.startswith("."):
- domain = "."+domain
-
- # set default port
- port_specified = False
- if port is not Absent:
- if port is None:
- # Port attr present, but has no value: default to request port.
- # Cookie should then only be sent back on that port.
- port = request_port(request)
- else:
- port_specified = True
- port = re.sub(r"\s+", "", port)
- else:
- # No port attr present. Cookie can be sent back on any port.
- port = None
-
- # set default expires and discard
- if expires is Absent:
- expires = None
- discard = True
- elif expires <= self._now:
- # Expiry date in past is request to delete cookie. This can't be
- # in DefaultCookiePolicy, because can't delete cookies there.
- try:
- self.clear(domain, path, name)
- except KeyError:
- pass
- _debug("Expiring cookie, domain='%s', path='%s', name='%s'",
- domain, path, name)
- return None
-
- return Cookie(version,
- name, value,
- port, port_specified,
- domain, domain_specified, domain_initial_dot,
- path, path_specified,
- secure,
- expires,
- discard,
- comment,
- comment_url,
- rest)
-
- def _cookies_from_attrs_set(self, attrs_set, request):
- cookie_tuples = self._normalized_cookie_tuples(attrs_set)
-
- cookies = []
- for tup in cookie_tuples:
- cookie = self._cookie_from_cookie_tuple(tup, request)
- if cookie: cookies.append(cookie)
- return cookies
-
- def _process_rfc2109_cookies(self, cookies):
- rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
- if rfc2109_as_ns is None:
- rfc2109_as_ns = not self._policy.rfc2965
- for cookie in cookies:
- if cookie.version == 1:
- cookie.rfc2109 = True
- if rfc2109_as_ns:
- # treat 2109 cookies as Netscape cookies rather than
- # as RFC2965 cookies
- cookie.version = 0
-
- def make_cookies(self, response, request):
- """Return sequence of Cookie objects extracted from response object."""
- # get cookie-attributes for RFC 2965 and Netscape protocols
- headers = response.info()
- rfc2965_hdrs = headers.getheaders("Set-Cookie2")
- ns_hdrs = headers.getheaders("Set-Cookie")
-
- rfc2965 = self._policy.rfc2965
- netscape = self._policy.netscape
-
- if ((not rfc2965_hdrs and not ns_hdrs) or
- (not ns_hdrs and not rfc2965) or
- (not rfc2965_hdrs and not netscape) or
- (not netscape and not rfc2965)):
- return [] # no relevant cookie headers: quick exit
-
- try:
- cookies = self._cookies_from_attrs_set(
- split_header_words(rfc2965_hdrs), request)
- except Exception:
- _warn_unhandled_exception()
- cookies = []
-
- if ns_hdrs and netscape:
- try:
- # RFC 2109 and Netscape cookies
- ns_cookies = self._cookies_from_attrs_set(
- parse_ns_headers(ns_hdrs), request)
- except Exception:
- _warn_unhandled_exception()
- ns_cookies = []
- self._process_rfc2109_cookies(ns_cookies)
-
- # Look for Netscape cookies (from Set-Cookie headers) that match
- # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
- # For each match, keep the RFC 2965 cookie and ignore the Netscape
- # cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
- # bundled in with the Netscape cookies for this purpose, which is
- # reasonable behaviour.
- if rfc2965:
- lookup = {}
- for cookie in cookies:
- lookup[(cookie.domain, cookie.path, cookie.name)] = None
-
- def no_matching_rfc2965(ns_cookie, lookup=lookup):
- key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
- return key not in lookup
- ns_cookies = filter(no_matching_rfc2965, ns_cookies)
-
- if ns_cookies:
- cookies.extend(ns_cookies)
-
- return cookies
-
- def set_cookie_if_ok(self, cookie, request):
- """Set a cookie if policy says it's OK to do so."""
- self._cookies_lock.acquire()
- self._policy._now = self._now = int(time.time())
-
- if self._policy.set_ok(cookie, request):
- self.set_cookie(cookie)
-
- self._cookies_lock.release()
-
- def set_cookie(self, cookie):
- """Set a cookie, without checking whether or not it should be set."""
- c = self._cookies
- self._cookies_lock.acquire()
- try:
- if cookie.domain not in c: c[cookie.domain] = {}
- c2 = c[cookie.domain]
- if cookie.path not in c2: c2[cookie.path] = {}
- c3 = c2[cookie.path]
- c3[cookie.name] = cookie
- finally:
- self._cookies_lock.release()
-
- def extract_cookies(self, response, request):
- """Extract cookies from response, where allowable given the request."""
- _debug("extract_cookies: %s", response.info())
- self._cookies_lock.acquire()
- self._policy._now = self._now = int(time.time())
-
- for cookie in self.make_cookies(response, request):
- if self._policy.set_ok(cookie, request):
- _debug(" setting cookie: %s", cookie)
- self.set_cookie(cookie)
- self._cookies_lock.release()
-
- def clear(self, domain=None, path=None, name=None):
- """Clear some cookies.
-
- Invoking this method without arguments will clear all cookies. If
- given a single argument, only cookies belonging to that domain will be
- removed. If given two arguments, cookies belonging to the specified
- path within that domain are removed. If given three arguments, then
- the cookie with the specified name, path and domain is removed.
-
- Raises KeyError if no matching cookie exists.
-
- """
- if name is not None:
- if (domain is None) or (path is None):
- raise ValueError(
- "domain and path must be given to remove a cookie by name")
- del self._cookies[domain][path][name]
- elif path is not None:
- if domain is None:
- raise ValueError(
- "domain must be given to remove cookies by path")
- del self._cookies[domain][path]
- elif domain is not None:
- del self._cookies[domain]
- else:
- self._cookies = {}
-
- def clear_session_cookies(self):
- """Discard all session cookies.
-
- Note that the .save() method won't save session cookies anyway, unless
- you ask otherwise by passing a true ignore_discard argument.
-
- """
- self._cookies_lock.acquire()
- for cookie in self:
- if cookie.discard:
- self.clear(cookie.domain, cookie.path, cookie.name)
- self._cookies_lock.release()
-
- def clear_expired_cookies(self):
- """Discard all expired cookies.
-
- You probably don't need to call this method: expired cookies are never
- sent back to the server (provided you're using DefaultCookiePolicy),
- this method is called by CookieJar itself every so often, and the
- .save() method won't save expired cookies anyway (unless you ask
- otherwise by passing a true ignore_expires argument).
-
- """
- self._cookies_lock.acquire()
- now = time.time()
- for cookie in self:
- if cookie.is_expired(now):
- self.clear(cookie.domain, cookie.path, cookie.name)
- self._cookies_lock.release()
-
- def __iter__(self):
- return deepvalues(self._cookies)
-
- def __len__(self):
- """Return number of contained cookies."""
- i = 0
- for cookie in self: i = i + 1
- return i
-
- def __repr__(self):
- r = []
- for cookie in self: r.append(repr(cookie))
- return "<%s[%s]>" % (self.__class__, ", ".join(r))
-
- def __str__(self):
- r = []
- for cookie in self: r.append(str(cookie))
- return "<%s[%s]>" % (self.__class__, ", ".join(r))
-
-
-# derives from IOError for backwards-compatibility with Python 2.4.0
-class LoadError(IOError): pass
-
-class FileCookieJar(CookieJar):
- """CookieJar that can be loaded from and saved to a file."""
-
- def __init__(self, filename=None, delayload=False, policy=None):
- """
- Cookies are NOT loaded from the named file until either the .load() or
- .revert() method is called.
-
- """
- CookieJar.__init__(self, policy)
- if filename is not None:
- try:
- filename+""
- except:
- raise ValueError("filename must be string-like")
- self.filename = filename
- self.delayload = bool(delayload)
-
- def save(self, filename=None, ignore_discard=False, ignore_expires=False):
- """Save cookies to a file."""
- raise NotImplementedError()
-
- def load(self, filename=None, ignore_discard=False, ignore_expires=False):
- """Load cookies from a file."""
- if filename is None:
- if self.filename is not None: filename = self.filename
- else: raise ValueError(MISSING_FILENAME_TEXT)
-
- f = open(filename)
- try:
- self._really_load(f, filename, ignore_discard, ignore_expires)
- finally:
- f.close()
-
- def revert(self, filename=None,
- ignore_discard=False, ignore_expires=False):
- """Clear all cookies and reload cookies from a saved file.
-
- Raises LoadError (or IOError) if reversion is not successful; the
- object's state will not be altered if this happens.
-
- """
- if filename is None:
- if self.filename is not None: filename = self.filename
- else: raise ValueError(MISSING_FILENAME_TEXT)
-
- self._cookies_lock.acquire()
-
- old_state = copy.deepcopy(self._cookies)
- self._cookies = {}
- try:
- self.load(filename, ignore_discard, ignore_expires)
- except (LoadError, IOError):
- self._cookies = old_state
- raise
-
- self._cookies_lock.release()
-
-from _LWPCookieJar import LWPCookieJar, lwp_cookie_str
-from _MozillaCookieJar import MozillaCookieJar
diff --git a/sys/lib/python/copy.py b/sys/lib/python/copy.py
deleted file mode 100644
index 35c666f7d..000000000
--- a/sys/lib/python/copy.py
+++ /dev/null
@@ -1,414 +0,0 @@
-"""Generic (shallow and deep) copying operations.
-
-Interface summary:
-
- import copy
-
- x = copy.copy(y) # make a shallow copy of y
- x = copy.deepcopy(y) # make a deep copy of y
-
-For module specific errors, copy.Error is raised.
-
-The difference between shallow and deep copying is only relevant for
-compound objects (objects that contain other objects, like lists or
-class instances).
-
-- A shallow copy constructs a new compound object and then (to the
- extent possible) inserts *the same objects* into it that the
- original contains.
-
-- A deep copy constructs a new compound object and then, recursively,
- inserts *copies* into it of the objects found in the original.
-
-Two problems often exist with deep copy operations that don't exist
-with shallow copy operations:
-
- a) recursive objects (compound objects that, directly or indirectly,
- contain a reference to themselves) may cause a recursive loop
-
- b) because deep copy copies *everything* it may copy too much, e.g.
- administrative data structures that should be shared even between
- copies
-
-Python's deep copy operation avoids these problems by:
-
- a) keeping a table of objects already copied during the current
- copying pass
-
- b) letting user-defined classes override the copying operation or the
- set of components copied
-
-This version does not copy types like module, class, function, method,
-nor stack trace, stack frame, nor file, socket, window, nor array, nor
-any similar types.
-
-Classes can use the same interfaces to control copying that they use
-to control pickling: they can define methods called __getinitargs__(),
-__getstate__() and __setstate__(). See the documentation for module
-"pickle" for information on these methods.
-"""
-
-import types
-from copy_reg import dispatch_table
-
-class Error(Exception):
- pass
-error = Error # backward compatibility
-
-try:
- from org.python.core import PyStringMap
-except ImportError:
- PyStringMap = None
-
-__all__ = ["Error", "copy", "deepcopy"]
-
-def copy(x):
- """Shallow copy operation on arbitrary Python objects.
-
- See the module's __doc__ string for more info.
- """
-
- cls = type(x)
-
- copier = _copy_dispatch.get(cls)
- if copier:
- return copier(x)
-
- copier = getattr(cls, "__copy__", None)
- if copier:
- return copier(x)
-
- reductor = dispatch_table.get(cls)
- if reductor:
- rv = reductor(x)
- else:
- reductor = getattr(x, "__reduce_ex__", None)
- if reductor:
- rv = reductor(2)
- else:
- reductor = getattr(x, "__reduce__", None)
- if reductor:
- rv = reductor()
- else:
- raise Error("un(shallow)copyable object of type %s" % cls)
-
- return _reconstruct(x, rv, 0)
-
-
-_copy_dispatch = d = {}
-
-def _copy_immutable(x):
- return x
-for t in (type(None), int, long, float, bool, str, tuple,
- frozenset, type, xrange, types.ClassType,
- types.BuiltinFunctionType,
- types.FunctionType):
- d[t] = _copy_immutable
-for name in ("ComplexType", "UnicodeType", "CodeType"):
- t = getattr(types, name, None)
- if t is not None:
- d[t] = _copy_immutable
-
-def _copy_with_constructor(x):
- return type(x)(x)
-for t in (list, dict, set):
- d[t] = _copy_with_constructor
-
-def _copy_with_copy_method(x):
- return x.copy()
-if PyStringMap is not None:
- d[PyStringMap] = _copy_with_copy_method
-
-def _copy_inst(x):
- if hasattr(x, '__copy__'):
- return x.__copy__()
- if hasattr(x, '__getinitargs__'):
- args = x.__getinitargs__()
- y = x.__class__(*args)
- else:
- y = _EmptyClass()
- y.__class__ = x.__class__
- if hasattr(x, '__getstate__'):
- state = x.__getstate__()
- else:
- state = x.__dict__
- if hasattr(y, '__setstate__'):
- y.__setstate__(state)
- else:
- y.__dict__.update(state)
- return y
-d[types.InstanceType] = _copy_inst
-
-del d
-
-def deepcopy(x, memo=None, _nil=[]):
- """Deep copy operation on arbitrary Python objects.
-
- See the module's __doc__ string for more info.
- """
-
- if memo is None:
- memo = {}
-
- d = id(x)
- y = memo.get(d, _nil)
- if y is not _nil:
- return y
-
- cls = type(x)
-
- copier = _deepcopy_dispatch.get(cls)
- if copier:
- y = copier(x, memo)
- else:
- try:
- issc = issubclass(cls, type)
- except TypeError: # cls is not a class (old Boost; see SF #502085)
- issc = 0
- if issc:
- y = _deepcopy_atomic(x, memo)
- else:
- copier = getattr(x, "__deepcopy__", None)
- if copier:
- y = copier(memo)
- else:
- reductor = dispatch_table.get(cls)
- if reductor:
- rv = reductor(x)
- else:
- reductor = getattr(x, "__reduce_ex__", None)
- if reductor:
- rv = reductor(2)
- else:
- reductor = getattr(x, "__reduce__", None)
- if reductor:
- rv = reductor()
- else:
- raise Error(
- "un(deep)copyable object of type %s" % cls)
- y = _reconstruct(x, rv, 1, memo)
-
- memo[d] = y
- _keep_alive(x, memo) # Make sure x lives at least as long as d
- return y
-
-_deepcopy_dispatch = d = {}
-
-def _deepcopy_atomic(x, memo):
- return x
-d[type(None)] = _deepcopy_atomic
-d[int] = _deepcopy_atomic
-d[long] = _deepcopy_atomic
-d[float] = _deepcopy_atomic
-d[bool] = _deepcopy_atomic
-try:
- d[complex] = _deepcopy_atomic
-except NameError:
- pass
-d[str] = _deepcopy_atomic
-try:
- d[unicode] = _deepcopy_atomic
-except NameError:
- pass
-try:
- d[types.CodeType] = _deepcopy_atomic
-except AttributeError:
- pass
-d[type] = _deepcopy_atomic
-d[xrange] = _deepcopy_atomic
-d[types.ClassType] = _deepcopy_atomic
-d[types.BuiltinFunctionType] = _deepcopy_atomic
-d[types.FunctionType] = _deepcopy_atomic
-
-def _deepcopy_list(x, memo):
- y = []
- memo[id(x)] = y
- for a in x:
- y.append(deepcopy(a, memo))
- return y
-d[list] = _deepcopy_list
-
-def _deepcopy_tuple(x, memo):
- y = []
- for a in x:
- y.append(deepcopy(a, memo))
- d = id(x)
- try:
- return memo[d]
- except KeyError:
- pass
- for i in range(len(x)):
- if x[i] is not y[i]:
- y = tuple(y)
- break
- else:
- y = x
- memo[d] = y
- return y
-d[tuple] = _deepcopy_tuple
-
-def _deepcopy_dict(x, memo):
- y = {}
- memo[id(x)] = y
- for key, value in x.iteritems():
- y[deepcopy(key, memo)] = deepcopy(value, memo)
- return y
-d[dict] = _deepcopy_dict
-if PyStringMap is not None:
- d[PyStringMap] = _deepcopy_dict
-
-def _keep_alive(x, memo):
- """Keeps a reference to the object x in the memo.
-
- Because we remember objects by their id, we have
- to assure that possibly temporary objects are kept
- alive by referencing them.
- We store a reference at the id of the memo, which should
- normally not be used unless someone tries to deepcopy
- the memo itself...
- """
- try:
- memo[id(memo)].append(x)
- except KeyError:
- # aha, this is the first one :-)
- memo[id(memo)]=[x]
-
-def _deepcopy_inst(x, memo):
- if hasattr(x, '__deepcopy__'):
- return x.__deepcopy__(memo)
- if hasattr(x, '__getinitargs__'):
- args = x.__getinitargs__()
- args = deepcopy(args, memo)
- y = x.__class__(*args)
- else:
- y = _EmptyClass()
- y.__class__ = x.__class__
- memo[id(x)] = y
- if hasattr(x, '__getstate__'):
- state = x.__getstate__()
- else:
- state = x.__dict__
- state = deepcopy(state, memo)
- if hasattr(y, '__setstate__'):
- y.__setstate__(state)
- else:
- y.__dict__.update(state)
- return y
-d[types.InstanceType] = _deepcopy_inst
-
-def _reconstruct(x, info, deep, memo=None):
- if isinstance(info, str):
- return x
- assert isinstance(info, tuple)
- if memo is None:
- memo = {}
- n = len(info)
- assert n in (2, 3, 4, 5)
- callable, args = info[:2]
- if n > 2:
- state = info[2]
- else:
- state = {}
- if n > 3:
- listiter = info[3]
- else:
- listiter = None
- if n > 4:
- dictiter = info[4]
- else:
- dictiter = None
- if deep:
- args = deepcopy(args, memo)
- y = callable(*args)
- memo[id(x)] = y
- if listiter is not None:
- for item in listiter:
- if deep:
- item = deepcopy(item, memo)
- y.append(item)
- if dictiter is not None:
- for key, value in dictiter:
- if deep:
- key = deepcopy(key, memo)
- value = deepcopy(value, memo)
- y[key] = value
- if state:
- if deep:
- state = deepcopy(state, memo)
- if hasattr(y, '__setstate__'):
- y.__setstate__(state)
- else:
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- else:
- slotstate = None
- if state is not None:
- y.__dict__.update(state)
- if slotstate is not None:
- for key, value in slotstate.iteritems():
- setattr(y, key, value)
- return y
-
-del d
-
-del types
-
-# Helper for instance creation without calling __init__
-class _EmptyClass:
- pass
-
-def _test():
- l = [None, 1, 2L, 3.14, 'xyzzy', (1, 2L), [3.14, 'abc'],
- {'abc': 'ABC'}, (), [], {}]
- l1 = copy(l)
- print l1==l
- l1 = map(copy, l)
- print l1==l
- l1 = deepcopy(l)
- print l1==l
- class C:
- def __init__(self, arg=None):
- self.a = 1
- self.arg = arg
- if __name__ == '__main__':
- import sys
- file = sys.argv[0]
- else:
- file = __file__
- self.fp = open(file)
- self.fp.close()
- def __getstate__(self):
- return {'a': self.a, 'arg': self.arg}
- def __setstate__(self, state):
- for key, value in state.iteritems():
- setattr(self, key, value)
- def __deepcopy__(self, memo=None):
- new = self.__class__(deepcopy(self.arg, memo))
- new.a = self.a
- return new
- c = C('argument sketch')
- l.append(c)
- l2 = copy(l)
- print l == l2
- print l
- print l2
- l2 = deepcopy(l)
- print l == l2
- print l
- print l2
- l.append({l[1]: l, 'xyz': l[2]})
- l3 = copy(l)
- import repr
- print map(repr.repr, l)
- print map(repr.repr, l1)
- print map(repr.repr, l2)
- print map(repr.repr, l3)
- l3 = deepcopy(l)
- import repr
- print map(repr.repr, l)
- print map(repr.repr, l1)
- print map(repr.repr, l2)
- print map(repr.repr, l3)
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/copy_reg.py b/sys/lib/python/copy_reg.py
deleted file mode 100644
index f87c50ffa..000000000
--- a/sys/lib/python/copy_reg.py
+++ /dev/null
@@ -1,200 +0,0 @@
-"""Helper to provide extensibility for pickle/cPickle.
-
-This is only useful to add pickle support for extension types defined in
-C, not for instances of user-defined classes.
-"""
-
-from types import ClassType as _ClassType
-
-__all__ = ["pickle", "constructor",
- "add_extension", "remove_extension", "clear_extension_cache"]
-
-dispatch_table = {}
-
-def pickle(ob_type, pickle_function, constructor_ob=None):
- if type(ob_type) is _ClassType:
- raise TypeError("copy_reg is not intended for use with classes")
-
- if not callable(pickle_function):
- raise TypeError("reduction functions must be callable")
- dispatch_table[ob_type] = pickle_function
-
- # The constructor_ob function is a vestige of safe for unpickling.
- # There is no reason for the caller to pass it anymore.
- if constructor_ob is not None:
- constructor(constructor_ob)
-
-def constructor(object):
- if not callable(object):
- raise TypeError("constructors must be callable")
-
-# Example: provide pickling support for complex numbers.
-
-try:
- complex
-except NameError:
- pass
-else:
-
- def pickle_complex(c):
- return complex, (c.real, c.imag)
-
- pickle(complex, pickle_complex, complex)
-
-# Support for pickling new-style objects
-
-def _reconstructor(cls, base, state):
- if base is object:
- obj = object.__new__(cls)
- else:
- obj = base.__new__(cls, state)
- base.__init__(obj, state)
- return obj
-
-_HEAPTYPE = 1<<9
-
-# Python code for object.__reduce_ex__ for protocols 0 and 1
-
-def _reduce_ex(self, proto):
- assert proto < 2
- for base in self.__class__.__mro__:
- if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
- break
- else:
- base = object # not really reachable
- if base is object:
- state = None
- else:
- if base is self.__class__:
- raise TypeError, "can't pickle %s objects" % base.__name__
- state = base(self)
- args = (self.__class__, base, state)
- try:
- getstate = self.__getstate__
- except AttributeError:
- if getattr(self, "__slots__", None):
- raise TypeError("a class that defines __slots__ without "
- "defining __getstate__ cannot be pickled")
- try:
- dict = self.__dict__
- except AttributeError:
- dict = None
- else:
- dict = getstate()
- if dict:
- return _reconstructor, args, dict
- else:
- return _reconstructor, args
-
-# Helper for __reduce_ex__ protocol 2
-
-def __newobj__(cls, *args):
- return cls.__new__(cls, *args)
-
-def _slotnames(cls):
- """Return a list of slot names for a given class.
-
- This needs to find slots defined by the class and its bases, so we
- can't simply return the __slots__ attribute. We must walk down
- the Method Resolution Order and concatenate the __slots__ of each
- class found there. (This assumes classes don't modify their
- __slots__ attribute to misrepresent their slots after the class is
- defined.)
- """
-
- # Get the value from a cache in the class if possible
- names = cls.__dict__.get("__slotnames__")
- if names is not None:
- return names
-
- # Not cached -- calculate the value
- names = []
- if not hasattr(cls, "__slots__"):
- # This class has no slots
- pass
- else:
- # Slots found -- gather slot names from all base classes
- for c in cls.__mro__:
- if "__slots__" in c.__dict__:
- slots = c.__dict__['__slots__']
- # if class has a single slot, it can be given as a string
- if isinstance(slots, basestring):
- slots = (slots,)
- for name in slots:
- # special descriptors
- if name in ("__dict__", "__weakref__"):
- continue
- # mangled names
- elif name.startswith('__') and not name.endswith('__'):
- names.append('_%s%s' % (c.__name__, name))
- else:
- names.append(name)
-
- # Cache the outcome in the class if at all possible
- try:
- cls.__slotnames__ = names
- except:
- pass # But don't die if we can't
-
- return names
-
-# A registry of extension codes. This is an ad-hoc compression
-# mechanism. Whenever a global reference to <module>, <name> is about
-# to be pickled, the (<module>, <name>) tuple is looked up here to see
-# if it is a registered extension code for it. Extension codes are
-# universal, so that the meaning of a pickle does not depend on
-# context. (There are also some codes reserved for local use that
-# don't have this restriction.) Codes are positive ints; 0 is
-# reserved.
-
-_extension_registry = {} # key -> code
-_inverted_registry = {} # code -> key
-_extension_cache = {} # code -> object
-# Don't ever rebind those names: cPickle grabs a reference to them when
-# it's initialized, and won't see a rebinding.
-
-def add_extension(module, name, code):
- """Register an extension code."""
- code = int(code)
- if not 1 <= code <= 0x7fffffff:
- raise ValueError, "code out of range"
- key = (module, name)
- if (_extension_registry.get(key) == code and
- _inverted_registry.get(code) == key):
- return # Redundant registrations are benign
- if key in _extension_registry:
- raise ValueError("key %s is already registered with code %s" %
- (key, _extension_registry[key]))
- if code in _inverted_registry:
- raise ValueError("code %s is already in use for key %s" %
- (code, _inverted_registry[code]))
- _extension_registry[key] = code
- _inverted_registry[code] = key
-
-def remove_extension(module, name, code):
- """Unregister an extension code. For testing only."""
- key = (module, name)
- if (_extension_registry.get(key) != code or
- _inverted_registry.get(code) != key):
- raise ValueError("key %s is not registered with code %s" %
- (key, code))
- del _extension_registry[key]
- del _inverted_registry[code]
- if code in _extension_cache:
- del _extension_cache[code]
-
-def clear_extension_cache():
- _extension_cache.clear()
-
-# Standard extension code assignments
-
-# Reserved ranges
-
-# First Last Count Purpose
-# 1 127 127 Reserved for Python standard library
-# 128 191 64 Reserved for Zope
-# 192 239 48 Reserved for 3rd parties
-# 240 255 16 Reserved for private use (will never be assigned)
-# 256 Inf Inf Reserved for future assignment
-
-# Extension codes are assigned by the Python Software Foundation.
diff --git a/sys/lib/python/csv.py b/sys/lib/python/csv.py
deleted file mode 100644
index f21385478..000000000
--- a/sys/lib/python/csv.py
+++ /dev/null
@@ -1,415 +0,0 @@
-
-"""
-csv.py - read/write/investigate CSV files
-"""
-
-import re
-from _csv import Error, __version__, writer, reader, register_dialect, \
- unregister_dialect, get_dialect, list_dialects, \
- field_size_limit, \
- QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
- __doc__
-from _csv import Dialect as _Dialect
-
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
- "Error", "Dialect", "excel", "excel_tab", "reader", "writer",
- "register_dialect", "get_dialect", "list_dialects", "Sniffer",
- "unregister_dialect", "__version__", "DictReader", "DictWriter" ]
-
-class Dialect:
- """Describe an Excel dialect.
-
- This must be subclassed (see csv.excel). Valid attributes are:
- delimiter, quotechar, escapechar, doublequote, skipinitialspace,
- lineterminator, quoting.
-
- """
- _name = ""
- _valid = False
- # placeholders
- delimiter = None
- quotechar = None
- escapechar = None
- doublequote = None
- skipinitialspace = None
- lineterminator = None
- quoting = None
-
- def __init__(self):
- if self.__class__ != Dialect:
- self._valid = True
- self._validate()
-
- def _validate(self):
- try:
- _Dialect(self)
- except TypeError, e:
- # We do this for compatibility with py2.3
- raise Error(str(e))
-
-class excel(Dialect):
- """Describe the usual properties of Excel-generated CSV files."""
- delimiter = ','
- quotechar = '"'
- doublequote = True
- skipinitialspace = False
- lineterminator = '\r\n'
- quoting = QUOTE_MINIMAL
-register_dialect("excel", excel)
-
-class excel_tab(excel):
- """Describe the usual properties of Excel-generated TAB-delimited files."""
- delimiter = '\t'
-register_dialect("excel-tab", excel_tab)
-
-
-class DictReader:
- def __init__(self, f, fieldnames=None, restkey=None, restval=None,
- dialect="excel", *args, **kwds):
- self.fieldnames = fieldnames # list of keys for the dict
- self.restkey = restkey # key to catch long rows
- self.restval = restval # default value for short rows
- self.reader = reader(f, dialect, *args, **kwds)
-
- def __iter__(self):
- return self
-
- def next(self):
- row = self.reader.next()
- if self.fieldnames is None:
- self.fieldnames = row
- row = self.reader.next()
-
- # unlike the basic reader, we prefer not to return blanks,
- # because we will typically wind up with a dict full of None
- # values
- while row == []:
- row = self.reader.next()
- d = dict(zip(self.fieldnames, row))
- lf = len(self.fieldnames)
- lr = len(row)
- if lf < lr:
- d[self.restkey] = row[lf:]
- elif lf > lr:
- for key in self.fieldnames[lr:]:
- d[key] = self.restval
- return d
-
-
-class DictWriter:
- def __init__(self, f, fieldnames, restval="", extrasaction="raise",
- dialect="excel", *args, **kwds):
- self.fieldnames = fieldnames # list of keys for the dict
- self.restval = restval # for writing short dicts
- if extrasaction.lower() not in ("raise", "ignore"):
- raise ValueError, \
- ("extrasaction (%s) must be 'raise' or 'ignore'" %
- extrasaction)
- self.extrasaction = extrasaction
- self.writer = writer(f, dialect, *args, **kwds)
-
- def _dict_to_list(self, rowdict):
- if self.extrasaction == "raise":
- for k in rowdict.keys():
- if k not in self.fieldnames:
- raise ValueError, "dict contains fields not in fieldnames"
- return [rowdict.get(key, self.restval) for key in self.fieldnames]
-
- def writerow(self, rowdict):
- return self.writer.writerow(self._dict_to_list(rowdict))
-
- def writerows(self, rowdicts):
- rows = []
- for rowdict in rowdicts:
- rows.append(self._dict_to_list(rowdict))
- return self.writer.writerows(rows)
-
-# Guard Sniffer's type checking against builds that exclude complex()
-try:
- complex
-except NameError:
- complex = float
-
-class Sniffer:
- '''
- "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
- Returns a Dialect object.
- '''
- def __init__(self):
- # in case there is more than one possible delimiter
- self.preferred = [',', '\t', ';', ' ', ':']
-
-
- def sniff(self, sample, delimiters=None):
- """
- Returns a dialect (or None) corresponding to the sample
- """
-
- quotechar, delimiter, skipinitialspace = \
- self._guess_quote_and_delimiter(sample, delimiters)
- if not delimiter:
- delimiter, skipinitialspace = self._guess_delimiter(sample,
- delimiters)
-
- if not delimiter:
- raise Error, "Could not determine delimiter"
-
- class dialect(Dialect):
- _name = "sniffed"
- lineterminator = '\r\n'
- quoting = QUOTE_MINIMAL
- # escapechar = ''
- doublequote = False
-
- dialect.delimiter = delimiter
- # _csv.reader won't accept a quotechar of ''
- dialect.quotechar = quotechar or '"'
- dialect.skipinitialspace = skipinitialspace
-
- return dialect
-
-
- def _guess_quote_and_delimiter(self, data, delimiters):
- """
- Looks for text enclosed between two identical quotes
- (the probable quotechar) which are preceded and followed
- by the same character (the probable delimiter).
- For example:
- ,'some text',
- The quote with the most wins, same with the delimiter.
- If there is no quotechar the delimiter can't be determined
- this way.
- """
-
- matches = []
- for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
- '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
- '(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
- '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
- regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
- matches = regexp.findall(data)
- if matches:
- break
-
- if not matches:
- return ('', None, 0) # (quotechar, delimiter, skipinitialspace)
-
- quotes = {}
- delims = {}
- spaces = 0
- for m in matches:
- n = regexp.groupindex['quote'] - 1
- key = m[n]
- if key:
- quotes[key] = quotes.get(key, 0) + 1
- try:
- n = regexp.groupindex['delim'] - 1
- key = m[n]
- except KeyError:
- continue
- if key and (delimiters is None or key in delimiters):
- delims[key] = delims.get(key, 0) + 1
- try:
- n = regexp.groupindex['space'] - 1
- except KeyError:
- continue
- if m[n]:
- spaces += 1
-
- quotechar = reduce(lambda a, b, quotes = quotes:
- (quotes[a] > quotes[b]) and a or b, quotes.keys())
-
- if delims:
- delim = reduce(lambda a, b, delims = delims:
- (delims[a] > delims[b]) and a or b, delims.keys())
- skipinitialspace = delims[delim] == spaces
- if delim == '\n': # most likely a file with a single column
- delim = ''
- else:
- # there is *no* delimiter, it's a single column of quoted data
- delim = ''
- skipinitialspace = 0
-
- return (quotechar, delim, skipinitialspace)
-
-
- def _guess_delimiter(self, data, delimiters):
- """
- The delimiter /should/ occur the same number of times on
- each row. However, due to malformed data, it may not. We don't want
- an all or nothing approach, so we allow for small variations in this
- number.
- 1) build a table of the frequency of each character on every line.
- 2) build a table of freqencies of this frequency (meta-frequency?),
- e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
- 7 times in 2 rows'
- 3) use the mode of the meta-frequency to determine the /expected/
- frequency for that character
- 4) find out how often the character actually meets that goal
- 5) the character that best meets its goal is the delimiter
- For performance reasons, the data is evaluated in chunks, so it can
- try and evaluate the smallest portion of the data possible, evaluating
- additional chunks as necessary.
- """
-
- data = filter(None, data.split('\n'))
-
- ascii = [chr(c) for c in range(127)] # 7-bit ASCII
-
- # build frequency tables
- chunkLength = min(10, len(data))
- iteration = 0
- charFrequency = {}
- modes = {}
- delims = {}
- start, end = 0, min(chunkLength, len(data))
- while start < len(data):
- iteration += 1
- for line in data[start:end]:
- for char in ascii:
- metaFrequency = charFrequency.get(char, {})
- # must count even if frequency is 0
- freq = line.count(char)
- # value is the mode
- metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
- charFrequency[char] = metaFrequency
-
- for char in charFrequency.keys():
- items = charFrequency[char].items()
- if len(items) == 1 and items[0][0] == 0:
- continue
- # get the mode of the frequencies
- if len(items) > 1:
- modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b,
- items)
- # adjust the mode - subtract the sum of all
- # other frequencies
- items.remove(modes[char])
- modes[char] = (modes[char][0], modes[char][1]
- - reduce(lambda a, b: (0, a[1] + b[1]),
- items)[1])
- else:
- modes[char] = items[0]
-
- # build a list of possible delimiters
- modeList = modes.items()
- total = float(chunkLength * iteration)
- # (rows of consistent data) / (number of rows) = 100%
- consistency = 1.0
- # minimum consistency threshold
- threshold = 0.9
- while len(delims) == 0 and consistency >= threshold:
- for k, v in modeList:
- if v[0] > 0 and v[1] > 0:
- if ((v[1]/total) >= consistency and
- (delimiters is None or k in delimiters)):
- delims[k] = v
- consistency -= 0.01
-
- if len(delims) == 1:
- delim = delims.keys()[0]
- skipinitialspace = (data[0].count(delim) ==
- data[0].count("%c " % delim))
- return (delim, skipinitialspace)
-
- # analyze another chunkLength lines
- start = end
- end += chunkLength
-
- if not delims:
- return ('', 0)
-
- # if there's more than one, fall back to a 'preferred' list
- if len(delims) > 1:
- for d in self.preferred:
- if d in delims.keys():
- skipinitialspace = (data[0].count(d) ==
- data[0].count("%c " % d))
- return (d, skipinitialspace)
-
- # nothing else indicates a preference, pick the character that
- # dominates(?)
- items = [(v,k) for (k,v) in delims.items()]
- items.sort()
- delim = items[-1][1]
-
- skipinitialspace = (data[0].count(delim) ==
- data[0].count("%c " % delim))
- return (delim, skipinitialspace)
-
-
- def has_header(self, sample):
- # Creates a dictionary of types of data in each column. If any
- # column is of a single type (say, integers), *except* for the first
- # row, then the first row is presumed to be labels. If the type
- # can't be determined, it is assumed to be a string in which case
- # the length of the string is the determining factor: if all of the
- # rows except for the first are the same length, it's a header.
- # Finally, a 'vote' is taken at the end for each column, adding or
- # subtracting from the likelihood of the first row being a header.
-
- rdr = reader(StringIO(sample), self.sniff(sample))
-
- header = rdr.next() # assume first row is header
-
- columns = len(header)
- columnTypes = {}
- for i in range(columns): columnTypes[i] = None
-
- checked = 0
- for row in rdr:
- # arbitrary number of rows to check, to keep it sane
- if checked > 20:
- break
- checked += 1
-
- if len(row) != columns:
- continue # skip rows that have irregular number of columns
-
- for col in columnTypes.keys():
-
- for thisType in [int, long, float, complex]:
- try:
- thisType(row[col])
- break
- except (ValueError, OverflowError):
- pass
- else:
- # fallback to length of string
- thisType = len(row[col])
-
- # treat longs as ints
- if thisType == long:
- thisType = int
-
- if thisType != columnTypes[col]:
- if columnTypes[col] is None: # add new column type
- columnTypes[col] = thisType
- else:
- # type is inconsistent, remove column from
- # consideration
- del columnTypes[col]
-
- # finally, compare results against first row and "vote"
- # on whether it's a header
- hasHeader = 0
- for col, colType in columnTypes.items():
- if type(colType) == type(0): # it's a length
- if len(header[col]) != colType:
- hasHeader += 1
- else:
- hasHeader -= 1
- else: # attempt typecast
- try:
- colType(header[col])
- except (ValueError, TypeError):
- hasHeader += 1
- else:
- hasHeader -= 1
-
- return hasHeader > 0
diff --git a/sys/lib/python/ctypes/__init__.py b/sys/lib/python/ctypes/__init__.py
deleted file mode 100644
index 593730449..000000000
--- a/sys/lib/python/ctypes/__init__.py
+++ /dev/null
@@ -1,529 +0,0 @@
-######################################################################
-# This file should be kept compatible with Python 2.3, see PEP 291. #
-######################################################################
-"""create and manipulate C data types in Python"""
-
-import os as _os, sys as _sys
-
-__version__ = "1.0.2"
-
-from _ctypes import Union, Structure, Array
-from _ctypes import _Pointer
-from _ctypes import CFuncPtr as _CFuncPtr
-from _ctypes import __version__ as _ctypes_version
-from _ctypes import RTLD_LOCAL, RTLD_GLOBAL
-from _ctypes import ArgumentError
-
-from struct import calcsize as _calcsize
-
-if __version__ != _ctypes_version:
- raise Exception, ("Version number mismatch", __version__, _ctypes_version)
-
-if _os.name in ("nt", "ce"):
- from _ctypes import FormatError
-
-DEFAULT_MODE = RTLD_LOCAL
-if _os.name == "posix" and _sys.platform == "darwin":
- import gestalt
-
- # gestalt.gestalt("sysv") returns the version number of the
- # currently active system file as BCD.
- # On OS X 10.4.6 -> 0x1046
- # On OS X 10.2.8 -> 0x1028
- # See also http://www.rgaros.nl/gestalt/
- #
- # On OS X 10.3, we use RTLD_GLOBAL as default mode
- # because RTLD_LOCAL does not work at least on some
- # libraries.
-
- if gestalt.gestalt("sysv") < 0x1040:
- DEFAULT_MODE = RTLD_GLOBAL
-
-from _ctypes import FUNCFLAG_CDECL as _FUNCFLAG_CDECL, \
- FUNCFLAG_PYTHONAPI as _FUNCFLAG_PYTHONAPI
-
-"""
-WINOLEAPI -> HRESULT
-WINOLEAPI_(type)
-
-STDMETHODCALLTYPE
-
-STDMETHOD(name)
-STDMETHOD_(type, name)
-
-STDAPICALLTYPE
-"""
-
-def create_string_buffer(init, size=None):
- """create_string_buffer(aString) -> character array
- create_string_buffer(anInteger) -> character array
- create_string_buffer(aString, anInteger) -> character array
- """
- if isinstance(init, (str, unicode)):
- if size is None:
- size = len(init)+1
- buftype = c_char * size
- buf = buftype()
- buf.value = init
- return buf
- elif isinstance(init, (int, long)):
- buftype = c_char * init
- buf = buftype()
- return buf
- raise TypeError, init
-
-def c_buffer(init, size=None):
-## "deprecated, use create_string_buffer instead"
-## import warnings
-## warnings.warn("c_buffer is deprecated, use create_string_buffer instead",
-## DeprecationWarning, stacklevel=2)
- return create_string_buffer(init, size)
-
-_c_functype_cache = {}
-def CFUNCTYPE(restype, *argtypes):
- """CFUNCTYPE(restype, *argtypes) -> function prototype.
-
- restype: the result type
- argtypes: a sequence specifying the argument types
-
- The function prototype can be called in different ways to create a
- callable object:
-
- prototype(integer address) -> foreign function
- prototype(callable) -> create and return a C callable function from callable
- prototype(integer index, method name[, paramflags]) -> foreign function calling a COM method
- prototype((ordinal number, dll object)[, paramflags]) -> foreign function exported by ordinal
- prototype((function name, dll object)[, paramflags]) -> foreign function exported by name
- """
- try:
- return _c_functype_cache[(restype, argtypes)]
- except KeyError:
- class CFunctionType(_CFuncPtr):
- _argtypes_ = argtypes
- _restype_ = restype
- _flags_ = _FUNCFLAG_CDECL
- _c_functype_cache[(restype, argtypes)] = CFunctionType
- return CFunctionType
-
-if _os.name in ("nt", "ce"):
- from _ctypes import LoadLibrary as _dlopen
- from _ctypes import FUNCFLAG_STDCALL as _FUNCFLAG_STDCALL
- if _os.name == "ce":
- # 'ce' doesn't have the stdcall calling convention
- _FUNCFLAG_STDCALL = _FUNCFLAG_CDECL
-
- _win_functype_cache = {}
- def WINFUNCTYPE(restype, *argtypes):
- # docstring set later (very similar to CFUNCTYPE.__doc__)
- try:
- return _win_functype_cache[(restype, argtypes)]
- except KeyError:
- class WinFunctionType(_CFuncPtr):
- _argtypes_ = argtypes
- _restype_ = restype
- _flags_ = _FUNCFLAG_STDCALL
- _win_functype_cache[(restype, argtypes)] = WinFunctionType
- return WinFunctionType
- if WINFUNCTYPE.__doc__:
- WINFUNCTYPE.__doc__ = CFUNCTYPE.__doc__.replace("CFUNCTYPE", "WINFUNCTYPE")
-
-elif _os.name == "posix":
- from _ctypes import dlopen as _dlopen
-
-from _ctypes import sizeof, byref, addressof, alignment, resize
-from _ctypes import _SimpleCData
-
-def _check_size(typ, typecode=None):
- # Check if sizeof(ctypes_type) against struct.calcsize. This
- # should protect somewhat against a misconfigured libffi.
- from struct import calcsize
- if typecode is None:
- # Most _type_ codes are the same as used in struct
- typecode = typ._type_
- actual, required = sizeof(typ), calcsize(typecode)
- if actual != required:
- raise SystemError("sizeof(%s) wrong: %d instead of %d" % \
- (typ, actual, required))
-
-class py_object(_SimpleCData):
- _type_ = "O"
- def __repr__(self):
- try:
- return super(py_object, self).__repr__()
- except ValueError:
- return "%s(<NULL>)" % type(self).__name__
-_check_size(py_object, "P")
-
-class c_short(_SimpleCData):
- _type_ = "h"
-_check_size(c_short)
-
-class c_ushort(_SimpleCData):
- _type_ = "H"
-_check_size(c_ushort)
-
-class c_long(_SimpleCData):
- _type_ = "l"
-_check_size(c_long)
-
-class c_ulong(_SimpleCData):
- _type_ = "L"
-_check_size(c_ulong)
-
-if _calcsize("i") == _calcsize("l"):
- # if int and long have the same size, make c_int an alias for c_long
- c_int = c_long
- c_uint = c_ulong
-else:
- class c_int(_SimpleCData):
- _type_ = "i"
- _check_size(c_int)
-
- class c_uint(_SimpleCData):
- _type_ = "I"
- _check_size(c_uint)
-
-class c_float(_SimpleCData):
- _type_ = "f"
-_check_size(c_float)
-
-class c_double(_SimpleCData):
- _type_ = "d"
-_check_size(c_double)
-
-if _calcsize("l") == _calcsize("q"):
- # if long and long long have the same size, make c_longlong an alias for c_long
- c_longlong = c_long
- c_ulonglong = c_ulong
-else:
- class c_longlong(_SimpleCData):
- _type_ = "q"
- _check_size(c_longlong)
-
- class c_ulonglong(_SimpleCData):
- _type_ = "Q"
- ## def from_param(cls, val):
- ## return ('d', float(val), val)
- ## from_param = classmethod(from_param)
- _check_size(c_ulonglong)
-
-class c_ubyte(_SimpleCData):
- _type_ = "B"
-c_ubyte.__ctype_le__ = c_ubyte.__ctype_be__ = c_ubyte
-# backward compatibility:
-##c_uchar = c_ubyte
-_check_size(c_ubyte)
-
-class c_byte(_SimpleCData):
- _type_ = "b"
-c_byte.__ctype_le__ = c_byte.__ctype_be__ = c_byte
-_check_size(c_byte)
-
-class c_char(_SimpleCData):
- _type_ = "c"
-c_char.__ctype_le__ = c_char.__ctype_be__ = c_char
-_check_size(c_char)
-
-class c_char_p(_SimpleCData):
- _type_ = "z"
-_check_size(c_char_p, "P")
-
-class c_void_p(_SimpleCData):
- _type_ = "P"
-c_voidp = c_void_p # backwards compatibility (to a bug)
-_check_size(c_void_p)
-
-# This cache maps types to pointers to them.
-_pointer_type_cache = {}
-
-def POINTER(cls):
- try:
- return _pointer_type_cache[cls]
- except KeyError:
- pass
- if type(cls) is str:
- klass = type(_Pointer)("LP_%s" % cls,
- (_Pointer,),
- {})
- _pointer_type_cache[id(klass)] = klass
- return klass
- else:
- name = "LP_%s" % cls.__name__
- klass = type(_Pointer)(name,
- (_Pointer,),
- {'_type_': cls})
- _pointer_type_cache[cls] = klass
- return klass
-
-try:
- from _ctypes import set_conversion_mode
-except ImportError:
- pass
-else:
- if _os.name in ("nt", "ce"):
- set_conversion_mode("mbcs", "ignore")
- else:
- set_conversion_mode("ascii", "strict")
-
- class c_wchar_p(_SimpleCData):
- _type_ = "Z"
-
- class c_wchar(_SimpleCData):
- _type_ = "u"
-
- POINTER(c_wchar).from_param = c_wchar_p.from_param #_SimpleCData.c_wchar_p_from_param
-
- def create_unicode_buffer(init, size=None):
- """create_unicode_buffer(aString) -> character array
- create_unicode_buffer(anInteger) -> character array
- create_unicode_buffer(aString, anInteger) -> character array
- """
- if isinstance(init, (str, unicode)):
- if size is None:
- size = len(init)+1
- buftype = c_wchar * size
- buf = buftype()
- buf.value = init
- return buf
- elif isinstance(init, (int, long)):
- buftype = c_wchar * init
- buf = buftype()
- return buf
- raise TypeError, init
-
-POINTER(c_char).from_param = c_char_p.from_param #_SimpleCData.c_char_p_from_param
-
-# XXX Deprecated
-def SetPointerType(pointer, cls):
- if _pointer_type_cache.get(cls, None) is not None:
- raise RuntimeError, \
- "This type already exists in the cache"
- if not _pointer_type_cache.has_key(id(pointer)):
- raise RuntimeError, \
- "What's this???"
- pointer.set_type(cls)
- _pointer_type_cache[cls] = pointer
- del _pointer_type_cache[id(pointer)]
-
-
-def pointer(inst):
- return POINTER(type(inst))(inst)
-
-# XXX Deprecated
-def ARRAY(typ, len):
- return typ * len
-
-################################################################
-
-
-class CDLL(object):
- """An instance of this class represents a loaded dll/shared
- library, exporting functions using the standard C calling
- convention (named 'cdecl' on Windows).
-
- The exported functions can be accessed as attributes, or by
- indexing with the function name. Examples:
-
- <obj>.qsort -> callable object
- <obj>['qsort'] -> callable object
-
- Calling the functions releases the Python GIL during the call and
- reaquires it afterwards.
- """
- class _FuncPtr(_CFuncPtr):
- _flags_ = _FUNCFLAG_CDECL
- _restype_ = c_int # default, can be overridden in instances
-
- def __init__(self, name, mode=DEFAULT_MODE, handle=None):
- self._name = name
- if handle is None:
- self._handle = _dlopen(self._name, mode)
- else:
- self._handle = handle
-
- def __repr__(self):
- return "<%s '%s', handle %x at %x>" % \
- (self.__class__.__name__, self._name,
- (self._handle & (_sys.maxint*2 + 1)),
- id(self) & (_sys.maxint*2 + 1))
-
- def __getattr__(self, name):
- if name.startswith('__') and name.endswith('__'):
- raise AttributeError, name
- func = self.__getitem__(name)
- setattr(self, name, func)
- return func
-
- def __getitem__(self, name_or_ordinal):
- func = self._FuncPtr((name_or_ordinal, self))
- if not isinstance(name_or_ordinal, (int, long)):
- func.__name__ = name_or_ordinal
- return func
-
-class PyDLL(CDLL):
- """This class represents the Python library itself. It allows to
- access Python API functions. The GIL is not released, and
- Python exceptions are handled correctly.
- """
- class _FuncPtr(_CFuncPtr):
- _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
- _restype_ = c_int # default, can be overridden in instances
-
-if _os.name in ("nt", "ce"):
-
- class WinDLL(CDLL):
- """This class represents a dll exporting functions using the
- Windows stdcall calling convention.
- """
- class _FuncPtr(_CFuncPtr):
- _flags_ = _FUNCFLAG_STDCALL
- _restype_ = c_int # default, can be overridden in instances
-
- # XXX Hm, what about HRESULT as normal parameter?
- # Mustn't it derive from c_long then?
- from _ctypes import _check_HRESULT, _SimpleCData
- class HRESULT(_SimpleCData):
- _type_ = "l"
- # _check_retval_ is called with the function's result when it
- # is used as restype. It checks for the FAILED bit, and
- # raises a WindowsError if it is set.
- #
- # The _check_retval_ method is implemented in C, so that the
- # method definition itself is not included in the traceback
- # when it raises an error - that is what we want (and Python
- # doesn't have a way to raise an exception in the caller's
- # frame).
- _check_retval_ = _check_HRESULT
-
- class OleDLL(CDLL):
- """This class represents a dll exporting functions using the
- Windows stdcall calling convention, and returning HRESULT.
- HRESULT error values are automatically raised as WindowsError
- exceptions.
- """
- class _FuncPtr(_CFuncPtr):
- _flags_ = _FUNCFLAG_STDCALL
- _restype_ = HRESULT
-
-class LibraryLoader(object):
- def __init__(self, dlltype):
- self._dlltype = dlltype
-
- def __getattr__(self, name):
- if name[0] == '_':
- raise AttributeError(name)
- dll = self._dlltype(name)
- setattr(self, name, dll)
- return dll
-
- def __getitem__(self, name):
- return getattr(self, name)
-
- def LoadLibrary(self, name):
- return self._dlltype(name)
-
-cdll = LibraryLoader(CDLL)
-pydll = LibraryLoader(PyDLL)
-
-if _os.name in ("nt", "ce"):
- pythonapi = PyDLL("python dll", None, _sys.dllhandle)
-elif _sys.platform == "cygwin":
- pythonapi = PyDLL("libpython%d.%d.dll" % _sys.version_info[:2])
-else:
- pythonapi = PyDLL(None)
-
-
-if _os.name in ("nt", "ce"):
- windll = LibraryLoader(WinDLL)
- oledll = LibraryLoader(OleDLL)
-
- if _os.name == "nt":
- GetLastError = windll.kernel32.GetLastError
- else:
- GetLastError = windll.coredll.GetLastError
-
- def WinError(code=None, descr=None):
- if code is None:
- code = GetLastError()
- if descr is None:
- descr = FormatError(code).strip()
- return WindowsError(code, descr)
-
-_pointer_type_cache[None] = c_void_p
-
-if sizeof(c_uint) == sizeof(c_void_p):
- c_size_t = c_uint
-elif sizeof(c_ulong) == sizeof(c_void_p):
- c_size_t = c_ulong
-
-# functions
-
-from _ctypes import _memmove_addr, _memset_addr, _string_at_addr, _cast_addr
-
-## void *memmove(void *, const void *, size_t);
-memmove = CFUNCTYPE(c_void_p, c_void_p, c_void_p, c_size_t)(_memmove_addr)
-
-## void *memset(void *, int, size_t)
-memset = CFUNCTYPE(c_void_p, c_void_p, c_int, c_size_t)(_memset_addr)
-
-def PYFUNCTYPE(restype, *argtypes):
- class CFunctionType(_CFuncPtr):
- _argtypes_ = argtypes
- _restype_ = restype
- _flags_ = _FUNCFLAG_CDECL | _FUNCFLAG_PYTHONAPI
- return CFunctionType
-
-_cast = PYFUNCTYPE(py_object, c_void_p, py_object, py_object)(_cast_addr)
-def cast(obj, typ):
- return _cast(obj, obj, typ)
-
-_string_at = CFUNCTYPE(py_object, c_void_p, c_int)(_string_at_addr)
-def string_at(ptr, size=-1):
- """string_at(addr[, size]) -> string
-
- Return the string at addr."""
- return _string_at(ptr, size)
-
-try:
- from _ctypes import _wstring_at_addr
-except ImportError:
- pass
-else:
- _wstring_at = CFUNCTYPE(py_object, c_void_p, c_int)(_wstring_at_addr)
- def wstring_at(ptr, size=-1):
- """wstring_at(addr[, size]) -> string
-
- Return the string at addr."""
- return _wstring_at(ptr, size)
-
-
-if _os.name in ("nt", "ce"): # COM stuff
- def DllGetClassObject(rclsid, riid, ppv):
- try:
- ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
- except ImportError:
- return -2147221231 # CLASS_E_CLASSNOTAVAILABLE
- else:
- return ccom.DllGetClassObject(rclsid, riid, ppv)
-
- def DllCanUnloadNow():
- try:
- ccom = __import__("comtypes.server.inprocserver", globals(), locals(), ['*'])
- except ImportError:
- return 0 # S_OK
- return ccom.DllCanUnloadNow()
-
-from ctypes._endian import BigEndianStructure, LittleEndianStructure
-
-# Fill in specifically-sized types
-c_int8 = c_byte
-c_uint8 = c_ubyte
-for kind in [c_short, c_int, c_long, c_longlong]:
- if sizeof(kind) == 2: c_int16 = kind
- elif sizeof(kind) == 4: c_int32 = kind
- elif sizeof(kind) == 8: c_int64 = kind
-for kind in [c_ushort, c_uint, c_ulong, c_ulonglong]:
- if sizeof(kind) == 2: c_uint16 = kind
- elif sizeof(kind) == 4: c_uint32 = kind
- elif sizeof(kind) == 8: c_uint64 = kind
-del(kind)
diff --git a/sys/lib/python/ctypes/_endian.py b/sys/lib/python/ctypes/_endian.py
deleted file mode 100644
index 6de0d47b2..000000000
--- a/sys/lib/python/ctypes/_endian.py
+++ /dev/null
@@ -1,60 +0,0 @@
-######################################################################
-# This file should be kept compatible with Python 2.3, see PEP 291. #
-######################################################################
-import sys
-from ctypes import *
-
-_array_type = type(c_int * 3)
-
-def _other_endian(typ):
- """Return the type with the 'other' byte order. Simple types like
- c_int and so on already have __ctype_be__ and __ctype_le__
- attributes which contain the types, for more complicated types
- only arrays are supported.
- """
- try:
- return getattr(typ, _OTHER_ENDIAN)
- except AttributeError:
- if type(typ) == _array_type:
- return _other_endian(typ._type_) * typ._length_
- raise TypeError("This type does not support other endian: %s" % typ)
-
-class _swapped_meta(type(Structure)):
- def __setattr__(self, attrname, value):
- if attrname == "_fields_":
- fields = []
- for desc in value:
- name = desc[0]
- typ = desc[1]
- rest = desc[2:]
- fields.append((name, _other_endian(typ)) + rest)
- value = fields
- super(_swapped_meta, self).__setattr__(attrname, value)
-
-################################################################
-
-# Note: The Structure metaclass checks for the *presence* (not the
-# value!) of a _swapped_bytes_ attribute to determine the bit order in
-# structures containing bit fields.
-
-if sys.byteorder == "little":
- _OTHER_ENDIAN = "__ctype_be__"
-
- LittleEndianStructure = Structure
-
- class BigEndianStructure(Structure):
- """Structure with big endian byte order"""
- __metaclass__ = _swapped_meta
- _swappedbytes_ = None
-
-elif sys.byteorder == "big":
- _OTHER_ENDIAN = "__ctype_le__"
-
- BigEndianStructure = Structure
- class LittleEndianStructure(Structure):
- """Structure with little endian byte order"""
- __metaclass__ = _swapped_meta
- _swappedbytes_ = None
-
-else:
- raise RuntimeError("Invalid byteorder")
diff --git a/sys/lib/python/ctypes/macholib/README.ctypes b/sys/lib/python/ctypes/macholib/README.ctypes
deleted file mode 100644
index 4e10cbe41..000000000
--- a/sys/lib/python/ctypes/macholib/README.ctypes
+++ /dev/null
@@ -1,7 +0,0 @@
-Files in this directory from from Bob Ippolito's py2app.
-
-License: Any components of the py2app suite may be distributed under
-the MIT or PSF open source licenses.
-
-This is version 1.0, SVN revision 789, from 2006/01/25.
-The main repository is http://svn.red-bean.com/bob/macholib/trunk/macholib/ \ No newline at end of file
diff --git a/sys/lib/python/ctypes/macholib/__init__.py b/sys/lib/python/ctypes/macholib/__init__.py
deleted file mode 100644
index 36149d28a..000000000
--- a/sys/lib/python/ctypes/macholib/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-######################################################################
-# This file should be kept compatible with Python 2.3, see PEP 291. #
-######################################################################
-"""
-Enough Mach-O to make your head spin.
-
-See the relevant header files in /usr/include/mach-o
-
-And also Apple's documentation.
-"""
-
-__version__ = '1.0'
diff --git a/sys/lib/python/ctypes/macholib/dyld.py b/sys/lib/python/ctypes/macholib/dyld.py
deleted file mode 100644
index 14e21395e..000000000
--- a/sys/lib/python/ctypes/macholib/dyld.py
+++ /dev/null
@@ -1,169 +0,0 @@
-######################################################################
-# This file should be kept compatible with Python 2.3, see PEP 291. #
-######################################################################
-"""
-dyld emulation
-"""
-
-import os
-from framework import framework_info
-from dylib import dylib_info
-from itertools import *
-
-__all__ = [
- 'dyld_find', 'framework_find',
- 'framework_info', 'dylib_info',
-]
-
-# These are the defaults as per man dyld(1)
-#
-DEFAULT_FRAMEWORK_FALLBACK = [
- os.path.expanduser("~/Library/Frameworks"),
- "/Library/Frameworks",
- "/Network/Library/Frameworks",
- "/System/Library/Frameworks",
-]
-
-DEFAULT_LIBRARY_FALLBACK = [
- os.path.expanduser("~/lib"),
- "/usr/local/lib",
- "/lib",
- "/usr/lib",
-]
-
-def ensure_utf8(s):
- """Not all of PyObjC and Python understand unicode paths very well yet"""
- if isinstance(s, unicode):
- return s.encode('utf8')
- return s
-
-def dyld_env(env, var):
- if env is None:
- env = os.environ
- rval = env.get(var)
- if rval is None:
- return []
- return rval.split(':')
-
-def dyld_image_suffix(env=None):
- if env is None:
- env = os.environ
- return env.get('DYLD_IMAGE_SUFFIX')
-
-def dyld_framework_path(env=None):
- return dyld_env(env, 'DYLD_FRAMEWORK_PATH')
-
-def dyld_library_path(env=None):
- return dyld_env(env, 'DYLD_LIBRARY_PATH')
-
-def dyld_fallback_framework_path(env=None):
- return dyld_env(env, 'DYLD_FALLBACK_FRAMEWORK_PATH')
-
-def dyld_fallback_library_path(env=None):
- return dyld_env(env, 'DYLD_FALLBACK_LIBRARY_PATH')
-
-def dyld_image_suffix_search(iterator, env=None):
- """For a potential path iterator, add DYLD_IMAGE_SUFFIX semantics"""
- suffix = dyld_image_suffix(env)
- if suffix is None:
- return iterator
- def _inject(iterator=iterator, suffix=suffix):
- for path in iterator:
- if path.endswith('.dylib'):
- yield path[:-len('.dylib')] + suffix + '.dylib'
- else:
- yield path + suffix
- yield path
- return _inject()
-
-def dyld_override_search(name, env=None):
- # If DYLD_FRAMEWORK_PATH is set and this dylib_name is a
- # framework name, use the first file that exists in the framework
- # path if any. If there is none go on to search the DYLD_LIBRARY_PATH
- # if any.
-
- framework = framework_info(name)
-
- if framework is not None:
- for path in dyld_framework_path(env):
- yield os.path.join(path, framework['name'])
-
- # If DYLD_LIBRARY_PATH is set then use the first file that exists
- # in the path. If none use the original name.
- for path in dyld_library_path(env):
- yield os.path.join(path, os.path.basename(name))
-
-def dyld_executable_path_search(name, executable_path=None):
- # If we haven't done any searching and found a library and the
- # dylib_name starts with "@executable_path/" then construct the
- # library name.
- if name.startswith('@executable_path/') and executable_path is not None:
- yield os.path.join(executable_path, name[len('@executable_path/'):])
-
-def dyld_default_search(name, env=None):
- yield name
-
- framework = framework_info(name)
-
- if framework is not None:
- fallback_framework_path = dyld_fallback_framework_path(env)
- for path in fallback_framework_path:
- yield os.path.join(path, framework['name'])
-
- fallback_library_path = dyld_fallback_library_path(env)
- for path in fallback_library_path:
- yield os.path.join(path, os.path.basename(name))
-
- if framework is not None and not fallback_framework_path:
- for path in DEFAULT_FRAMEWORK_FALLBACK:
- yield os.path.join(path, framework['name'])
-
- if not fallback_library_path:
- for path in DEFAULT_LIBRARY_FALLBACK:
- yield os.path.join(path, os.path.basename(name))
-
-def dyld_find(name, executable_path=None, env=None):
- """
- Find a library or framework using dyld semantics
- """
- name = ensure_utf8(name)
- executable_path = ensure_utf8(executable_path)
- for path in dyld_image_suffix_search(chain(
- dyld_override_search(name, env),
- dyld_executable_path_search(name, executable_path),
- dyld_default_search(name, env),
- ), env):
- if os.path.isfile(path):
- return path
- raise ValueError, "dylib %s could not be found" % (name,)
-
-def framework_find(fn, executable_path=None, env=None):
- """
- Find a framework using dyld semantics in a very loose manner.
-
- Will take input such as:
- Python
- Python.framework
- Python.framework/Versions/Current
- """
- try:
- return dyld_find(fn, executable_path=executable_path, env=env)
- except ValueError, e:
- pass
- fmwk_index = fn.rfind('.framework')
- if fmwk_index == -1:
- fmwk_index = len(fn)
- fn += '.framework'
- fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
- try:
- return dyld_find(fn, executable_path=executable_path, env=env)
- except ValueError:
- raise e
-
-def test_dyld_find():
- env = {}
- assert dyld_find('libSystem.dylib') == '/usr/lib/libSystem.dylib'
- assert dyld_find('System.framework/System') == '/System/Library/Frameworks/System.framework/System'
-
-if __name__ == '__main__':
- test_dyld_find()
diff --git a/sys/lib/python/ctypes/macholib/dylib.py b/sys/lib/python/ctypes/macholib/dylib.py
deleted file mode 100644
index ea3dd38bd..000000000
--- a/sys/lib/python/ctypes/macholib/dylib.py
+++ /dev/null
@@ -1,66 +0,0 @@
-######################################################################
-# This file should be kept compatible with Python 2.3, see PEP 291. #
-######################################################################
-"""
-Generic dylib path manipulation
-"""
-
-import re
-
-__all__ = ['dylib_info']
-
-DYLIB_RE = re.compile(r"""(?x)
-(?P<location>^.*)(?:^|/)
-(?P<name>
- (?P<shortname>\w+?)
- (?:\.(?P<version>[^._]+))?
- (?:_(?P<suffix>[^._]+))?
- \.dylib$
-)
-""")
-
-def dylib_info(filename):
- """
- A dylib name can take one of the following four forms:
- Location/Name.SomeVersion_Suffix.dylib
- Location/Name.SomeVersion.dylib
- Location/Name_Suffix.dylib
- Location/Name.dylib
-
- returns None if not found or a mapping equivalent to:
- dict(
- location='Location',
- name='Name.SomeVersion_Suffix.dylib',
- shortname='Name',
- version='SomeVersion',
- suffix='Suffix',
- )
-
- Note that SomeVersion and Suffix are optional and may be None
- if not present.
- """
- is_dylib = DYLIB_RE.match(filename)
- if not is_dylib:
- return None
- return is_dylib.groupdict()
-
-
-def test_dylib_info():
- def d(location=None, name=None, shortname=None, version=None, suffix=None):
- return dict(
- location=location,
- name=name,
- shortname=shortname,
- version=version,
- suffix=suffix
- )
- assert dylib_info('completely/invalid') is None
- assert dylib_info('completely/invalide_debug') is None
- assert dylib_info('P/Foo.dylib') == d('P', 'Foo.dylib', 'Foo')
- assert dylib_info('P/Foo_debug.dylib') == d('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
- assert dylib_info('P/Foo.A.dylib') == d('P', 'Foo.A.dylib', 'Foo', 'A')
- assert dylib_info('P/Foo_debug.A.dylib') == d('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
- assert dylib_info('P/Foo.A_debug.dylib') == d('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')
-
-if __name__ == '__main__':
- test_dylib_info()
diff --git a/sys/lib/python/ctypes/macholib/fetch_macholib b/sys/lib/python/ctypes/macholib/fetch_macholib
deleted file mode 100755
index e6d6a2265..000000000
--- a/sys/lib/python/ctypes/macholib/fetch_macholib
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-svn export --force http://svn.red-bean.com/bob/macholib/trunk/macholib/ .
diff --git a/sys/lib/python/ctypes/macholib/fetch_macholib.bat b/sys/lib/python/ctypes/macholib/fetch_macholib.bat
deleted file mode 100644
index f9e1c0dc9..000000000
--- a/sys/lib/python/ctypes/macholib/fetch_macholib.bat
+++ /dev/null
@@ -1 +0,0 @@
-svn export --force http://svn.red-bean.com/bob/macholib/trunk/macholib/ .
diff --git a/sys/lib/python/ctypes/macholib/framework.py b/sys/lib/python/ctypes/macholib/framework.py
deleted file mode 100644
index dd7fb2f29..000000000
--- a/sys/lib/python/ctypes/macholib/framework.py
+++ /dev/null
@@ -1,68 +0,0 @@
-######################################################################
-# This file should be kept compatible with Python 2.3, see PEP 291. #
-######################################################################
-"""
-Generic framework path manipulation
-"""
-
-import re
-
-__all__ = ['framework_info']
-
-STRICT_FRAMEWORK_RE = re.compile(r"""(?x)
-(?P<location>^.*)(?:^|/)
-(?P<name>
- (?P<shortname>\w+).framework/
- (?:Versions/(?P<version>[^/]+)/)?
- (?P=shortname)
- (?:_(?P<suffix>[^_]+))?
-)$
-""")
-
-def framework_info(filename):
- """
- A framework name can take one of the following four forms:
- Location/Name.framework/Versions/SomeVersion/Name_Suffix
- Location/Name.framework/Versions/SomeVersion/Name
- Location/Name.framework/Name_Suffix
- Location/Name.framework/Name
-
- returns None if not found, or a mapping equivalent to:
- dict(
- location='Location',
- name='Name.framework/Versions/SomeVersion/Name_Suffix',
- shortname='Name',
- version='SomeVersion',
- suffix='Suffix',
- )
-
- Note that SomeVersion and Suffix are optional and may be None
- if not present
- """
- is_framework = STRICT_FRAMEWORK_RE.match(filename)
- if not is_framework:
- return None
- return is_framework.groupdict()
-
-def test_framework_info():
- def d(location=None, name=None, shortname=None, version=None, suffix=None):
- return dict(
- location=location,
- name=name,
- shortname=shortname,
- version=version,
- suffix=suffix
- )
- assert framework_info('completely/invalid') is None
- assert framework_info('completely/invalid/_debug') is None
- assert framework_info('P/F.framework') is None
- assert framework_info('P/F.framework/_debug') is None
- assert framework_info('P/F.framework/F') == d('P', 'F.framework/F', 'F')
- assert framework_info('P/F.framework/F_debug') == d('P', 'F.framework/F_debug', 'F', suffix='debug')
- assert framework_info('P/F.framework/Versions') is None
- assert framework_info('P/F.framework/Versions/A') is None
- assert framework_info('P/F.framework/Versions/A/F') == d('P', 'F.framework/Versions/A/F', 'F', 'A')
- assert framework_info('P/F.framework/Versions/A/F_debug') == d('P', 'F.framework/Versions/A/F_debug', 'F', 'A', 'debug')
-
-if __name__ == '__main__':
- test_framework_info()
diff --git a/sys/lib/python/ctypes/util.py b/sys/lib/python/ctypes/util.py
deleted file mode 100644
index f7133538b..000000000
--- a/sys/lib/python/ctypes/util.py
+++ /dev/null
@@ -1,154 +0,0 @@
-######################################################################
-# This file should be kept compatible with Python 2.3, see PEP 291. #
-######################################################################
-import sys, os
-
-# find_library(name) returns the pathname of a library, or None.
-if os.name == "nt":
- def find_library(name):
- # See MSDN for the REAL search order.
- for directory in os.environ['PATH'].split(os.pathsep):
- fname = os.path.join(directory, name)
- if os.path.exists(fname):
- return fname
- if fname.lower().endswith(".dll"):
- continue
- fname = fname + ".dll"
- if os.path.exists(fname):
- return fname
- return None
-
-if os.name == "ce":
- # search path according to MSDN:
- # - absolute path specified by filename
- # - The .exe launch directory
- # - the Windows directory
- # - ROM dll files (where are they?)
- # - OEM specified search path: HKLM\Loader\SystemPath
- def find_library(name):
- return name
-
-if os.name == "posix" and sys.platform == "darwin":
- from ctypes.macholib.dyld import dyld_find as _dyld_find
- def find_library(name):
- possible = ['lib%s.dylib' % name,
- '%s.dylib' % name,
- '%s.framework/%s' % (name, name)]
- for name in possible:
- try:
- return _dyld_find(name)
- except ValueError:
- continue
- return None
-
-elif os.name == "posix":
- # Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
- import re, tempfile, errno
-
- def _findLib_gcc(name):
- expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
- fdout, ccout = tempfile.mkstemp()
- os.close(fdout)
- cmd = 'if type gcc >/dev/null 2>&1; then CC=gcc; else CC=cc; fi;' \
- '$CC -Wl,-t -o ' + ccout + ' 2>&1 -l' + name
- try:
- f = os.popen(cmd)
- trace = f.read()
- f.close()
- finally:
- try:
- os.unlink(ccout)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- res = re.search(expr, trace)
- if not res:
- return None
- return res.group(0)
-
- def _get_soname(f):
- # assuming GNU binutils / ELF
- if not f:
- return None
- cmd = "objdump -p -j .dynamic 2>/dev/null " + f
- res = re.search(r'\sSONAME\s+([^\s]+)', os.popen(cmd).read())
- if not res:
- return None
- return res.group(1)
-
- if (sys.platform.startswith("freebsd")
- or sys.platform.startswith("openbsd")
- or sys.platform.startswith("dragonfly")):
-
- def _num_version(libname):
- # "libxyz.so.MAJOR.MINOR" => [ MAJOR, MINOR ]
- parts = libname.split(".")
- nums = []
- try:
- while parts:
- nums.insert(0, int(parts.pop()))
- except ValueError:
- pass
- return nums or [ sys.maxint ]
-
- def find_library(name):
- ename = re.escape(name)
- expr = r':-l%s\.\S+ => \S*/(lib%s\.\S+)' % (ename, ename)
- res = re.findall(expr,
- os.popen('/sbin/ldconfig -r 2>/dev/null').read())
- if not res:
- return _get_soname(_findLib_gcc(name))
- res.sort(cmp= lambda x,y: cmp(_num_version(x), _num_version(y)))
- return res[-1]
-
- else:
-
- def _findLib_ldconfig(name):
- # XXX assuming GLIBC's ldconfig (with option -p)
- expr = r'/[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
- res = re.search(expr,
- os.popen('/sbin/ldconfig -p 2>/dev/null').read())
- if not res:
- # Hm, this works only for libs needed by the python executable.
- cmd = 'ldd %s 2>/dev/null' % sys.executable
- res = re.search(expr, os.popen(cmd).read())
- if not res:
- return None
- return res.group(0)
-
- def find_library(name):
- return _get_soname(_findLib_ldconfig(name) or _findLib_gcc(name))
-
-################################################################
-# test code
-
-def test():
- from ctypes import cdll
- if os.name == "nt":
- print cdll.msvcrt
- print cdll.load("msvcrt")
- print find_library("msvcrt")
-
- if os.name == "posix":
- # find and load_version
- print find_library("m")
- print find_library("c")
- print find_library("bz2")
-
- # getattr
-## print cdll.m
-## print cdll.bz2
-
- # load
- if sys.platform == "darwin":
- print cdll.LoadLibrary("libm.dylib")
- print cdll.LoadLibrary("libcrypto.dylib")
- print cdll.LoadLibrary("libSystem.dylib")
- print cdll.LoadLibrary("System.framework/System")
- else:
- print cdll.LoadLibrary("libm.so")
- print cdll.LoadLibrary("libcrypt.so")
- print find_library("crypt")
-
-if __name__ == "__main__":
- test()
diff --git a/sys/lib/python/ctypes/wintypes.py b/sys/lib/python/ctypes/wintypes.py
deleted file mode 100644
index 97682331f..000000000
--- a/sys/lib/python/ctypes/wintypes.py
+++ /dev/null
@@ -1,172 +0,0 @@
-######################################################################
-# This file should be kept compatible with Python 2.3, see PEP 291. #
-######################################################################
-
-# The most useful windows datatypes
-from ctypes import *
-
-BYTE = c_byte
-WORD = c_ushort
-DWORD = c_ulong
-
-WCHAR = c_wchar
-UINT = c_uint
-
-DOUBLE = c_double
-
-BOOLEAN = BYTE
-BOOL = c_long
-
-from ctypes import _SimpleCData
-class VARIANT_BOOL(_SimpleCData):
- _type_ = "v"
- def __repr__(self):
- return "%s(%r)" % (self.__class__.__name__, self.value)
-
-ULONG = c_ulong
-LONG = c_long
-
-# in the windows header files, these are structures.
-_LARGE_INTEGER = LARGE_INTEGER = c_longlong
-_ULARGE_INTEGER = ULARGE_INTEGER = c_ulonglong
-
-LPCOLESTR = LPOLESTR = OLESTR = c_wchar_p
-LPCWSTR = LPWSTR = c_wchar_p
-LPCSTR = LPSTR = c_char_p
-
-WPARAM = c_uint
-LPARAM = c_long
-
-ATOM = WORD
-LANGID = WORD
-
-COLORREF = DWORD
-LGRPID = DWORD
-LCTYPE = DWORD
-
-LCID = DWORD
-
-################################################################
-# HANDLE types
-HANDLE = c_ulong # in the header files: void *
-
-HACCEL = HANDLE
-HBITMAP = HANDLE
-HBRUSH = HANDLE
-HCOLORSPACE = HANDLE
-HDC = HANDLE
-HDESK = HANDLE
-HDWP = HANDLE
-HENHMETAFILE = HANDLE
-HFONT = HANDLE
-HGDIOBJ = HANDLE
-HGLOBAL = HANDLE
-HHOOK = HANDLE
-HICON = HANDLE
-HINSTANCE = HANDLE
-HKEY = HANDLE
-HKL = HANDLE
-HLOCAL = HANDLE
-HMENU = HANDLE
-HMETAFILE = HANDLE
-HMODULE = HANDLE
-HMONITOR = HANDLE
-HPALETTE = HANDLE
-HPEN = HANDLE
-HRGN = HANDLE
-HRSRC = HANDLE
-HSTR = HANDLE
-HTASK = HANDLE
-HWINSTA = HANDLE
-HWND = HANDLE
-SC_HANDLE = HANDLE
-SERVICE_STATUS_HANDLE = HANDLE
-
-################################################################
-# Some important structure definitions
-
-class RECT(Structure):
- _fields_ = [("left", c_long),
- ("top", c_long),
- ("right", c_long),
- ("bottom", c_long)]
-tagRECT = _RECTL = RECTL = RECT
-
-class _SMALL_RECT(Structure):
- _fields_ = [('Left', c_short),
- ('Top', c_short),
- ('Right', c_short),
- ('Bottom', c_short)]
-SMALL_RECT = _SMALL_RECT
-
-class _COORD(Structure):
- _fields_ = [('X', c_short),
- ('Y', c_short)]
-
-class POINT(Structure):
- _fields_ = [("x", c_long),
- ("y", c_long)]
-tagPOINT = _POINTL = POINTL = POINT
-
-class SIZE(Structure):
- _fields_ = [("cx", c_long),
- ("cy", c_long)]
-tagSIZE = SIZEL = SIZE
-
-def RGB(red, green, blue):
- return red + (green << 8) + (blue << 16)
-
-class FILETIME(Structure):
- _fields_ = [("dwLowDateTime", DWORD),
- ("dwHighDateTime", DWORD)]
-_FILETIME = FILETIME
-
-class MSG(Structure):
- _fields_ = [("hWnd", HWND),
- ("message", c_uint),
- ("wParam", WPARAM),
- ("lParam", LPARAM),
- ("time", DWORD),
- ("pt", POINT)]
-tagMSG = MSG
-MAX_PATH = 260
-
-class WIN32_FIND_DATAA(Structure):
- _fields_ = [("dwFileAttributes", DWORD),
- ("ftCreationTime", FILETIME),
- ("ftLastAccessTime", FILETIME),
- ("ftLastWriteTime", FILETIME),
- ("nFileSizeHigh", DWORD),
- ("nFileSizeLow", DWORD),
- ("dwReserved0", DWORD),
- ("dwReserved1", DWORD),
- ("cFileName", c_char * MAX_PATH),
- ("cAlternameFileName", c_char * 14)]
-
-class WIN32_FIND_DATAW(Structure):
- _fields_ = [("dwFileAttributes", DWORD),
- ("ftCreationTime", FILETIME),
- ("ftLastAccessTime", FILETIME),
- ("ftLastWriteTime", FILETIME),
- ("nFileSizeHigh", DWORD),
- ("nFileSizeLow", DWORD),
- ("dwReserved0", DWORD),
- ("dwReserved1", DWORD),
- ("cFileName", c_wchar * MAX_PATH),
- ("cAlternameFileName", c_wchar * 14)]
-
-__all__ = ['ATOM', 'BOOL', 'BOOLEAN', 'BYTE', 'COLORREF', 'DOUBLE',
- 'DWORD', 'FILETIME', 'HACCEL', 'HANDLE', 'HBITMAP', 'HBRUSH',
- 'HCOLORSPACE', 'HDC', 'HDESK', 'HDWP', 'HENHMETAFILE', 'HFONT',
- 'HGDIOBJ', 'HGLOBAL', 'HHOOK', 'HICON', 'HINSTANCE', 'HKEY',
- 'HKL', 'HLOCAL', 'HMENU', 'HMETAFILE', 'HMODULE', 'HMONITOR',
- 'HPALETTE', 'HPEN', 'HRGN', 'HRSRC', 'HSTR', 'HTASK', 'HWINSTA',
- 'HWND', 'LANGID', 'LARGE_INTEGER', 'LCID', 'LCTYPE', 'LGRPID',
- 'LONG', 'LPARAM', 'LPCOLESTR', 'LPCSTR', 'LPCWSTR', 'LPOLESTR',
- 'LPSTR', 'LPWSTR', 'MAX_PATH', 'MSG', 'OLESTR', 'POINT',
- 'POINTL', 'RECT', 'RECTL', 'RGB', 'SC_HANDLE',
- 'SERVICE_STATUS_HANDLE', 'SIZE', 'SIZEL', 'SMALL_RECT', 'UINT',
- 'ULARGE_INTEGER', 'ULONG', 'VARIANT_BOOL', 'WCHAR',
- 'WIN32_FIND_DATAA', 'WIN32_FIND_DATAW', 'WORD', 'WPARAM', '_COORD',
- '_FILETIME', '_LARGE_INTEGER', '_POINTL', '_RECTL', '_SMALL_RECT',
- '_ULARGE_INTEGER', 'tagMSG', 'tagPOINT', 'tagRECT', 'tagSIZE']
diff --git a/sys/lib/python/curses/__init__.py b/sys/lib/python/curses/__init__.py
deleted file mode 100644
index 3efc17986..000000000
--- a/sys/lib/python/curses/__init__.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""curses
-
-The main package for curses support for Python. Normally used by importing
-the package, and perhaps a particular module inside it.
-
- import curses
- from curses import textpad
- curses.initwin()
- ...
-
-"""
-
-__revision__ = "$Id: __init__.py 36560 2004-07-18 06:16:08Z tim_one $"
-
-from _curses import *
-from curses.wrapper import wrapper
-
-# Some constants, most notably the ACS_* ones, are only added to the C
-# _curses module's dictionary after initscr() is called. (Some
-# versions of SGI's curses don't define values for those constants
-# until initscr() has been called.) This wrapper function calls the
-# underlying C initscr(), and then copies the constants from the
-# _curses module to the curses package's dictionary. Don't do 'from
-# curses import *' if you'll be needing the ACS_* constants.
-
-def initscr():
- import _curses, curses
- stdscr = _curses.initscr()
- for key, value in _curses.__dict__.items():
- if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
- setattr(curses, key, value)
-
- return stdscr
-
-# This is a similar wrapper for start_color(), which adds the COLORS and
-# COLOR_PAIRS variables which are only available after start_color() is
-# called.
-
-def start_color():
- import _curses, curses
- retval = _curses.start_color()
- if hasattr(_curses, 'COLORS'):
- curses.COLORS = _curses.COLORS
- if hasattr(_curses, 'COLOR_PAIRS'):
- curses.COLOR_PAIRS = _curses.COLOR_PAIRS
- return retval
-
-# Import Python has_key() implementation if _curses doesn't contain has_key()
-
-try:
- has_key
-except NameError:
- from has_key import has_key
diff --git a/sys/lib/python/curses/ascii.py b/sys/lib/python/curses/ascii.py
deleted file mode 100644
index 800fd8b4b..000000000
--- a/sys/lib/python/curses/ascii.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""Constants and membership tests for ASCII characters"""
-
-NUL = 0x00 # ^@
-SOH = 0x01 # ^A
-STX = 0x02 # ^B
-ETX = 0x03 # ^C
-EOT = 0x04 # ^D
-ENQ = 0x05 # ^E
-ACK = 0x06 # ^F
-BEL = 0x07 # ^G
-BS = 0x08 # ^H
-TAB = 0x09 # ^I
-HT = 0x09 # ^I
-LF = 0x0a # ^J
-NL = 0x0a # ^J
-VT = 0x0b # ^K
-FF = 0x0c # ^L
-CR = 0x0d # ^M
-SO = 0x0e # ^N
-SI = 0x0f # ^O
-DLE = 0x10 # ^P
-DC1 = 0x11 # ^Q
-DC2 = 0x12 # ^R
-DC3 = 0x13 # ^S
-DC4 = 0x14 # ^T
-NAK = 0x15 # ^U
-SYN = 0x16 # ^V
-ETB = 0x17 # ^W
-CAN = 0x18 # ^X
-EM = 0x19 # ^Y
-SUB = 0x1a # ^Z
-ESC = 0x1b # ^[
-FS = 0x1c # ^\
-GS = 0x1d # ^]
-RS = 0x1e # ^^
-US = 0x1f # ^_
-SP = 0x20 # space
-DEL = 0x7f # delete
-
-controlnames = [
-"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
-"BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI",
-"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
-"CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US",
-"SP"
-]
-
-def _ctoi(c):
- if type(c) == type(""):
- return ord(c)
- else:
- return c
-
-def isalnum(c): return isalpha(c) or isdigit(c)
-def isalpha(c): return isupper(c) or islower(c)
-def isascii(c): return _ctoi(c) <= 127 # ?
-def isblank(c): return _ctoi(c) in (8,32)
-def iscntrl(c): return _ctoi(c) <= 31
-def isdigit(c): return _ctoi(c) >= 48 and _ctoi(c) <= 57
-def isgraph(c): return _ctoi(c) >= 33 and _ctoi(c) <= 126
-def islower(c): return _ctoi(c) >= 97 and _ctoi(c) <= 122
-def isprint(c): return _ctoi(c) >= 32 and _ctoi(c) <= 126
-def ispunct(c): return _ctoi(c) != 32 and not isalnum(c)
-def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
-def isupper(c): return _ctoi(c) >= 65 and _ctoi(c) <= 90
-def isxdigit(c): return isdigit(c) or \
- (_ctoi(c) >= 65 and _ctoi(c) <= 70) or (_ctoi(c) >= 97 and _ctoi(c) <= 102)
-def isctrl(c): return _ctoi(c) < 32
-def ismeta(c): return _ctoi(c) > 127
-
-def ascii(c):
- if type(c) == type(""):
- return chr(_ctoi(c) & 0x7f)
- else:
- return _ctoi(c) & 0x7f
-
-def ctrl(c):
- if type(c) == type(""):
- return chr(_ctoi(c) & 0x1f)
- else:
- return _ctoi(c) & 0x1f
-
-def alt(c):
- if type(c) == type(""):
- return chr(_ctoi(c) | 0x80)
- else:
- return _ctoi(c) | 0x80
-
-def unctrl(c):
- bits = _ctoi(c)
- if bits == 0x7f:
- rep = "^?"
- elif isprint(bits & 0x7f):
- rep = chr(bits & 0x7f)
- else:
- rep = "^" + chr(((bits & 0x7f) | 0x20) + 0x20)
- if bits & 0x80:
- return "!" + rep
- return rep
diff --git a/sys/lib/python/curses/has_key.py b/sys/lib/python/curses/has_key.py
deleted file mode 100644
index 60b7be994..000000000
--- a/sys/lib/python/curses/has_key.py
+++ /dev/null
@@ -1,192 +0,0 @@
-
-#
-# Emulation of has_key() function for platforms that don't use ncurses
-#
-
-import _curses
-
-# Table mapping curses keys to the terminfo capability name
-
-_capability_names = {
- _curses.KEY_A1: 'ka1',
- _curses.KEY_A3: 'ka3',
- _curses.KEY_B2: 'kb2',
- _curses.KEY_BACKSPACE: 'kbs',
- _curses.KEY_BEG: 'kbeg',
- _curses.KEY_BTAB: 'kcbt',
- _curses.KEY_C1: 'kc1',
- _curses.KEY_C3: 'kc3',
- _curses.KEY_CANCEL: 'kcan',
- _curses.KEY_CATAB: 'ktbc',
- _curses.KEY_CLEAR: 'kclr',
- _curses.KEY_CLOSE: 'kclo',
- _curses.KEY_COMMAND: 'kcmd',
- _curses.KEY_COPY: 'kcpy',
- _curses.KEY_CREATE: 'kcrt',
- _curses.KEY_CTAB: 'kctab',
- _curses.KEY_DC: 'kdch1',
- _curses.KEY_DL: 'kdl1',
- _curses.KEY_DOWN: 'kcud1',
- _curses.KEY_EIC: 'krmir',
- _curses.KEY_END: 'kend',
- _curses.KEY_ENTER: 'kent',
- _curses.KEY_EOL: 'kel',
- _curses.KEY_EOS: 'ked',
- _curses.KEY_EXIT: 'kext',
- _curses.KEY_F0: 'kf0',
- _curses.KEY_F1: 'kf1',
- _curses.KEY_F10: 'kf10',
- _curses.KEY_F11: 'kf11',
- _curses.KEY_F12: 'kf12',
- _curses.KEY_F13: 'kf13',
- _curses.KEY_F14: 'kf14',
- _curses.KEY_F15: 'kf15',
- _curses.KEY_F16: 'kf16',
- _curses.KEY_F17: 'kf17',
- _curses.KEY_F18: 'kf18',
- _curses.KEY_F19: 'kf19',
- _curses.KEY_F2: 'kf2',
- _curses.KEY_F20: 'kf20',
- _curses.KEY_F21: 'kf21',
- _curses.KEY_F22: 'kf22',
- _curses.KEY_F23: 'kf23',
- _curses.KEY_F24: 'kf24',
- _curses.KEY_F25: 'kf25',
- _curses.KEY_F26: 'kf26',
- _curses.KEY_F27: 'kf27',
- _curses.KEY_F28: 'kf28',
- _curses.KEY_F29: 'kf29',
- _curses.KEY_F3: 'kf3',
- _curses.KEY_F30: 'kf30',
- _curses.KEY_F31: 'kf31',
- _curses.KEY_F32: 'kf32',
- _curses.KEY_F33: 'kf33',
- _curses.KEY_F34: 'kf34',
- _curses.KEY_F35: 'kf35',
- _curses.KEY_F36: 'kf36',
- _curses.KEY_F37: 'kf37',
- _curses.KEY_F38: 'kf38',
- _curses.KEY_F39: 'kf39',
- _curses.KEY_F4: 'kf4',
- _curses.KEY_F40: 'kf40',
- _curses.KEY_F41: 'kf41',
- _curses.KEY_F42: 'kf42',
- _curses.KEY_F43: 'kf43',
- _curses.KEY_F44: 'kf44',
- _curses.KEY_F45: 'kf45',
- _curses.KEY_F46: 'kf46',
- _curses.KEY_F47: 'kf47',
- _curses.KEY_F48: 'kf48',
- _curses.KEY_F49: 'kf49',
- _curses.KEY_F5: 'kf5',
- _curses.KEY_F50: 'kf50',
- _curses.KEY_F51: 'kf51',
- _curses.KEY_F52: 'kf52',
- _curses.KEY_F53: 'kf53',
- _curses.KEY_F54: 'kf54',
- _curses.KEY_F55: 'kf55',
- _curses.KEY_F56: 'kf56',
- _curses.KEY_F57: 'kf57',
- _curses.KEY_F58: 'kf58',
- _curses.KEY_F59: 'kf59',
- _curses.KEY_F6: 'kf6',
- _curses.KEY_F60: 'kf60',
- _curses.KEY_F61: 'kf61',
- _curses.KEY_F62: 'kf62',
- _curses.KEY_F63: 'kf63',
- _curses.KEY_F7: 'kf7',
- _curses.KEY_F8: 'kf8',
- _curses.KEY_F9: 'kf9',
- _curses.KEY_FIND: 'kfnd',
- _curses.KEY_HELP: 'khlp',
- _curses.KEY_HOME: 'khome',
- _curses.KEY_IC: 'kich1',
- _curses.KEY_IL: 'kil1',
- _curses.KEY_LEFT: 'kcub1',
- _curses.KEY_LL: 'kll',
- _curses.KEY_MARK: 'kmrk',
- _curses.KEY_MESSAGE: 'kmsg',
- _curses.KEY_MOVE: 'kmov',
- _curses.KEY_NEXT: 'knxt',
- _curses.KEY_NPAGE: 'knp',
- _curses.KEY_OPEN: 'kopn',
- _curses.KEY_OPTIONS: 'kopt',
- _curses.KEY_PPAGE: 'kpp',
- _curses.KEY_PREVIOUS: 'kprv',
- _curses.KEY_PRINT: 'kprt',
- _curses.KEY_REDO: 'krdo',
- _curses.KEY_REFERENCE: 'kref',
- _curses.KEY_REFRESH: 'krfr',
- _curses.KEY_REPLACE: 'krpl',
- _curses.KEY_RESTART: 'krst',
- _curses.KEY_RESUME: 'kres',
- _curses.KEY_RIGHT: 'kcuf1',
- _curses.KEY_SAVE: 'ksav',
- _curses.KEY_SBEG: 'kBEG',
- _curses.KEY_SCANCEL: 'kCAN',
- _curses.KEY_SCOMMAND: 'kCMD',
- _curses.KEY_SCOPY: 'kCPY',
- _curses.KEY_SCREATE: 'kCRT',
- _curses.KEY_SDC: 'kDC',
- _curses.KEY_SDL: 'kDL',
- _curses.KEY_SELECT: 'kslt',
- _curses.KEY_SEND: 'kEND',
- _curses.KEY_SEOL: 'kEOL',
- _curses.KEY_SEXIT: 'kEXT',
- _curses.KEY_SF: 'kind',
- _curses.KEY_SFIND: 'kFND',
- _curses.KEY_SHELP: 'kHLP',
- _curses.KEY_SHOME: 'kHOM',
- _curses.KEY_SIC: 'kIC',
- _curses.KEY_SLEFT: 'kLFT',
- _curses.KEY_SMESSAGE: 'kMSG',
- _curses.KEY_SMOVE: 'kMOV',
- _curses.KEY_SNEXT: 'kNXT',
- _curses.KEY_SOPTIONS: 'kOPT',
- _curses.KEY_SPREVIOUS: 'kPRV',
- _curses.KEY_SPRINT: 'kPRT',
- _curses.KEY_SR: 'kri',
- _curses.KEY_SREDO: 'kRDO',
- _curses.KEY_SREPLACE: 'kRPL',
- _curses.KEY_SRIGHT: 'kRIT',
- _curses.KEY_SRSUME: 'kRES',
- _curses.KEY_SSAVE: 'kSAV',
- _curses.KEY_SSUSPEND: 'kSPD',
- _curses.KEY_STAB: 'khts',
- _curses.KEY_SUNDO: 'kUND',
- _curses.KEY_SUSPEND: 'kspd',
- _curses.KEY_UNDO: 'kund',
- _curses.KEY_UP: 'kcuu1'
- }
-
-def has_key(ch):
- if isinstance(ch, str):
- ch = ord(ch)
-
- # Figure out the correct capability name for the keycode.
- capability_name = _capability_names.get(ch)
- if capability_name is None:
- return False
-
- #Check the current terminal description for that capability;
- #if present, return true, else return false.
- if _curses.tigetstr( capability_name ):
- return True
- else:
- return False
-
-if __name__ == '__main__':
- # Compare the output of this implementation and the ncurses has_key,
- # on platforms where has_key is already available
- try:
- L = []
- _curses.initscr()
- for key in _capability_names.keys():
- system = _curses.has_key(key)
- python = has_key(key)
- if system != python:
- L.append( 'Mismatch for key %s, system=%i, Python=%i'
- % (_curses.keyname( key ), system, python) )
- finally:
- _curses.endwin()
- for i in L: print i
diff --git a/sys/lib/python/curses/panel.py b/sys/lib/python/curses/panel.py
deleted file mode 100644
index 3497d752c..000000000
--- a/sys/lib/python/curses/panel.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""curses.panel
-
-Module for using panels with curses.
-"""
-
-__revision__ = "$Id: panel.py 36560 2004-07-18 06:16:08Z tim_one $"
-
-from _curses_panel import *
diff --git a/sys/lib/python/curses/textpad.py b/sys/lib/python/curses/textpad.py
deleted file mode 100644
index 120c5721e..000000000
--- a/sys/lib/python/curses/textpad.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""Simple textbox editing widget with Emacs-like keybindings."""
-
-import curses, ascii
-
-def rectangle(win, uly, ulx, lry, lrx):
- """Draw a rectangle with corners at the provided upper-left
- and lower-right coordinates.
- """
- win.vline(uly+1, ulx, curses.ACS_VLINE, lry - uly - 1)
- win.hline(uly, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
- win.hline(lry, ulx+1, curses.ACS_HLINE, lrx - ulx - 1)
- win.vline(uly+1, lrx, curses.ACS_VLINE, lry - uly - 1)
- win.addch(uly, ulx, curses.ACS_ULCORNER)
- win.addch(uly, lrx, curses.ACS_URCORNER)
- win.addch(lry, lrx, curses.ACS_LRCORNER)
- win.addch(lry, ulx, curses.ACS_LLCORNER)
-
-class Textbox:
- """Editing widget using the interior of a window object.
- Supports the following Emacs-like key bindings:
-
- Ctrl-A Go to left edge of window.
- Ctrl-B Cursor left, wrapping to previous line if appropriate.
- Ctrl-D Delete character under cursor.
- Ctrl-E Go to right edge (stripspaces off) or end of line (stripspaces on).
- Ctrl-F Cursor right, wrapping to next line when appropriate.
- Ctrl-G Terminate, returning the window contents.
- Ctrl-H Delete character backward.
- Ctrl-J Terminate if the window is 1 line, otherwise insert newline.
- Ctrl-K If line is blank, delete it, otherwise clear to end of line.
- Ctrl-L Refresh screen.
- Ctrl-N Cursor down; move down one line.
- Ctrl-O Insert a blank line at cursor location.
- Ctrl-P Cursor up; move up one line.
-
- Move operations do nothing if the cursor is at an edge where the movement
- is not possible. The following synonyms are supported where possible:
-
- KEY_LEFT = Ctrl-B, KEY_RIGHT = Ctrl-F, KEY_UP = Ctrl-P, KEY_DOWN = Ctrl-N
- KEY_BACKSPACE = Ctrl-h
- """
- def __init__(self, win):
- self.win = win
- (self.maxy, self.maxx) = win.getmaxyx()
- self.maxy = self.maxy - 1
- self.maxx = self.maxx - 1
- self.stripspaces = 1
- self.lastcmd = None
- win.keypad(1)
-
- def _end_of_line(self, y):
- "Go to the location of the first blank on the given line."
- last = self.maxx
- while 1:
- if ascii.ascii(self.win.inch(y, last)) != ascii.SP:
- last = min(self.maxx, last+1)
- break
- elif last == 0:
- break
- last = last - 1
- return last
-
- def do_command(self, ch):
- "Process a single editing command."
- (y, x) = self.win.getyx()
- self.lastcmd = ch
- if ascii.isprint(ch):
- if y < self.maxy or x < self.maxx:
- # The try-catch ignores the error we trigger from some curses
- # versions by trying to write into the lowest-rightmost spot
- # in the window.
- try:
- self.win.addch(ch)
- except curses.error:
- pass
- elif ch == ascii.SOH: # ^a
- self.win.move(y, 0)
- elif ch in (ascii.STX,curses.KEY_LEFT, ascii.BS,curses.KEY_BACKSPACE):
- if x > 0:
- self.win.move(y, x-1)
- elif y == 0:
- pass
- elif self.stripspaces:
- self.win.move(y-1, self._end_of_line(y-1))
- else:
- self.win.move(y-1, self.maxx)
- if ch in (ascii.BS, curses.KEY_BACKSPACE):
- self.win.delch()
- elif ch == ascii.EOT: # ^d
- self.win.delch()
- elif ch == ascii.ENQ: # ^e
- if self.stripspaces:
- self.win.move(y, self._end_of_line(y))
- else:
- self.win.move(y, self.maxx)
- elif ch in (ascii.ACK, curses.KEY_RIGHT): # ^f
- if x < self.maxx:
- self.win.move(y, x+1)
- elif y == self.maxy:
- pass
- else:
- self.win.move(y+1, 0)
- elif ch == ascii.BEL: # ^g
- return 0
- elif ch == ascii.NL: # ^j
- if self.maxy == 0:
- return 0
- elif y < self.maxy:
- self.win.move(y+1, 0)
- elif ch == ascii.VT: # ^k
- if x == 0 and self._end_of_line(y) == 0:
- self.win.deleteln()
- else:
- # first undo the effect of self._end_of_line
- self.win.move(y, x)
- self.win.clrtoeol()
- elif ch == ascii.FF: # ^l
- self.win.refresh()
- elif ch in (ascii.SO, curses.KEY_DOWN): # ^n
- if y < self.maxy:
- self.win.move(y+1, x)
- if x > self._end_of_line(y+1):
- self.win.move(y+1, self._end_of_line(y+1))
- elif ch == ascii.SI: # ^o
- self.win.insertln()
- elif ch in (ascii.DLE, curses.KEY_UP): # ^p
- if y > 0:
- self.win.move(y-1, x)
- if x > self._end_of_line(y-1):
- self.win.move(y-1, self._end_of_line(y-1))
- return 1
-
- def gather(self):
- "Collect and return the contents of the window."
- result = ""
- for y in range(self.maxy+1):
- self.win.move(y, 0)
- stop = self._end_of_line(y)
- if stop == 0 and self.stripspaces:
- continue
- for x in range(self.maxx+1):
- if self.stripspaces and x == stop:
- break
- result = result + chr(ascii.ascii(self.win.inch(y, x)))
- if self.maxy > 0:
- result = result + "\n"
- return result
-
- def edit(self, validate=None):
- "Edit in the widget window and collect the results."
- while 1:
- ch = self.win.getch()
- if validate:
- ch = validate(ch)
- if not ch:
- continue
- if not self.do_command(ch):
- break
- self.win.refresh()
- return self.gather()
-
-if __name__ == '__main__':
- def test_editbox(stdscr):
- ncols, nlines = 9, 4
- uly, ulx = 15, 20
- stdscr.addstr(uly-2, ulx, "Use Ctrl-G to end editing.")
- win = curses.newwin(nlines, ncols, uly, ulx)
- rectangle(stdscr, uly-1, ulx-1, uly + nlines, ulx + ncols)
- stdscr.refresh()
- return Textbox(win).edit()
-
- str = curses.wrapper(test_editbox)
- print 'Contents of text box:', repr(str)
diff --git a/sys/lib/python/curses/wrapper.py b/sys/lib/python/curses/wrapper.py
deleted file mode 100644
index 53352041e..000000000
--- a/sys/lib/python/curses/wrapper.py
+++ /dev/null
@@ -1,50 +0,0 @@
-"""curses.wrapper
-
-Contains one function, wrapper(), which runs another function which
-should be the rest of your curses-based application. If the
-application raises an exception, wrapper() will restore the terminal
-to a sane state so you can read the resulting traceback.
-
-"""
-
-import sys, curses
-
-def wrapper(func, *args, **kwds):
- """Wrapper function that initializes curses and calls another function,
- restoring normal keyboard/screen behavior on error.
- The callable object 'func' is then passed the main window 'stdscr'
- as its first argument, followed by any other arguments passed to
- wrapper().
- """
-
- res = None
- try:
- # Initialize curses
- stdscr=curses.initscr()
-
- # Turn off echoing of keys, and enter cbreak mode,
- # where no buffering is performed on keyboard input
- curses.noecho()
- curses.cbreak()
-
- # In keypad mode, escape sequences for special keys
- # (like the cursor keys) will be interpreted and
- # a special value like curses.KEY_LEFT will be returned
- stdscr.keypad(1)
-
- # Start color, too. Harmless if the terminal doesn't have
- # color; user can test with has_color() later on. The try/catch
- # works around a minor bit of over-conscientiousness in the curses
- # module -- the error return from C start_color() is ignorable.
- try:
- curses.start_color()
- except:
- pass
-
- return func(stdscr, *args, **kwds)
- finally:
- # Set everything back to normal
- stdscr.keypad(0)
- curses.echo()
- curses.nocbreak()
- curses.endwin()
diff --git a/sys/lib/python/dbhash.py b/sys/lib/python/dbhash.py
deleted file mode 100644
index 9f8a9c3f2..000000000
--- a/sys/lib/python/dbhash.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""Provide a (g)dbm-compatible interface to bsddb.hashopen."""
-
-import sys
-try:
- import bsddb
-except ImportError:
- # prevent a second import of this module from spuriously succeeding
- del sys.modules[__name__]
- raise
-
-__all__ = ["error","open"]
-
-error = bsddb.error # Exported for anydbm
-
-def open(file, flag = 'r', mode=0666):
- return bsddb.hashopen(file, flag, mode)
diff --git a/sys/lib/python/decimal.py b/sys/lib/python/decimal.py
deleted file mode 100644
index 2b9bc75a3..000000000
--- a/sys/lib/python/decimal.py
+++ /dev/null
@@ -1,3137 +0,0 @@
-# Copyright (c) 2004 Python Software Foundation.
-# All rights reserved.
-
-# Written by Eric Price <eprice at tjhsst.edu>
-# and Facundo Batista <facundo at taniquetil.com.ar>
-# and Raymond Hettinger <python at rcn.com>
-# and Aahz <aahz at pobox.com>
-# and Tim Peters
-
-# This module is currently Py2.3 compatible and should be kept that way
-# unless a major compelling advantage arises. IOW, 2.3 compatibility is
-# strongly preferred, but not guaranteed.
-
-# Also, this module should be kept in sync with the latest updates of
-# the IBM specification as it evolves. Those updates will be treated
-# as bug fixes (deviation from the spec is a compatibility, usability
-# bug) and will be backported. At this point the spec is stabilizing
-# and the updates are becoming fewer, smaller, and less significant.
-
-"""
-This is a Py2.3 implementation of decimal floating point arithmetic based on
-the General Decimal Arithmetic Specification:
-
- www2.hursley.ibm.com/decimal/decarith.html
-
-and IEEE standard 854-1987:
-
- www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
-
-Decimal floating point has finite precision with arbitrarily large bounds.
-
-The purpose of the module is to support arithmetic using familiar
-"schoolhouse" rules and to avoid the some of tricky representation
-issues associated with binary floating point. The package is especially
-useful for financial applications or for contexts where users have
-expectations that are at odds with binary floating point (for instance,
-in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
-of the expected Decimal("0.00") returned by decimal floating point).
-
-Here are some examples of using the decimal module:
-
->>> from decimal import *
->>> setcontext(ExtendedContext)
->>> Decimal(0)
-Decimal("0")
->>> Decimal("1")
-Decimal("1")
->>> Decimal("-.0123")
-Decimal("-0.0123")
->>> Decimal(123456)
-Decimal("123456")
->>> Decimal("123.45e12345678901234567890")
-Decimal("1.2345E+12345678901234567892")
->>> Decimal("1.33") + Decimal("1.27")
-Decimal("2.60")
->>> Decimal("12.34") + Decimal("3.87") - Decimal("18.41")
-Decimal("-2.20")
->>> dig = Decimal(1)
->>> print dig / Decimal(3)
-0.333333333
->>> getcontext().prec = 18
->>> print dig / Decimal(3)
-0.333333333333333333
->>> print dig.sqrt()
-1
->>> print Decimal(3).sqrt()
-1.73205080756887729
->>> print Decimal(3) ** 123
-4.85192780976896427E+58
->>> inf = Decimal(1) / Decimal(0)
->>> print inf
-Infinity
->>> neginf = Decimal(-1) / Decimal(0)
->>> print neginf
--Infinity
->>> print neginf + inf
-NaN
->>> print neginf * inf
--Infinity
->>> print dig / 0
-Infinity
->>> getcontext().traps[DivisionByZero] = 1
->>> print dig / 0
-Traceback (most recent call last):
- ...
- ...
- ...
-DivisionByZero: x / 0
->>> c = Context()
->>> c.traps[InvalidOperation] = 0
->>> print c.flags[InvalidOperation]
-0
->>> c.divide(Decimal(0), Decimal(0))
-Decimal("NaN")
->>> c.traps[InvalidOperation] = 1
->>> print c.flags[InvalidOperation]
-1
->>> c.flags[InvalidOperation] = 0
->>> print c.flags[InvalidOperation]
-0
->>> print c.divide(Decimal(0), Decimal(0))
-Traceback (most recent call last):
- ...
- ...
- ...
-InvalidOperation: 0 / 0
->>> print c.flags[InvalidOperation]
-1
->>> c.flags[InvalidOperation] = 0
->>> c.traps[InvalidOperation] = 0
->>> print c.divide(Decimal(0), Decimal(0))
-NaN
->>> print c.flags[InvalidOperation]
-1
->>>
-"""
-
-__all__ = [
- # Two major classes
- 'Decimal', 'Context',
-
- # Contexts
- 'DefaultContext', 'BasicContext', 'ExtendedContext',
-
- # Exceptions
- 'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
- 'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
-
- # Constants for use in setting up contexts
- 'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
- 'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN',
-
- # Functions for manipulating contexts
- 'setcontext', 'getcontext', 'localcontext'
-]
-
-import copy as _copy
-
-#Rounding
-ROUND_DOWN = 'ROUND_DOWN'
-ROUND_HALF_UP = 'ROUND_HALF_UP'
-ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
-ROUND_CEILING = 'ROUND_CEILING'
-ROUND_FLOOR = 'ROUND_FLOOR'
-ROUND_UP = 'ROUND_UP'
-ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
-
-#Rounding decision (not part of the public API)
-NEVER_ROUND = 'NEVER_ROUND' # Round in division (non-divmod), sqrt ONLY
-ALWAYS_ROUND = 'ALWAYS_ROUND' # Every operation rounds at end.
-
-#Errors
-
-class DecimalException(ArithmeticError):
- """Base exception class.
-
- Used exceptions derive from this.
- If an exception derives from another exception besides this (such as
- Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
- called if the others are present. This isn't actually used for
- anything, though.
-
- handle -- Called when context._raise_error is called and the
- trap_enabler is set. First argument is self, second is the
- context. More arguments can be given, those being after
- the explanation in _raise_error (For example,
- context._raise_error(NewError, '(-x)!', self._sign) would
- call NewError().handle(context, self._sign).)
-
- To define a new exception, it should be sufficient to have it derive
- from DecimalException.
- """
- def handle(self, context, *args):
- pass
-
-
-class Clamped(DecimalException):
- """Exponent of a 0 changed to fit bounds.
-
- This occurs and signals clamped if the exponent of a result has been
- altered in order to fit the constraints of a specific concrete
- representation. This may occur when the exponent of a zero result would
- be outside the bounds of a representation, or when a large normal
- number would have an encoded exponent that cannot be represented. In
- this latter case, the exponent is reduced to fit and the corresponding
- number of zero digits are appended to the coefficient ("fold-down").
- """
-
-
-class InvalidOperation(DecimalException):
- """An invalid operation was performed.
-
- Various bad things cause this:
-
- Something creates a signaling NaN
- -INF + INF
- 0 * (+-)INF
- (+-)INF / (+-)INF
- x % 0
- (+-)INF % x
- x._rescale( non-integer )
- sqrt(-x) , x > 0
- 0 ** 0
- x ** (non-integer)
- x ** (+-)INF
- An operand is invalid
- """
- def handle(self, context, *args):
- if args:
- if args[0] == 1: #sNaN, must drop 's' but keep diagnostics
- return Decimal( (args[1]._sign, args[1]._int, 'n') )
- return NaN
-
-class ConversionSyntax(InvalidOperation):
- """Trying to convert badly formed string.
-
- This occurs and signals invalid-operation if an string is being
- converted to a number and it does not conform to the numeric string
- syntax. The result is [0,qNaN].
- """
-
- def handle(self, context, *args):
- return (0, (0,), 'n') #Passed to something which uses a tuple.
-
-class DivisionByZero(DecimalException, ZeroDivisionError):
- """Division by 0.
-
- This occurs and signals division-by-zero if division of a finite number
- by zero was attempted (during a divide-integer or divide operation, or a
- power operation with negative right-hand operand), and the dividend was
- not zero.
-
- The result of the operation is [sign,inf], where sign is the exclusive
- or of the signs of the operands for divide, or is 1 for an odd power of
- -0, for power.
- """
-
- def handle(self, context, sign, double = None, *args):
- if double is not None:
- return (Infsign[sign],)*2
- return Infsign[sign]
-
-class DivisionImpossible(InvalidOperation):
- """Cannot perform the division adequately.
-
- This occurs and signals invalid-operation if the integer result of a
- divide-integer or remainder operation had too many digits (would be
- longer than precision). The result is [0,qNaN].
- """
-
- def handle(self, context, *args):
- return (NaN, NaN)
-
-class DivisionUndefined(InvalidOperation, ZeroDivisionError):
- """Undefined result of division.
-
- This occurs and signals invalid-operation if division by zero was
- attempted (during a divide-integer, divide, or remainder operation), and
- the dividend is also zero. The result is [0,qNaN].
- """
-
- def handle(self, context, tup=None, *args):
- if tup is not None:
- return (NaN, NaN) #for 0 %0, 0 // 0
- return NaN
-
-class Inexact(DecimalException):
- """Had to round, losing information.
-
- This occurs and signals inexact whenever the result of an operation is
- not exact (that is, it needed to be rounded and any discarded digits
- were non-zero), or if an overflow or underflow condition occurs. The
- result in all cases is unchanged.
-
- The inexact signal may be tested (or trapped) to determine if a given
- operation (or sequence of operations) was inexact.
- """
- pass
-
-class InvalidContext(InvalidOperation):
- """Invalid context. Unknown rounding, for example.
-
- This occurs and signals invalid-operation if an invalid context was
- detected during an operation. This can occur if contexts are not checked
- on creation and either the precision exceeds the capability of the
- underlying concrete representation or an unknown or unsupported rounding
- was specified. These aspects of the context need only be checked when
- the values are required to be used. The result is [0,qNaN].
- """
-
- def handle(self, context, *args):
- return NaN
-
-class Rounded(DecimalException):
- """Number got rounded (not necessarily changed during rounding).
-
- This occurs and signals rounded whenever the result of an operation is
- rounded (that is, some zero or non-zero digits were discarded from the
- coefficient), or if an overflow or underflow condition occurs. The
- result in all cases is unchanged.
-
- The rounded signal may be tested (or trapped) to determine if a given
- operation (or sequence of operations) caused a loss of precision.
- """
- pass
-
-class Subnormal(DecimalException):
- """Exponent < Emin before rounding.
-
- This occurs and signals subnormal whenever the result of a conversion or
- operation is subnormal (that is, its adjusted exponent is less than
- Emin, before any rounding). The result in all cases is unchanged.
-
- The subnormal signal may be tested (or trapped) to determine if a given
- or operation (or sequence of operations) yielded a subnormal result.
- """
- pass
-
-class Overflow(Inexact, Rounded):
- """Numerical overflow.
-
- This occurs and signals overflow if the adjusted exponent of a result
- (from a conversion or from an operation that is not an attempt to divide
- by zero), after rounding, would be greater than the largest value that
- can be handled by the implementation (the value Emax).
-
- The result depends on the rounding mode:
-
- For round-half-up and round-half-even (and for round-half-down and
- round-up, if implemented), the result of the operation is [sign,inf],
- where sign is the sign of the intermediate result. For round-down, the
- result is the largest finite number that can be represented in the
- current precision, with the sign of the intermediate result. For
- round-ceiling, the result is the same as for round-down if the sign of
- the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
- the result is the same as for round-down if the sign of the intermediate
- result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
- will also be raised.
- """
-
- def handle(self, context, sign, *args):
- if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
- ROUND_HALF_DOWN, ROUND_UP):
- return Infsign[sign]
- if sign == 0:
- if context.rounding == ROUND_CEILING:
- return Infsign[sign]
- return Decimal((sign, (9,)*context.prec,
- context.Emax-context.prec+1))
- if sign == 1:
- if context.rounding == ROUND_FLOOR:
- return Infsign[sign]
- return Decimal( (sign, (9,)*context.prec,
- context.Emax-context.prec+1))
-
-
-class Underflow(Inexact, Rounded, Subnormal):
- """Numerical underflow with result rounded to 0.
-
- This occurs and signals underflow if a result is inexact and the
- adjusted exponent of the result would be smaller (more negative) than
- the smallest value that can be handled by the implementation (the value
- Emin). That is, the result is both inexact and subnormal.
-
- The result after an underflow will be a subnormal number rounded, if
- necessary, so that its exponent is not less than Etiny. This may result
- in 0 with the sign of the intermediate result and an exponent of Etiny.
-
- In all cases, Inexact, Rounded, and Subnormal will also be raised.
- """
-
-# List of public traps and flags
-_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
- Underflow, InvalidOperation, Subnormal]
-
-# Map conditions (per the spec) to signals
-_condition_map = {ConversionSyntax:InvalidOperation,
- DivisionImpossible:InvalidOperation,
- DivisionUndefined:InvalidOperation,
- InvalidContext:InvalidOperation}
-
-##### Context Functions #######################################
-
-# The getcontext() and setcontext() function manage access to a thread-local
-# current context. Py2.4 offers direct support for thread locals. If that
-# is not available, use threading.currentThread() which is slower but will
-# work for older Pythons. If threads are not part of the build, create a
-# mock threading object with threading.local() returning the module namespace.
-
-try:
- import threading
-except ImportError:
- # Python was compiled without threads; create a mock object instead
- import sys
- class MockThreading:
- def local(self, sys=sys):
- return sys.modules[__name__]
- threading = MockThreading()
- del sys, MockThreading
-
-try:
- threading.local
-
-except AttributeError:
-
- #To fix reloading, force it to create a new context
- #Old contexts have different exceptions in their dicts, making problems.
- if hasattr(threading.currentThread(), '__decimal_context__'):
- del threading.currentThread().__decimal_context__
-
- def setcontext(context):
- """Set this thread's context to context."""
- if context in (DefaultContext, BasicContext, ExtendedContext):
- context = context.copy()
- context.clear_flags()
- threading.currentThread().__decimal_context__ = context
-
- def getcontext():
- """Returns this thread's context.
-
- If this thread does not yet have a context, returns
- a new context and sets this thread's context.
- New contexts are copies of DefaultContext.
- """
- try:
- return threading.currentThread().__decimal_context__
- except AttributeError:
- context = Context()
- threading.currentThread().__decimal_context__ = context
- return context
-
-else:
-
- local = threading.local()
- if hasattr(local, '__decimal_context__'):
- del local.__decimal_context__
-
- def getcontext(_local=local):
- """Returns this thread's context.
-
- If this thread does not yet have a context, returns
- a new context and sets this thread's context.
- New contexts are copies of DefaultContext.
- """
- try:
- return _local.__decimal_context__
- except AttributeError:
- context = Context()
- _local.__decimal_context__ = context
- return context
-
- def setcontext(context, _local=local):
- """Set this thread's context to context."""
- if context in (DefaultContext, BasicContext, ExtendedContext):
- context = context.copy()
- context.clear_flags()
- _local.__decimal_context__ = context
-
- del threading, local # Don't contaminate the namespace
-
-def localcontext(ctx=None):
- """Return a context manager for a copy of the supplied context
-
- Uses a copy of the current context if no context is specified
- The returned context manager creates a local decimal context
- in a with statement:
- def sin(x):
- with localcontext() as ctx:
- ctx.prec += 2
- # Rest of sin calculation algorithm
- # uses a precision 2 greater than normal
- return +s # Convert result to normal precision
-
- def sin(x):
- with localcontext(ExtendedContext):
- # Rest of sin calculation algorithm
- # uses the Extended Context from the
- # General Decimal Arithmetic Specification
- return +s # Convert result to normal context
-
- """
- # The string below can't be included in the docstring until Python 2.6
- # as the doctest module doesn't understand __future__ statements
- """
- >>> from __future__ import with_statement
- >>> print getcontext().prec
- 28
- >>> with localcontext():
- ... ctx = getcontext()
- ... ctx.prec += 2
- ... print ctx.prec
- ...
- 30
- >>> with localcontext(ExtendedContext):
- ... print getcontext().prec
- ...
- 9
- >>> print getcontext().prec
- 28
- """
- if ctx is None: ctx = getcontext()
- return _ContextManager(ctx)
-
-
-##### Decimal class ###########################################
-
-class Decimal(object):
- """Floating point class for decimal arithmetic."""
-
- __slots__ = ('_exp','_int','_sign', '_is_special')
- # Generally, the value of the Decimal instance is given by
- # (-1)**_sign * _int * 10**_exp
- # Special values are signified by _is_special == True
-
- # We're immutable, so use __new__ not __init__
- def __new__(cls, value="0", context=None):
- """Create a decimal point instance.
-
- >>> Decimal('3.14') # string input
- Decimal("3.14")
- >>> Decimal((0, (3, 1, 4), -2)) # tuple input (sign, digit_tuple, exponent)
- Decimal("3.14")
- >>> Decimal(314) # int or long
- Decimal("314")
- >>> Decimal(Decimal(314)) # another decimal instance
- Decimal("314")
- """
-
- self = object.__new__(cls)
- self._is_special = False
-
- # From an internal working value
- if isinstance(value, _WorkRep):
- self._sign = value.sign
- self._int = tuple(map(int, str(value.int)))
- self._exp = int(value.exp)
- return self
-
- # From another decimal
- if isinstance(value, Decimal):
- self._exp = value._exp
- self._sign = value._sign
- self._int = value._int
- self._is_special = value._is_special
- return self
-
- # From an integer
- if isinstance(value, (int,long)):
- if value >= 0:
- self._sign = 0
- else:
- self._sign = 1
- self._exp = 0
- self._int = tuple(map(int, str(abs(value))))
- return self
-
- # tuple/list conversion (possibly from as_tuple())
- if isinstance(value, (list,tuple)):
- if len(value) != 3:
- raise ValueError, 'Invalid arguments'
- if value[0] not in (0,1):
- raise ValueError, 'Invalid sign'
- for digit in value[1]:
- if not isinstance(digit, (int,long)) or digit < 0:
- raise ValueError, "The second value in the tuple must be composed of non negative integer elements."
-
- self._sign = value[0]
- self._int = tuple(value[1])
- if value[2] in ('F','n','N'):
- self._exp = value[2]
- self._is_special = True
- else:
- self._exp = int(value[2])
- return self
-
- if isinstance(value, float):
- raise TypeError("Cannot convert float to Decimal. " +
- "First convert the float to a string")
-
- # Other argument types may require the context during interpretation
- if context is None:
- context = getcontext()
-
- # From a string
- # REs insist on real strings, so we can too.
- if isinstance(value, basestring):
- if _isinfinity(value):
- self._exp = 'F'
- self._int = (0,)
- self._is_special = True
- if _isinfinity(value) == 1:
- self._sign = 0
- else:
- self._sign = 1
- return self
- if _isnan(value):
- sig, sign, diag = _isnan(value)
- self._is_special = True
- if len(diag) > context.prec: #Diagnostic info too long
- self._sign, self._int, self._exp = \
- context._raise_error(ConversionSyntax)
- return self
- if sig == 1:
- self._exp = 'n' #qNaN
- else: #sig == 2
- self._exp = 'N' #sNaN
- self._sign = sign
- self._int = tuple(map(int, diag)) #Diagnostic info
- return self
- try:
- self._sign, self._int, self._exp = _string2exact(value)
- except ValueError:
- self._is_special = True
- self._sign, self._int, self._exp = context._raise_error(ConversionSyntax)
- return self
-
- raise TypeError("Cannot convert %r to Decimal" % value)
-
- def _isnan(self):
- """Returns whether the number is not actually one.
-
- 0 if a number
- 1 if NaN
- 2 if sNaN
- """
- if self._is_special:
- exp = self._exp
- if exp == 'n':
- return 1
- elif exp == 'N':
- return 2
- return 0
-
- def _isinfinity(self):
- """Returns whether the number is infinite
-
- 0 if finite or not a number
- 1 if +INF
- -1 if -INF
- """
- if self._exp == 'F':
- if self._sign:
- return -1
- return 1
- return 0
-
- def _check_nans(self, other = None, context=None):
- """Returns whether the number is not actually one.
-
- if self, other are sNaN, signal
- if self, other are NaN return nan
- return 0
-
- Done before operations.
- """
-
- self_is_nan = self._isnan()
- if other is None:
- other_is_nan = False
- else:
- other_is_nan = other._isnan()
-
- if self_is_nan or other_is_nan:
- if context is None:
- context = getcontext()
-
- if self_is_nan == 2:
- return context._raise_error(InvalidOperation, 'sNaN',
- 1, self)
- if other_is_nan == 2:
- return context._raise_error(InvalidOperation, 'sNaN',
- 1, other)
- if self_is_nan:
- return self
-
- return other
- return 0
-
- def __nonzero__(self):
- """Is the number non-zero?
-
- 0 if self == 0
- 1 if self != 0
- """
- if self._is_special:
- return 1
- return sum(self._int) != 0
-
- def __cmp__(self, other, context=None):
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- if self._is_special or other._is_special:
- ans = self._check_nans(other, context)
- if ans:
- return 1 # Comparison involving NaN's always reports self > other
-
- # INF = INF
- return cmp(self._isinfinity(), other._isinfinity())
-
- if not self and not other:
- return 0 #If both 0, sign comparison isn't certain.
-
- #If different signs, neg one is less
- if other._sign < self._sign:
- return -1
- if self._sign < other._sign:
- return 1
-
- self_adjusted = self.adjusted()
- other_adjusted = other.adjusted()
- if self_adjusted == other_adjusted and \
- self._int + (0,)*(self._exp - other._exp) == \
- other._int + (0,)*(other._exp - self._exp):
- return 0 #equal, except in precision. ([0]*(-x) = [])
- elif self_adjusted > other_adjusted and self._int[0] != 0:
- return (-1)**self._sign
- elif self_adjusted < other_adjusted and other._int[0] != 0:
- return -((-1)**self._sign)
-
- # Need to round, so make sure we have a valid context
- if context is None:
- context = getcontext()
-
- context = context._shallow_copy()
- rounding = context._set_rounding(ROUND_UP) #round away from 0
-
- flags = context._ignore_all_flags()
- res = self.__sub__(other, context=context)
-
- context._regard_flags(*flags)
-
- context.rounding = rounding
-
- if not res:
- return 0
- elif res._sign:
- return -1
- return 1
-
- def __eq__(self, other):
- if not isinstance(other, (Decimal, int, long)):
- return NotImplemented
- return self.__cmp__(other) == 0
-
- def __ne__(self, other):
- if not isinstance(other, (Decimal, int, long)):
- return NotImplemented
- return self.__cmp__(other) != 0
-
- def compare(self, other, context=None):
- """Compares one to another.
-
- -1 => a < b
- 0 => a = b
- 1 => a > b
- NaN => one is NaN
- Like __cmp__, but returns Decimal instances.
- """
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- #compare(NaN, NaN) = NaN
- if (self._is_special or other and other._is_special):
- ans = self._check_nans(other, context)
- if ans:
- return ans
-
- return Decimal(self.__cmp__(other, context))
-
- def __hash__(self):
- """x.__hash__() <==> hash(x)"""
- # Decimal integers must hash the same as the ints
- # Non-integer decimals are normalized and hashed as strings
- # Normalization assures that hash(100E-1) == hash(10)
- if self._is_special:
- if self._isnan():
- raise TypeError('Cannot hash a NaN value.')
- return hash(str(self))
- i = int(self)
- if self == Decimal(i):
- return hash(i)
- assert self.__nonzero__() # '-0' handled by integer case
- return hash(str(self.normalize()))
-
- def as_tuple(self):
- """Represents the number as a triple tuple.
-
- To show the internals exactly as they are.
- """
- return (self._sign, self._int, self._exp)
-
- def __repr__(self):
- """Represents the number as an instance of Decimal."""
- # Invariant: eval(repr(d)) == d
- return 'Decimal("%s")' % str(self)
-
- def __str__(self, eng = 0, context=None):
- """Return string representation of the number in scientific notation.
-
- Captures all of the information in the underlying representation.
- """
-
- if self._is_special:
- if self._isnan():
- minus = '-'*self._sign
- if self._int == (0,):
- info = ''
- else:
- info = ''.join(map(str, self._int))
- if self._isnan() == 2:
- return minus + 'sNaN' + info
- return minus + 'NaN' + info
- if self._isinfinity():
- minus = '-'*self._sign
- return minus + 'Infinity'
-
- if context is None:
- context = getcontext()
-
- tmp = map(str, self._int)
- numdigits = len(self._int)
- leftdigits = self._exp + numdigits
- if eng and not self: #self = 0eX wants 0[.0[0]]eY, not [[0]0]0eY
- if self._exp < 0 and self._exp >= -6: #short, no need for e/E
- s = '-'*self._sign + '0.' + '0'*(abs(self._exp))
- return s
- #exp is closest mult. of 3 >= self._exp
- exp = ((self._exp - 1)// 3 + 1) * 3
- if exp != self._exp:
- s = '0.'+'0'*(exp - self._exp)
- else:
- s = '0'
- if exp != 0:
- if context.capitals:
- s += 'E'
- else:
- s += 'e'
- if exp > 0:
- s += '+' #0.0e+3, not 0.0e3
- s += str(exp)
- s = '-'*self._sign + s
- return s
- if eng:
- dotplace = (leftdigits-1)%3+1
- adjexp = leftdigits -1 - (leftdigits-1)%3
- else:
- adjexp = leftdigits-1
- dotplace = 1
- if self._exp == 0:
- pass
- elif self._exp < 0 and adjexp >= 0:
- tmp.insert(leftdigits, '.')
- elif self._exp < 0 and adjexp >= -6:
- tmp[0:0] = ['0'] * int(-leftdigits)
- tmp.insert(0, '0.')
- else:
- if numdigits > dotplace:
- tmp.insert(dotplace, '.')
- elif numdigits < dotplace:
- tmp.extend(['0']*(dotplace-numdigits))
- if adjexp:
- if not context.capitals:
- tmp.append('e')
- else:
- tmp.append('E')
- if adjexp > 0:
- tmp.append('+')
- tmp.append(str(adjexp))
- if eng:
- while tmp[0:1] == ['0']:
- tmp[0:1] = []
- if len(tmp) == 0 or tmp[0] == '.' or tmp[0].lower() == 'e':
- tmp[0:0] = ['0']
- if self._sign:
- tmp.insert(0, '-')
-
- return ''.join(tmp)
-
- def to_eng_string(self, context=None):
- """Convert to engineering-type string.
-
- Engineering notation has an exponent which is a multiple of 3, so there
- are up to 3 digits left of the decimal place.
-
- Same rules for when in exponential and when as a value as in __str__.
- """
- return self.__str__(eng=1, context=context)
-
- def __neg__(self, context=None):
- """Returns a copy with the sign switched.
-
- Rounds, if it has reason.
- """
- if self._is_special:
- ans = self._check_nans(context=context)
- if ans:
- return ans
-
- if not self:
- # -Decimal('0') is Decimal('0'), not Decimal('-0')
- sign = 0
- elif self._sign:
- sign = 0
- else:
- sign = 1
-
- if context is None:
- context = getcontext()
- if context._rounding_decision == ALWAYS_ROUND:
- return Decimal((sign, self._int, self._exp))._fix(context)
- return Decimal( (sign, self._int, self._exp))
-
- def __pos__(self, context=None):
- """Returns a copy, unless it is a sNaN.
-
- Rounds the number (if more then precision digits)
- """
- if self._is_special:
- ans = self._check_nans(context=context)
- if ans:
- return ans
-
- sign = self._sign
- if not self:
- # + (-0) = 0
- sign = 0
-
- if context is None:
- context = getcontext()
-
- if context._rounding_decision == ALWAYS_ROUND:
- ans = self._fix(context)
- else:
- ans = Decimal(self)
- ans._sign = sign
- return ans
-
- def __abs__(self, round=1, context=None):
- """Returns the absolute value of self.
-
- If the second argument is 0, do not round.
- """
- if self._is_special:
- ans = self._check_nans(context=context)
- if ans:
- return ans
-
- if not round:
- if context is None:
- context = getcontext()
- context = context._shallow_copy()
- context._set_rounding_decision(NEVER_ROUND)
-
- if self._sign:
- ans = self.__neg__(context=context)
- else:
- ans = self.__pos__(context=context)
-
- return ans
-
- def __add__(self, other, context=None):
- """Returns self + other.
-
- -INF + INF (or the reverse) cause InvalidOperation errors.
- """
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- if context is None:
- context = getcontext()
-
- if self._is_special or other._is_special:
- ans = self._check_nans(other, context)
- if ans:
- return ans
-
- if self._isinfinity():
- #If both INF, same sign => same as both, opposite => error.
- if self._sign != other._sign and other._isinfinity():
- return context._raise_error(InvalidOperation, '-INF + INF')
- return Decimal(self)
- if other._isinfinity():
- return Decimal(other) #Can't both be infinity here
-
- shouldround = context._rounding_decision == ALWAYS_ROUND
-
- exp = min(self._exp, other._exp)
- negativezero = 0
- if context.rounding == ROUND_FLOOR and self._sign != other._sign:
- #If the answer is 0, the sign should be negative, in this case.
- negativezero = 1
-
- if not self and not other:
- sign = min(self._sign, other._sign)
- if negativezero:
- sign = 1
- return Decimal( (sign, (0,), exp))
- if not self:
- exp = max(exp, other._exp - context.prec-1)
- ans = other._rescale(exp, watchexp=0, context=context)
- if shouldround:
- ans = ans._fix(context)
- return ans
- if not other:
- exp = max(exp, self._exp - context.prec-1)
- ans = self._rescale(exp, watchexp=0, context=context)
- if shouldround:
- ans = ans._fix(context)
- return ans
-
- op1 = _WorkRep(self)
- op2 = _WorkRep(other)
- op1, op2 = _normalize(op1, op2, shouldround, context.prec)
-
- result = _WorkRep()
- if op1.sign != op2.sign:
- # Equal and opposite
- if op1.int == op2.int:
- if exp < context.Etiny():
- exp = context.Etiny()
- context._raise_error(Clamped)
- return Decimal((negativezero, (0,), exp))
- if op1.int < op2.int:
- op1, op2 = op2, op1
- #OK, now abs(op1) > abs(op2)
- if op1.sign == 1:
- result.sign = 1
- op1.sign, op2.sign = op2.sign, op1.sign
- else:
- result.sign = 0
- #So we know the sign, and op1 > 0.
- elif op1.sign == 1:
- result.sign = 1
- op1.sign, op2.sign = (0, 0)
- else:
- result.sign = 0
- #Now, op1 > abs(op2) > 0
-
- if op2.sign == 0:
- result.int = op1.int + op2.int
- else:
- result.int = op1.int - op2.int
-
- result.exp = op1.exp
- ans = Decimal(result)
- if shouldround:
- ans = ans._fix(context)
- return ans
-
- __radd__ = __add__
-
- def __sub__(self, other, context=None):
- """Return self + (-other)"""
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- if self._is_special or other._is_special:
- ans = self._check_nans(other, context=context)
- if ans:
- return ans
-
- # -Decimal(0) = Decimal(0), which we don't want since
- # (-0 - 0 = -0 + (-0) = -0, but -0 + 0 = 0.)
- # so we change the sign directly to a copy
- tmp = Decimal(other)
- tmp._sign = 1-tmp._sign
-
- return self.__add__(tmp, context=context)
-
- def __rsub__(self, other, context=None):
- """Return other + (-self)"""
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- tmp = Decimal(self)
- tmp._sign = 1 - tmp._sign
- return other.__add__(tmp, context=context)
-
- def _increment(self, round=1, context=None):
- """Special case of add, adding 1eExponent
-
- Since it is common, (rounding, for example) this adds
- (sign)*one E self._exp to the number more efficiently than add.
-
- For example:
- Decimal('5.624e10')._increment() == Decimal('5.625e10')
- """
- if self._is_special:
- ans = self._check_nans(context=context)
- if ans:
- return ans
-
- return Decimal(self) # Must be infinite, and incrementing makes no difference
-
- L = list(self._int)
- L[-1] += 1
- spot = len(L)-1
- while L[spot] == 10:
- L[spot] = 0
- if spot == 0:
- L[0:0] = [1]
- break
- L[spot-1] += 1
- spot -= 1
- ans = Decimal((self._sign, L, self._exp))
-
- if context is None:
- context = getcontext()
- if round and context._rounding_decision == ALWAYS_ROUND:
- ans = ans._fix(context)
- return ans
-
- def __mul__(self, other, context=None):
- """Return self * other.
-
- (+-) INF * 0 (or its reverse) raise InvalidOperation.
- """
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- if context is None:
- context = getcontext()
-
- resultsign = self._sign ^ other._sign
-
- if self._is_special or other._is_special:
- ans = self._check_nans(other, context)
- if ans:
- return ans
-
- if self._isinfinity():
- if not other:
- return context._raise_error(InvalidOperation, '(+-)INF * 0')
- return Infsign[resultsign]
-
- if other._isinfinity():
- if not self:
- return context._raise_error(InvalidOperation, '0 * (+-)INF')
- return Infsign[resultsign]
-
- resultexp = self._exp + other._exp
- shouldround = context._rounding_decision == ALWAYS_ROUND
-
- # Special case for multiplying by zero
- if not self or not other:
- ans = Decimal((resultsign, (0,), resultexp))
- if shouldround:
- #Fixing in case the exponent is out of bounds
- ans = ans._fix(context)
- return ans
-
- # Special case for multiplying by power of 10
- if self._int == (1,):
- ans = Decimal((resultsign, other._int, resultexp))
- if shouldround:
- ans = ans._fix(context)
- return ans
- if other._int == (1,):
- ans = Decimal((resultsign, self._int, resultexp))
- if shouldround:
- ans = ans._fix(context)
- return ans
-
- op1 = _WorkRep(self)
- op2 = _WorkRep(other)
-
- ans = Decimal( (resultsign, map(int, str(op1.int * op2.int)), resultexp))
- if shouldround:
- ans = ans._fix(context)
-
- return ans
- __rmul__ = __mul__
-
- def __div__(self, other, context=None):
- """Return self / other."""
- return self._divide(other, context=context)
- __truediv__ = __div__
-
- def _divide(self, other, divmod = 0, context=None):
- """Return a / b, to context.prec precision.
-
- divmod:
- 0 => true division
- 1 => (a //b, a%b)
- 2 => a //b
- 3 => a%b
-
- Actually, if divmod is 2 or 3 a tuple is returned, but errors for
- computing the other value are not raised.
- """
- other = _convert_other(other)
- if other is NotImplemented:
- if divmod in (0, 1):
- return NotImplemented
- return (NotImplemented, NotImplemented)
-
- if context is None:
- context = getcontext()
-
- sign = self._sign ^ other._sign
-
- if self._is_special or other._is_special:
- ans = self._check_nans(other, context)
- if ans:
- if divmod:
- return (ans, ans)
- return ans
-
- if self._isinfinity() and other._isinfinity():
- if divmod:
- return (context._raise_error(InvalidOperation,
- '(+-)INF // (+-)INF'),
- context._raise_error(InvalidOperation,
- '(+-)INF % (+-)INF'))
- return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
-
- if self._isinfinity():
- if divmod == 1:
- return (Infsign[sign],
- context._raise_error(InvalidOperation, 'INF % x'))
- elif divmod == 2:
- return (Infsign[sign], NaN)
- elif divmod == 3:
- return (Infsign[sign],
- context._raise_error(InvalidOperation, 'INF % x'))
- return Infsign[sign]
-
- if other._isinfinity():
- if divmod:
- return (Decimal((sign, (0,), 0)), Decimal(self))
- context._raise_error(Clamped, 'Division by infinity')
- return Decimal((sign, (0,), context.Etiny()))
-
- # Special cases for zeroes
- if not self and not other:
- if divmod:
- return context._raise_error(DivisionUndefined, '0 / 0', 1)
- return context._raise_error(DivisionUndefined, '0 / 0')
-
- if not self:
- if divmod:
- otherside = Decimal(self)
- otherside._exp = min(self._exp, other._exp)
- return (Decimal((sign, (0,), 0)), otherside)
- exp = self._exp - other._exp
- if exp < context.Etiny():
- exp = context.Etiny()
- context._raise_error(Clamped, '0e-x / y')
- if exp > context.Emax:
- exp = context.Emax
- context._raise_error(Clamped, '0e+x / y')
- return Decimal( (sign, (0,), exp) )
-
- if not other:
- if divmod:
- return context._raise_error(DivisionByZero, 'divmod(x,0)',
- sign, 1)
- return context._raise_error(DivisionByZero, 'x / 0', sign)
-
- #OK, so neither = 0, INF or NaN
-
- shouldround = context._rounding_decision == ALWAYS_ROUND
-
- #If we're dividing into ints, and self < other, stop.
- #self.__abs__(0) does not round.
- if divmod and (self.__abs__(0, context) < other.__abs__(0, context)):
-
- if divmod == 1 or divmod == 3:
- exp = min(self._exp, other._exp)
- ans2 = self._rescale(exp, context=context, watchexp=0)
- if shouldround:
- ans2 = ans2._fix(context)
- return (Decimal( (sign, (0,), 0) ),
- ans2)
-
- elif divmod == 2:
- #Don't round the mod part, if we don't need it.
- return (Decimal( (sign, (0,), 0) ), Decimal(self))
-
- op1 = _WorkRep(self)
- op2 = _WorkRep(other)
- op1, op2, adjust = _adjust_coefficients(op1, op2)
- res = _WorkRep( (sign, 0, (op1.exp - op2.exp)) )
- if divmod and res.exp > context.prec + 1:
- return context._raise_error(DivisionImpossible)
-
- prec_limit = 10 ** context.prec
- while 1:
- while op2.int <= op1.int:
- res.int += 1
- op1.int -= op2.int
- if res.exp == 0 and divmod:
- if res.int >= prec_limit and shouldround:
- return context._raise_error(DivisionImpossible)
- otherside = Decimal(op1)
- frozen = context._ignore_all_flags()
-
- exp = min(self._exp, other._exp)
- otherside = otherside._rescale(exp, context=context, watchexp=0)
- context._regard_flags(*frozen)
- if shouldround:
- otherside = otherside._fix(context)
- return (Decimal(res), otherside)
-
- if op1.int == 0 and adjust >= 0 and not divmod:
- break
- if res.int >= prec_limit and shouldround:
- if divmod:
- return context._raise_error(DivisionImpossible)
- shouldround=1
- # Really, the answer is a bit higher, so adding a one to
- # the end will make sure the rounding is right.
- if op1.int != 0:
- res.int *= 10
- res.int += 1
- res.exp -= 1
-
- break
- res.int *= 10
- res.exp -= 1
- adjust += 1
- op1.int *= 10
- op1.exp -= 1
-
- if res.exp == 0 and divmod and op2.int > op1.int:
- #Solves an error in precision. Same as a previous block.
-
- if res.int >= prec_limit and shouldround:
- return context._raise_error(DivisionImpossible)
- otherside = Decimal(op1)
- frozen = context._ignore_all_flags()
-
- exp = min(self._exp, other._exp)
- otherside = otherside._rescale(exp, context=context)
-
- context._regard_flags(*frozen)
-
- return (Decimal(res), otherside)
-
- ans = Decimal(res)
- if shouldround:
- ans = ans._fix(context)
- return ans
-
- def __rdiv__(self, other, context=None):
- """Swaps self/other and returns __div__."""
- other = _convert_other(other)
- if other is NotImplemented:
- return other
- return other.__div__(self, context=context)
- __rtruediv__ = __rdiv__
-
- def __divmod__(self, other, context=None):
- """
- (self // other, self % other)
- """
- return self._divide(other, 1, context)
-
- def __rdivmod__(self, other, context=None):
- """Swaps self/other and returns __divmod__."""
- other = _convert_other(other)
- if other is NotImplemented:
- return other
- return other.__divmod__(self, context=context)
-
- def __mod__(self, other, context=None):
- """
- self % other
- """
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- if self._is_special or other._is_special:
- ans = self._check_nans(other, context)
- if ans:
- return ans
-
- if self and not other:
- return context._raise_error(InvalidOperation, 'x % 0')
-
- return self._divide(other, 3, context)[1]
-
- def __rmod__(self, other, context=None):
- """Swaps self/other and returns __mod__."""
- other = _convert_other(other)
- if other is NotImplemented:
- return other
- return other.__mod__(self, context=context)
-
- def remainder_near(self, other, context=None):
- """
- Remainder nearest to 0- abs(remainder-near) <= other/2
- """
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- if self._is_special or other._is_special:
- ans = self._check_nans(other, context)
- if ans:
- return ans
- if self and not other:
- return context._raise_error(InvalidOperation, 'x % 0')
-
- if context is None:
- context = getcontext()
- # If DivisionImpossible causes an error, do not leave Rounded/Inexact
- # ignored in the calling function.
- context = context._shallow_copy()
- flags = context._ignore_flags(Rounded, Inexact)
- #keep DivisionImpossible flags
- (side, r) = self.__divmod__(other, context=context)
-
- if r._isnan():
- context._regard_flags(*flags)
- return r
-
- context = context._shallow_copy()
- rounding = context._set_rounding_decision(NEVER_ROUND)
-
- if other._sign:
- comparison = other.__div__(Decimal(-2), context=context)
- else:
- comparison = other.__div__(Decimal(2), context=context)
-
- context._set_rounding_decision(rounding)
- context._regard_flags(*flags)
-
- s1, s2 = r._sign, comparison._sign
- r._sign, comparison._sign = 0, 0
-
- if r < comparison:
- r._sign, comparison._sign = s1, s2
- #Get flags now
- self.__divmod__(other, context=context)
- return r._fix(context)
- r._sign, comparison._sign = s1, s2
-
- rounding = context._set_rounding_decision(NEVER_ROUND)
-
- (side, r) = self.__divmod__(other, context=context)
- context._set_rounding_decision(rounding)
- if r._isnan():
- return r
-
- decrease = not side._iseven()
- rounding = context._set_rounding_decision(NEVER_ROUND)
- side = side.__abs__(context=context)
- context._set_rounding_decision(rounding)
-
- s1, s2 = r._sign, comparison._sign
- r._sign, comparison._sign = 0, 0
- if r > comparison or decrease and r == comparison:
- r._sign, comparison._sign = s1, s2
- context.prec += 1
- if len(side.__add__(Decimal(1), context=context)._int) >= context.prec:
- context.prec -= 1
- return context._raise_error(DivisionImpossible)[1]
- context.prec -= 1
- if self._sign == other._sign:
- r = r.__sub__(other, context=context)
- else:
- r = r.__add__(other, context=context)
- else:
- r._sign, comparison._sign = s1, s2
-
- return r._fix(context)
-
- def __floordiv__(self, other, context=None):
- """self // other"""
- return self._divide(other, 2, context)[0]
-
- def __rfloordiv__(self, other, context=None):
- """Swaps self/other and returns __floordiv__."""
- other = _convert_other(other)
- if other is NotImplemented:
- return other
- return other.__floordiv__(self, context=context)
-
- def __float__(self):
- """Float representation."""
- return float(str(self))
-
- def __int__(self):
- """Converts self to an int, truncating if necessary."""
- if self._is_special:
- if self._isnan():
- context = getcontext()
- return context._raise_error(InvalidContext)
- elif self._isinfinity():
- raise OverflowError, "Cannot convert infinity to long"
- if self._exp >= 0:
- s = ''.join(map(str, self._int)) + '0'*self._exp
- else:
- s = ''.join(map(str, self._int))[:self._exp]
- if s == '':
- s = '0'
- sign = '-'*self._sign
- return int(sign + s)
-
- def __long__(self):
- """Converts to a long.
-
- Equivalent to long(int(self))
- """
- return long(self.__int__())
-
- def _fix(self, context):
- """Round if it is necessary to keep self within prec precision.
-
- Rounds and fixes the exponent. Does not raise on a sNaN.
-
- Arguments:
- self - Decimal instance
- context - context used.
- """
- if self._is_special:
- return self
- if context is None:
- context = getcontext()
- prec = context.prec
- ans = self._fixexponents(context)
- if len(ans._int) > prec:
- ans = ans._round(prec, context=context)
- ans = ans._fixexponents(context)
- return ans
-
- def _fixexponents(self, context):
- """Fix the exponents and return a copy with the exponent in bounds.
- Only call if known to not be a special value.
- """
- folddown = context._clamp
- Emin = context.Emin
- ans = self
- ans_adjusted = ans.adjusted()
- if ans_adjusted < Emin:
- Etiny = context.Etiny()
- if ans._exp < Etiny:
- if not ans:
- ans = Decimal(self)
- ans._exp = Etiny
- context._raise_error(Clamped)
- return ans
- ans = ans._rescale(Etiny, context=context)
- #It isn't zero, and exp < Emin => subnormal
- context._raise_error(Subnormal)
- if context.flags[Inexact]:
- context._raise_error(Underflow)
- else:
- if ans:
- #Only raise subnormal if non-zero.
- context._raise_error(Subnormal)
- else:
- Etop = context.Etop()
- if folddown and ans._exp > Etop:
- context._raise_error(Clamped)
- ans = ans._rescale(Etop, context=context)
- else:
- Emax = context.Emax
- if ans_adjusted > Emax:
- if not ans:
- ans = Decimal(self)
- ans._exp = Emax
- context._raise_error(Clamped)
- return ans
- context._raise_error(Inexact)
- context._raise_error(Rounded)
- return context._raise_error(Overflow, 'above Emax', ans._sign)
- return ans
-
- def _round(self, prec=None, rounding=None, context=None):
- """Returns a rounded version of self.
-
- You can specify the precision or rounding method. Otherwise, the
- context determines it.
- """
-
- if self._is_special:
- ans = self._check_nans(context=context)
- if ans:
- return ans
-
- if self._isinfinity():
- return Decimal(self)
-
- if context is None:
- context = getcontext()
-
- if rounding is None:
- rounding = context.rounding
- if prec is None:
- prec = context.prec
-
- if not self:
- if prec <= 0:
- dig = (0,)
- exp = len(self._int) - prec + self._exp
- else:
- dig = (0,) * prec
- exp = len(self._int) + self._exp - prec
- ans = Decimal((self._sign, dig, exp))
- context._raise_error(Rounded)
- return ans
-
- if prec == 0:
- temp = Decimal(self)
- temp._int = (0,)+temp._int
- prec = 1
- elif prec < 0:
- exp = self._exp + len(self._int) - prec - 1
- temp = Decimal( (self._sign, (0, 1), exp))
- prec = 1
- else:
- temp = Decimal(self)
-
- numdigits = len(temp._int)
- if prec == numdigits:
- return temp
-
- # See if we need to extend precision
- expdiff = prec - numdigits
- if expdiff > 0:
- tmp = list(temp._int)
- tmp.extend([0] * expdiff)
- ans = Decimal( (temp._sign, tmp, temp._exp - expdiff))
- return ans
-
- #OK, but maybe all the lost digits are 0.
- lostdigits = self._int[expdiff:]
- if lostdigits == (0,) * len(lostdigits):
- ans = Decimal( (temp._sign, temp._int[:prec], temp._exp - expdiff))
- #Rounded, but not Inexact
- context._raise_error(Rounded)
- return ans
-
- # Okay, let's round and lose data
-
- this_function = getattr(temp, self._pick_rounding_function[rounding])
- #Now we've got the rounding function
-
- if prec != context.prec:
- context = context._shallow_copy()
- context.prec = prec
- ans = this_function(prec, expdiff, context)
- context._raise_error(Rounded)
- context._raise_error(Inexact, 'Changed in rounding')
-
- return ans
-
- _pick_rounding_function = {}
-
- def _round_down(self, prec, expdiff, context):
- """Also known as round-towards-0, truncate."""
- return Decimal( (self._sign, self._int[:prec], self._exp - expdiff) )
-
- def _round_half_up(self, prec, expdiff, context, tmp = None):
- """Rounds 5 up (away from 0)"""
-
- if tmp is None:
- tmp = Decimal( (self._sign,self._int[:prec], self._exp - expdiff))
- if self._int[prec] >= 5:
- tmp = tmp._increment(round=0, context=context)
- if len(tmp._int) > prec:
- return Decimal( (tmp._sign, tmp._int[:-1], tmp._exp + 1))
- return tmp
-
- def _round_half_even(self, prec, expdiff, context):
- """Round 5 to even, rest to nearest."""
-
- tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff))
- half = (self._int[prec] == 5)
- if half:
- for digit in self._int[prec+1:]:
- if digit != 0:
- half = 0
- break
- if half:
- if self._int[prec-1] & 1 == 0:
- return tmp
- return self._round_half_up(prec, expdiff, context, tmp)
-
- def _round_half_down(self, prec, expdiff, context):
- """Round 5 down"""
-
- tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff))
- half = (self._int[prec] == 5)
- if half:
- for digit in self._int[prec+1:]:
- if digit != 0:
- half = 0
- break
- if half:
- return tmp
- return self._round_half_up(prec, expdiff, context, tmp)
-
- def _round_up(self, prec, expdiff, context):
- """Rounds away from 0."""
- tmp = Decimal( (self._sign, self._int[:prec], self._exp - expdiff) )
- for digit in self._int[prec:]:
- if digit != 0:
- tmp = tmp._increment(round=1, context=context)
- if len(tmp._int) > prec:
- return Decimal( (tmp._sign, tmp._int[:-1], tmp._exp + 1))
- else:
- return tmp
- return tmp
-
- def _round_ceiling(self, prec, expdiff, context):
- """Rounds up (not away from 0 if negative.)"""
- if self._sign:
- return self._round_down(prec, expdiff, context)
- else:
- return self._round_up(prec, expdiff, context)
-
- def _round_floor(self, prec, expdiff, context):
- """Rounds down (not towards 0 if negative)"""
- if not self._sign:
- return self._round_down(prec, expdiff, context)
- else:
- return self._round_up(prec, expdiff, context)
-
- def __pow__(self, n, modulo = None, context=None):
- """Return self ** n (mod modulo)
-
- If modulo is None (default), don't take it mod modulo.
- """
- n = _convert_other(n)
- if n is NotImplemented:
- return n
-
- if context is None:
- context = getcontext()
-
- if self._is_special or n._is_special or n.adjusted() > 8:
- #Because the spot << doesn't work with really big exponents
- if n._isinfinity() or n.adjusted() > 8:
- return context._raise_error(InvalidOperation, 'x ** INF')
-
- ans = self._check_nans(n, context)
- if ans:
- return ans
-
- if not n._isinteger():
- return context._raise_error(InvalidOperation, 'x ** (non-integer)')
-
- if not self and not n:
- return context._raise_error(InvalidOperation, '0 ** 0')
-
- if not n:
- return Decimal(1)
-
- if self == Decimal(1):
- return Decimal(1)
-
- sign = self._sign and not n._iseven()
- n = int(n)
-
- if self._isinfinity():
- if modulo:
- return context._raise_error(InvalidOperation, 'INF % x')
- if n > 0:
- return Infsign[sign]
- return Decimal( (sign, (0,), 0) )
-
- #with ludicrously large exponent, just raise an overflow and return inf.
- if not modulo and n > 0 and (self._exp + len(self._int) - 1) * n > context.Emax \
- and self:
-
- tmp = Decimal('inf')
- tmp._sign = sign
- context._raise_error(Rounded)
- context._raise_error(Inexact)
- context._raise_error(Overflow, 'Big power', sign)
- return tmp
-
- elength = len(str(abs(n)))
- firstprec = context.prec
-
- if not modulo and firstprec + elength + 1 > DefaultContext.Emax:
- return context._raise_error(Overflow, 'Too much precision.', sign)
-
- mul = Decimal(self)
- val = Decimal(1)
- context = context._shallow_copy()
- context.prec = firstprec + elength + 1
- if n < 0:
- #n is a long now, not Decimal instance
- n = -n
- mul = Decimal(1).__div__(mul, context=context)
-
- spot = 1
- while spot <= n:
- spot <<= 1
-
- spot >>= 1
- #Spot is the highest power of 2 less than n
- while spot:
- val = val.__mul__(val, context=context)
- if val._isinfinity():
- val = Infsign[sign]
- break
- if spot & n:
- val = val.__mul__(mul, context=context)
- if modulo is not None:
- val = val.__mod__(modulo, context=context)
- spot >>= 1
- context.prec = firstprec
-
- if context._rounding_decision == ALWAYS_ROUND:
- return val._fix(context)
- return val
-
- def __rpow__(self, other, context=None):
- """Swaps self/other and returns __pow__."""
- other = _convert_other(other)
- if other is NotImplemented:
- return other
- return other.__pow__(self, context=context)
-
- def normalize(self, context=None):
- """Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
-
- if self._is_special:
- ans = self._check_nans(context=context)
- if ans:
- return ans
-
- dup = self._fix(context)
- if dup._isinfinity():
- return dup
-
- if not dup:
- return Decimal( (dup._sign, (0,), 0) )
- end = len(dup._int)
- exp = dup._exp
- while dup._int[end-1] == 0:
- exp += 1
- end -= 1
- return Decimal( (dup._sign, dup._int[:end], exp) )
-
-
- def quantize(self, exp, rounding=None, context=None, watchexp=1):
- """Quantize self so its exponent is the same as that of exp.
-
- Similar to self._rescale(exp._exp) but with error checking.
- """
- if self._is_special or exp._is_special:
- ans = self._check_nans(exp, context)
- if ans:
- return ans
-
- if exp._isinfinity() or self._isinfinity():
- if exp._isinfinity() and self._isinfinity():
- return self #if both are inf, it is OK
- if context is None:
- context = getcontext()
- return context._raise_error(InvalidOperation,
- 'quantize with one INF')
- return self._rescale(exp._exp, rounding, context, watchexp)
-
- def same_quantum(self, other):
- """Test whether self and other have the same exponent.
-
- same as self._exp == other._exp, except NaN == sNaN
- """
- if self._is_special or other._is_special:
- if self._isnan() or other._isnan():
- return self._isnan() and other._isnan() and True
- if self._isinfinity() or other._isinfinity():
- return self._isinfinity() and other._isinfinity() and True
- return self._exp == other._exp
-
- def _rescale(self, exp, rounding=None, context=None, watchexp=1):
- """Rescales so that the exponent is exp.
-
- exp = exp to scale to (an integer)
- rounding = rounding version
- watchexp: if set (default) an error is returned if exp is greater
- than Emax or less than Etiny.
- """
- if context is None:
- context = getcontext()
-
- if self._is_special:
- if self._isinfinity():
- return context._raise_error(InvalidOperation, 'rescale with an INF')
-
- ans = self._check_nans(context=context)
- if ans:
- return ans
-
- if watchexp and (context.Emax < exp or context.Etiny() > exp):
- return context._raise_error(InvalidOperation, 'rescale(a, INF)')
-
- if not self:
- ans = Decimal(self)
- ans._int = (0,)
- ans._exp = exp
- return ans
-
- diff = self._exp - exp
- digits = len(self._int) + diff
-
- if watchexp and digits > context.prec:
- return context._raise_error(InvalidOperation, 'Rescale > prec')
-
- tmp = Decimal(self)
- tmp._int = (0,) + tmp._int
- digits += 1
-
- if digits < 0:
- tmp._exp = -digits + tmp._exp
- tmp._int = (0,1)
- digits = 1
- tmp = tmp._round(digits, rounding, context=context)
-
- if tmp._int[0] == 0 and len(tmp._int) > 1:
- tmp._int = tmp._int[1:]
- tmp._exp = exp
-
- tmp_adjusted = tmp.adjusted()
- if tmp and tmp_adjusted < context.Emin:
- context._raise_error(Subnormal)
- elif tmp and tmp_adjusted > context.Emax:
- return context._raise_error(InvalidOperation, 'rescale(a, INF)')
- return tmp
-
- def to_integral(self, rounding=None, context=None):
- """Rounds to the nearest integer, without raising inexact, rounded."""
- if self._is_special:
- ans = self._check_nans(context=context)
- if ans:
- return ans
- if self._exp >= 0:
- return self
- if context is None:
- context = getcontext()
- flags = context._ignore_flags(Rounded, Inexact)
- ans = self._rescale(0, rounding, context=context)
- context._regard_flags(flags)
- return ans
-
- def sqrt(self, context=None):
- """Return the square root of self.
-
- Uses a converging algorithm (Xn+1 = 0.5*(Xn + self / Xn))
- Should quadratically approach the right answer.
- """
- if self._is_special:
- ans = self._check_nans(context=context)
- if ans:
- return ans
-
- if self._isinfinity() and self._sign == 0:
- return Decimal(self)
-
- if not self:
- #exponent = self._exp / 2, using round_down.
- #if self._exp < 0:
- # exp = (self._exp+1) // 2
- #else:
- exp = (self._exp) // 2
- if self._sign == 1:
- #sqrt(-0) = -0
- return Decimal( (1, (0,), exp))
- else:
- return Decimal( (0, (0,), exp))
-
- if context is None:
- context = getcontext()
-
- if self._sign == 1:
- return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
-
- tmp = Decimal(self)
-
- expadd = tmp._exp // 2
- if tmp._exp & 1:
- tmp._int += (0,)
- tmp._exp = 0
- else:
- tmp._exp = 0
-
- context = context._shallow_copy()
- flags = context._ignore_all_flags()
- firstprec = context.prec
- context.prec = 3
- if tmp.adjusted() & 1 == 0:
- ans = Decimal( (0, (8,1,9), tmp.adjusted() - 2) )
- ans = ans.__add__(tmp.__mul__(Decimal((0, (2,5,9), -2)),
- context=context), context=context)
- ans._exp -= 1 + tmp.adjusted() // 2
- else:
- ans = Decimal( (0, (2,5,9), tmp._exp + len(tmp._int)- 3) )
- ans = ans.__add__(tmp.__mul__(Decimal((0, (8,1,9), -3)),
- context=context), context=context)
- ans._exp -= 1 + tmp.adjusted() // 2
-
- #ans is now a linear approximation.
-
- Emax, Emin = context.Emax, context.Emin
- context.Emax, context.Emin = DefaultContext.Emax, DefaultContext.Emin
-
- half = Decimal('0.5')
-
- maxp = firstprec + 2
- rounding = context._set_rounding(ROUND_HALF_EVEN)
- while 1:
- context.prec = min(2*context.prec - 2, maxp)
- ans = half.__mul__(ans.__add__(tmp.__div__(ans, context=context),
- context=context), context=context)
- if context.prec == maxp:
- break
-
- #round to the answer's precision-- the only error can be 1 ulp.
- context.prec = firstprec
- prevexp = ans.adjusted()
- ans = ans._round(context=context)
-
- #Now, check if the other last digits are better.
- context.prec = firstprec + 1
- # In case we rounded up another digit and we should actually go lower.
- if prevexp != ans.adjusted():
- ans._int += (0,)
- ans._exp -= 1
-
-
- lower = ans.__sub__(Decimal((0, (5,), ans._exp-1)), context=context)
- context._set_rounding(ROUND_UP)
- if lower.__mul__(lower, context=context) > (tmp):
- ans = ans.__sub__(Decimal((0, (1,), ans._exp)), context=context)
-
- else:
- upper = ans.__add__(Decimal((0, (5,), ans._exp-1)),context=context)
- context._set_rounding(ROUND_DOWN)
- if upper.__mul__(upper, context=context) < tmp:
- ans = ans.__add__(Decimal((0, (1,), ans._exp)),context=context)
-
- ans._exp += expadd
-
- context.prec = firstprec
- context.rounding = rounding
- ans = ans._fix(context)
-
- rounding = context._set_rounding_decision(NEVER_ROUND)
- if not ans.__mul__(ans, context=context) == self:
- # Only rounded/inexact if here.
- context._regard_flags(flags)
- context._raise_error(Rounded)
- context._raise_error(Inexact)
- else:
- #Exact answer, so let's set the exponent right.
- #if self._exp < 0:
- # exp = (self._exp +1)// 2
- #else:
- exp = self._exp // 2
- context.prec += ans._exp - exp
- ans = ans._rescale(exp, context=context)
- context.prec = firstprec
- context._regard_flags(flags)
- context.Emax, context.Emin = Emax, Emin
-
- return ans._fix(context)
-
- def max(self, other, context=None):
- """Returns the larger value.
-
- like max(self, other) except if one is not a number, returns
- NaN (and signals if one is sNaN). Also rounds.
- """
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- if self._is_special or other._is_special:
- # if one operand is a quiet NaN and the other is number, then the
- # number is always returned
- sn = self._isnan()
- on = other._isnan()
- if sn or on:
- if on == 1 and sn != 2:
- return self
- if sn == 1 and on != 2:
- return other
- return self._check_nans(other, context)
-
- ans = self
- c = self.__cmp__(other)
- if c == 0:
- # if both operands are finite and equal in numerical value
- # then an ordering is applied:
- #
- # if the signs differ then max returns the operand with the
- # positive sign and min returns the operand with the negative sign
- #
- # if the signs are the same then the exponent is used to select
- # the result.
- if self._sign != other._sign:
- if self._sign:
- ans = other
- elif self._exp < other._exp and not self._sign:
- ans = other
- elif self._exp > other._exp and self._sign:
- ans = other
- elif c == -1:
- ans = other
-
- if context is None:
- context = getcontext()
- if context._rounding_decision == ALWAYS_ROUND:
- return ans._fix(context)
- return ans
-
- def min(self, other, context=None):
- """Returns the smaller value.
-
- like min(self, other) except if one is not a number, returns
- NaN (and signals if one is sNaN). Also rounds.
- """
- other = _convert_other(other)
- if other is NotImplemented:
- return other
-
- if self._is_special or other._is_special:
- # if one operand is a quiet NaN and the other is number, then the
- # number is always returned
- sn = self._isnan()
- on = other._isnan()
- if sn or on:
- if on == 1 and sn != 2:
- return self
- if sn == 1 and on != 2:
- return other
- return self._check_nans(other, context)
-
- ans = self
- c = self.__cmp__(other)
- if c == 0:
- # if both operands are finite and equal in numerical value
- # then an ordering is applied:
- #
- # if the signs differ then max returns the operand with the
- # positive sign and min returns the operand with the negative sign
- #
- # if the signs are the same then the exponent is used to select
- # the result.
- if self._sign != other._sign:
- if other._sign:
- ans = other
- elif self._exp > other._exp and not self._sign:
- ans = other
- elif self._exp < other._exp and self._sign:
- ans = other
- elif c == 1:
- ans = other
-
- if context is None:
- context = getcontext()
- if context._rounding_decision == ALWAYS_ROUND:
- return ans._fix(context)
- return ans
-
- def _isinteger(self):
- """Returns whether self is an integer"""
- if self._exp >= 0:
- return True
- rest = self._int[self._exp:]
- return rest == (0,)*len(rest)
-
- def _iseven(self):
- """Returns 1 if self is even. Assumes self is an integer."""
- if self._exp > 0:
- return 1
- return self._int[-1+self._exp] & 1 == 0
-
- def adjusted(self):
- """Return the adjusted exponent of self"""
- try:
- return self._exp + len(self._int) - 1
- #If NaN or Infinity, self._exp is string
- except TypeError:
- return 0
-
- # support for pickling, copy, and deepcopy
- def __reduce__(self):
- return (self.__class__, (str(self),))
-
- def __copy__(self):
- if type(self) == Decimal:
- return self # I'm immutable; therefore I am my own clone
- return self.__class__(str(self))
-
- def __deepcopy__(self, memo):
- if type(self) == Decimal:
- return self # My components are also immutable
- return self.__class__(str(self))
-
-##### Context class ###########################################
-
-
-# get rounding method function:
-rounding_functions = [name for name in Decimal.__dict__.keys() if name.startswith('_round_')]
-for name in rounding_functions:
- #name is like _round_half_even, goes to the global ROUND_HALF_EVEN value.
- globalname = name[1:].upper()
- val = globals()[globalname]
- Decimal._pick_rounding_function[val] = name
-
-del name, val, globalname, rounding_functions
-
-class _ContextManager(object):
- """Context manager class to support localcontext().
-
- Sets a copy of the supplied context in __enter__() and restores
- the previous decimal context in __exit__()
- """
- def __init__(self, new_context):
- self.new_context = new_context.copy()
- def __enter__(self):
- self.saved_context = getcontext()
- setcontext(self.new_context)
- return self.new_context
- def __exit__(self, t, v, tb):
- setcontext(self.saved_context)
-
-class Context(object):
- """Contains the context for a Decimal instance.
-
- Contains:
- prec - precision (for use in rounding, division, square roots..)
- rounding - rounding type. (how you round)
- _rounding_decision - ALWAYS_ROUND, NEVER_ROUND -- do you round?
- traps - If traps[exception] = 1, then the exception is
- raised when it is caused. Otherwise, a value is
- substituted in.
- flags - When an exception is caused, flags[exception] is incremented.
- (Whether or not the trap_enabler is set)
- Should be reset by user of Decimal instance.
- Emin - Minimum exponent
- Emax - Maximum exponent
- capitals - If 1, 1*10^1 is printed as 1E+1.
- If 0, printed as 1e1
- _clamp - If 1, change exponents if too high (Default 0)
- """
-
- def __init__(self, prec=None, rounding=None,
- traps=None, flags=None,
- _rounding_decision=None,
- Emin=None, Emax=None,
- capitals=None, _clamp=0,
- _ignored_flags=None):
- if flags is None:
- flags = []
- if _ignored_flags is None:
- _ignored_flags = []
- if not isinstance(flags, dict):
- flags = dict([(s,s in flags) for s in _signals])
- del s
- if traps is not None and not isinstance(traps, dict):
- traps = dict([(s,s in traps) for s in _signals])
- del s
- for name, val in locals().items():
- if val is None:
- setattr(self, name, _copy.copy(getattr(DefaultContext, name)))
- else:
- setattr(self, name, val)
- del self.self
-
- def __repr__(self):
- """Show the current context."""
- s = []
- s.append('Context(prec=%(prec)d, rounding=%(rounding)s, Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d' % vars(self))
- s.append('flags=[' + ', '.join([f.__name__ for f, v in self.flags.items() if v]) + ']')
- s.append('traps=[' + ', '.join([t.__name__ for t, v in self.traps.items() if v]) + ']')
- return ', '.join(s) + ')'
-
- def clear_flags(self):
- """Reset all flags to zero"""
- for flag in self.flags:
- self.flags[flag] = 0
-
- def _shallow_copy(self):
- """Returns a shallow copy from self."""
- nc = Context(self.prec, self.rounding, self.traps, self.flags,
- self._rounding_decision, self.Emin, self.Emax,
- self.capitals, self._clamp, self._ignored_flags)
- return nc
-
- def copy(self):
- """Returns a deep copy from self."""
- nc = Context(self.prec, self.rounding, self.traps.copy(), self.flags.copy(),
- self._rounding_decision, self.Emin, self.Emax,
- self.capitals, self._clamp, self._ignored_flags)
- return nc
- __copy__ = copy
-
- def _raise_error(self, condition, explanation = None, *args):
- """Handles an error
-
- If the flag is in _ignored_flags, returns the default response.
- Otherwise, it increments the flag, then, if the corresponding
- trap_enabler is set, it reaises the exception. Otherwise, it returns
- the default value after incrementing the flag.
- """
- error = _condition_map.get(condition, condition)
- if error in self._ignored_flags:
- #Don't touch the flag
- return error().handle(self, *args)
-
- self.flags[error] += 1
- if not self.traps[error]:
- #The errors define how to handle themselves.
- return condition().handle(self, *args)
-
- # Errors should only be risked on copies of the context
- #self._ignored_flags = []
- raise error, explanation
-
- def _ignore_all_flags(self):
- """Ignore all flags, if they are raised"""
- return self._ignore_flags(*_signals)
-
- def _ignore_flags(self, *flags):
- """Ignore the flags, if they are raised"""
- # Do not mutate-- This way, copies of a context leave the original
- # alone.
- self._ignored_flags = (self._ignored_flags + list(flags))
- return list(flags)
-
- def _regard_flags(self, *flags):
- """Stop ignoring the flags, if they are raised"""
- if flags and isinstance(flags[0], (tuple,list)):
- flags = flags[0]
- for flag in flags:
- self._ignored_flags.remove(flag)
-
- def __hash__(self):
- """A Context cannot be hashed."""
- # We inherit object.__hash__, so we must deny this explicitly
- raise TypeError, "Cannot hash a Context."
-
- def Etiny(self):
- """Returns Etiny (= Emin - prec + 1)"""
- return int(self.Emin - self.prec + 1)
-
- def Etop(self):
- """Returns maximum exponent (= Emax - prec + 1)"""
- return int(self.Emax - self.prec + 1)
-
- def _set_rounding_decision(self, type):
- """Sets the rounding decision.
-
- Sets the rounding decision, and returns the current (previous)
- rounding decision. Often used like:
-
- context = context._shallow_copy()
- # That so you don't change the calling context
- # if an error occurs in the middle (say DivisionImpossible is raised).
-
- rounding = context._set_rounding_decision(NEVER_ROUND)
- instance = instance / Decimal(2)
- context._set_rounding_decision(rounding)
-
- This will make it not round for that operation.
- """
-
- rounding = self._rounding_decision
- self._rounding_decision = type
- return rounding
-
- def _set_rounding(self, type):
- """Sets the rounding type.
-
- Sets the rounding type, and returns the current (previous)
- rounding type. Often used like:
-
- context = context.copy()
- # so you don't change the calling context
- # if an error occurs in the middle.
- rounding = context._set_rounding(ROUND_UP)
- val = self.__sub__(other, context=context)
- context._set_rounding(rounding)
-
- This will make it round up for that operation.
- """
- rounding = self.rounding
- self.rounding= type
- return rounding
-
- def create_decimal(self, num='0'):
- """Creates a new Decimal instance but using self as context."""
- d = Decimal(num, context=self)
- return d._fix(self)
-
- #Methods
- def abs(self, a):
- """Returns the absolute value of the operand.
-
- If the operand is negative, the result is the same as using the minus
- operation on the operand. Otherwise, the result is the same as using
- the plus operation on the operand.
-
- >>> ExtendedContext.abs(Decimal('2.1'))
- Decimal("2.1")
- >>> ExtendedContext.abs(Decimal('-100'))
- Decimal("100")
- >>> ExtendedContext.abs(Decimal('101.5'))
- Decimal("101.5")
- >>> ExtendedContext.abs(Decimal('-101.5'))
- Decimal("101.5")
- """
- return a.__abs__(context=self)
-
- def add(self, a, b):
- """Return the sum of the two operands.
-
- >>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
- Decimal("19.00")
- >>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
- Decimal("1.02E+4")
- """
- return a.__add__(b, context=self)
-
- def _apply(self, a):
- return str(a._fix(self))
-
- def compare(self, a, b):
- """Compares values numerically.
-
- If the signs of the operands differ, a value representing each operand
- ('-1' if the operand is less than zero, '0' if the operand is zero or
- negative zero, or '1' if the operand is greater than zero) is used in
- place of that operand for the comparison instead of the actual
- operand.
-
- The comparison is then effected by subtracting the second operand from
- the first and then returning a value according to the result of the
- subtraction: '-1' if the result is less than zero, '0' if the result is
- zero or negative zero, or '1' if the result is greater than zero.
-
- >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
- Decimal("-1")
- >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
- Decimal("0")
- >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
- Decimal("0")
- >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
- Decimal("1")
- >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
- Decimal("1")
- >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
- Decimal("-1")
- """
- return a.compare(b, context=self)
-
- def divide(self, a, b):
- """Decimal division in a specified context.
-
- >>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
- Decimal("0.333333333")
- >>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
- Decimal("0.666666667")
- >>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
- Decimal("2.5")
- >>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
- Decimal("0.1")
- >>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
- Decimal("1")
- >>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
- Decimal("4.00")
- >>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
- Decimal("1.20")
- >>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
- Decimal("10")
- >>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
- Decimal("1000")
- >>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
- Decimal("1.20E+6")
- """
- return a.__div__(b, context=self)
-
- def divide_int(self, a, b):
- """Divides two numbers and returns the integer part of the result.
-
- >>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
- Decimal("0")
- >>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
- Decimal("3")
- >>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
- Decimal("3")
- """
- return a.__floordiv__(b, context=self)
-
- def divmod(self, a, b):
- return a.__divmod__(b, context=self)
-
- def max(self, a,b):
- """max compares two values numerically and returns the maximum.
-
- If either operand is a NaN then the general rules apply.
- Otherwise, the operands are compared as as though by the compare
- operation. If they are numerically equal then the left-hand operand
- is chosen as the result. Otherwise the maximum (closer to positive
- infinity) of the two operands is chosen as the result.
-
- >>> ExtendedContext.max(Decimal('3'), Decimal('2'))
- Decimal("3")
- >>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
- Decimal("3")
- >>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
- Decimal("1")
- >>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
- Decimal("7")
- """
- return a.max(b, context=self)
-
- def min(self, a,b):
- """min compares two values numerically and returns the minimum.
-
- If either operand is a NaN then the general rules apply.
- Otherwise, the operands are compared as as though by the compare
- operation. If they are numerically equal then the left-hand operand
- is chosen as the result. Otherwise the minimum (closer to negative
- infinity) of the two operands is chosen as the result.
-
- >>> ExtendedContext.min(Decimal('3'), Decimal('2'))
- Decimal("2")
- >>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
- Decimal("-10")
- >>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
- Decimal("1.0")
- >>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
- Decimal("7")
- """
- return a.min(b, context=self)
-
- def minus(self, a):
- """Minus corresponds to unary prefix minus in Python.
-
- The operation is evaluated using the same rules as subtract; the
- operation minus(a) is calculated as subtract('0', a) where the '0'
- has the same exponent as the operand.
-
- >>> ExtendedContext.minus(Decimal('1.3'))
- Decimal("-1.3")
- >>> ExtendedContext.minus(Decimal('-1.3'))
- Decimal("1.3")
- """
- return a.__neg__(context=self)
-
- def multiply(self, a, b):
- """multiply multiplies two operands.
-
- If either operand is a special value then the general rules apply.
- Otherwise, the operands are multiplied together ('long multiplication'),
- resulting in a number which may be as long as the sum of the lengths
- of the two operands.
-
- >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
- Decimal("3.60")
- >>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
- Decimal("21")
- >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
- Decimal("0.72")
- >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
- Decimal("-0.0")
- >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
- Decimal("4.28135971E+11")
- """
- return a.__mul__(b, context=self)
-
- def normalize(self, a):
- """normalize reduces an operand to its simplest form.
-
- Essentially a plus operation with all trailing zeros removed from the
- result.
-
- >>> ExtendedContext.normalize(Decimal('2.1'))
- Decimal("2.1")
- >>> ExtendedContext.normalize(Decimal('-2.0'))
- Decimal("-2")
- >>> ExtendedContext.normalize(Decimal('1.200'))
- Decimal("1.2")
- >>> ExtendedContext.normalize(Decimal('-120'))
- Decimal("-1.2E+2")
- >>> ExtendedContext.normalize(Decimal('120.00'))
- Decimal("1.2E+2")
- >>> ExtendedContext.normalize(Decimal('0.00'))
- Decimal("0")
- """
- return a.normalize(context=self)
-
- def plus(self, a):
- """Plus corresponds to unary prefix plus in Python.
-
- The operation is evaluated using the same rules as add; the
- operation plus(a) is calculated as add('0', a) where the '0'
- has the same exponent as the operand.
-
- >>> ExtendedContext.plus(Decimal('1.3'))
- Decimal("1.3")
- >>> ExtendedContext.plus(Decimal('-1.3'))
- Decimal("-1.3")
- """
- return a.__pos__(context=self)
-
- def power(self, a, b, modulo=None):
- """Raises a to the power of b, to modulo if given.
-
- The right-hand operand must be a whole number whose integer part (after
- any exponent has been applied) has no more than 9 digits and whose
- fractional part (if any) is all zeros before any rounding. The operand
- may be positive, negative, or zero; if negative, the absolute value of
- the power is used, and the left-hand operand is inverted (divided into
- 1) before use.
-
- If the increased precision needed for the intermediate calculations
- exceeds the capabilities of the implementation then an Invalid operation
- condition is raised.
-
- If, when raising to a negative power, an underflow occurs during the
- division into 1, the operation is not halted at that point but
- continues.
-
- >>> ExtendedContext.power(Decimal('2'), Decimal('3'))
- Decimal("8")
- >>> ExtendedContext.power(Decimal('2'), Decimal('-3'))
- Decimal("0.125")
- >>> ExtendedContext.power(Decimal('1.7'), Decimal('8'))
- Decimal("69.7575744")
- >>> ExtendedContext.power(Decimal('Infinity'), Decimal('-2'))
- Decimal("0")
- >>> ExtendedContext.power(Decimal('Infinity'), Decimal('-1'))
- Decimal("0")
- >>> ExtendedContext.power(Decimal('Infinity'), Decimal('0'))
- Decimal("1")
- >>> ExtendedContext.power(Decimal('Infinity'), Decimal('1'))
- Decimal("Infinity")
- >>> ExtendedContext.power(Decimal('Infinity'), Decimal('2'))
- Decimal("Infinity")
- >>> ExtendedContext.power(Decimal('-Infinity'), Decimal('-2'))
- Decimal("0")
- >>> ExtendedContext.power(Decimal('-Infinity'), Decimal('-1'))
- Decimal("-0")
- >>> ExtendedContext.power(Decimal('-Infinity'), Decimal('0'))
- Decimal("1")
- >>> ExtendedContext.power(Decimal('-Infinity'), Decimal('1'))
- Decimal("-Infinity")
- >>> ExtendedContext.power(Decimal('-Infinity'), Decimal('2'))
- Decimal("Infinity")
- >>> ExtendedContext.power(Decimal('0'), Decimal('0'))
- Decimal("NaN")
- """
- return a.__pow__(b, modulo, context=self)
-
- def quantize(self, a, b):
- """Returns a value equal to 'a' (rounded) and having the exponent of 'b'.
-
- The coefficient of the result is derived from that of the left-hand
- operand. It may be rounded using the current rounding setting (if the
- exponent is being increased), multiplied by a positive power of ten (if
- the exponent is being decreased), or is unchanged (if the exponent is
- already equal to that of the right-hand operand).
-
- Unlike other operations, if the length of the coefficient after the
- quantize operation would be greater than precision then an Invalid
- operation condition is raised. This guarantees that, unless there is an
- error condition, the exponent of the result of a quantize is always
- equal to that of the right-hand operand.
-
- Also unlike other operations, quantize will never raise Underflow, even
- if the result is subnormal and inexact.
-
- >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
- Decimal("2.170")
- >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
- Decimal("2.17")
- >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
- Decimal("2.2")
- >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
- Decimal("2")
- >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
- Decimal("0E+1")
- >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
- Decimal("-Infinity")
- >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
- Decimal("NaN")
- >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
- Decimal("-0")
- >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
- Decimal("-0E+5")
- >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
- Decimal("NaN")
- >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
- Decimal("NaN")
- >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
- Decimal("217.0")
- >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
- Decimal("217")
- >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
- Decimal("2.2E+2")
- >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
- Decimal("2E+2")
- """
- return a.quantize(b, context=self)
-
- def remainder(self, a, b):
- """Returns the remainder from integer division.
-
- The result is the residue of the dividend after the operation of
- calculating integer division as described for divide-integer, rounded to
- precision digits if necessary. The sign of the result, if non-zero, is
- the same as that of the original dividend.
-
- This operation will fail under the same conditions as integer division
- (that is, if integer division on the same two operands would fail, the
- remainder cannot be calculated).
-
- >>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
- Decimal("2.1")
- >>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
- Decimal("1")
- >>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
- Decimal("-1")
- >>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
- Decimal("0.2")
- >>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
- Decimal("0.1")
- >>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
- Decimal("1.0")
- """
- return a.__mod__(b, context=self)
-
- def remainder_near(self, a, b):
- """Returns to be "a - b * n", where n is the integer nearest the exact
- value of "x / b" (if two integers are equally near then the even one
- is chosen). If the result is equal to 0 then its sign will be the
- sign of a.
-
- This operation will fail under the same conditions as integer division
- (that is, if integer division on the same two operands would fail, the
- remainder cannot be calculated).
-
- >>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
- Decimal("-0.9")
- >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
- Decimal("-2")
- >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
- Decimal("1")
- >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
- Decimal("-1")
- >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
- Decimal("0.2")
- >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
- Decimal("0.1")
- >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
- Decimal("-0.3")
- """
- return a.remainder_near(b, context=self)
-
- def same_quantum(self, a, b):
- """Returns True if the two operands have the same exponent.
-
- The result is never affected by either the sign or the coefficient of
- either operand.
-
- >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
- False
- >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
- True
- >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
- False
- >>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
- True
- """
- return a.same_quantum(b)
-
- def sqrt(self, a):
- """Returns the square root of a non-negative number to context precision.
-
- If the result must be inexact, it is rounded using the round-half-even
- algorithm.
-
- >>> ExtendedContext.sqrt(Decimal('0'))
- Decimal("0")
- >>> ExtendedContext.sqrt(Decimal('-0'))
- Decimal("-0")
- >>> ExtendedContext.sqrt(Decimal('0.39'))
- Decimal("0.624499800")
- >>> ExtendedContext.sqrt(Decimal('100'))
- Decimal("10")
- >>> ExtendedContext.sqrt(Decimal('1'))
- Decimal("1")
- >>> ExtendedContext.sqrt(Decimal('1.0'))
- Decimal("1.0")
- >>> ExtendedContext.sqrt(Decimal('1.00'))
- Decimal("1.0")
- >>> ExtendedContext.sqrt(Decimal('7'))
- Decimal("2.64575131")
- >>> ExtendedContext.sqrt(Decimal('10'))
- Decimal("3.16227766")
- >>> ExtendedContext.prec
- 9
- """
- return a.sqrt(context=self)
-
- def subtract(self, a, b):
- """Return the difference between the two operands.
-
- >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
- Decimal("0.23")
- >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
- Decimal("0.00")
- >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
- Decimal("-0.77")
- """
- return a.__sub__(b, context=self)
-
- def to_eng_string(self, a):
- """Converts a number to a string, using scientific notation.
-
- The operation is not affected by the context.
- """
- return a.to_eng_string(context=self)
-
- def to_sci_string(self, a):
- """Converts a number to a string, using scientific notation.
-
- The operation is not affected by the context.
- """
- return a.__str__(context=self)
-
- def to_integral(self, a):
- """Rounds to an integer.
-
- When the operand has a negative exponent, the result is the same
- as using the quantize() operation using the given operand as the
- left-hand-operand, 1E+0 as the right-hand-operand, and the precision
- of the operand as the precision setting, except that no flags will
- be set. The rounding mode is taken from the context.
-
- >>> ExtendedContext.to_integral(Decimal('2.1'))
- Decimal("2")
- >>> ExtendedContext.to_integral(Decimal('100'))
- Decimal("100")
- >>> ExtendedContext.to_integral(Decimal('100.0'))
- Decimal("100")
- >>> ExtendedContext.to_integral(Decimal('101.5'))
- Decimal("102")
- >>> ExtendedContext.to_integral(Decimal('-101.5'))
- Decimal("-102")
- >>> ExtendedContext.to_integral(Decimal('10E+5'))
- Decimal("1.0E+6")
- >>> ExtendedContext.to_integral(Decimal('7.89E+77'))
- Decimal("7.89E+77")
- >>> ExtendedContext.to_integral(Decimal('-Inf'))
- Decimal("-Infinity")
- """
- return a.to_integral(context=self)
-
-class _WorkRep(object):
- __slots__ = ('sign','int','exp')
- # sign: 0 or 1
- # int: int or long
- # exp: None, int, or string
-
- def __init__(self, value=None):
- if value is None:
- self.sign = None
- self.int = 0
- self.exp = None
- elif isinstance(value, Decimal):
- self.sign = value._sign
- cum = 0
- for digit in value._int:
- cum = cum * 10 + digit
- self.int = cum
- self.exp = value._exp
- else:
- # assert isinstance(value, tuple)
- self.sign = value[0]
- self.int = value[1]
- self.exp = value[2]
-
- def __repr__(self):
- return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
-
- __str__ = __repr__
-
-
-
-def _normalize(op1, op2, shouldround = 0, prec = 0):
- """Normalizes op1, op2 to have the same exp and length of coefficient.
-
- Done during addition.
- """
- # Yes, the exponent is a long, but the difference between exponents
- # must be an int-- otherwise you'd get a big memory problem.
- numdigits = int(op1.exp - op2.exp)
- if numdigits < 0:
- numdigits = -numdigits
- tmp = op2
- other = op1
- else:
- tmp = op1
- other = op2
-
-
- if shouldround and numdigits > prec + 1:
- # Big difference in exponents - check the adjusted exponents
- tmp_len = len(str(tmp.int))
- other_len = len(str(other.int))
- if numdigits > (other_len + prec + 1 - tmp_len):
- # If the difference in adjusted exps is > prec+1, we know
- # other is insignificant, so might as well put a 1 after the precision.
- # (since this is only for addition.) Also stops use of massive longs.
-
- extend = prec + 2 - tmp_len
- if extend <= 0:
- extend = 1
- tmp.int *= 10 ** extend
- tmp.exp -= extend
- other.int = 1
- other.exp = tmp.exp
- return op1, op2
-
- tmp.int *= 10 ** numdigits
- tmp.exp -= numdigits
- return op1, op2
-
-def _adjust_coefficients(op1, op2):
- """Adjust op1, op2 so that op2.int * 10 > op1.int >= op2.int.
-
- Returns the adjusted op1, op2 as well as the change in op1.exp-op2.exp.
-
- Used on _WorkRep instances during division.
- """
- adjust = 0
- #If op1 is smaller, make it larger
- while op2.int > op1.int:
- op1.int *= 10
- op1.exp -= 1
- adjust += 1
-
- #If op2 is too small, make it larger
- while op1.int >= (10 * op2.int):
- op2.int *= 10
- op2.exp -= 1
- adjust -= 1
-
- return op1, op2, adjust
-
-##### Helper Functions ########################################
-
-def _convert_other(other):
- """Convert other to Decimal.
-
- Verifies that it's ok to use in an implicit construction.
- """
- if isinstance(other, Decimal):
- return other
- if isinstance(other, (int, long)):
- return Decimal(other)
- return NotImplemented
-
-_infinity_map = {
- 'inf' : 1,
- 'infinity' : 1,
- '+inf' : 1,
- '+infinity' : 1,
- '-inf' : -1,
- '-infinity' : -1
-}
-
-def _isinfinity(num):
- """Determines whether a string or float is infinity.
-
- +1 for negative infinity; 0 for finite ; +1 for positive infinity
- """
- num = str(num).lower()
- return _infinity_map.get(num, 0)
-
-def _isnan(num):
- """Determines whether a string or float is NaN
-
- (1, sign, diagnostic info as string) => NaN
- (2, sign, diagnostic info as string) => sNaN
- 0 => not a NaN
- """
- num = str(num).lower()
- if not num:
- return 0
-
- #get the sign, get rid of trailing [+-]
- sign = 0
- if num[0] == '+':
- num = num[1:]
- elif num[0] == '-': #elif avoids '+-nan'
- num = num[1:]
- sign = 1
-
- if num.startswith('nan'):
- if len(num) > 3 and not num[3:].isdigit(): #diagnostic info
- return 0
- return (1, sign, num[3:].lstrip('0'))
- if num.startswith('snan'):
- if len(num) > 4 and not num[4:].isdigit():
- return 0
- return (2, sign, num[4:].lstrip('0'))
- return 0
-
-
-##### Setup Specific Contexts ################################
-
-# The default context prototype used by Context()
-# Is mutable, so that new contexts can have different default values
-
-DefaultContext = Context(
- prec=28, rounding=ROUND_HALF_EVEN,
- traps=[DivisionByZero, Overflow, InvalidOperation],
- flags=[],
- _rounding_decision=ALWAYS_ROUND,
- Emax=999999999,
- Emin=-999999999,
- capitals=1
-)
-
-# Pre-made alternate contexts offered by the specification
-# Don't change these; the user should be able to select these
-# contexts and be able to reproduce results from other implementations
-# of the spec.
-
-BasicContext = Context(
- prec=9, rounding=ROUND_HALF_UP,
- traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
- flags=[],
-)
-
-ExtendedContext = Context(
- prec=9, rounding=ROUND_HALF_EVEN,
- traps=[],
- flags=[],
-)
-
-
-##### Useful Constants (internal use only) ####################
-
-#Reusable defaults
-Inf = Decimal('Inf')
-negInf = Decimal('-Inf')
-
-#Infsign[sign] is infinity w/ that sign
-Infsign = (Inf, negInf)
-
-NaN = Decimal('NaN')
-
-
-##### crud for parsing strings #################################
-import re
-
-# There's an optional sign at the start, and an optional exponent
-# at the end. The exponent has an optional sign and at least one
-# digit. In between, must have either at least one digit followed
-# by an optional fraction, or a decimal point followed by at least
-# one digit. Yuck.
-
-_parser = re.compile(r"""
-# \s*
- (?P<sign>[-+])?
- (
- (?P<int>\d+) (\. (?P<frac>\d*))?
- |
- \. (?P<onlyfrac>\d+)
- )
- ([eE](?P<exp>[-+]? \d+))?
-# \s*
- $
-""", re.VERBOSE).match #Uncomment the \s* to allow leading or trailing spaces.
-
-del re
-
-# return sign, n, p s.t. float string value == -1**sign * n * 10**p exactly
-
-def _string2exact(s):
- m = _parser(s)
- if m is None:
- raise ValueError("invalid literal for Decimal: %r" % s)
-
- if m.group('sign') == "-":
- sign = 1
- else:
- sign = 0
-
- exp = m.group('exp')
- if exp is None:
- exp = 0
- else:
- exp = int(exp)
-
- intpart = m.group('int')
- if intpart is None:
- intpart = ""
- fracpart = m.group('onlyfrac')
- else:
- fracpart = m.group('frac')
- if fracpart is None:
- fracpart = ""
-
- exp -= len(fracpart)
-
- mantissa = intpart + fracpart
- tmp = map(int, mantissa)
- backup = tmp
- while tmp and tmp[0] == 0:
- del tmp[0]
-
- # It's a zero
- if not tmp:
- if backup:
- return (sign, tuple(backup), exp)
- return (sign, (0,), exp)
- mantissa = tuple(tmp)
-
- return (sign, mantissa, exp)
-
-
-if __name__ == '__main__':
- import doctest, sys
- doctest.testmod(sys.modules[__name__])
diff --git a/sys/lib/python/difflib.py b/sys/lib/python/difflib.py
deleted file mode 100644
index 9be6ca724..000000000
--- a/sys/lib/python/difflib.py
+++ /dev/null
@@ -1,2019 +0,0 @@
-#! /usr/bin/env python
-
-"""
-Module difflib -- helpers for computing deltas between objects.
-
-Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
- Use SequenceMatcher to return list of the best "good enough" matches.
-
-Function context_diff(a, b):
- For two lists of strings, return a delta in context diff format.
-
-Function ndiff(a, b):
- Return a delta: the difference between `a` and `b` (lists of strings).
-
-Function restore(delta, which):
- Return one of the two sequences that generated an ndiff delta.
-
-Function unified_diff(a, b):
- For two lists of strings, return a delta in unified diff format.
-
-Class SequenceMatcher:
- A flexible class for comparing pairs of sequences of any type.
-
-Class Differ:
- For producing human-readable deltas from sequences of lines of text.
-
-Class HtmlDiff:
- For producing HTML side by side comparison with change highlights.
-"""
-
-__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
- 'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
- 'unified_diff', 'HtmlDiff']
-
-import heapq
-
-def _calculate_ratio(matches, length):
- if length:
- return 2.0 * matches / length
- return 1.0
-
-class SequenceMatcher:
-
- """
- SequenceMatcher is a flexible class for comparing pairs of sequences of
- any type, so long as the sequence elements are hashable. The basic
- algorithm predates, and is a little fancier than, an algorithm
- published in the late 1980's by Ratcliff and Obershelp under the
- hyperbolic name "gestalt pattern matching". The basic idea is to find
- the longest contiguous matching subsequence that contains no "junk"
- elements (R-O doesn't address junk). The same idea is then applied
- recursively to the pieces of the sequences to the left and to the right
- of the matching subsequence. This does not yield minimal edit
- sequences, but does tend to yield matches that "look right" to people.
-
- SequenceMatcher tries to compute a "human-friendly diff" between two
- sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
- longest *contiguous* & junk-free matching subsequence. That's what
- catches peoples' eyes. The Windows(tm) windiff has another interesting
- notion, pairing up elements that appear uniquely in each sequence.
- That, and the method here, appear to yield more intuitive difference
- reports than does diff. This method appears to be the least vulnerable
- to synching up on blocks of "junk lines", though (like blank lines in
- ordinary text files, or maybe "<P>" lines in HTML files). That may be
- because this is the only method of the 3 that has a *concept* of
- "junk" <wink>.
-
- Example, comparing two strings, and considering blanks to be "junk":
-
- >>> s = SequenceMatcher(lambda x: x == " ",
- ... "private Thread currentThread;",
- ... "private volatile Thread currentThread;")
- >>>
-
- .ratio() returns a float in [0, 1], measuring the "similarity" of the
- sequences. As a rule of thumb, a .ratio() value over 0.6 means the
- sequences are close matches:
-
- >>> print round(s.ratio(), 3)
- 0.866
- >>>
-
- If you're only interested in where the sequences match,
- .get_matching_blocks() is handy:
-
- >>> for block in s.get_matching_blocks():
- ... print "a[%d] and b[%d] match for %d elements" % block
- a[0] and b[0] match for 8 elements
- a[8] and b[17] match for 21 elements
- a[29] and b[38] match for 0 elements
-
- Note that the last tuple returned by .get_matching_blocks() is always a
- dummy, (len(a), len(b), 0), and this is the only case in which the last
- tuple element (number of elements matched) is 0.
-
- If you want to know how to change the first sequence into the second,
- use .get_opcodes():
-
- >>> for opcode in s.get_opcodes():
- ... print "%6s a[%d:%d] b[%d:%d]" % opcode
- equal a[0:8] b[0:8]
- insert a[8:8] b[8:17]
- equal a[8:29] b[17:38]
-
- See the Differ class for a fancy human-friendly file differencer, which
- uses SequenceMatcher both to compare sequences of lines, and to compare
- sequences of characters within similar (near-matching) lines.
-
- See also function get_close_matches() in this module, which shows how
- simple code building on SequenceMatcher can be used to do useful work.
-
- Timing: Basic R-O is cubic time worst case and quadratic time expected
- case. SequenceMatcher is quadratic time for the worst case and has
- expected-case behavior dependent in a complicated way on how many
- elements the sequences have in common; best case time is linear.
-
- Methods:
-
- __init__(isjunk=None, a='', b='')
- Construct a SequenceMatcher.
-
- set_seqs(a, b)
- Set the two sequences to be compared.
-
- set_seq1(a)
- Set the first sequence to be compared.
-
- set_seq2(b)
- Set the second sequence to be compared.
-
- find_longest_match(alo, ahi, blo, bhi)
- Find longest matching block in a[alo:ahi] and b[blo:bhi].
-
- get_matching_blocks()
- Return list of triples describing matching subsequences.
-
- get_opcodes()
- Return list of 5-tuples describing how to turn a into b.
-
- ratio()
- Return a measure of the sequences' similarity (float in [0,1]).
-
- quick_ratio()
- Return an upper bound on .ratio() relatively quickly.
-
- real_quick_ratio()
- Return an upper bound on ratio() very quickly.
- """
-
- def __init__(self, isjunk=None, a='', b=''):
- """Construct a SequenceMatcher.
-
- Optional arg isjunk is None (the default), or a one-argument
- function that takes a sequence element and returns true iff the
- element is junk. None is equivalent to passing "lambda x: 0", i.e.
- no elements are considered to be junk. For example, pass
- lambda x: x in " \\t"
- if you're comparing lines as sequences of characters, and don't
- want to synch up on blanks or hard tabs.
-
- Optional arg a is the first of two sequences to be compared. By
- default, an empty string. The elements of a must be hashable. See
- also .set_seqs() and .set_seq1().
-
- Optional arg b is the second of two sequences to be compared. By
- default, an empty string. The elements of b must be hashable. See
- also .set_seqs() and .set_seq2().
- """
-
- # Members:
- # a
- # first sequence
- # b
- # second sequence; differences are computed as "what do
- # we need to do to 'a' to change it into 'b'?"
- # b2j
- # for x in b, b2j[x] is a list of the indices (into b)
- # at which x appears; junk elements do not appear
- # fullbcount
- # for x in b, fullbcount[x] == the number of times x
- # appears in b; only materialized if really needed (used
- # only for computing quick_ratio())
- # matching_blocks
- # a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
- # ascending & non-overlapping in i and in j; terminated by
- # a dummy (len(a), len(b), 0) sentinel
- # opcodes
- # a list of (tag, i1, i2, j1, j2) tuples, where tag is
- # one of
- # 'replace' a[i1:i2] should be replaced by b[j1:j2]
- # 'delete' a[i1:i2] should be deleted
- # 'insert' b[j1:j2] should be inserted
- # 'equal' a[i1:i2] == b[j1:j2]
- # isjunk
- # a user-supplied function taking a sequence element and
- # returning true iff the element is "junk" -- this has
- # subtle but helpful effects on the algorithm, which I'll
- # get around to writing up someday <0.9 wink>.
- # DON'T USE! Only __chain_b uses this. Use isbjunk.
- # isbjunk
- # for x in b, isbjunk(x) == isjunk(x) but much faster;
- # it's really the has_key method of a hidden dict.
- # DOES NOT WORK for x in a!
- # isbpopular
- # for x in b, isbpopular(x) is true iff b is reasonably long
- # (at least 200 elements) and x accounts for more than 1% of
- # its elements. DOES NOT WORK for x in a!
-
- self.isjunk = isjunk
- self.a = self.b = None
- self.set_seqs(a, b)
-
- def set_seqs(self, a, b):
- """Set the two sequences to be compared.
-
- >>> s = SequenceMatcher()
- >>> s.set_seqs("abcd", "bcde")
- >>> s.ratio()
- 0.75
- """
-
- self.set_seq1(a)
- self.set_seq2(b)
-
- def set_seq1(self, a):
- """Set the first sequence to be compared.
-
- The second sequence to be compared is not changed.
-
- >>> s = SequenceMatcher(None, "abcd", "bcde")
- >>> s.ratio()
- 0.75
- >>> s.set_seq1("bcde")
- >>> s.ratio()
- 1.0
- >>>
-
- SequenceMatcher computes and caches detailed information about the
- second sequence, so if you want to compare one sequence S against
- many sequences, use .set_seq2(S) once and call .set_seq1(x)
- repeatedly for each of the other sequences.
-
- See also set_seqs() and set_seq2().
- """
-
- if a is self.a:
- return
- self.a = a
- self.matching_blocks = self.opcodes = None
-
- def set_seq2(self, b):
- """Set the second sequence to be compared.
-
- The first sequence to be compared is not changed.
-
- >>> s = SequenceMatcher(None, "abcd", "bcde")
- >>> s.ratio()
- 0.75
- >>> s.set_seq2("abcd")
- >>> s.ratio()
- 1.0
- >>>
-
- SequenceMatcher computes and caches detailed information about the
- second sequence, so if you want to compare one sequence S against
- many sequences, use .set_seq2(S) once and call .set_seq1(x)
- repeatedly for each of the other sequences.
-
- See also set_seqs() and set_seq1().
- """
-
- if b is self.b:
- return
- self.b = b
- self.matching_blocks = self.opcodes = None
- self.fullbcount = None
- self.__chain_b()
-
- # For each element x in b, set b2j[x] to a list of the indices in
- # b where x appears; the indices are in increasing order; note that
- # the number of times x appears in b is len(b2j[x]) ...
- # when self.isjunk is defined, junk elements don't show up in this
- # map at all, which stops the central find_longest_match method
- # from starting any matching block at a junk element ...
- # also creates the fast isbjunk function ...
- # b2j also does not contain entries for "popular" elements, meaning
- # elements that account for more than 1% of the total elements, and
- # when the sequence is reasonably large (>= 200 elements); this can
- # be viewed as an adaptive notion of semi-junk, and yields an enormous
- # speedup when, e.g., comparing program files with hundreds of
- # instances of "return NULL;" ...
- # note that this is only called when b changes; so for cross-product
- # kinds of matches, it's best to call set_seq2 once, then set_seq1
- # repeatedly
-
- def __chain_b(self):
- # Because isjunk is a user-defined (not C) function, and we test
- # for junk a LOT, it's important to minimize the number of calls.
- # Before the tricks described here, __chain_b was by far the most
- # time-consuming routine in the whole module! If anyone sees
- # Jim Roskind, thank him again for profile.py -- I never would
- # have guessed that.
- # The first trick is to build b2j ignoring the possibility
- # of junk. I.e., we don't call isjunk at all yet. Throwing
- # out the junk later is much cheaper than building b2j "right"
- # from the start.
- b = self.b
- n = len(b)
- self.b2j = b2j = {}
- populardict = {}
- for i, elt in enumerate(b):
- if elt in b2j:
- indices = b2j[elt]
- if n >= 200 and len(indices) * 100 > n:
- populardict[elt] = 1
- del indices[:]
- else:
- indices.append(i)
- else:
- b2j[elt] = [i]
-
- # Purge leftover indices for popular elements.
- for elt in populardict:
- del b2j[elt]
-
- # Now b2j.keys() contains elements uniquely, and especially when
- # the sequence is a string, that's usually a good deal smaller
- # than len(string). The difference is the number of isjunk calls
- # saved.
- isjunk = self.isjunk
- junkdict = {}
- if isjunk:
- for d in populardict, b2j:
- for elt in d.keys():
- if isjunk(elt):
- junkdict[elt] = 1
- del d[elt]
-
- # Now for x in b, isjunk(x) == x in junkdict, but the
- # latter is much faster. Note too that while there may be a
- # lot of junk in the sequence, the number of *unique* junk
- # elements is probably small. So the memory burden of keeping
- # this dict alive is likely trivial compared to the size of b2j.
- self.isbjunk = junkdict.has_key
- self.isbpopular = populardict.has_key
-
- def find_longest_match(self, alo, ahi, blo, bhi):
- """Find longest matching block in a[alo:ahi] and b[blo:bhi].
-
- If isjunk is not defined:
-
- Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
- alo <= i <= i+k <= ahi
- blo <= j <= j+k <= bhi
- and for all (i',j',k') meeting those conditions,
- k >= k'
- i <= i'
- and if i == i', j <= j'
-
- In other words, of all maximal matching blocks, return one that
- starts earliest in a, and of all those maximal matching blocks that
- start earliest in a, return the one that starts earliest in b.
-
- >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
- >>> s.find_longest_match(0, 5, 0, 9)
- (0, 4, 5)
-
- If isjunk is defined, first the longest matching block is
- determined as above, but with the additional restriction that no
- junk element appears in the block. Then that block is extended as
- far as possible by matching (only) junk elements on both sides. So
- the resulting block never matches on junk except as identical junk
- happens to be adjacent to an "interesting" match.
-
- Here's the same example as before, but considering blanks to be
- junk. That prevents " abcd" from matching the " abcd" at the tail
- end of the second sequence directly. Instead only the "abcd" can
- match, and matches the leftmost "abcd" in the second sequence:
-
- >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
- >>> s.find_longest_match(0, 5, 0, 9)
- (1, 0, 4)
-
- If no blocks match, return (alo, blo, 0).
-
- >>> s = SequenceMatcher(None, "ab", "c")
- >>> s.find_longest_match(0, 2, 0, 1)
- (0, 0, 0)
- """
-
- # CAUTION: stripping common prefix or suffix would be incorrect.
- # E.g.,
- # ab
- # acab
- # Longest matching block is "ab", but if common prefix is
- # stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
- # strip, so ends up claiming that ab is changed to acab by
- # inserting "ca" in the middle. That's minimal but unintuitive:
- # "it's obvious" that someone inserted "ac" at the front.
- # Windiff ends up at the same place as diff, but by pairing up
- # the unique 'b's and then matching the first two 'a's.
-
- a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
- besti, bestj, bestsize = alo, blo, 0
- # find longest junk-free match
- # during an iteration of the loop, j2len[j] = length of longest
- # junk-free match ending with a[i-1] and b[j]
- j2len = {}
- nothing = []
- for i in xrange(alo, ahi):
- # look at all instances of a[i] in b; note that because
- # b2j has no junk keys, the loop is skipped if a[i] is junk
- j2lenget = j2len.get
- newj2len = {}
- for j in b2j.get(a[i], nothing):
- # a[i] matches b[j]
- if j < blo:
- continue
- if j >= bhi:
- break
- k = newj2len[j] = j2lenget(j-1, 0) + 1
- if k > bestsize:
- besti, bestj, bestsize = i-k+1, j-k+1, k
- j2len = newj2len
-
- # Extend the best by non-junk elements on each end. In particular,
- # "popular" non-junk elements aren't in b2j, which greatly speeds
- # the inner loop above, but also means "the best" match so far
- # doesn't contain any junk *or* popular non-junk elements.
- while besti > alo and bestj > blo and \
- not isbjunk(b[bestj-1]) and \
- a[besti-1] == b[bestj-1]:
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- while besti+bestsize < ahi and bestj+bestsize < bhi and \
- not isbjunk(b[bestj+bestsize]) and \
- a[besti+bestsize] == b[bestj+bestsize]:
- bestsize += 1
-
- # Now that we have a wholly interesting match (albeit possibly
- # empty!), we may as well suck up the matching junk on each
- # side of it too. Can't think of a good reason not to, and it
- # saves post-processing the (possibly considerable) expense of
- # figuring out what to do with it. In the case of an empty
- # interesting match, this is clearly the right thing to do,
- # because no other kind of match is possible in the regions.
- while besti > alo and bestj > blo and \
- isbjunk(b[bestj-1]) and \
- a[besti-1] == b[bestj-1]:
- besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
- while besti+bestsize < ahi and bestj+bestsize < bhi and \
- isbjunk(b[bestj+bestsize]) and \
- a[besti+bestsize] == b[bestj+bestsize]:
- bestsize = bestsize + 1
-
- return besti, bestj, bestsize
-
- def get_matching_blocks(self):
- """Return list of triples describing matching subsequences.
-
- Each triple is of the form (i, j, n), and means that
- a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
- i and in j. New in Python 2.5, it's also guaranteed that if
- (i, j, n) and (i', j', n') are adjacent triples in the list, and
- the second is not the last triple in the list, then i+n != i' or
- j+n != j'. IOW, adjacent triples never describe adjacent equal
- blocks.
-
- The last triple is a dummy, (len(a), len(b), 0), and is the only
- triple with n==0.
-
- >>> s = SequenceMatcher(None, "abxcd", "abcd")
- >>> s.get_matching_blocks()
- [(0, 0, 2), (3, 2, 2), (5, 4, 0)]
- """
-
- if self.matching_blocks is not None:
- return self.matching_blocks
- la, lb = len(self.a), len(self.b)
-
- # This is most naturally expressed as a recursive algorithm, but
- # at least one user bumped into extreme use cases that exceeded
- # the recursion limit on their box. So, now we maintain a list
- # ('queue`) of blocks we still need to look at, and append partial
- # results to `matching_blocks` in a loop; the matches are sorted
- # at the end.
- queue = [(0, la, 0, lb)]
- matching_blocks = []
- while queue:
- alo, ahi, blo, bhi = queue.pop()
- i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
- # a[alo:i] vs b[blo:j] unknown
- # a[i:i+k] same as b[j:j+k]
- # a[i+k:ahi] vs b[j+k:bhi] unknown
- if k: # if k is 0, there was no matching block
- matching_blocks.append(x)
- if alo < i and blo < j:
- queue.append((alo, i, blo, j))
- if i+k < ahi and j+k < bhi:
- queue.append((i+k, ahi, j+k, bhi))
- matching_blocks.sort()
-
- # It's possible that we have adjacent equal blocks in the
- # matching_blocks list now. Starting with 2.5, this code was added
- # to collapse them.
- i1 = j1 = k1 = 0
- non_adjacent = []
- for i2, j2, k2 in matching_blocks:
- # Is this block adjacent to i1, j1, k1?
- if i1 + k1 == i2 and j1 + k1 == j2:
- # Yes, so collapse them -- this just increases the length of
- # the first block by the length of the second, and the first
- # block so lengthened remains the block to compare against.
- k1 += k2
- else:
- # Not adjacent. Remember the first block (k1==0 means it's
- # the dummy we started with), and make the second block the
- # new block to compare against.
- if k1:
- non_adjacent.append((i1, j1, k1))
- i1, j1, k1 = i2, j2, k2
- if k1:
- non_adjacent.append((i1, j1, k1))
-
- non_adjacent.append( (la, lb, 0) )
- self.matching_blocks = non_adjacent
- return self.matching_blocks
-
- def get_opcodes(self):
- """Return list of 5-tuples describing how to turn a into b.
-
- Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
- has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
- tuple preceding it, and likewise for j1 == the previous j2.
-
- The tags are strings, with these meanings:
-
- 'replace': a[i1:i2] should be replaced by b[j1:j2]
- 'delete': a[i1:i2] should be deleted.
- Note that j1==j2 in this case.
- 'insert': b[j1:j2] should be inserted at a[i1:i1].
- Note that i1==i2 in this case.
- 'equal': a[i1:i2] == b[j1:j2]
-
- >>> a = "qabxcd"
- >>> b = "abycdf"
- >>> s = SequenceMatcher(None, a, b)
- >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
- ... print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
- ... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
- delete a[0:1] (q) b[0:0] ()
- equal a[1:3] (ab) b[0:2] (ab)
- replace a[3:4] (x) b[2:3] (y)
- equal a[4:6] (cd) b[3:5] (cd)
- insert a[6:6] () b[5:6] (f)
- """
-
- if self.opcodes is not None:
- return self.opcodes
- i = j = 0
- self.opcodes = answer = []
- for ai, bj, size in self.get_matching_blocks():
- # invariant: we've pumped out correct diffs to change
- # a[:i] into b[:j], and the next matching block is
- # a[ai:ai+size] == b[bj:bj+size]. So we need to pump
- # out a diff to change a[i:ai] into b[j:bj], pump out
- # the matching block, and move (i,j) beyond the match
- tag = ''
- if i < ai and j < bj:
- tag = 'replace'
- elif i < ai:
- tag = 'delete'
- elif j < bj:
- tag = 'insert'
- if tag:
- answer.append( (tag, i, ai, j, bj) )
- i, j = ai+size, bj+size
- # the list of matching blocks is terminated by a
- # sentinel with size 0
- if size:
- answer.append( ('equal', ai, i, bj, j) )
- return answer
-
- def get_grouped_opcodes(self, n=3):
- """ Isolate change clusters by eliminating ranges with no changes.
-
- Return a generator of groups with upto n lines of context.
- Each group is in the same format as returned by get_opcodes().
-
- >>> from pprint import pprint
- >>> a = map(str, range(1,40))
- >>> b = a[:]
- >>> b[8:8] = ['i'] # Make an insertion
- >>> b[20] += 'x' # Make a replacement
- >>> b[23:28] = [] # Make a deletion
- >>> b[30] += 'y' # Make another replacement
- >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
- [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
- [('equal', 16, 19, 17, 20),
- ('replace', 19, 20, 20, 21),
- ('equal', 20, 22, 21, 23),
- ('delete', 22, 27, 23, 23),
- ('equal', 27, 30, 23, 26)],
- [('equal', 31, 34, 27, 30),
- ('replace', 34, 35, 30, 31),
- ('equal', 35, 38, 31, 34)]]
- """
-
- codes = self.get_opcodes()
- if not codes:
- codes = [("equal", 0, 1, 0, 1)]
- # Fixup leading and trailing groups if they show no changes.
- if codes[0][0] == 'equal':
- tag, i1, i2, j1, j2 = codes[0]
- codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
- if codes[-1][0] == 'equal':
- tag, i1, i2, j1, j2 = codes[-1]
- codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
-
- nn = n + n
- group = []
- for tag, i1, i2, j1, j2 in codes:
- # End the current group and start a new one whenever
- # there is a large range with no changes.
- if tag == 'equal' and i2-i1 > nn:
- group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
- yield group
- group = []
- i1, j1 = max(i1, i2-n), max(j1, j2-n)
- group.append((tag, i1, i2, j1 ,j2))
- if group and not (len(group)==1 and group[0][0] == 'equal'):
- yield group
-
- def ratio(self):
- """Return a measure of the sequences' similarity (float in [0,1]).
-
- Where T is the total number of elements in both sequences, and
- M is the number of matches, this is 2.0*M / T.
- Note that this is 1 if the sequences are identical, and 0 if
- they have nothing in common.
-
- .ratio() is expensive to compute if you haven't already computed
- .get_matching_blocks() or .get_opcodes(), in which case you may
- want to try .quick_ratio() or .real_quick_ratio() first to get an
- upper bound.
-
- >>> s = SequenceMatcher(None, "abcd", "bcde")
- >>> s.ratio()
- 0.75
- >>> s.quick_ratio()
- 0.75
- >>> s.real_quick_ratio()
- 1.0
- """
-
- matches = reduce(lambda sum, triple: sum + triple[-1],
- self.get_matching_blocks(), 0)
- return _calculate_ratio(matches, len(self.a) + len(self.b))
-
- def quick_ratio(self):
- """Return an upper bound on ratio() relatively quickly.
-
- This isn't defined beyond that it is an upper bound on .ratio(), and
- is faster to compute.
- """
-
- # viewing a and b as multisets, set matches to the cardinality
- # of their intersection; this counts the number of matches
- # without regard to order, so is clearly an upper bound
- if self.fullbcount is None:
- self.fullbcount = fullbcount = {}
- for elt in self.b:
- fullbcount[elt] = fullbcount.get(elt, 0) + 1
- fullbcount = self.fullbcount
- # avail[x] is the number of times x appears in 'b' less the
- # number of times we've seen it in 'a' so far ... kinda
- avail = {}
- availhas, matches = avail.has_key, 0
- for elt in self.a:
- if availhas(elt):
- numb = avail[elt]
- else:
- numb = fullbcount.get(elt, 0)
- avail[elt] = numb - 1
- if numb > 0:
- matches = matches + 1
- return _calculate_ratio(matches, len(self.a) + len(self.b))
-
- def real_quick_ratio(self):
- """Return an upper bound on ratio() very quickly.
-
- This isn't defined beyond that it is an upper bound on .ratio(), and
- is faster to compute than either .ratio() or .quick_ratio().
- """
-
- la, lb = len(self.a), len(self.b)
- # can't have more matches than the number of elements in the
- # shorter sequence
- return _calculate_ratio(min(la, lb), la + lb)
-
-def get_close_matches(word, possibilities, n=3, cutoff=0.6):
- """Use SequenceMatcher to return list of the best "good enough" matches.
-
- word is a sequence for which close matches are desired (typically a
- string).
-
- possibilities is a list of sequences against which to match word
- (typically a list of strings).
-
- Optional arg n (default 3) is the maximum number of close matches to
- return. n must be > 0.
-
- Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities
- that don't score at least that similar to word are ignored.
-
- The best (no more than n) matches among the possibilities are returned
- in a list, sorted by similarity score, most similar first.
-
- >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
- ['apple', 'ape']
- >>> import keyword as _keyword
- >>> get_close_matches("wheel", _keyword.kwlist)
- ['while']
- >>> get_close_matches("apple", _keyword.kwlist)
- []
- >>> get_close_matches("accept", _keyword.kwlist)
- ['except']
- """
-
- if not n > 0:
- raise ValueError("n must be > 0: %r" % (n,))
- if not 0.0 <= cutoff <= 1.0:
- raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
- result = []
- s = SequenceMatcher()
- s.set_seq2(word)
- for x in possibilities:
- s.set_seq1(x)
- if s.real_quick_ratio() >= cutoff and \
- s.quick_ratio() >= cutoff and \
- s.ratio() >= cutoff:
- result.append((s.ratio(), x))
-
- # Move the best scorers to head of list
- result = heapq.nlargest(n, result)
- # Strip scores for the best n matches
- return [x for score, x in result]
-
-def _count_leading(line, ch):
- """
- Return number of `ch` characters at the start of `line`.
-
- Example:
-
- >>> _count_leading(' abc', ' ')
- 3
- """
-
- i, n = 0, len(line)
- while i < n and line[i] == ch:
- i += 1
- return i
-
-class Differ:
- r"""
- Differ is a class for comparing sequences of lines of text, and
- producing human-readable differences or deltas. Differ uses
- SequenceMatcher both to compare sequences of lines, and to compare
- sequences of characters within similar (near-matching) lines.
-
- Each line of a Differ delta begins with a two-letter code:
-
- '- ' line unique to sequence 1
- '+ ' line unique to sequence 2
- ' ' line common to both sequences
- '? ' line not present in either input sequence
-
- Lines beginning with '? ' attempt to guide the eye to intraline
- differences, and were not present in either input sequence. These lines
- can be confusing if the sequences contain tab characters.
-
- Note that Differ makes no claim to produce a *minimal* diff. To the
- contrary, minimal diffs are often counter-intuitive, because they synch
- up anywhere possible, sometimes accidental matches 100 pages apart.
- Restricting synch points to contiguous matches preserves some notion of
- locality, at the occasional cost of producing a longer diff.
-
- Example: Comparing two texts.
-
- First we set up the texts, sequences of individual single-line strings
- ending with newlines (such sequences can also be obtained from the
- `readlines()` method of file-like objects):
-
- >>> text1 = ''' 1. Beautiful is better than ugly.
- ... 2. Explicit is better than implicit.
- ... 3. Simple is better than complex.
- ... 4. Complex is better than complicated.
- ... '''.splitlines(1)
- >>> len(text1)
- 4
- >>> text1[0][-1]
- '\n'
- >>> text2 = ''' 1. Beautiful is better than ugly.
- ... 3. Simple is better than complex.
- ... 4. Complicated is better than complex.
- ... 5. Flat is better than nested.
- ... '''.splitlines(1)
-
- Next we instantiate a Differ object:
-
- >>> d = Differ()
-
- Note that when instantiating a Differ object we may pass functions to
- filter out line and character 'junk'. See Differ.__init__ for details.
-
- Finally, we compare the two:
-
- >>> result = list(d.compare(text1, text2))
-
- 'result' is a list of strings, so let's pretty-print it:
-
- >>> from pprint import pprint as _pprint
- >>> _pprint(result)
- [' 1. Beautiful is better than ugly.\n',
- '- 2. Explicit is better than implicit.\n',
- '- 3. Simple is better than complex.\n',
- '+ 3. Simple is better than complex.\n',
- '? ++\n',
- '- 4. Complex is better than complicated.\n',
- '? ^ ---- ^\n',
- '+ 4. Complicated is better than complex.\n',
- '? ++++ ^ ^\n',
- '+ 5. Flat is better than nested.\n']
-
- As a single multi-line string it looks like this:
-
- >>> print ''.join(result),
- 1. Beautiful is better than ugly.
- - 2. Explicit is better than implicit.
- - 3. Simple is better than complex.
- + 3. Simple is better than complex.
- ? ++
- - 4. Complex is better than complicated.
- ? ^ ---- ^
- + 4. Complicated is better than complex.
- ? ++++ ^ ^
- + 5. Flat is better than nested.
-
- Methods:
-
- __init__(linejunk=None, charjunk=None)
- Construct a text differencer, with optional filters.
-
- compare(a, b)
- Compare two sequences of lines; generate the resulting delta.
- """
-
- def __init__(self, linejunk=None, charjunk=None):
- """
- Construct a text differencer, with optional filters.
-
- The two optional keyword parameters are for filter functions:
-
- - `linejunk`: A function that should accept a single string argument,
- and return true iff the string is junk. The module-level function
- `IS_LINE_JUNK` may be used to filter out lines without visible
- characters, except for at most one splat ('#'). It is recommended
- to leave linejunk None; as of Python 2.3, the underlying
- SequenceMatcher class has grown an adaptive notion of "noise" lines
- that's better than any static definition the author has ever been
- able to craft.
-
- - `charjunk`: A function that should accept a string of length 1. The
- module-level function `IS_CHARACTER_JUNK` may be used to filter out
- whitespace characters (a blank or tab; **note**: bad idea to include
- newline in this!). Use of IS_CHARACTER_JUNK is recommended.
- """
-
- self.linejunk = linejunk
- self.charjunk = charjunk
-
- def compare(self, a, b):
- r"""
- Compare two sequences of lines; generate the resulting delta.
-
- Each sequence must contain individual single-line strings ending with
- newlines. Such sequences can be obtained from the `readlines()` method
- of file-like objects. The delta generated also consists of newline-
- terminated strings, ready to be printed as-is via the writeline()
- method of a file-like object.
-
- Example:
-
- >>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
- ... 'ore\ntree\nemu\n'.splitlines(1))),
- - one
- ? ^
- + ore
- ? ^
- - two
- - three
- ? -
- + tree
- + emu
- """
-
- cruncher = SequenceMatcher(self.linejunk, a, b)
- for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
- if tag == 'replace':
- g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
- elif tag == 'delete':
- g = self._dump('-', a, alo, ahi)
- elif tag == 'insert':
- g = self._dump('+', b, blo, bhi)
- elif tag == 'equal':
- g = self._dump(' ', a, alo, ahi)
- else:
- raise ValueError, 'unknown tag %r' % (tag,)
-
- for line in g:
- yield line
-
- def _dump(self, tag, x, lo, hi):
- """Generate comparison results for a same-tagged range."""
- for i in xrange(lo, hi):
- yield '%s %s' % (tag, x[i])
-
- def _plain_replace(self, a, alo, ahi, b, blo, bhi):
- assert alo < ahi and blo < bhi
- # dump the shorter block first -- reduces the burden on short-term
- # memory if the blocks are of very different sizes
- if bhi - blo < ahi - alo:
- first = self._dump('+', b, blo, bhi)
- second = self._dump('-', a, alo, ahi)
- else:
- first = self._dump('-', a, alo, ahi)
- second = self._dump('+', b, blo, bhi)
-
- for g in first, second:
- for line in g:
- yield line
-
- def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
- r"""
- When replacing one block of lines with another, search the blocks
- for *similar* lines; the best-matching pair (if any) is used as a
- synch point, and intraline difference marking is done on the
- similar pair. Lots of work, but often worth it.
-
- Example:
-
- >>> d = Differ()
- >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
- ... ['abcdefGhijkl\n'], 0, 1)
- >>> print ''.join(results),
- - abcDefghiJkl
- ? ^ ^ ^
- + abcdefGhijkl
- ? ^ ^ ^
- """
-
- # don't synch up unless the lines have a similarity score of at
- # least cutoff; best_ratio tracks the best score seen so far
- best_ratio, cutoff = 0.74, 0.75
- cruncher = SequenceMatcher(self.charjunk)
- eqi, eqj = None, None # 1st indices of equal lines (if any)
-
- # search for the pair that matches best without being identical
- # (identical lines must be junk lines, & we don't want to synch up
- # on junk -- unless we have to)
- for j in xrange(blo, bhi):
- bj = b[j]
- cruncher.set_seq2(bj)
- for i in xrange(alo, ahi):
- ai = a[i]
- if ai == bj:
- if eqi is None:
- eqi, eqj = i, j
- continue
- cruncher.set_seq1(ai)
- # computing similarity is expensive, so use the quick
- # upper bounds first -- have seen this speed up messy
- # compares by a factor of 3.
- # note that ratio() is only expensive to compute the first
- # time it's called on a sequence pair; the expensive part
- # of the computation is cached by cruncher
- if cruncher.real_quick_ratio() > best_ratio and \
- cruncher.quick_ratio() > best_ratio and \
- cruncher.ratio() > best_ratio:
- best_ratio, best_i, best_j = cruncher.ratio(), i, j
- if best_ratio < cutoff:
- # no non-identical "pretty close" pair
- if eqi is None:
- # no identical pair either -- treat it as a straight replace
- for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
- yield line
- return
- # no close pair, but an identical pair -- synch up on that
- best_i, best_j, best_ratio = eqi, eqj, 1.0
- else:
- # there's a close pair, so forget the identical pair (if any)
- eqi = None
-
- # a[best_i] very similar to b[best_j]; eqi is None iff they're not
- # identical
-
- # pump out diffs from before the synch point
- for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
- yield line
-
- # do intraline marking on the synch pair
- aelt, belt = a[best_i], b[best_j]
- if eqi is None:
- # pump out a '-', '?', '+', '?' quad for the synched lines
- atags = btags = ""
- cruncher.set_seqs(aelt, belt)
- for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
- la, lb = ai2 - ai1, bj2 - bj1
- if tag == 'replace':
- atags += '^' * la
- btags += '^' * lb
- elif tag == 'delete':
- atags += '-' * la
- elif tag == 'insert':
- btags += '+' * lb
- elif tag == 'equal':
- atags += ' ' * la
- btags += ' ' * lb
- else:
- raise ValueError, 'unknown tag %r' % (tag,)
- for line in self._qformat(aelt, belt, atags, btags):
- yield line
- else:
- # the synch pair is identical
- yield ' ' + aelt
-
- # pump out diffs from after the synch point
- for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
- yield line
-
- def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
- g = []
- if alo < ahi:
- if blo < bhi:
- g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
- else:
- g = self._dump('-', a, alo, ahi)
- elif blo < bhi:
- g = self._dump('+', b, blo, bhi)
-
- for line in g:
- yield line
-
- def _qformat(self, aline, bline, atags, btags):
- r"""
- Format "?" output and deal with leading tabs.
-
- Example:
-
- >>> d = Differ()
- >>> results = d._qformat('\tabcDefghiJkl\n', '\t\tabcdefGhijkl\n',
- ... ' ^ ^ ^ ', '+ ^ ^ ^ ')
- >>> for line in results: print repr(line)
- ...
- '- \tabcDefghiJkl\n'
- '? \t ^ ^ ^\n'
- '+ \t\tabcdefGhijkl\n'
- '? \t ^ ^ ^\n'
- """
-
- # Can hurt, but will probably help most of the time.
- common = min(_count_leading(aline, "\t"),
- _count_leading(bline, "\t"))
- common = min(common, _count_leading(atags[:common], " "))
- atags = atags[common:].rstrip()
- btags = btags[common:].rstrip()
-
- yield "- " + aline
- if atags:
- yield "? %s%s\n" % ("\t" * common, atags)
-
- yield "+ " + bline
- if btags:
- yield "? %s%s\n" % ("\t" * common, btags)
-
-# With respect to junk, an earlier version of ndiff simply refused to
-# *start* a match with a junk element. The result was cases like this:
-# before: private Thread currentThread;
-# after: private volatile Thread currentThread;
-# If you consider whitespace to be junk, the longest contiguous match
-# not starting with junk is "e Thread currentThread". So ndiff reported
-# that "e volatil" was inserted between the 't' and the 'e' in "private".
-# While an accurate view, to people that's absurd. The current version
-# looks for matching blocks that are entirely junk-free, then extends the
-# longest one of those as far as possible but only with matching junk.
-# So now "currentThread" is matched, then extended to suck up the
-# preceding blank; then "private" is matched, and extended to suck up the
-# following blank; then "Thread" is matched; and finally ndiff reports
-# that "volatile " was inserted before "Thread". The only quibble
-# remaining is that perhaps it was really the case that " volatile"
-# was inserted after "private". I can live with that <wink>.
-
-import re
-
-def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
- r"""
- Return 1 for ignorable line: iff `line` is blank or contains a single '#'.
-
- Examples:
-
- >>> IS_LINE_JUNK('\n')
- True
- >>> IS_LINE_JUNK(' # \n')
- True
- >>> IS_LINE_JUNK('hello\n')
- False
- """
-
- return pat(line) is not None
-
-def IS_CHARACTER_JUNK(ch, ws=" \t"):
- r"""
- Return 1 for ignorable character: iff `ch` is a space or tab.
-
- Examples:
-
- >>> IS_CHARACTER_JUNK(' ')
- True
- >>> IS_CHARACTER_JUNK('\t')
- True
- >>> IS_CHARACTER_JUNK('\n')
- False
- >>> IS_CHARACTER_JUNK('x')
- False
- """
-
- return ch in ws
-
-
-def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
- tofiledate='', n=3, lineterm='\n'):
- r"""
- Compare two sequences of lines; generate the delta as a unified diff.
-
- Unified diffs are a compact way of showing line changes and a few
- lines of context. The number of context lines is set by 'n' which
- defaults to three.
-
- By default, the diff control lines (those with ---, +++, or @@) are
- created with a trailing newline. This is helpful so that inputs
- created from file.readlines() result in diffs that are suitable for
- file.writelines() since both the inputs and outputs have trailing
- newlines.
-
- For inputs that do not have trailing newlines, set the lineterm
- argument to "" so that the output will be uniformly newline free.
-
- The unidiff format normally has a header for filenames and modification
- times. Any or all of these may be specified using strings for
- 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. The modification
- times are normally expressed in the format returned by time.ctime().
-
- Example:
-
- >>> for line in unified_diff('one two three four'.split(),
- ... 'zero one tree four'.split(), 'Original', 'Current',
- ... 'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:20:52 2003',
- ... lineterm=''):
- ... print line
- --- Original Sat Jan 26 23:30:50 1991
- +++ Current Fri Jun 06 10:20:52 2003
- @@ -1,4 +1,4 @@
- +zero
- one
- -two
- -three
- +tree
- four
- """
-
- started = False
- for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
- if not started:
- yield '--- %s %s%s' % (fromfile, fromfiledate, lineterm)
- yield '+++ %s %s%s' % (tofile, tofiledate, lineterm)
- started = True
- i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
- yield "@@ -%d,%d +%d,%d @@%s" % (i1+1, i2-i1, j1+1, j2-j1, lineterm)
- for tag, i1, i2, j1, j2 in group:
- if tag == 'equal':
- for line in a[i1:i2]:
- yield ' ' + line
- continue
- if tag == 'replace' or tag == 'delete':
- for line in a[i1:i2]:
- yield '-' + line
- if tag == 'replace' or tag == 'insert':
- for line in b[j1:j2]:
- yield '+' + line
-
-# See http://www.unix.org/single_unix_specification/
-def context_diff(a, b, fromfile='', tofile='',
- fromfiledate='', tofiledate='', n=3, lineterm='\n'):
- r"""
- Compare two sequences of lines; generate the delta as a context diff.
-
- Context diffs are a compact way of showing line changes and a few
- lines of context. The number of context lines is set by 'n' which
- defaults to three.
-
- By default, the diff control lines (those with *** or ---) are
- created with a trailing newline. This is helpful so that inputs
- created from file.readlines() result in diffs that are suitable for
- file.writelines() since both the inputs and outputs have trailing
- newlines.
-
- For inputs that do not have trailing newlines, set the lineterm
- argument to "" so that the output will be uniformly newline free.
-
- The context diff format normally has a header for filenames and
- modification times. Any or all of these may be specified using
- strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
- The modification times are normally expressed in the format returned
- by time.ctime(). If not specified, the strings default to blanks.
-
- Example:
-
- >>> print ''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
- ... 'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current',
- ... 'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:22:46 2003')),
- *** Original Sat Jan 26 23:30:50 1991
- --- Current Fri Jun 06 10:22:46 2003
- ***************
- *** 1,4 ****
- one
- ! two
- ! three
- four
- --- 1,4 ----
- + zero
- one
- ! tree
- four
- """
-
- started = False
- prefixmap = {'insert':'+ ', 'delete':'- ', 'replace':'! ', 'equal':' '}
- for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
- if not started:
- yield '*** %s %s%s' % (fromfile, fromfiledate, lineterm)
- yield '--- %s %s%s' % (tofile, tofiledate, lineterm)
- started = True
-
- yield '***************%s' % (lineterm,)
- if group[-1][2] - group[0][1] >= 2:
- yield '*** %d,%d ****%s' % (group[0][1]+1, group[-1][2], lineterm)
- else:
- yield '*** %d ****%s' % (group[-1][2], lineterm)
- visiblechanges = [e for e in group if e[0] in ('replace', 'delete')]
- if visiblechanges:
- for tag, i1, i2, _, _ in group:
- if tag != 'insert':
- for line in a[i1:i2]:
- yield prefixmap[tag] + line
-
- if group[-1][4] - group[0][3] >= 2:
- yield '--- %d,%d ----%s' % (group[0][3]+1, group[-1][4], lineterm)
- else:
- yield '--- %d ----%s' % (group[-1][4], lineterm)
- visiblechanges = [e for e in group if e[0] in ('replace', 'insert')]
- if visiblechanges:
- for tag, _, _, j1, j2 in group:
- if tag != 'delete':
- for line in b[j1:j2]:
- yield prefixmap[tag] + line
-
-def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
- r"""
- Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
-
- Optional keyword parameters `linejunk` and `charjunk` are for filter
- functions (or None):
-
- - linejunk: A function that should accept a single string argument, and
- return true iff the string is junk. The default is None, and is
- recommended; as of Python 2.3, an adaptive notion of "noise" lines is
- used that does a good job on its own.
-
- - charjunk: A function that should accept a string of length 1. The
- default is module-level function IS_CHARACTER_JUNK, which filters out
- whitespace characters (a blank or tab; note: bad idea to include newline
- in this!).
-
- Tools/scripts/ndiff.py is a command-line front-end to this function.
-
- Example:
-
- >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
- ... 'ore\ntree\nemu\n'.splitlines(1))
- >>> print ''.join(diff),
- - one
- ? ^
- + ore
- ? ^
- - two
- - three
- ? -
- + tree
- + emu
- """
- return Differ(linejunk, charjunk).compare(a, b)
-
-def _mdiff(fromlines, tolines, context=None, linejunk=None,
- charjunk=IS_CHARACTER_JUNK):
- r"""Returns generator yielding marked up from/to side by side differences.
-
- Arguments:
- fromlines -- list of text lines to compared to tolines
- tolines -- list of text lines to be compared to fromlines
- context -- number of context lines to display on each side of difference,
- if None, all from/to text lines will be generated.
- linejunk -- passed on to ndiff (see ndiff documentation)
- charjunk -- passed on to ndiff (see ndiff documentation)
-
- This function returns an interator which returns a tuple:
- (from line tuple, to line tuple, boolean flag)
-
- from/to line tuple -- (line num, line text)
- line num -- integer or None (to indicate a context seperation)
- line text -- original line text with following markers inserted:
- '\0+' -- marks start of added text
- '\0-' -- marks start of deleted text
- '\0^' -- marks start of changed text
- '\1' -- marks end of added/deleted/changed text
-
- boolean flag -- None indicates context separation, True indicates
- either "from" or "to" line contains a change, otherwise False.
-
- This function/iterator was originally developed to generate side by side
- file difference for making HTML pages (see HtmlDiff class for example
- usage).
-
- Note, this function utilizes the ndiff function to generate the side by
- side difference markup. Optional ndiff arguments may be passed to this
- function and they in turn will be passed to ndiff.
- """
- import re
-
- # regular expression for finding intraline change indices
- change_re = re.compile('(\++|\-+|\^+)')
-
- # create the difference iterator to generate the differences
- diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
-
- def _make_line(lines, format_key, side, num_lines=[0,0]):
- """Returns line of text with user's change markup and line formatting.
-
- lines -- list of lines from the ndiff generator to produce a line of
- text from. When producing the line of text to return, the
- lines used are removed from this list.
- format_key -- '+' return first line in list with "add" markup around
- the entire line.
- '-' return first line in list with "delete" markup around
- the entire line.
- '?' return first line in list with add/delete/change
- intraline markup (indices obtained from second line)
- None return first line in list with no markup
- side -- indice into the num_lines list (0=from,1=to)
- num_lines -- from/to current line number. This is NOT intended to be a
- passed parameter. It is present as a keyword argument to
- maintain memory of the current line numbers between calls
- of this function.
-
- Note, this function is purposefully not defined at the module scope so
- that data it needs from its parent function (within whose context it
- is defined) does not need to be of module scope.
- """
- num_lines[side] += 1
- # Handle case where no user markup is to be added, just return line of
- # text with user's line format to allow for usage of the line number.
- if format_key is None:
- return (num_lines[side],lines.pop(0)[2:])
- # Handle case of intraline changes
- if format_key == '?':
- text, markers = lines.pop(0), lines.pop(0)
- # find intraline changes (store change type and indices in tuples)
- sub_info = []
- def record_sub_info(match_object,sub_info=sub_info):
- sub_info.append([match_object.group(1)[0],match_object.span()])
- return match_object.group(1)
- change_re.sub(record_sub_info,markers)
- # process each tuple inserting our special marks that won't be
- # noticed by an xml/html escaper.
- for key,(begin,end) in sub_info[::-1]:
- text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
- text = text[2:]
- # Handle case of add/delete entire line
- else:
- text = lines.pop(0)[2:]
- # if line of text is just a newline, insert a space so there is
- # something for the user to highlight and see.
- if not text:
- text = ' '
- # insert marks that won't be noticed by an xml/html escaper.
- text = '\0' + format_key + text + '\1'
- # Return line of text, first allow user's line formatter to do its
- # thing (such as adding the line number) then replace the special
- # marks with what the user's change markup.
- return (num_lines[side],text)
-
- def _line_iterator():
- """Yields from/to lines of text with a change indication.
-
- This function is an iterator. It itself pulls lines from a
- differencing iterator, processes them and yields them. When it can
- it yields both a "from" and a "to" line, otherwise it will yield one
- or the other. In addition to yielding the lines of from/to text, a
- boolean flag is yielded to indicate if the text line(s) have
- differences in them.
-
- Note, this function is purposefully not defined at the module scope so
- that data it needs from its parent function (within whose context it
- is defined) does not need to be of module scope.
- """
- lines = []
- num_blanks_pending, num_blanks_to_yield = 0, 0
- while True:
- # Load up next 4 lines so we can look ahead, create strings which
- # are a concatenation of the first character of each of the 4 lines
- # so we can do some very readable comparisons.
- while len(lines) < 4:
- try:
- lines.append(diff_lines_iterator.next())
- except StopIteration:
- lines.append('X')
- s = ''.join([line[0] for line in lines])
- if s.startswith('X'):
- # When no more lines, pump out any remaining blank lines so the
- # corresponding add/delete lines get a matching blank line so
- # all line pairs get yielded at the next level.
- num_blanks_to_yield = num_blanks_pending
- elif s.startswith('-?+?'):
- # simple intraline change
- yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
- continue
- elif s.startswith('--++'):
- # in delete block, add block coming: we do NOT want to get
- # caught up on blank lines yet, just process the delete line
- num_blanks_pending -= 1
- yield _make_line(lines,'-',0), None, True
- continue
- elif s.startswith(('--?+', '--+', '- ')):
- # in delete block and see a intraline change or unchanged line
- # coming: yield the delete line and then blanks
- from_line,to_line = _make_line(lines,'-',0), None
- num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
- elif s.startswith('-+?'):
- # intraline change
- yield _make_line(lines,None,0), _make_line(lines,'?',1), True
- continue
- elif s.startswith('-?+'):
- # intraline change
- yield _make_line(lines,'?',0), _make_line(lines,None,1), True
- continue
- elif s.startswith('-'):
- # delete FROM line
- num_blanks_pending -= 1
- yield _make_line(lines,'-',0), None, True
- continue
- elif s.startswith('+--'):
- # in add block, delete block coming: we do NOT want to get
- # caught up on blank lines yet, just process the add line
- num_blanks_pending += 1
- yield None, _make_line(lines,'+',1), True
- continue
- elif s.startswith(('+ ', '+-')):
- # will be leaving an add block: yield blanks then add line
- from_line, to_line = None, _make_line(lines,'+',1)
- num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
- elif s.startswith('+'):
- # inside an add block, yield the add line
- num_blanks_pending += 1
- yield None, _make_line(lines,'+',1), True
- continue
- elif s.startswith(' '):
- # unchanged text, yield it to both sides
- yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
- continue
- # Catch up on the blank lines so when we yield the next from/to
- # pair, they are lined up.
- while(num_blanks_to_yield < 0):
- num_blanks_to_yield += 1
- yield None,('','\n'),True
- while(num_blanks_to_yield > 0):
- num_blanks_to_yield -= 1
- yield ('','\n'),None,True
- if s.startswith('X'):
- raise StopIteration
- else:
- yield from_line,to_line,True
-
- def _line_pair_iterator():
- """Yields from/to lines of text with a change indication.
-
- This function is an iterator. It itself pulls lines from the line
- iterator. Its difference from that iterator is that this function
- always yields a pair of from/to text lines (with the change
- indication). If necessary it will collect single from/to lines
- until it has a matching pair from/to pair to yield.
-
- Note, this function is purposefully not defined at the module scope so
- that data it needs from its parent function (within whose context it
- is defined) does not need to be of module scope.
- """
- line_iterator = _line_iterator()
- fromlines,tolines=[],[]
- while True:
- # Collecting lines of text until we have a from/to pair
- while (len(fromlines)==0 or len(tolines)==0):
- from_line, to_line, found_diff =line_iterator.next()
- if from_line is not None:
- fromlines.append((from_line,found_diff))
- if to_line is not None:
- tolines.append((to_line,found_diff))
- # Once we have a pair, remove them from the collection and yield it
- from_line, fromDiff = fromlines.pop(0)
- to_line, to_diff = tolines.pop(0)
- yield (from_line,to_line,fromDiff or to_diff)
-
- # Handle case where user does not want context differencing, just yield
- # them up without doing anything else with them.
- line_pair_iterator = _line_pair_iterator()
- if context is None:
- while True:
- yield line_pair_iterator.next()
- # Handle case where user wants context differencing. We must do some
- # storage of lines until we know for sure that they are to be yielded.
- else:
- context += 1
- lines_to_write = 0
- while True:
- # Store lines up until we find a difference, note use of a
- # circular queue because we only need to keep around what
- # we need for context.
- index, contextLines = 0, [None]*(context)
- found_diff = False
- while(found_diff is False):
- from_line, to_line, found_diff = line_pair_iterator.next()
- i = index % context
- contextLines[i] = (from_line, to_line, found_diff)
- index += 1
- # Yield lines that we have collected so far, but first yield
- # the user's separator.
- if index > context:
- yield None, None, None
- lines_to_write = context
- else:
- lines_to_write = index
- index = 0
- while(lines_to_write):
- i = index % context
- index += 1
- yield contextLines[i]
- lines_to_write -= 1
- # Now yield the context lines after the change
- lines_to_write = context-1
- while(lines_to_write):
- from_line, to_line, found_diff = line_pair_iterator.next()
- # If another change within the context, extend the context
- if found_diff:
- lines_to_write = context-1
- else:
- lines_to_write -= 1
- yield from_line, to_line, found_diff
-
-
-_file_template = """
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
-<html>
-
-<head>
- <meta http-equiv="Content-Type"
- content="text/html; charset=ISO-8859-1" />
- <title></title>
- <style type="text/css">%(styles)s
- </style>
-</head>
-
-<body>
- %(table)s%(legend)s
-</body>
-
-</html>"""
-
-_styles = """
- table.diff {font-family:Courier; border:medium;}
- .diff_header {background-color:#e0e0e0}
- td.diff_header {text-align:right}
- .diff_next {background-color:#c0c0c0}
- .diff_add {background-color:#aaffaa}
- .diff_chg {background-color:#ffff77}
- .diff_sub {background-color:#ffaaaa}"""
-
-_table_template = """
- <table class="diff" id="difflib_chg_%(prefix)s_top"
- cellspacing="0" cellpadding="0" rules="groups" >
- <colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
- <colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
- %(header_row)s
- <tbody>
-%(data_rows)s </tbody>
- </table>"""
-
-_legend = """
- <table class="diff" summary="Legends">
- <tr> <th colspan="2"> Legends </th> </tr>
- <tr> <td> <table border="" summary="Colors">
- <tr><th> Colors </th> </tr>
- <tr><td class="diff_add">&nbsp;Added&nbsp;</td></tr>
- <tr><td class="diff_chg">Changed</td> </tr>
- <tr><td class="diff_sub">Deleted</td> </tr>
- </table></td>
- <td> <table border="" summary="Links">
- <tr><th colspan="2"> Links </th> </tr>
- <tr><td>(f)irst change</td> </tr>
- <tr><td>(n)ext change</td> </tr>
- <tr><td>(t)op</td> </tr>
- </table></td> </tr>
- </table>"""
-
-class HtmlDiff(object):
- """For producing HTML side by side comparison with change highlights.
-
- This class can be used to create an HTML table (or a complete HTML file
- containing the table) showing a side by side, line by line comparison
- of text with inter-line and intra-line change highlights. The table can
- be generated in either full or contextual difference mode.
-
- The following methods are provided for HTML generation:
-
- make_table -- generates HTML for a single side by side table
- make_file -- generates complete HTML file with a single side by side table
-
- See tools/scripts/diff.py for an example usage of this class.
- """
-
- _file_template = _file_template
- _styles = _styles
- _table_template = _table_template
- _legend = _legend
- _default_prefix = 0
-
- def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
- charjunk=IS_CHARACTER_JUNK):
- """HtmlDiff instance initializer
-
- Arguments:
- tabsize -- tab stop spacing, defaults to 8.
- wrapcolumn -- column number where lines are broken and wrapped,
- defaults to None where lines are not wrapped.
- linejunk,charjunk -- keyword arguments passed into ndiff() (used to by
- HtmlDiff() to generate the side by side HTML differences). See
- ndiff() documentation for argument default values and descriptions.
- """
- self._tabsize = tabsize
- self._wrapcolumn = wrapcolumn
- self._linejunk = linejunk
- self._charjunk = charjunk
-
- def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
- numlines=5):
- """Returns HTML file of side by side comparison with change highlights
-
- Arguments:
- fromlines -- list of "from" lines
- tolines -- list of "to" lines
- fromdesc -- "from" file column header string
- todesc -- "to" file column header string
- context -- set to True for contextual differences (defaults to False
- which shows full differences).
- numlines -- number of context lines. When context is set True,
- controls number of lines displayed before and after the change.
- When context is False, controls the number of lines to place
- the "next" link anchors before the next change (so click of
- "next" link jumps to just before the change).
- """
-
- return self._file_template % dict(
- styles = self._styles,
- legend = self._legend,
- table = self.make_table(fromlines,tolines,fromdesc,todesc,
- context=context,numlines=numlines))
-
- def _tab_newline_replace(self,fromlines,tolines):
- """Returns from/to line lists with tabs expanded and newlines removed.
-
- Instead of tab characters being replaced by the number of spaces
- needed to fill in to the next tab stop, this function will fill
- the space with tab characters. This is done so that the difference
- algorithms can identify changes in a file when tabs are replaced by
- spaces and vice versa. At the end of the HTML generation, the tab
- characters will be replaced with a nonbreakable space.
- """
- def expand_tabs(line):
- # hide real spaces
- line = line.replace(' ','\0')
- # expand tabs into spaces
- line = line.expandtabs(self._tabsize)
- # relace spaces from expanded tabs back into tab characters
- # (we'll replace them with markup after we do differencing)
- line = line.replace(' ','\t')
- return line.replace('\0',' ').rstrip('\n')
- fromlines = [expand_tabs(line) for line in fromlines]
- tolines = [expand_tabs(line) for line in tolines]
- return fromlines,tolines
-
- def _split_line(self,data_list,line_num,text):
- """Builds list of text lines by splitting text lines at wrap point
-
- This function will determine if the input text line needs to be
- wrapped (split) into separate lines. If so, the first wrap point
- will be determined and the first line appended to the output
- text line list. This function is used recursively to handle
- the second part of the split line to further split it.
- """
- # if blank line or context separator, just add it to the output list
- if not line_num:
- data_list.append((line_num,text))
- return
-
- # if line text doesn't need wrapping, just add it to the output list
- size = len(text)
- max = self._wrapcolumn
- if (size <= max) or ((size -(text.count('\0')*3)) <= max):
- data_list.append((line_num,text))
- return
-
- # scan text looking for the wrap point, keeping track if the wrap
- # point is inside markers
- i = 0
- n = 0
- mark = ''
- while n < max and i < size:
- if text[i] == '\0':
- i += 1
- mark = text[i]
- i += 1
- elif text[i] == '\1':
- i += 1
- mark = ''
- else:
- i += 1
- n += 1
-
- # wrap point is inside text, break it up into separate lines
- line1 = text[:i]
- line2 = text[i:]
-
- # if wrap point is inside markers, place end marker at end of first
- # line and start marker at beginning of second line because each
- # line will have its own table tag markup around it.
- if mark:
- line1 = line1 + '\1'
- line2 = '\0' + mark + line2
-
- # tack on first line onto the output list
- data_list.append((line_num,line1))
-
- # use this routine again to wrap the remaining text
- self._split_line(data_list,'>',line2)
-
- def _line_wrapper(self,diffs):
- """Returns iterator that splits (wraps) mdiff text lines"""
-
- # pull from/to data and flags from mdiff iterator
- for fromdata,todata,flag in diffs:
- # check for context separators and pass them through
- if flag is None:
- yield fromdata,todata,flag
- continue
- (fromline,fromtext),(toline,totext) = fromdata,todata
- # for each from/to line split it at the wrap column to form
- # list of text lines.
- fromlist,tolist = [],[]
- self._split_line(fromlist,fromline,fromtext)
- self._split_line(tolist,toline,totext)
- # yield from/to line in pairs inserting blank lines as
- # necessary when one side has more wrapped lines
- while fromlist or tolist:
- if fromlist:
- fromdata = fromlist.pop(0)
- else:
- fromdata = ('',' ')
- if tolist:
- todata = tolist.pop(0)
- else:
- todata = ('',' ')
- yield fromdata,todata,flag
-
- def _collect_lines(self,diffs):
- """Collects mdiff output into separate lists
-
- Before storing the mdiff from/to data into a list, it is converted
- into a single line of text with HTML markup.
- """
-
- fromlist,tolist,flaglist = [],[],[]
- # pull from/to data and flags from mdiff style iterator
- for fromdata,todata,flag in diffs:
- try:
- # store HTML markup of the lines into the lists
- fromlist.append(self._format_line(0,flag,*fromdata))
- tolist.append(self._format_line(1,flag,*todata))
- except TypeError:
- # exceptions occur for lines where context separators go
- fromlist.append(None)
- tolist.append(None)
- flaglist.append(flag)
- return fromlist,tolist,flaglist
-
- def _format_line(self,side,flag,linenum,text):
- """Returns HTML markup of "from" / "to" text lines
-
- side -- 0 or 1 indicating "from" or "to" text
- flag -- indicates if difference on line
- linenum -- line number (used for line number column)
- text -- line text to be marked up
- """
- try:
- linenum = '%d' % linenum
- id = ' id="%s%s"' % (self._prefix[side],linenum)
- except TypeError:
- # handle blank lines where linenum is '>' or ''
- id = ''
- # replace those things that would get confused with HTML symbols
- text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
-
- # make space non-breakable so they don't get compressed or line wrapped
- text = text.replace(' ','&nbsp;').rstrip()
-
- return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
- % (id,linenum,text)
-
- def _make_prefix(self):
- """Create unique anchor prefixes"""
-
- # Generate a unique anchor prefix so multiple tables
- # can exist on the same HTML page without conflicts.
- fromprefix = "from%d_" % HtmlDiff._default_prefix
- toprefix = "to%d_" % HtmlDiff._default_prefix
- HtmlDiff._default_prefix += 1
- # store prefixes so line format method has access
- self._prefix = [fromprefix,toprefix]
-
- def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
- """Makes list of "next" links"""
-
- # all anchor names will be generated using the unique "to" prefix
- toprefix = self._prefix[1]
-
- # process change flags, generating middle column of next anchors/links
- next_id = ['']*len(flaglist)
- next_href = ['']*len(flaglist)
- num_chg, in_change = 0, False
- last = 0
- for i,flag in enumerate(flaglist):
- if flag:
- if not in_change:
- in_change = True
- last = i
- # at the beginning of a change, drop an anchor a few lines
- # (the context lines) before the change for the previous
- # link
- i = max([0,i-numlines])
- next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
- # at the beginning of a change, drop a link to the next
- # change
- num_chg += 1
- next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
- toprefix,num_chg)
- else:
- in_change = False
- # check for cases where there is no content to avoid exceptions
- if not flaglist:
- flaglist = [False]
- next_id = ['']
- next_href = ['']
- last = 0
- if context:
- fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
- tolist = fromlist
- else:
- fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
- # if not a change on first line, drop a link
- if not flaglist[0]:
- next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
- # redo the last link to link to the top
- next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
-
- return fromlist,tolist,flaglist,next_href,next_id
-
- def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
- numlines=5):
- """Returns HTML table of side by side comparison with change highlights
-
- Arguments:
- fromlines -- list of "from" lines
- tolines -- list of "to" lines
- fromdesc -- "from" file column header string
- todesc -- "to" file column header string
- context -- set to True for contextual differences (defaults to False
- which shows full differences).
- numlines -- number of context lines. When context is set True,
- controls number of lines displayed before and after the change.
- When context is False, controls the number of lines to place
- the "next" link anchors before the next change (so click of
- "next" link jumps to just before the change).
- """
-
- # make unique anchor prefixes so that multiple tables may exist
- # on the same page without conflict.
- self._make_prefix()
-
- # change tabs to spaces before it gets more difficult after we insert
- # markkup
- fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
-
- # create diffs iterator which generates side by side from/to data
- if context:
- context_lines = numlines
- else:
- context_lines = None
- diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
- charjunk=self._charjunk)
-
- # set up iterator to wrap lines that exceed desired width
- if self._wrapcolumn:
- diffs = self._line_wrapper(diffs)
-
- # collect up from/to lines and flags into lists (also format the lines)
- fromlist,tolist,flaglist = self._collect_lines(diffs)
-
- # process change flags, generating middle column of next anchors/links
- fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
- fromlist,tolist,flaglist,context,numlines)
-
- s = []
- fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
- '<td class="diff_next">%s</td>%s</tr>\n'
- for i in range(len(flaglist)):
- if flaglist[i] is None:
- # mdiff yields None on separator lines skip the bogus ones
- # generated for the first line
- if i > 0:
- s.append(' </tbody> \n <tbody>\n')
- else:
- s.append( fmt % (next_id[i],next_href[i],fromlist[i],
- next_href[i],tolist[i]))
- if fromdesc or todesc:
- header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
- '<th class="diff_next"><br /></th>',
- '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
- '<th class="diff_next"><br /></th>',
- '<th colspan="2" class="diff_header">%s</th>' % todesc)
- else:
- header_row = ''
-
- table = self._table_template % dict(
- data_rows=''.join(s),
- header_row=header_row,
- prefix=self._prefix[1])
-
- return table.replace('\0+','<span class="diff_add">'). \
- replace('\0-','<span class="diff_sub">'). \
- replace('\0^','<span class="diff_chg">'). \
- replace('\1','</span>'). \
- replace('\t','&nbsp;')
-
-del re
-
-def restore(delta, which):
- r"""
- Generate one of the two sequences that generated a delta.
-
- Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
- lines originating from file 1 or 2 (parameter `which`), stripping off line
- prefixes.
-
- Examples:
-
- >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
- ... 'ore\ntree\nemu\n'.splitlines(1))
- >>> diff = list(diff)
- >>> print ''.join(restore(diff, 1)),
- one
- two
- three
- >>> print ''.join(restore(diff, 2)),
- ore
- tree
- emu
- """
- try:
- tag = {1: "- ", 2: "+ "}[int(which)]
- except KeyError:
- raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
- % which)
- prefixes = (" ", tag)
- for line in delta:
- if line[:2] in prefixes:
- yield line[2:]
-
-def _test():
- import doctest, difflib
- return doctest.testmod(difflib)
-
-if __name__ == "__main__":
- _test()
diff --git a/sys/lib/python/dircache.py b/sys/lib/python/dircache.py
deleted file mode 100644
index 78ec7fe0f..000000000
--- a/sys/lib/python/dircache.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Read and cache directory listings.
-
-The listdir() routine returns a sorted list of the files in a directory,
-using a cache to avoid reading the directory more often than necessary.
-The annotate() routine appends slashes to directories."""
-
-import os
-
-__all__ = ["listdir", "opendir", "annotate", "reset"]
-
-cache = {}
-
-def reset():
- """Reset the cache completely."""
- global cache
- cache = {}
-
-def listdir(path):
- """List directory contents, using cache."""
- try:
- cached_mtime, list = cache[path]
- del cache[path]
- except KeyError:
- cached_mtime, list = -1, []
- mtime = os.stat(path).st_mtime
- if mtime != cached_mtime:
- list = os.listdir(path)
- list.sort()
- cache[path] = mtime, list
- return list
-
-opendir = listdir # XXX backward compatibility
-
-def annotate(head, list):
- """Add '/' suffixes to directories."""
- for i in range(len(list)):
- if os.path.isdir(os.path.join(head, list[i])):
- list[i] = list[i] + '/'
diff --git a/sys/lib/python/dis.py b/sys/lib/python/dis.py
deleted file mode 100644
index 5a74b3ae8..000000000
--- a/sys/lib/python/dis.py
+++ /dev/null
@@ -1,223 +0,0 @@
-"""Disassembler of Python byte code into mnemonics."""
-
-import sys
-import types
-
-from opcode import *
-from opcode import __all__ as _opcodes_all
-
-__all__ = ["dis","disassemble","distb","disco"] + _opcodes_all
-del _opcodes_all
-
-def dis(x=None):
- """Disassemble classes, methods, functions, or code.
-
- With no argument, disassemble the last traceback.
-
- """
- if x is None:
- distb()
- return
- if type(x) is types.InstanceType:
- x = x.__class__
- if hasattr(x, 'im_func'):
- x = x.im_func
- if hasattr(x, 'func_code'):
- x = x.func_code
- if hasattr(x, '__dict__'):
- items = x.__dict__.items()
- items.sort()
- for name, x1 in items:
- if type(x1) in (types.MethodType,
- types.FunctionType,
- types.CodeType,
- types.ClassType):
- print "Disassembly of %s:" % name
- try:
- dis(x1)
- except TypeError, msg:
- print "Sorry:", msg
- print
- elif hasattr(x, 'co_code'):
- disassemble(x)
- elif isinstance(x, str):
- disassemble_string(x)
- else:
- raise TypeError, \
- "don't know how to disassemble %s objects" % \
- type(x).__name__
-
-def distb(tb=None):
- """Disassemble a traceback (default: last traceback)."""
- if tb is None:
- try:
- tb = sys.last_traceback
- except AttributeError:
- raise RuntimeError, "no last traceback to disassemble"
- while tb.tb_next: tb = tb.tb_next
- disassemble(tb.tb_frame.f_code, tb.tb_lasti)
-
-def disassemble(co, lasti=-1):
- """Disassemble a code object."""
- code = co.co_code
- labels = findlabels(code)
- linestarts = dict(findlinestarts(co))
- n = len(code)
- i = 0
- extended_arg = 0
- free = None
- while i < n:
- c = code[i]
- op = ord(c)
- if i in linestarts:
- if i > 0:
- print
- print "%3d" % linestarts[i],
- else:
- print ' ',
-
- if i == lasti: print '-->',
- else: print ' ',
- if i in labels: print '>>',
- else: print ' ',
- print repr(i).rjust(4),
- print opname[op].ljust(20),
- i = i+1
- if op >= HAVE_ARGUMENT:
- oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
- extended_arg = 0
- i = i+2
- if op == EXTENDED_ARG:
- extended_arg = oparg*65536L
- print repr(oparg).rjust(5),
- if op in hasconst:
- print '(' + repr(co.co_consts[oparg]) + ')',
- elif op in hasname:
- print '(' + co.co_names[oparg] + ')',
- elif op in hasjrel:
- print '(to ' + repr(i + oparg) + ')',
- elif op in haslocal:
- print '(' + co.co_varnames[oparg] + ')',
- elif op in hascompare:
- print '(' + cmp_op[oparg] + ')',
- elif op in hasfree:
- if free is None:
- free = co.co_cellvars + co.co_freevars
- print '(' + free[oparg] + ')',
- print
-
-def disassemble_string(code, lasti=-1, varnames=None, names=None,
- constants=None):
- labels = findlabels(code)
- n = len(code)
- i = 0
- while i < n:
- c = code[i]
- op = ord(c)
- if i == lasti: print '-->',
- else: print ' ',
- if i in labels: print '>>',
- else: print ' ',
- print repr(i).rjust(4),
- print opname[op].ljust(15),
- i = i+1
- if op >= HAVE_ARGUMENT:
- oparg = ord(code[i]) + ord(code[i+1])*256
- i = i+2
- print repr(oparg).rjust(5),
- if op in hasconst:
- if constants:
- print '(' + repr(constants[oparg]) + ')',
- else:
- print '(%d)'%oparg,
- elif op in hasname:
- if names is not None:
- print '(' + names[oparg] + ')',
- else:
- print '(%d)'%oparg,
- elif op in hasjrel:
- print '(to ' + repr(i + oparg) + ')',
- elif op in haslocal:
- if varnames:
- print '(' + varnames[oparg] + ')',
- else:
- print '(%d)' % oparg,
- elif op in hascompare:
- print '(' + cmp_op[oparg] + ')',
- print
-
-disco = disassemble # XXX For backwards compatibility
-
-def findlabels(code):
- """Detect all offsets in a byte code which are jump targets.
-
- Return the list of offsets.
-
- """
- labels = []
- n = len(code)
- i = 0
- while i < n:
- c = code[i]
- op = ord(c)
- i = i+1
- if op >= HAVE_ARGUMENT:
- oparg = ord(code[i]) + ord(code[i+1])*256
- i = i+2
- label = -1
- if op in hasjrel:
- label = i+oparg
- elif op in hasjabs:
- label = oparg
- if label >= 0:
- if label not in labels:
- labels.append(label)
- return labels
-
-def findlinestarts(code):
- """Find the offsets in a byte code which are start of lines in the source.
-
- Generate pairs (offset, lineno) as described in Python/compile.c.
-
- """
- byte_increments = [ord(c) for c in code.co_lnotab[0::2]]
- line_increments = [ord(c) for c in code.co_lnotab[1::2]]
-
- lastlineno = None
- lineno = code.co_firstlineno
- addr = 0
- for byte_incr, line_incr in zip(byte_increments, line_increments):
- if byte_incr:
- if lineno != lastlineno:
- yield (addr, lineno)
- lastlineno = lineno
- addr += byte_incr
- lineno += line_incr
- if lineno != lastlineno:
- yield (addr, lineno)
-
-def _test():
- """Simple test program to disassemble a file."""
- if sys.argv[1:]:
- if sys.argv[2:]:
- sys.stderr.write("usage: python dis.py [-|file]\n")
- sys.exit(2)
- fn = sys.argv[1]
- if not fn or fn == "-":
- fn = None
- else:
- fn = None
- if fn is None:
- f = sys.stdin
- else:
- f = open(fn)
- source = f.read()
- if fn is not None:
- f.close()
- else:
- fn = "<stdin>"
- code = compile(source, fn, "exec")
- dis(code)
-
-if __name__ == "__main__":
- _test()
diff --git a/sys/lib/python/distutils/README b/sys/lib/python/distutils/README
deleted file mode 100644
index f32153159..000000000
--- a/sys/lib/python/distutils/README
+++ /dev/null
@@ -1,22 +0,0 @@
-This directory contains only a subset of the Distutils, specifically
-the Python modules in the 'distutils' and 'distutils.command'
-packages. This is all you need to distribute and install Python
-modules using the Distutils. There is also a separately packaged
-standalone version of the Distutils available for people who want to
-upgrade the Distutils without upgrading Python, available from the
-Distutils web page:
-
- http://www.python.org/sigs/distutils-sig/
-
-The standalone version includes all of the code in this directory,
-plus documentation, test scripts, examples, etc.
-
-The Distutils documentation is divided into two documents, "Installing
-Python Modules", which explains how to install Python packages, and
-"Distributing Python Modules", which explains how to write setup.py
-files. Both documents are part of the standard Python documentation
-set, and are available from http://www.python.org/doc/current/ .
-
- Greg Ward (gward@python.net)
-
-$Id: README 29650 2002-11-13 13:26:59Z akuchling $
diff --git a/sys/lib/python/distutils/__init__.py b/sys/lib/python/distutils/__init__.py
deleted file mode 100644
index b3db4a14e..000000000
--- a/sys/lib/python/distutils/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""distutils
-
-The main package for the Python Module Distribution Utilities. Normally
-used from a setup script as
-
- from distutils.core import setup
-
- setup (...)
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: __init__.py 54641 2007-03-31 21:02:43Z marc-andre.lemburg $"
-
-# Distutils version
-#
-# Please coordinate with Marc-Andre Lemburg <mal@egenix.com> when adding
-# new features to distutils that would warrant bumping the version number.
-#
-# In general, major and minor version should loosely follow the Python
-# version number the distutils code was shipped with.
-#
-__version__ = "2.5.1"
diff --git a/sys/lib/python/distutils/archive_util.py b/sys/lib/python/distutils/archive_util.py
deleted file mode 100644
index fe746f649..000000000
--- a/sys/lib/python/distutils/archive_util.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""distutils.archive_util
-
-Utility functions for creating archive files (tarballs, zip files,
-that sort of thing)."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: archive_util.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os
-from distutils.errors import DistutilsExecError
-from distutils.spawn import spawn
-from distutils.dir_util import mkpath
-from distutils import log
-
-def make_tarball (base_name, base_dir, compress="gzip",
- verbose=0, dry_run=0):
- """Create a (possibly compressed) tar file from all the files under
- 'base_dir'. 'compress' must be "gzip" (the default), "compress",
- "bzip2", or None. Both "tar" and the compression utility named by
- 'compress' must be on the default program search path, so this is
- probably Unix-specific. The output tar file will be named 'base_dir' +
- ".tar", possibly plus the appropriate compression extension (".gz",
- ".bz2" or ".Z"). Return the output filename.
- """
- # XXX GNU tar 1.13 has a nifty option to add a prefix directory.
- # It's pretty new, though, so we certainly can't require it --
- # but it would be nice to take advantage of it to skip the
- # "create a tree of hardlinks" step! (Would also be nice to
- # detect GNU tar to use its 'z' option and save a step.)
-
- compress_ext = { 'gzip': ".gz",
- 'bzip2': '.bz2',
- 'compress': ".Z" }
-
- # flags for compression program, each element of list will be an argument
- compress_flags = {'gzip': ["-f9"],
- 'compress': ["-f"],
- 'bzip2': ['-f9']}
-
- if compress is not None and compress not in compress_ext.keys():
- raise ValueError, \
- "bad value for 'compress': must be None, 'gzip', or 'compress'"
-
- archive_name = base_name + ".tar"
- mkpath(os.path.dirname(archive_name), dry_run=dry_run)
- cmd = ["tar", "-cf", archive_name, base_dir]
- spawn(cmd, dry_run=dry_run)
-
- if compress:
- spawn([compress] + compress_flags[compress] + [archive_name],
- dry_run=dry_run)
- return archive_name + compress_ext[compress]
- else:
- return archive_name
-
-# make_tarball ()
-
-
-def make_zipfile (base_name, base_dir, verbose=0, dry_run=0):
- """Create a zip file from all the files under 'base_dir'. The output
- zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
- Python module (if available) or the InfoZIP "zip" utility (if installed
- and found on the default search path). If neither tool is available,
- raises DistutilsExecError. Returns the name of the output zip file.
- """
- try:
- import zipfile
- except ImportError:
- zipfile = None
-
- zip_filename = base_name + ".zip"
- mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
-
- # If zipfile module is not available, try spawning an external
- # 'zip' command.
- if zipfile is None:
- if verbose:
- zipoptions = "-r"
- else:
- zipoptions = "-rq"
-
- try:
- spawn(["zip", zipoptions, zip_filename, base_dir],
- dry_run=dry_run)
- except DistutilsExecError:
- # XXX really should distinguish between "couldn't find
- # external 'zip' command" and "zip failed".
- raise DistutilsExecError, \
- ("unable to create zip file '%s': "
- "could neither import the 'zipfile' module nor "
- "find a standalone zip utility") % zip_filename
-
- else:
- log.info("creating '%s' and adding '%s' to it",
- zip_filename, base_dir)
-
- def visit (z, dirname, names):
- for name in names:
- path = os.path.normpath(os.path.join(dirname, name))
- if os.path.isfile(path):
- z.write(path, path)
- log.info("adding '%s'" % path)
-
- if not dry_run:
- z = zipfile.ZipFile(zip_filename, "w",
- compression=zipfile.ZIP_DEFLATED)
-
- os.path.walk(base_dir, visit, z)
- z.close()
-
- return zip_filename
-
-# make_zipfile ()
-
-
-ARCHIVE_FORMATS = {
- 'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
- 'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
- 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
- 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
- 'zip': (make_zipfile, [],"ZIP file")
- }
-
-def check_archive_formats (formats):
- for format in formats:
- if not ARCHIVE_FORMATS.has_key(format):
- return format
- else:
- return None
-
-def make_archive (base_name, format,
- root_dir=None, base_dir=None,
- verbose=0, dry_run=0):
- """Create an archive file (eg. zip or tar). 'base_name' is the name
- of the file to create, minus any format-specific extension; 'format'
- is the archive format: one of "zip", "tar", "ztar", or "gztar".
- 'root_dir' is a directory that will be the root directory of the
- archive; ie. we typically chdir into 'root_dir' before creating the
- archive. 'base_dir' is the directory where we start archiving from;
- ie. 'base_dir' will be the common prefix of all files and
- directories in the archive. 'root_dir' and 'base_dir' both default
- to the current directory. Returns the name of the archive file.
- """
- save_cwd = os.getcwd()
- if root_dir is not None:
- log.debug("changing into '%s'", root_dir)
- base_name = os.path.abspath(base_name)
- if not dry_run:
- os.chdir(root_dir)
-
- if base_dir is None:
- base_dir = os.curdir
-
- kwargs = { 'dry_run': dry_run }
-
- try:
- format_info = ARCHIVE_FORMATS[format]
- except KeyError:
- raise ValueError, "unknown archive format '%s'" % format
-
- func = format_info[0]
- for (arg,val) in format_info[1]:
- kwargs[arg] = val
- filename = apply(func, (base_name, base_dir), kwargs)
-
- if root_dir is not None:
- log.debug("changing back to '%s'", save_cwd)
- os.chdir(save_cwd)
-
- return filename
-
-# make_archive ()
diff --git a/sys/lib/python/distutils/bcppcompiler.py b/sys/lib/python/distutils/bcppcompiler.py
deleted file mode 100644
index 5e7e0b4db..000000000
--- a/sys/lib/python/distutils/bcppcompiler.py
+++ /dev/null
@@ -1,398 +0,0 @@
-"""distutils.bcppcompiler
-
-Contains BorlandCCompiler, an implementation of the abstract CCompiler class
-for the Borland C++ compiler.
-"""
-
-# This implementation by Lyle Johnson, based on the original msvccompiler.py
-# module and using the directions originally published by Gordon Williams.
-
-# XXX looks like there's a LOT of overlap between these two classes:
-# someone should sit down and factor out the common code as
-# WindowsCCompiler! --GPW
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: bcppcompiler.py 37828 2004-11-10 22:23:15Z loewis $"
-
-
-import sys, os
-from distutils.errors import \
- DistutilsExecError, DistutilsPlatformError, \
- CompileError, LibError, LinkError, UnknownFileError
-from distutils.ccompiler import \
- CCompiler, gen_preprocess_options, gen_lib_options
-from distutils.file_util import write_file
-from distutils.dep_util import newer
-from distutils import log
-
-class BCPPCompiler(CCompiler) :
- """Concrete class that implements an interface to the Borland C/C++
- compiler, as defined by the CCompiler abstract class.
- """
-
- compiler_type = 'bcpp'
-
- # Just set this so CCompiler's constructor doesn't barf. We currently
- # don't use the 'set_executables()' bureaucracy provided by CCompiler,
- # as it really isn't necessary for this sort of single-compiler class.
- # Would be nice to have a consistent interface with UnixCCompiler,
- # though, so it's worth thinking about.
- executables = {}
-
- # Private class data (need to distinguish C from C++ source for compiler)
- _c_extensions = ['.c']
- _cpp_extensions = ['.cc', '.cpp', '.cxx']
-
- # Needed for the filename generation methods provided by the
- # base class, CCompiler.
- src_extensions = _c_extensions + _cpp_extensions
- obj_extension = '.obj'
- static_lib_extension = '.lib'
- shared_lib_extension = '.dll'
- static_lib_format = shared_lib_format = '%s%s'
- exe_extension = '.exe'
-
-
- def __init__ (self,
- verbose=0,
- dry_run=0,
- force=0):
-
- CCompiler.__init__ (self, verbose, dry_run, force)
-
- # These executables are assumed to all be in the path.
- # Borland doesn't seem to use any special registry settings to
- # indicate their installation locations.
-
- self.cc = "bcc32.exe"
- self.linker = "ilink32.exe"
- self.lib = "tlib.exe"
-
- self.preprocess_options = None
- self.compile_options = ['/tWM', '/O2', '/q', '/g0']
- self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
-
- self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
- self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
- self.ldflags_static = []
- self.ldflags_exe = ['/Gn', '/q', '/x']
- self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
-
-
- # -- Worker methods ------------------------------------------------
-
- def compile(self, sources,
- output_dir=None, macros=None, include_dirs=None, debug=0,
- extra_preargs=None, extra_postargs=None, depends=None):
-
- macros, objects, extra_postargs, pp_opts, build = \
- self._setup_compile(output_dir, macros, include_dirs, sources,
- depends, extra_postargs)
- compile_opts = extra_preargs or []
- compile_opts.append ('-c')
- if debug:
- compile_opts.extend (self.compile_options_debug)
- else:
- compile_opts.extend (self.compile_options)
-
- for obj in objects:
- try:
- src, ext = build[obj]
- except KeyError:
- continue
- # XXX why do the normpath here?
- src = os.path.normpath(src)
- obj = os.path.normpath(obj)
- # XXX _setup_compile() did a mkpath() too but before the normpath.
- # Is it possible to skip the normpath?
- self.mkpath(os.path.dirname(obj))
-
- if ext == '.res':
- # This is already a binary file -- skip it.
- continue # the 'for' loop
- if ext == '.rc':
- # This needs to be compiled to a .res file -- do it now.
- try:
- self.spawn (["brcc32", "-fo", obj, src])
- except DistutilsExecError, msg:
- raise CompileError, msg
- continue # the 'for' loop
-
- # The next two are both for the real compiler.
- if ext in self._c_extensions:
- input_opt = ""
- elif ext in self._cpp_extensions:
- input_opt = "-P"
- else:
- # Unknown file type -- no extra options. The compiler
- # will probably fail, but let it just in case this is a
- # file the compiler recognizes even if we don't.
- input_opt = ""
-
- output_opt = "-o" + obj
-
- # Compiler command line syntax is: "bcc32 [options] file(s)".
- # Note that the source file names must appear at the end of
- # the command line.
- try:
- self.spawn ([self.cc] + compile_opts + pp_opts +
- [input_opt, output_opt] +
- extra_postargs + [src])
- except DistutilsExecError, msg:
- raise CompileError, msg
-
- return objects
-
- # compile ()
-
-
- def create_static_lib (self,
- objects,
- output_libname,
- output_dir=None,
- debug=0,
- target_lang=None):
-
- (objects, output_dir) = self._fix_object_args (objects, output_dir)
- output_filename = \
- self.library_filename (output_libname, output_dir=output_dir)
-
- if self._need_link (objects, output_filename):
- lib_args = [output_filename, '/u'] + objects
- if debug:
- pass # XXX what goes here?
- try:
- self.spawn ([self.lib] + lib_args)
- except DistutilsExecError, msg:
- raise LibError, msg
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- # create_static_lib ()
-
-
- def link (self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
-
- # XXX this ignores 'build_temp'! should follow the lead of
- # msvccompiler.py
-
- (objects, output_dir) = self._fix_object_args (objects, output_dir)
- (libraries, library_dirs, runtime_library_dirs) = \
- self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
-
- if runtime_library_dirs:
- log.warn("I don't know what to do with 'runtime_library_dirs': %s",
- str(runtime_library_dirs))
-
- if output_dir is not None:
- output_filename = os.path.join (output_dir, output_filename)
-
- if self._need_link (objects, output_filename):
-
- # Figure out linker args based on type of target.
- if target_desc == CCompiler.EXECUTABLE:
- startup_obj = 'c0w32'
- if debug:
- ld_args = self.ldflags_exe_debug[:]
- else:
- ld_args = self.ldflags_exe[:]
- else:
- startup_obj = 'c0d32'
- if debug:
- ld_args = self.ldflags_shared_debug[:]
- else:
- ld_args = self.ldflags_shared[:]
-
-
- # Create a temporary exports file for use by the linker
- if export_symbols is None:
- def_file = ''
- else:
- head, tail = os.path.split (output_filename)
- modname, ext = os.path.splitext (tail)
- temp_dir = os.path.dirname(objects[0]) # preserve tree structure
- def_file = os.path.join (temp_dir, '%s.def' % modname)
- contents = ['EXPORTS']
- for sym in (export_symbols or []):
- contents.append(' %s=_%s' % (sym, sym))
- self.execute(write_file, (def_file, contents),
- "writing %s" % def_file)
-
- # Borland C++ has problems with '/' in paths
- objects2 = map(os.path.normpath, objects)
- # split objects in .obj and .res files
- # Borland C++ needs them at different positions in the command line
- objects = [startup_obj]
- resources = []
- for file in objects2:
- (base, ext) = os.path.splitext(os.path.normcase(file))
- if ext == '.res':
- resources.append(file)
- else:
- objects.append(file)
-
-
- for l in library_dirs:
- ld_args.append("/L%s" % os.path.normpath(l))
- ld_args.append("/L.") # we sometimes use relative paths
-
- # list of object files
- ld_args.extend(objects)
-
- # XXX the command-line syntax for Borland C++ is a bit wonky;
- # certain filenames are jammed together in one big string, but
- # comma-delimited. This doesn't mesh too well with the
- # Unix-centric attitude (with a DOS/Windows quoting hack) of
- # 'spawn()', so constructing the argument list is a bit
- # awkward. Note that doing the obvious thing and jamming all
- # the filenames and commas into one argument would be wrong,
- # because 'spawn()' would quote any filenames with spaces in
- # them. Arghghh!. Apparently it works fine as coded...
-
- # name of dll/exe file
- ld_args.extend([',',output_filename])
- # no map file and start libraries
- ld_args.append(',,')
-
- for lib in libraries:
- # see if we find it and if there is a bcpp specific lib
- # (xxx_bcpp.lib)
- libfile = self.find_library_file(library_dirs, lib, debug)
- if libfile is None:
- ld_args.append(lib)
- # probably a BCPP internal library -- don't warn
- else:
- # full name which prefers bcpp_xxx.lib over xxx.lib
- ld_args.append(libfile)
-
- # some default libraries
- ld_args.append ('import32')
- ld_args.append ('cw32mt')
-
- # def file for export symbols
- ld_args.extend([',',def_file])
- # add resource files
- ld_args.append(',')
- ld_args.extend(resources)
-
-
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
-
- self.mkpath (os.path.dirname (output_filename))
- try:
- self.spawn ([self.linker] + ld_args)
- except DistutilsExecError, msg:
- raise LinkError, msg
-
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- # link ()
-
- # -- Miscellaneous methods -----------------------------------------
-
-
- def find_library_file (self, dirs, lib, debug=0):
- # List of effective library names to try, in order of preference:
- # xxx_bcpp.lib is better than xxx.lib
- # and xxx_d.lib is better than xxx.lib if debug is set
- #
- # The "_bcpp" suffix is to handle a Python installation for people
- # with multiple compilers (primarily Distutils hackers, I suspect
- # ;-). The idea is they'd have one static library for each
- # compiler they care about, since (almost?) every Windows compiler
- # seems to have a different format for static libraries.
- if debug:
- dlib = (lib + "_d")
- try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
- else:
- try_names = (lib + "_bcpp", lib)
-
- for dir in dirs:
- for name in try_names:
- libfile = os.path.join(dir, self.library_filename(name))
- if os.path.exists(libfile):
- return libfile
- else:
- # Oops, didn't find it in *any* of 'dirs'
- return None
-
- # overwrite the one from CCompiler to support rc and res-files
- def object_filenames (self,
- source_filenames,
- strip_dir=0,
- output_dir=''):
- if output_dir is None: output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- # use normcase to make sure '.rc' is really '.rc' and not '.RC'
- (base, ext) = os.path.splitext (os.path.normcase(src_name))
- if ext not in (self.src_extensions + ['.rc','.res']):
- raise UnknownFileError, \
- "unknown file type '%s' (from '%s')" % \
- (ext, src_name)
- if strip_dir:
- base = os.path.basename (base)
- if ext == '.res':
- # these can go unchanged
- obj_names.append (os.path.join (output_dir, base + ext))
- elif ext == '.rc':
- # these need to be compiled to .res-files
- obj_names.append (os.path.join (output_dir, base + '.res'))
- else:
- obj_names.append (os.path.join (output_dir,
- base + self.obj_extension))
- return obj_names
-
- # object_filenames ()
-
- def preprocess (self,
- source,
- output_file=None,
- macros=None,
- include_dirs=None,
- extra_preargs=None,
- extra_postargs=None):
-
- (_, macros, include_dirs) = \
- self._fix_compile_args(None, macros, include_dirs)
- pp_opts = gen_preprocess_options(macros, include_dirs)
- pp_args = ['cpp32.exe'] + pp_opts
- if output_file is not None:
- pp_args.append('-o' + output_file)
- if extra_preargs:
- pp_args[:0] = extra_preargs
- if extra_postargs:
- pp_args.extend(extra_postargs)
- pp_args.append(source)
-
- # We need to preprocess: either we're being forced to, or the
- # source file is newer than the target (or the target doesn't
- # exist).
- if self.force or output_file is None or newer(source, output_file):
- if output_file:
- self.mkpath(os.path.dirname(output_file))
- try:
- self.spawn(pp_args)
- except DistutilsExecError, msg:
- print msg
- raise CompileError, msg
-
- # preprocess()
diff --git a/sys/lib/python/distutils/ccompiler.py b/sys/lib/python/distutils/ccompiler.py
deleted file mode 100644
index 23d2fbd3e..000000000
--- a/sys/lib/python/distutils/ccompiler.py
+++ /dev/null
@@ -1,1268 +0,0 @@
-"""distutils.ccompiler
-
-Contains CCompiler, an abstract base class that defines the interface
-for the Distutils compiler abstraction model."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: ccompiler.py 46331 2006-05-26 14:07:23Z bob.ippolito $"
-
-import sys, os, re
-from types import *
-from copy import copy
-from distutils.errors import *
-from distutils.spawn import spawn
-from distutils.file_util import move_file
-from distutils.dir_util import mkpath
-from distutils.dep_util import newer_pairwise, newer_group
-from distutils.util import split_quoted, execute
-from distutils import log
-
-class CCompiler:
- """Abstract base class to define the interface that must be implemented
- by real compiler classes. Also has some utility methods used by
- several compiler classes.
-
- The basic idea behind a compiler abstraction class is that each
- instance can be used for all the compile/link steps in building a
- single project. Thus, attributes common to all of those compile and
- link steps -- include directories, macros to define, libraries to link
- against, etc. -- are attributes of the compiler instance. To allow for
- variability in how individual files are treated, most of those
- attributes may be varied on a per-compilation or per-link basis.
- """
-
- # 'compiler_type' is a class attribute that identifies this class. It
- # keeps code that wants to know what kind of compiler it's dealing with
- # from having to import all possible compiler classes just to do an
- # 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
- # should really, really be one of the keys of the 'compiler_class'
- # dictionary (see below -- used by the 'new_compiler()' factory
- # function) -- authors of new compiler interface classes are
- # responsible for updating 'compiler_class'!
- compiler_type = None
-
- # XXX things not handled by this compiler abstraction model:
- # * client can't provide additional options for a compiler,
- # e.g. warning, optimization, debugging flags. Perhaps this
- # should be the domain of concrete compiler abstraction classes
- # (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
- # class should have methods for the common ones.
- # * can't completely override the include or library searchg
- # path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
- # I'm not sure how widely supported this is even by Unix
- # compilers, much less on other platforms. And I'm even less
- # sure how useful it is; maybe for cross-compiling, but
- # support for that is a ways off. (And anyways, cross
- # compilers probably have a dedicated binary with the
- # right paths compiled in. I hope.)
- # * can't do really freaky things with the library list/library
- # dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
- # different versions of libfoo.a in different locations. I
- # think this is useless without the ability to null out the
- # library search path anyways.
-
-
- # Subclasses that rely on the standard filename generation methods
- # implemented below should override these; see the comment near
- # those methods ('object_filenames()' et. al.) for details:
- src_extensions = None # list of strings
- obj_extension = None # string
- static_lib_extension = None
- shared_lib_extension = None # string
- static_lib_format = None # format string
- shared_lib_format = None # prob. same as static_lib_format
- exe_extension = None # string
-
- # Default language settings. language_map is used to detect a source
- # file or Extension target language, checking source filenames.
- # language_order is used to detect the language precedence, when deciding
- # what language to use when mixing source types. For example, if some
- # extension has two files with ".c" extension, and one with ".cpp", it
- # is still linked as c++.
- language_map = {".c" : "c",
- ".cc" : "c++",
- ".cpp" : "c++",
- ".cxx" : "c++",
- ".m" : "objc",
- }
- language_order = ["c++", "objc", "c"]
-
- def __init__ (self,
- verbose=0,
- dry_run=0,
- force=0):
-
- self.dry_run = dry_run
- self.force = force
- self.verbose = verbose
-
- # 'output_dir': a common output directory for object, library,
- # shared object, and shared library files
- self.output_dir = None
-
- # 'macros': a list of macro definitions (or undefinitions). A
- # macro definition is a 2-tuple (name, value), where the value is
- # either a string or None (no explicit value). A macro
- # undefinition is a 1-tuple (name,).
- self.macros = []
-
- # 'include_dirs': a list of directories to search for include files
- self.include_dirs = []
-
- # 'libraries': a list of libraries to include in any link
- # (library names, not filenames: eg. "foo" not "libfoo.a")
- self.libraries = []
-
- # 'library_dirs': a list of directories to search for libraries
- self.library_dirs = []
-
- # 'runtime_library_dirs': a list of directories to search for
- # shared libraries/objects at runtime
- self.runtime_library_dirs = []
-
- # 'objects': a list of object files (or similar, such as explicitly
- # named library files) to include on any link
- self.objects = []
-
- for key in self.executables.keys():
- self.set_executable(key, self.executables[key])
-
- # __init__ ()
-
-
- def set_executables (self, **args):
-
- """Define the executables (and options for them) that will be run
- to perform the various stages of compilation. The exact set of
- executables that may be specified here depends on the compiler
- class (via the 'executables' class attribute), but most will have:
- compiler the C/C++ compiler
- linker_so linker used to create shared objects and libraries
- linker_exe linker used to create binary executables
- archiver static library creator
-
- On platforms with a command-line (Unix, DOS/Windows), each of these
- is a string that will be split into executable name and (optional)
- list of arguments. (Splitting the string is done similarly to how
- Unix shells operate: words are delimited by spaces, but quotes and
- backslashes can override this. See
- 'distutils.util.split_quoted()'.)
- """
-
- # Note that some CCompiler implementation classes will define class
- # attributes 'cpp', 'cc', etc. with hard-coded executable names;
- # this is appropriate when a compiler class is for exactly one
- # compiler/OS combination (eg. MSVCCompiler). Other compiler
- # classes (UnixCCompiler, in particular) are driven by information
- # discovered at run-time, since there are many different ways to do
- # basically the same things with Unix C compilers.
-
- for key in args.keys():
- if not self.executables.has_key(key):
- raise ValueError, \
- "unknown executable '%s' for class %s" % \
- (key, self.__class__.__name__)
- self.set_executable(key, args[key])
-
- # set_executables ()
-
- def set_executable(self, key, value):
- if type(value) is StringType:
- setattr(self, key, split_quoted(value))
- else:
- setattr(self, key, value)
-
-
- def _find_macro (self, name):
- i = 0
- for defn in self.macros:
- if defn[0] == name:
- return i
- i = i + 1
-
- return None
-
-
- def _check_macro_definitions (self, definitions):
- """Ensures that every element of 'definitions' is a valid macro
- definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do
- nothing if all definitions are OK, raise TypeError otherwise.
- """
- for defn in definitions:
- if not (type (defn) is TupleType and
- (len (defn) == 1 or
- (len (defn) == 2 and
- (type (defn[1]) is StringType or defn[1] is None))) and
- type (defn[0]) is StringType):
- raise TypeError, \
- ("invalid macro definition '%s': " % defn) + \
- "must be tuple (string,), (string, string), or " + \
- "(string, None)"
-
-
- # -- Bookkeeping methods -------------------------------------------
-
- def define_macro (self, name, value=None):
- """Define a preprocessor macro for all compilations driven by this
- compiler object. The optional parameter 'value' should be a
- string; if it is not supplied, then the macro will be defined
- without an explicit value and the exact outcome depends on the
- compiler used (XXX true? does ANSI say anything about this?)
- """
- # Delete from the list of macro definitions/undefinitions if
- # already there (so that this one will take precedence).
- i = self._find_macro (name)
- if i is not None:
- del self.macros[i]
-
- defn = (name, value)
- self.macros.append (defn)
-
-
- def undefine_macro (self, name):
- """Undefine a preprocessor macro for all compilations driven by
- this compiler object. If the same macro is defined by
- 'define_macro()' and undefined by 'undefine_macro()' the last call
- takes precedence (including multiple redefinitions or
- undefinitions). If the macro is redefined/undefined on a
- per-compilation basis (ie. in the call to 'compile()'), then that
- takes precedence.
- """
- # Delete from the list of macro definitions/undefinitions if
- # already there (so that this one will take precedence).
- i = self._find_macro (name)
- if i is not None:
- del self.macros[i]
-
- undefn = (name,)
- self.macros.append (undefn)
-
-
- def add_include_dir (self, dir):
- """Add 'dir' to the list of directories that will be searched for
- header files. The compiler is instructed to search directories in
- the order in which they are supplied by successive calls to
- 'add_include_dir()'.
- """
- self.include_dirs.append (dir)
-
- def set_include_dirs (self, dirs):
- """Set the list of directories that will be searched to 'dirs' (a
- list of strings). Overrides any preceding calls to
- 'add_include_dir()'; subsequence calls to 'add_include_dir()' add
- to the list passed to 'set_include_dirs()'. This does not affect
- any list of standard include directories that the compiler may
- search by default.
- """
- self.include_dirs = copy (dirs)
-
-
- def add_library (self, libname):
- """Add 'libname' to the list of libraries that will be included in
- all links driven by this compiler object. Note that 'libname'
- should *not* be the name of a file containing a library, but the
- name of the library itself: the actual filename will be inferred by
- the linker, the compiler, or the compiler class (depending on the
- platform).
-
- The linker will be instructed to link against libraries in the
- order they were supplied to 'add_library()' and/or
- 'set_libraries()'. It is perfectly valid to duplicate library
- names; the linker will be instructed to link against libraries as
- many times as they are mentioned.
- """
- self.libraries.append (libname)
-
- def set_libraries (self, libnames):
- """Set the list of libraries to be included in all links driven by
- this compiler object to 'libnames' (a list of strings). This does
- not affect any standard system libraries that the linker may
- include by default.
- """
- self.libraries = copy (libnames)
-
-
- def add_library_dir (self, dir):
- """Add 'dir' to the list of directories that will be searched for
- libraries specified to 'add_library()' and 'set_libraries()'. The
- linker will be instructed to search for libraries in the order they
- are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
- """
- self.library_dirs.append (dir)
-
- def set_library_dirs (self, dirs):
- """Set the list of library search directories to 'dirs' (a list of
- strings). This does not affect any standard library search path
- that the linker may search by default.
- """
- self.library_dirs = copy (dirs)
-
-
- def add_runtime_library_dir (self, dir):
- """Add 'dir' to the list of directories that will be searched for
- shared libraries at runtime.
- """
- self.runtime_library_dirs.append (dir)
-
- def set_runtime_library_dirs (self, dirs):
- """Set the list of directories to search for shared libraries at
- runtime to 'dirs' (a list of strings). This does not affect any
- standard search path that the runtime linker may search by
- default.
- """
- self.runtime_library_dirs = copy (dirs)
-
-
- def add_link_object (self, object):
- """Add 'object' to the list of object files (or analogues, such as
- explicitly named library files or the output of "resource
- compilers") to be included in every link driven by this compiler
- object.
- """
- self.objects.append (object)
-
- def set_link_objects (self, objects):
- """Set the list of object files (or analogues) to be included in
- every link to 'objects'. This does not affect any standard object
- files that the linker may include by default (such as system
- libraries).
- """
- self.objects = copy (objects)
-
-
- # -- Private utility methods --------------------------------------
- # (here for the convenience of subclasses)
-
- # Helper method to prep compiler in subclass compile() methods
-
- def _setup_compile(self, outdir, macros, incdirs, sources, depends,
- extra):
- """Process arguments and decide which source files to compile.
-
- Merges _fix_compile_args() and _prep_compile().
- """
- if outdir is None:
- outdir = self.output_dir
- elif type(outdir) is not StringType:
- raise TypeError, "'output_dir' must be a string or None"
-
- if macros is None:
- macros = self.macros
- elif type(macros) is ListType:
- macros = macros + (self.macros or [])
- else:
- raise TypeError, "'macros' (if supplied) must be a list of tuples"
-
- if incdirs is None:
- incdirs = self.include_dirs
- elif type(incdirs) in (ListType, TupleType):
- incdirs = list(incdirs) + (self.include_dirs or [])
- else:
- raise TypeError, \
- "'include_dirs' (if supplied) must be a list of strings"
-
- if extra is None:
- extra = []
-
- # Get the list of expected output (object) files
- objects = self.object_filenames(sources,
- strip_dir=0,
- output_dir=outdir)
- assert len(objects) == len(sources)
-
- # XXX should redo this code to eliminate skip_source entirely.
- # XXX instead create build and issue skip messages inline
-
- if self.force:
- skip_source = {} # rebuild everything
- for source in sources:
- skip_source[source] = 0
- elif depends is None:
- # If depends is None, figure out which source files we
- # have to recompile according to a simplistic check. We
- # just compare the source and object file, no deep
- # dependency checking involving header files.
- skip_source = {} # rebuild everything
- for source in sources: # no wait, rebuild nothing
- skip_source[source] = 1
-
- n_sources, n_objects = newer_pairwise(sources, objects)
- for source in n_sources: # no really, only rebuild what's
- skip_source[source] = 0 # out-of-date
- else:
- # If depends is a list of files, then do a different
- # simplistic check. Assume that each object depends on
- # its source and all files in the depends list.
- skip_source = {}
- # L contains all the depends plus a spot at the end for a
- # particular source file
- L = depends[:] + [None]
- for i in range(len(objects)):
- source = sources[i]
- L[-1] = source
- if newer_group(L, objects[i]):
- skip_source[source] = 0
- else:
- skip_source[source] = 1
-
- pp_opts = gen_preprocess_options(macros, incdirs)
-
- build = {}
- for i in range(len(sources)):
- src = sources[i]
- obj = objects[i]
- ext = os.path.splitext(src)[1]
- self.mkpath(os.path.dirname(obj))
- if skip_source[src]:
- log.debug("skipping %s (%s up-to-date)", src, obj)
- else:
- build[obj] = src, ext
-
- return macros, objects, extra, pp_opts, build
-
- def _get_cc_args(self, pp_opts, debug, before):
- # works for unixccompiler, emxccompiler, cygwinccompiler
- cc_args = pp_opts + ['-c']
- if debug:
- cc_args[:0] = ['-g']
- if before:
- cc_args[:0] = before
- return cc_args
-
- def _fix_compile_args (self, output_dir, macros, include_dirs):
- """Typecheck and fix-up some of the arguments to the 'compile()'
- method, and return fixed-up values. Specifically: if 'output_dir'
- is None, replaces it with 'self.output_dir'; ensures that 'macros'
- is a list, and augments it with 'self.macros'; ensures that
- 'include_dirs' is a list, and augments it with 'self.include_dirs'.
- Guarantees that the returned values are of the correct type,
- i.e. for 'output_dir' either string or None, and for 'macros' and
- 'include_dirs' either list or None.
- """
- if output_dir is None:
- output_dir = self.output_dir
- elif type (output_dir) is not StringType:
- raise TypeError, "'output_dir' must be a string or None"
-
- if macros is None:
- macros = self.macros
- elif type (macros) is ListType:
- macros = macros + (self.macros or [])
- else:
- raise TypeError, "'macros' (if supplied) must be a list of tuples"
-
- if include_dirs is None:
- include_dirs = self.include_dirs
- elif type (include_dirs) in (ListType, TupleType):
- include_dirs = list (include_dirs) + (self.include_dirs or [])
- else:
- raise TypeError, \
- "'include_dirs' (if supplied) must be a list of strings"
-
- return output_dir, macros, include_dirs
-
- # _fix_compile_args ()
-
-
- def _prep_compile(self, sources, output_dir, depends=None):
- """Decide which souce files must be recompiled.
-
- Determine the list of object files corresponding to 'sources',
- and figure out which ones really need to be recompiled.
- Return a list of all object files and a dictionary telling
- which source files can be skipped.
- """
- # Get the list of expected output (object) files
- objects = self.object_filenames(sources, output_dir=output_dir)
- assert len(objects) == len(sources)
-
- if self.force:
- skip_source = {} # rebuild everything
- for source in sources:
- skip_source[source] = 0
- elif depends is None:
- # If depends is None, figure out which source files we
- # have to recompile according to a simplistic check. We
- # just compare the source and object file, no deep
- # dependency checking involving header files.
- skip_source = {} # rebuild everything
- for source in sources: # no wait, rebuild nothing
- skip_source[source] = 1
-
- n_sources, n_objects = newer_pairwise(sources, objects)
- for source in n_sources: # no really, only rebuild what's
- skip_source[source] = 0 # out-of-date
- else:
- # If depends is a list of files, then do a different
- # simplistic check. Assume that each object depends on
- # its source and all files in the depends list.
- skip_source = {}
- # L contains all the depends plus a spot at the end for a
- # particular source file
- L = depends[:] + [None]
- for i in range(len(objects)):
- source = sources[i]
- L[-1] = source
- if newer_group(L, objects[i]):
- skip_source[source] = 0
- else:
- skip_source[source] = 1
-
- return objects, skip_source
-
- # _prep_compile ()
-
-
- def _fix_object_args (self, objects, output_dir):
- """Typecheck and fix up some arguments supplied to various methods.
- Specifically: ensure that 'objects' is a list; if output_dir is
- None, replace with self.output_dir. Return fixed versions of
- 'objects' and 'output_dir'.
- """
- if type (objects) not in (ListType, TupleType):
- raise TypeError, \
- "'objects' must be a list or tuple of strings"
- objects = list (objects)
-
- if output_dir is None:
- output_dir = self.output_dir
- elif type (output_dir) is not StringType:
- raise TypeError, "'output_dir' must be a string or None"
-
- return (objects, output_dir)
-
-
- def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
- """Typecheck and fix up some of the arguments supplied to the
- 'link_*' methods. Specifically: ensure that all arguments are
- lists, and augment them with their permanent versions
- (eg. 'self.libraries' augments 'libraries'). Return a tuple with
- fixed versions of all arguments.
- """
- if libraries is None:
- libraries = self.libraries
- elif type (libraries) in (ListType, TupleType):
- libraries = list (libraries) + (self.libraries or [])
- else:
- raise TypeError, \
- "'libraries' (if supplied) must be a list of strings"
-
- if library_dirs is None:
- library_dirs = self.library_dirs
- elif type (library_dirs) in (ListType, TupleType):
- library_dirs = list (library_dirs) + (self.library_dirs or [])
- else:
- raise TypeError, \
- "'library_dirs' (if supplied) must be a list of strings"
-
- if runtime_library_dirs is None:
- runtime_library_dirs = self.runtime_library_dirs
- elif type (runtime_library_dirs) in (ListType, TupleType):
- runtime_library_dirs = (list (runtime_library_dirs) +
- (self.runtime_library_dirs or []))
- else:
- raise TypeError, \
- "'runtime_library_dirs' (if supplied) " + \
- "must be a list of strings"
-
- return (libraries, library_dirs, runtime_library_dirs)
-
- # _fix_lib_args ()
-
-
- def _need_link (self, objects, output_file):
- """Return true if we need to relink the files listed in 'objects'
- to recreate 'output_file'.
- """
- if self.force:
- return 1
- else:
- if self.dry_run:
- newer = newer_group (objects, output_file, missing='newer')
- else:
- newer = newer_group (objects, output_file)
- return newer
-
- # _need_link ()
-
- def detect_language (self, sources):
- """Detect the language of a given file, or list of files. Uses
- language_map, and language_order to do the job.
- """
- if type(sources) is not ListType:
- sources = [sources]
- lang = None
- index = len(self.language_order)
- for source in sources:
- base, ext = os.path.splitext(source)
- extlang = self.language_map.get(ext)
- try:
- extindex = self.language_order.index(extlang)
- if extindex < index:
- lang = extlang
- index = extindex
- except ValueError:
- pass
- return lang
-
- # detect_language ()
-
- # -- Worker methods ------------------------------------------------
- # (must be implemented by subclasses)
-
- def preprocess (self,
- source,
- output_file=None,
- macros=None,
- include_dirs=None,
- extra_preargs=None,
- extra_postargs=None):
- """Preprocess a single C/C++ source file, named in 'source'.
- Output will be written to file named 'output_file', or stdout if
- 'output_file' not supplied. 'macros' is a list of macro
- definitions as for 'compile()', which will augment the macros set
- with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
- list of directory names that will be added to the default list.
-
- Raises PreprocessError on failure.
- """
- pass
-
- def compile(self, sources, output_dir=None, macros=None,
- include_dirs=None, debug=0, extra_preargs=None,
- extra_postargs=None, depends=None):
- """Compile one or more source files.
-
- 'sources' must be a list of filenames, most likely C/C++
- files, but in reality anything that can be handled by a
- particular compiler and compiler class (eg. MSVCCompiler can
- handle resource files in 'sources'). Return a list of object
- filenames, one per source filename in 'sources'. Depending on
- the implementation, not all source files will necessarily be
- compiled, but all corresponding object filenames will be
- returned.
-
- If 'output_dir' is given, object files will be put under it, while
- retaining their original path component. That is, "foo/bar.c"
- normally compiles to "foo/bar.o" (for a Unix implementation); if
- 'output_dir' is "build", then it would compile to
- "build/foo/bar.o".
-
- 'macros', if given, must be a list of macro definitions. A macro
- definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
- The former defines a macro; if the value is None, the macro is
- defined without an explicit value. The 1-tuple case undefines a
- macro. Later definitions/redefinitions/ undefinitions take
- precedence.
-
- 'include_dirs', if given, must be a list of strings, the
- directories to add to the default include file search path for this
- compilation only.
-
- 'debug' is a boolean; if true, the compiler will be instructed to
- output debug symbols in (or alongside) the object file(s).
-
- 'extra_preargs' and 'extra_postargs' are implementation- dependent.
- On platforms that have the notion of a command-line (e.g. Unix,
- DOS/Windows), they are most likely lists of strings: extra
- command-line arguments to prepand/append to the compiler command
- line. On other platforms, consult the implementation class
- documentation. In any event, they are intended as an escape hatch
- for those occasions when the abstract compiler framework doesn't
- cut the mustard.
-
- 'depends', if given, is a list of filenames that all targets
- depend on. If a source file is older than any file in
- depends, then the source file will be recompiled. This
- supports dependency tracking, but only at a coarse
- granularity.
-
- Raises CompileError on failure.
- """
-
- # A concrete compiler class can either override this method
- # entirely or implement _compile().
-
- macros, objects, extra_postargs, pp_opts, build = \
- self._setup_compile(output_dir, macros, include_dirs, sources,
- depends, extra_postargs)
- cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
-
- for obj in objects:
- try:
- src, ext = build[obj]
- except KeyError:
- continue
- self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
-
- # Return *all* object filenames, not just the ones we just built.
- return objects
-
- def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
- """Compile 'src' to product 'obj'."""
-
- # A concrete compiler class that does not override compile()
- # should implement _compile().
- pass
-
- def create_static_lib (self,
- objects,
- output_libname,
- output_dir=None,
- debug=0,
- target_lang=None):
- """Link a bunch of stuff together to create a static library file.
- The "bunch of stuff" consists of the list of object files supplied
- as 'objects', the extra object files supplied to
- 'add_link_object()' and/or 'set_link_objects()', the libraries
- supplied to 'add_library()' and/or 'set_libraries()', and the
- libraries supplied as 'libraries' (if any).
-
- 'output_libname' should be a library name, not a filename; the
- filename will be inferred from the library name. 'output_dir' is
- the directory where the library file will be put.
-
- 'debug' is a boolean; if true, debugging information will be
- included in the library (note that on most platforms, it is the
- compile step where this matters: the 'debug' flag is included here
- just for consistency).
-
- 'target_lang' is the target language for which the given objects
- are being compiled. This allows specific linkage time treatment of
- certain languages.
-
- Raises LibError on failure.
- """
- pass
-
-
- # values for target_desc parameter in link()
- SHARED_OBJECT = "shared_object"
- SHARED_LIBRARY = "shared_library"
- EXECUTABLE = "executable"
-
- def link (self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
- """Link a bunch of stuff together to create an executable or
- shared library file.
-
- The "bunch of stuff" consists of the list of object files supplied
- as 'objects'. 'output_filename' should be a filename. If
- 'output_dir' is supplied, 'output_filename' is relative to it
- (i.e. 'output_filename' can provide directory components if
- needed).
-
- 'libraries' is a list of libraries to link against. These are
- library names, not filenames, since they're translated into
- filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
- on Unix and "foo.lib" on DOS/Windows). However, they can include a
- directory component, which means the linker will look in that
- specific directory rather than searching all the normal locations.
-
- 'library_dirs', if supplied, should be a list of directories to
- search for libraries that were specified as bare library names
- (ie. no directory component). These are on top of the system
- default and those supplied to 'add_library_dir()' and/or
- 'set_library_dirs()'. 'runtime_library_dirs' is a list of
- directories that will be embedded into the shared library and used
- to search for other shared libraries that *it* depends on at
- run-time. (This may only be relevant on Unix.)
-
- 'export_symbols' is a list of symbols that the shared library will
- export. (This appears to be relevant only on Windows.)
-
- 'debug' is as for 'compile()' and 'create_static_lib()', with the
- slight distinction that it actually matters on most platforms (as
- opposed to 'create_static_lib()', which includes a 'debug' flag
- mostly for form's sake).
-
- 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
- of course that they supply command-line arguments for the
- particular linker being used).
-
- 'target_lang' is the target language for which the given objects
- are being compiled. This allows specific linkage time treatment of
- certain languages.
-
- Raises LinkError on failure.
- """
- raise NotImplementedError
-
-
- # Old 'link_*()' methods, rewritten to use the new 'link()' method.
-
- def link_shared_lib (self,
- objects,
- output_libname,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
- self.link(CCompiler.SHARED_LIBRARY, objects,
- self.library_filename(output_libname, lib_type='shared'),
- output_dir,
- libraries, library_dirs, runtime_library_dirs,
- export_symbols, debug,
- extra_preargs, extra_postargs, build_temp, target_lang)
-
-
- def link_shared_object (self,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
- self.link(CCompiler.SHARED_OBJECT, objects,
- output_filename, output_dir,
- libraries, library_dirs, runtime_library_dirs,
- export_symbols, debug,
- extra_preargs, extra_postargs, build_temp, target_lang)
-
-
- def link_executable (self,
- objects,
- output_progname,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- target_lang=None):
- self.link(CCompiler.EXECUTABLE, objects,
- self.executable_filename(output_progname), output_dir,
- libraries, library_dirs, runtime_library_dirs, None,
- debug, extra_preargs, extra_postargs, None, target_lang)
-
-
- # -- Miscellaneous methods -----------------------------------------
- # These are all used by the 'gen_lib_options() function; there is
- # no appropriate default implementation so subclasses should
- # implement all of these.
-
- def library_dir_option (self, dir):
- """Return the compiler option to add 'dir' to the list of
- directories searched for libraries.
- """
- raise NotImplementedError
-
- def runtime_library_dir_option (self, dir):
- """Return the compiler option to add 'dir' to the list of
- directories searched for runtime libraries.
- """
- raise NotImplementedError
-
- def library_option (self, lib):
- """Return the compiler option to add 'dir' to the list of libraries
- linked into the shared library or executable.
- """
- raise NotImplementedError
-
- def has_function(self, funcname,
- includes=None,
- include_dirs=None,
- libraries=None,
- library_dirs=None):
- """Return a boolean indicating whether funcname is supported on
- the current platform. The optional arguments can be used to
- augment the compilation environment.
- """
-
- # this can't be included at module scope because it tries to
- # import math which might not be available at that point - maybe
- # the necessary logic should just be inlined?
- import tempfile
- if includes is None:
- includes = []
- if include_dirs is None:
- include_dirs = []
- if libraries is None:
- libraries = []
- if library_dirs is None:
- library_dirs = []
- fd, fname = tempfile.mkstemp(".c", funcname, text=True)
- f = os.fdopen(fd, "w")
- for incl in includes:
- f.write("""#include "%s"\n""" % incl)
- f.write("""\
-main (int argc, char **argv) {
- %s();
-}
-""" % funcname)
- f.close()
- try:
- objects = self.compile([fname], include_dirs=include_dirs)
- except CompileError:
- return False
-
- try:
- self.link_executable(objects, "a.out",
- libraries=libraries,
- library_dirs=library_dirs)
- except (LinkError, TypeError):
- return False
- return True
-
- def find_library_file (self, dirs, lib, debug=0):
- """Search the specified list of directories for a static or shared
- library file 'lib' and return the full path to that file. If
- 'debug' true, look for a debugging version (if that makes sense on
- the current platform). Return None if 'lib' wasn't found in any of
- the specified directories.
- """
- raise NotImplementedError
-
- # -- Filename generation methods -----------------------------------
-
- # The default implementation of the filename generating methods are
- # prejudiced towards the Unix/DOS/Windows view of the world:
- # * object files are named by replacing the source file extension
- # (eg. .c/.cpp -> .o/.obj)
- # * library files (shared or static) are named by plugging the
- # library name and extension into a format string, eg.
- # "lib%s.%s" % (lib_name, ".a") for Unix static libraries
- # * executables are named by appending an extension (possibly
- # empty) to the program name: eg. progname + ".exe" for
- # Windows
- #
- # To reduce redundant code, these methods expect to find
- # several attributes in the current object (presumably defined
- # as class attributes):
- # * src_extensions -
- # list of C/C++ source file extensions, eg. ['.c', '.cpp']
- # * obj_extension -
- # object file extension, eg. '.o' or '.obj'
- # * static_lib_extension -
- # extension for static library files, eg. '.a' or '.lib'
- # * shared_lib_extension -
- # extension for shared library/object files, eg. '.so', '.dll'
- # * static_lib_format -
- # format string for generating static library filenames,
- # eg. 'lib%s.%s' or '%s.%s'
- # * shared_lib_format
- # format string for generating shared library filenames
- # (probably same as static_lib_format, since the extension
- # is one of the intended parameters to the format string)
- # * exe_extension -
- # extension for executable files, eg. '' or '.exe'
-
- def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
- if output_dir is None:
- output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- base, ext = os.path.splitext(src_name)
- base = os.path.splitdrive(base)[1] # Chop off the drive
- base = base[os.path.isabs(base):] # If abs, chop off leading /
- if ext not in self.src_extensions:
- raise UnknownFileError, \
- "unknown file type '%s' (from '%s')" % (ext, src_name)
- if strip_dir:
- base = os.path.basename(base)
- obj_names.append(os.path.join(output_dir,
- base + self.obj_extension))
- return obj_names
-
- def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
- assert output_dir is not None
- if strip_dir:
- basename = os.path.basename (basename)
- return os.path.join(output_dir, basename + self.shared_lib_extension)
-
- def executable_filename(self, basename, strip_dir=0, output_dir=''):
- assert output_dir is not None
- if strip_dir:
- basename = os.path.basename (basename)
- return os.path.join(output_dir, basename + (self.exe_extension or ''))
-
- def library_filename(self, libname, lib_type='static', # or 'shared'
- strip_dir=0, output_dir=''):
- assert output_dir is not None
- if lib_type not in ("static", "shared", "dylib"):
- raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
- fmt = getattr(self, lib_type + "_lib_format")
- ext = getattr(self, lib_type + "_lib_extension")
-
- dir, base = os.path.split (libname)
- filename = fmt % (base, ext)
- if strip_dir:
- dir = ''
-
- return os.path.join(output_dir, dir, filename)
-
-
- # -- Utility methods -----------------------------------------------
-
- def announce (self, msg, level=1):
- log.debug(msg)
-
- def debug_print (self, msg):
- from distutils.debug import DEBUG
- if DEBUG:
- print msg
-
- def warn (self, msg):
- sys.stderr.write ("warning: %s\n" % msg)
-
- def execute (self, func, args, msg=None, level=1):
- execute(func, args, msg, self.dry_run)
-
- def spawn (self, cmd):
- spawn (cmd, dry_run=self.dry_run)
-
- def move_file (self, src, dst):
- return move_file (src, dst, dry_run=self.dry_run)
-
- def mkpath (self, name, mode=0777):
- mkpath (name, mode, self.dry_run)
-
-
-# class CCompiler
-
-
-# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
-# type for that platform. Keys are interpreted as re match
-# patterns. Order is important; platform mappings are preferred over
-# OS names.
-_default_compilers = (
-
- # Platform string mappings
-
- # on a cygwin built python we can use gcc like an ordinary UNIXish
- # compiler
- ('cygwin.*', 'unix'),
- ('os2emx', 'emx'),
-
- # OS name mappings
- ('posix', 'unix'),
- ('nt', 'msvc'),
- ('mac', 'mwerks'),
-
- )
-
-def get_default_compiler(osname=None, platform=None):
-
- """ Determine the default compiler to use for the given platform.
-
- osname should be one of the standard Python OS names (i.e. the
- ones returned by os.name) and platform the common value
- returned by sys.platform for the platform in question.
-
- The default values are os.name and sys.platform in case the
- parameters are not given.
-
- """
- if osname is None:
- osname = os.name
- if platform is None:
- platform = sys.platform
- for pattern, compiler in _default_compilers:
- if re.match(pattern, platform) is not None or \
- re.match(pattern, osname) is not None:
- return compiler
- # Default to Unix compiler
- return 'unix'
-
-# Map compiler types to (module_name, class_name) pairs -- ie. where to
-# find the code that implements an interface to this compiler. (The module
-# is assumed to be in the 'distutils' package.)
-compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
- "standard UNIX-style compiler"),
- 'msvc': ('msvccompiler', 'MSVCCompiler',
- "Microsoft Visual C++"),
- 'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
- "Cygwin port of GNU C Compiler for Win32"),
- 'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
- "Mingw32 port of GNU C Compiler for Win32"),
- 'bcpp': ('bcppcompiler', 'BCPPCompiler',
- "Borland C++ Compiler"),
- 'mwerks': ('mwerkscompiler', 'MWerksCompiler',
- "MetroWerks CodeWarrior"),
- 'emx': ('emxccompiler', 'EMXCCompiler',
- "EMX port of GNU C Compiler for OS/2"),
- }
-
-def show_compilers():
- """Print list of available compilers (used by the "--help-compiler"
- options to "build", "build_ext", "build_clib").
- """
- # XXX this "knows" that the compiler option it's describing is
- # "--compiler", which just happens to be the case for the three
- # commands that use it.
- from distutils.fancy_getopt import FancyGetopt
- compilers = []
- for compiler in compiler_class.keys():
- compilers.append(("compiler="+compiler, None,
- compiler_class[compiler][2]))
- compilers.sort()
- pretty_printer = FancyGetopt(compilers)
- pretty_printer.print_help("List of available compilers:")
-
-
-def new_compiler (plat=None,
- compiler=None,
- verbose=0,
- dry_run=0,
- force=0):
- """Generate an instance of some CCompiler subclass for the supplied
- platform/compiler combination. 'plat' defaults to 'os.name'
- (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
- for that platform. Currently only 'posix' and 'nt' are supported, and
- the default compilers are "traditional Unix interface" (UnixCCompiler
- class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
- possible to ask for a Unix compiler object under Windows, and a
- Microsoft compiler object under Unix -- if you supply a value for
- 'compiler', 'plat' is ignored.
- """
- if plat is None:
- plat = os.name
-
- try:
- if compiler is None:
- compiler = get_default_compiler(plat)
-
- (module_name, class_name, long_description) = compiler_class[compiler]
- except KeyError:
- msg = "don't know how to compile C/C++ code on platform '%s'" % plat
- if compiler is not None:
- msg = msg + " with '%s' compiler" % compiler
- raise DistutilsPlatformError, msg
-
- try:
- module_name = "distutils." + module_name
- __import__ (module_name)
- module = sys.modules[module_name]
- klass = vars(module)[class_name]
- except ImportError:
- raise DistutilsModuleError, \
- "can't compile C/C++ code: unable to load module '%s'" % \
- module_name
- except KeyError:
- raise DistutilsModuleError, \
- ("can't compile C/C++ code: unable to find class '%s' " +
- "in module '%s'") % (class_name, module_name)
-
- # XXX The None is necessary to preserve backwards compatibility
- # with classes that expect verbose to be the first positional
- # argument.
- return klass (None, dry_run, force)
-
-
-def gen_preprocess_options (macros, include_dirs):
- """Generate C pre-processor options (-D, -U, -I) as used by at least
- two types of compilers: the typical Unix compiler and Visual C++.
- 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
- means undefine (-U) macro 'name', and (name,value) means define (-D)
- macro 'name' to 'value'. 'include_dirs' is just a list of directory
- names to be added to the header file search path (-I). Returns a list
- of command-line options suitable for either Unix compilers or Visual
- C++.
- """
- # XXX it would be nice (mainly aesthetic, and so we don't generate
- # stupid-looking command lines) to go over 'macros' and eliminate
- # redundant definitions/undefinitions (ie. ensure that only the
- # latest mention of a particular macro winds up on the command
- # line). I don't think it's essential, though, since most (all?)
- # Unix C compilers only pay attention to the latest -D or -U
- # mention of a macro on their command line. Similar situation for
- # 'include_dirs'. I'm punting on both for now. Anyways, weeding out
- # redundancies like this should probably be the province of
- # CCompiler, since the data structures used are inherited from it
- # and therefore common to all CCompiler classes.
-
- pp_opts = []
- for macro in macros:
-
- if not (type (macro) is TupleType and
- 1 <= len (macro) <= 2):
- raise TypeError, \
- ("bad macro definition '%s': " +
- "each element of 'macros' list must be a 1- or 2-tuple") % \
- macro
-
- if len (macro) == 1: # undefine this macro
- pp_opts.append ("-U%s" % macro[0])
- elif len (macro) == 2:
- if macro[1] is None: # define with no explicit value
- pp_opts.append ("-D%s" % macro[0])
- else:
- # XXX *don't* need to be clever about quoting the
- # macro value here, because we're going to avoid the
- # shell at all costs when we spawn the command!
- pp_opts.append ("-D%s=%s" % macro)
-
- for dir in include_dirs:
- pp_opts.append ("-I%s" % dir)
-
- return pp_opts
-
-# gen_preprocess_options ()
-
-
-def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
- """Generate linker options for searching library directories and
- linking with specific libraries. 'libraries' and 'library_dirs' are,
- respectively, lists of library names (not filenames!) and search
- directories. Returns a list of command-line options suitable for use
- with some compiler (depending on the two format strings passed in).
- """
- lib_opts = []
-
- for dir in library_dirs:
- lib_opts.append (compiler.library_dir_option (dir))
-
- for dir in runtime_library_dirs:
- opt = compiler.runtime_library_dir_option (dir)
- if type(opt) is ListType:
- lib_opts = lib_opts + opt
- else:
- lib_opts.append (opt)
-
- # XXX it's important that we *not* remove redundant library mentions!
- # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
- # resolve all symbols. I just hope we never have to say "-lfoo obj.o
- # -lbar" to get things to work -- that's certainly a possibility, but a
- # pretty nasty way to arrange your C code.
-
- for lib in libraries:
- (lib_dir, lib_name) = os.path.split (lib)
- if lib_dir:
- lib_file = compiler.find_library_file ([lib_dir], lib_name)
- if lib_file:
- lib_opts.append (lib_file)
- else:
- compiler.warn ("no library file corresponding to "
- "'%s' found (skipping)" % lib)
- else:
- lib_opts.append (compiler.library_option (lib))
-
- return lib_opts
-
-# gen_lib_options ()
diff --git a/sys/lib/python/distutils/cmd.py b/sys/lib/python/distutils/cmd.py
deleted file mode 100644
index 8b757502e..000000000
--- a/sys/lib/python/distutils/cmd.py
+++ /dev/null
@@ -1,478 +0,0 @@
-"""distutils.cmd
-
-Provides the Command class, the base class for the command classes
-in the distutils.command package.
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: cmd.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import sys, os, string, re
-from types import *
-from distutils.errors import *
-from distutils import util, dir_util, file_util, archive_util, dep_util
-from distutils import log
-
-class Command:
- """Abstract base class for defining command classes, the "worker bees"
- of the Distutils. A useful analogy for command classes is to think of
- them as subroutines with local variables called "options". The options
- are "declared" in 'initialize_options()' and "defined" (given their
- final values, aka "finalized") in 'finalize_options()', both of which
- must be defined by every command class. The distinction between the
- two is necessary because option values might come from the outside
- world (command line, config file, ...), and any options dependent on
- other options must be computed *after* these outside influences have
- been processed -- hence 'finalize_options()'. The "body" of the
- subroutine, where it does all its work based on the values of its
- options, is the 'run()' method, which must also be implemented by every
- command class.
- """
-
- # 'sub_commands' formalizes the notion of a "family" of commands,
- # eg. "install" as the parent with sub-commands "install_lib",
- # "install_headers", etc. The parent of a family of commands
- # defines 'sub_commands' as a class attribute; it's a list of
- # (command_name : string, predicate : unbound_method | string | None)
- # tuples, where 'predicate' is a method of the parent command that
- # determines whether the corresponding command is applicable in the
- # current situation. (Eg. we "install_headers" is only applicable if
- # we have any C header files to install.) If 'predicate' is None,
- # that command is always applicable.
- #
- # 'sub_commands' is usually defined at the *end* of a class, because
- # predicates can be unbound methods, so they must already have been
- # defined. The canonical example is the "install" command.
- sub_commands = []
-
-
- # -- Creation/initialization methods -------------------------------
-
- def __init__ (self, dist):
- """Create and initialize a new Command object. Most importantly,
- invokes the 'initialize_options()' method, which is the real
- initializer and depends on the actual command being
- instantiated.
- """
- # late import because of mutual dependence between these classes
- from distutils.dist import Distribution
-
- if not isinstance(dist, Distribution):
- raise TypeError, "dist must be a Distribution instance"
- if self.__class__ is Command:
- raise RuntimeError, "Command is an abstract class"
-
- self.distribution = dist
- self.initialize_options()
-
- # Per-command versions of the global flags, so that the user can
- # customize Distutils' behaviour command-by-command and let some
- # commands fall back on the Distribution's behaviour. None means
- # "not defined, check self.distribution's copy", while 0 or 1 mean
- # false and true (duh). Note that this means figuring out the real
- # value of each flag is a touch complicated -- hence "self._dry_run"
- # will be handled by __getattr__, below.
- # XXX This needs to be fixed.
- self._dry_run = None
-
- # verbose is largely ignored, but needs to be set for
- # backwards compatibility (I think)?
- self.verbose = dist.verbose
-
- # Some commands define a 'self.force' option to ignore file
- # timestamps, but methods defined *here* assume that
- # 'self.force' exists for all commands. So define it here
- # just to be safe.
- self.force = None
-
- # The 'help' flag is just used for command-line parsing, so
- # none of that complicated bureaucracy is needed.
- self.help = 0
-
- # 'finalized' records whether or not 'finalize_options()' has been
- # called. 'finalize_options()' itself should not pay attention to
- # this flag: it is the business of 'ensure_finalized()', which
- # always calls 'finalize_options()', to respect/update it.
- self.finalized = 0
-
- # __init__ ()
-
-
- # XXX A more explicit way to customize dry_run would be better.
-
- def __getattr__ (self, attr):
- if attr == 'dry_run':
- myval = getattr(self, "_" + attr)
- if myval is None:
- return getattr(self.distribution, attr)
- else:
- return myval
- else:
- raise AttributeError, attr
-
-
- def ensure_finalized (self):
- if not self.finalized:
- self.finalize_options()
- self.finalized = 1
-
-
- # Subclasses must define:
- # initialize_options()
- # provide default values for all options; may be customized by
- # setup script, by options from config file(s), or by command-line
- # options
- # finalize_options()
- # decide on the final values for all options; this is called
- # after all possible intervention from the outside world
- # (command-line, option file, etc.) has been processed
- # run()
- # run the command: do whatever it is we're here to do,
- # controlled by the command's various option values
-
- def initialize_options (self):
- """Set default values for all the options that this command
- supports. Note that these defaults may be overridden by other
- commands, by the setup script, by config files, or by the
- command-line. Thus, this is not the place to code dependencies
- between options; generally, 'initialize_options()' implementations
- are just a bunch of "self.foo = None" assignments.
-
- This method must be implemented by all command classes.
- """
- raise RuntimeError, \
- "abstract method -- subclass %s must override" % self.__class__
-
- def finalize_options (self):
- """Set final values for all the options that this command supports.
- This is always called as late as possible, ie. after any option
- assignments from the command-line or from other commands have been
- done. Thus, this is the place to code option dependencies: if
- 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
- long as 'foo' still has the same value it was assigned in
- 'initialize_options()'.
-
- This method must be implemented by all command classes.
- """
- raise RuntimeError, \
- "abstract method -- subclass %s must override" % self.__class__
-
-
- def dump_options (self, header=None, indent=""):
- from distutils.fancy_getopt import longopt_xlate
- if header is None:
- header = "command options for '%s':" % self.get_command_name()
- print indent + header
- indent = indent + " "
- for (option, _, _) in self.user_options:
- option = string.translate(option, longopt_xlate)
- if option[-1] == "=":
- option = option[:-1]
- value = getattr(self, option)
- print indent + "%s = %s" % (option, value)
-
-
- def run (self):
- """A command's raison d'etre: carry out the action it exists to
- perform, controlled by the options initialized in
- 'initialize_options()', customized by other commands, the setup
- script, the command-line, and config files, and finalized in
- 'finalize_options()'. All terminal output and filesystem
- interaction should be done by 'run()'.
-
- This method must be implemented by all command classes.
- """
-
- raise RuntimeError, \
- "abstract method -- subclass %s must override" % self.__class__
-
- def announce (self, msg, level=1):
- """If the current verbosity level is of greater than or equal to
- 'level' print 'msg' to stdout.
- """
- log.log(level, msg)
-
- def debug_print (self, msg):
- """Print 'msg' to stdout if the global DEBUG (taken from the
- DISTUTILS_DEBUG environment variable) flag is true.
- """
- from distutils.debug import DEBUG
- if DEBUG:
- print msg
- sys.stdout.flush()
-
-
-
- # -- Option validation methods -------------------------------------
- # (these are very handy in writing the 'finalize_options()' method)
- #
- # NB. the general philosophy here is to ensure that a particular option
- # value meets certain type and value constraints. If not, we try to
- # force it into conformance (eg. if we expect a list but have a string,
- # split the string on comma and/or whitespace). If we can't force the
- # option into conformance, raise DistutilsOptionError. Thus, command
- # classes need do nothing more than (eg.)
- # self.ensure_string_list('foo')
- # and they can be guaranteed that thereafter, self.foo will be
- # a list of strings.
-
- def _ensure_stringlike (self, option, what, default=None):
- val = getattr(self, option)
- if val is None:
- setattr(self, option, default)
- return default
- elif type(val) is not StringType:
- raise DistutilsOptionError, \
- "'%s' must be a %s (got `%s`)" % (option, what, val)
- return val
-
- def ensure_string (self, option, default=None):
- """Ensure that 'option' is a string; if not defined, set it to
- 'default'.
- """
- self._ensure_stringlike(option, "string", default)
-
- def ensure_string_list (self, option):
- """Ensure that 'option' is a list of strings. If 'option' is
- currently a string, we split it either on /,\s*/ or /\s+/, so
- "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
- ["foo", "bar", "baz"].
- """
- val = getattr(self, option)
- if val is None:
- return
- elif type(val) is StringType:
- setattr(self, option, re.split(r',\s*|\s+', val))
- else:
- if type(val) is ListType:
- types = map(type, val)
- ok = (types == [StringType] * len(val))
- else:
- ok = 0
-
- if not ok:
- raise DistutilsOptionError, \
- "'%s' must be a list of strings (got %r)" % \
- (option, val)
-
- def _ensure_tested_string (self, option, tester,
- what, error_fmt, default=None):
- val = self._ensure_stringlike(option, what, default)
- if val is not None and not tester(val):
- raise DistutilsOptionError, \
- ("error in '%s' option: " + error_fmt) % (option, val)
-
- def ensure_filename (self, option):
- """Ensure that 'option' is the name of an existing file."""
- self._ensure_tested_string(option, os.path.isfile,
- "filename",
- "'%s' does not exist or is not a file")
-
- def ensure_dirname (self, option):
- self._ensure_tested_string(option, os.path.isdir,
- "directory name",
- "'%s' does not exist or is not a directory")
-
-
- # -- Convenience methods for commands ------------------------------
-
- def get_command_name (self):
- if hasattr(self, 'command_name'):
- return self.command_name
- else:
- return self.__class__.__name__
-
-
- def set_undefined_options (self, src_cmd, *option_pairs):
- """Set the values of any "undefined" options from corresponding
- option values in some other command object. "Undefined" here means
- "is None", which is the convention used to indicate that an option
- has not been changed between 'initialize_options()' and
- 'finalize_options()'. Usually called from 'finalize_options()' for
- options that depend on some other command rather than another
- option of the same command. 'src_cmd' is the other command from
- which option values will be taken (a command object will be created
- for it if necessary); the remaining arguments are
- '(src_option,dst_option)' tuples which mean "take the value of
- 'src_option' in the 'src_cmd' command object, and copy it to
- 'dst_option' in the current command object".
- """
-
- # Option_pairs: list of (src_option, dst_option) tuples
-
- src_cmd_obj = self.distribution.get_command_obj(src_cmd)
- src_cmd_obj.ensure_finalized()
- for (src_option, dst_option) in option_pairs:
- if getattr(self, dst_option) is None:
- setattr(self, dst_option,
- getattr(src_cmd_obj, src_option))
-
-
- def get_finalized_command (self, command, create=1):
- """Wrapper around Distribution's 'get_command_obj()' method: find
- (create if necessary and 'create' is true) the command object for
- 'command', call its 'ensure_finalized()' method, and return the
- finalized command object.
- """
- cmd_obj = self.distribution.get_command_obj(command, create)
- cmd_obj.ensure_finalized()
- return cmd_obj
-
- # XXX rename to 'get_reinitialized_command()'? (should do the
- # same in dist.py, if so)
- def reinitialize_command (self, command, reinit_subcommands=0):
- return self.distribution.reinitialize_command(
- command, reinit_subcommands)
-
- def run_command (self, command):
- """Run some other command: uses the 'run_command()' method of
- Distribution, which creates and finalizes the command object if
- necessary and then invokes its 'run()' method.
- """
- self.distribution.run_command(command)
-
-
- def get_sub_commands (self):
- """Determine the sub-commands that are relevant in the current
- distribution (ie., that need to be run). This is based on the
- 'sub_commands' class attribute: each tuple in that list may include
- a method that we call to determine if the subcommand needs to be
- run for the current distribution. Return a list of command names.
- """
- commands = []
- for (cmd_name, method) in self.sub_commands:
- if method is None or method(self):
- commands.append(cmd_name)
- return commands
-
-
- # -- External world manipulation -----------------------------------
-
- def warn (self, msg):
- sys.stderr.write("warning: %s: %s\n" %
- (self.get_command_name(), msg))
-
-
- def execute (self, func, args, msg=None, level=1):
- util.execute(func, args, msg, dry_run=self.dry_run)
-
-
- def mkpath (self, name, mode=0777):
- dir_util.mkpath(name, mode, dry_run=self.dry_run)
-
-
- def copy_file (self, infile, outfile,
- preserve_mode=1, preserve_times=1, link=None, level=1):
- """Copy a file respecting verbose, dry-run and force flags. (The
- former two default to whatever is in the Distribution object, and
- the latter defaults to false for commands that don't define it.)"""
-
- return file_util.copy_file(
- infile, outfile,
- preserve_mode, preserve_times,
- not self.force,
- link,
- dry_run=self.dry_run)
-
-
- def copy_tree (self, infile, outfile,
- preserve_mode=1, preserve_times=1, preserve_symlinks=0,
- level=1):
- """Copy an entire directory tree respecting verbose, dry-run,
- and force flags.
- """
- return dir_util.copy_tree(
- infile, outfile,
- preserve_mode,preserve_times,preserve_symlinks,
- not self.force,
- dry_run=self.dry_run)
-
- def move_file (self, src, dst, level=1):
- """Move a file respectin dry-run flag."""
- return file_util.move_file(src, dst, dry_run = self.dry_run)
-
- def spawn (self, cmd, search_path=1, level=1):
- """Spawn an external command respecting dry-run flag."""
- from distutils.spawn import spawn
- spawn(cmd, search_path, dry_run= self.dry_run)
-
- def make_archive (self, base_name, format,
- root_dir=None, base_dir=None):
- return archive_util.make_archive(
- base_name, format, root_dir, base_dir, dry_run=self.dry_run)
-
-
- def make_file (self, infiles, outfile, func, args,
- exec_msg=None, skip_msg=None, level=1):
- """Special case of 'execute()' for operations that process one or
- more input files and generate one output file. Works just like
- 'execute()', except the operation is skipped and a different
- message printed if 'outfile' already exists and is newer than all
- files listed in 'infiles'. If the command defined 'self.force',
- and it is true, then the command is unconditionally run -- does no
- timestamp checks.
- """
- if exec_msg is None:
- exec_msg = "generating %s from %s" % \
- (outfile, string.join(infiles, ', '))
- if skip_msg is None:
- skip_msg = "skipping %s (inputs unchanged)" % outfile
-
-
- # Allow 'infiles' to be a single string
- if type(infiles) is StringType:
- infiles = (infiles,)
- elif type(infiles) not in (ListType, TupleType):
- raise TypeError, \
- "'infiles' must be a string, or a list or tuple of strings"
-
- # If 'outfile' must be regenerated (either because it doesn't
- # exist, is out-of-date, or the 'force' flag is true) then
- # perform the action that presumably regenerates it
- if self.force or dep_util.newer_group (infiles, outfile):
- self.execute(func, args, exec_msg, level)
-
- # Otherwise, print the "skip" message
- else:
- log.debug(skip_msg)
-
- # make_file ()
-
-# class Command
-
-
-# XXX 'install_misc' class not currently used -- it was the base class for
-# both 'install_scripts' and 'install_data', but they outgrew it. It might
-# still be useful for 'install_headers', though, so I'm keeping it around
-# for the time being.
-
-class install_misc (Command):
- """Common base class for installing some files in a subdirectory.
- Currently used by install_data and install_scripts.
- """
-
- user_options = [('install-dir=', 'd', "directory to install the files to")]
-
- def initialize_options (self):
- self.install_dir = None
- self.outfiles = []
-
- def _install_dir_from (self, dirname):
- self.set_undefined_options('install', (dirname, 'install_dir'))
-
- def _copy_files (self, filelist):
- self.outfiles = []
- if not filelist:
- return
- self.mkpath(self.install_dir)
- for f in filelist:
- self.copy_file(f, self.install_dir)
- self.outfiles.append(os.path.join(self.install_dir, f))
-
- def get_outputs (self):
- return self.outfiles
-
-
-if __name__ == "__main__":
- print "ok"
diff --git a/sys/lib/python/distutils/command/__init__.py b/sys/lib/python/distutils/command/__init__.py
deleted file mode 100644
index 2fc38aefa..000000000
--- a/sys/lib/python/distutils/command/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""distutils.command
-
-Package containing implementation of all the standard Distutils
-commands."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: __init__.py 37828 2004-11-10 22:23:15Z loewis $"
-
-__all__ = ['build',
- 'build_py',
- 'build_ext',
- 'build_clib',
- 'build_scripts',
- 'clean',
- 'install',
- 'install_lib',
- 'install_headers',
- 'install_scripts',
- 'install_data',
- 'sdist',
- 'register',
- 'bdist',
- 'bdist_dumb',
- 'bdist_rpm',
- 'bdist_wininst',
- # These two are reserved for future use:
- #'bdist_sdux',
- #'bdist_pkgtool',
- # Note:
- # bdist_packager is not included because it only provides
- # an abstract base class
- ]
diff --git a/sys/lib/python/distutils/command/bdist.py b/sys/lib/python/distutils/command/bdist.py
deleted file mode 100644
index 51a86d031..000000000
--- a/sys/lib/python/distutils/command/bdist.py
+++ /dev/null
@@ -1,150 +0,0 @@
-"""distutils.command.bdist
-
-Implements the Distutils 'bdist' command (create a built [binary]
-distribution)."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: bdist.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os, string
-from types import *
-from distutils.core import Command
-from distutils.errors import *
-from distutils.util import get_platform
-
-
-def show_formats ():
- """Print list of available formats (arguments to "--format" option).
- """
- from distutils.fancy_getopt import FancyGetopt
- formats=[]
- for format in bdist.format_commands:
- formats.append(("formats=" + format, None,
- bdist.format_command[format][1]))
- pretty_printer = FancyGetopt(formats)
- pretty_printer.print_help("List of available distribution formats:")
-
-
-class bdist (Command):
-
- description = "create a built (binary) distribution"
-
- user_options = [('bdist-base=', 'b',
- "temporary directory for creating built distributions"),
- ('plat-name=', 'p',
- "platform name to embed in generated filenames "
- "(default: %s)" % get_platform()),
- ('formats=', None,
- "formats for distribution (comma-separated list)"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in "
- "[default: dist]"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ]
-
- boolean_options = ['skip-build']
-
- help_options = [
- ('help-formats', None,
- "lists available distribution formats", show_formats),
- ]
-
- # The following commands do not take a format option from bdist
- no_format_option = ('bdist_rpm',
- #'bdist_sdux', 'bdist_pkgtool'
- )
-
- # This won't do in reality: will need to distinguish RPM-ish Linux,
- # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
- default_format = { 'posix': 'gztar',
- 'nt': 'zip',
- 'os2': 'zip', }
-
- # Establish the preferred order (for the --help-formats option).
- format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
- 'wininst', 'zip',
- #'pkgtool', 'sdux'
- ]
-
- # And the real information.
- format_command = { 'rpm': ('bdist_rpm', "RPM distribution"),
- 'zip': ('bdist_dumb', "ZIP file"),
- 'gztar': ('bdist_dumb', "gzip'ed tar file"),
- 'bztar': ('bdist_dumb', "bzip2'ed tar file"),
- 'ztar': ('bdist_dumb', "compressed tar file"),
- 'tar': ('bdist_dumb', "tar file"),
- 'wininst': ('bdist_wininst',
- "Windows executable installer"),
- 'zip': ('bdist_dumb', "ZIP file"),
- #'pkgtool': ('bdist_pkgtool',
- # "Solaris pkgtool distribution"),
- #'sdux': ('bdist_sdux', "HP-UX swinstall depot"),
- }
-
-
- def initialize_options (self):
- self.bdist_base = None
- self.plat_name = None
- self.formats = None
- self.dist_dir = None
- self.skip_build = 0
-
- # initialize_options()
-
-
- def finalize_options (self):
- # have to finalize 'plat_name' before 'bdist_base'
- if self.plat_name is None:
- self.plat_name = get_platform()
-
- # 'bdist_base' -- parent of per-built-distribution-format
- # temporary directories (eg. we'll probably have
- # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
- if self.bdist_base is None:
- build_base = self.get_finalized_command('build').build_base
- self.bdist_base = os.path.join(build_base,
- 'bdist.' + self.plat_name)
-
- self.ensure_string_list('formats')
- if self.formats is None:
- try:
- self.formats = [self.default_format[os.name]]
- except KeyError:
- raise DistutilsPlatformError, \
- "don't know how to create built distributions " + \
- "on platform %s" % os.name
-
- if self.dist_dir is None:
- self.dist_dir = "dist"
-
- # finalize_options()
-
-
- def run (self):
-
- # Figure out which sub-commands we need to run.
- commands = []
- for format in self.formats:
- try:
- commands.append(self.format_command[format][0])
- except KeyError:
- raise DistutilsOptionError, "invalid format '%s'" % format
-
- # Reinitialize and run each command.
- for i in range(len(self.formats)):
- cmd_name = commands[i]
- sub_cmd = self.reinitialize_command(cmd_name)
- if cmd_name not in self.no_format_option:
- sub_cmd.format = self.formats[i]
-
- # If we're going to need to run this command again, tell it to
- # keep its temporary files around so subsequent runs go faster.
- if cmd_name in commands[i+1:]:
- sub_cmd.keep_temp = 1
- self.run_command(cmd_name)
-
- # run()
-
-# class bdist
diff --git a/sys/lib/python/distutils/command/bdist_dumb.py b/sys/lib/python/distutils/command/bdist_dumb.py
deleted file mode 100644
index 00f063810..000000000
--- a/sys/lib/python/distutils/command/bdist_dumb.py
+++ /dev/null
@@ -1,135 +0,0 @@
-"""distutils.command.bdist_dumb
-
-Implements the Distutils 'bdist_dumb' command (create a "dumb" built
-distribution -- i.e., just an archive to be unpacked under $prefix or
-$exec_prefix)."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: bdist_dumb.py 38697 2005-03-23 18:54:36Z loewis $"
-
-import os
-from distutils.core import Command
-from distutils.util import get_platform
-from distutils.dir_util import create_tree, remove_tree, ensure_relative
-from distutils.errors import *
-from distutils.sysconfig import get_python_version
-from distutils import log
-
-class bdist_dumb (Command):
-
- description = "create a \"dumb\" built distribution"
-
- user_options = [('bdist-dir=', 'd',
- "temporary directory for creating the distribution"),
- ('plat-name=', 'p',
- "platform name to embed in generated filenames "
- "(default: %s)" % get_platform()),
- ('format=', 'f',
- "archive format to create (tar, ztar, gztar, zip)"),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ('relative', None,
- "build the archive using relative paths"
- "(default: false)"),
- ]
-
- boolean_options = ['keep-temp', 'skip-build', 'relative']
-
- default_format = { 'posix': 'gztar',
- 'nt': 'zip',
- 'os2': 'zip' }
-
-
- def initialize_options (self):
- self.bdist_dir = None
- self.plat_name = None
- self.format = None
- self.keep_temp = 0
- self.dist_dir = None
- self.skip_build = 0
- self.relative = 0
-
- # initialize_options()
-
-
- def finalize_options (self):
-
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'dumb')
-
- if self.format is None:
- try:
- self.format = self.default_format[os.name]
- except KeyError:
- raise DistutilsPlatformError, \
- ("don't know how to create dumb built distributions " +
- "on platform %s") % os.name
-
- self.set_undefined_options('bdist',
- ('dist_dir', 'dist_dir'),
- ('plat_name', 'plat_name'))
-
- # finalize_options()
-
-
- def run (self):
-
- if not self.skip_build:
- self.run_command('build')
-
- install = self.reinitialize_command('install', reinit_subcommands=1)
- install.root = self.bdist_dir
- install.skip_build = self.skip_build
- install.warn_dir = 0
-
- log.info("installing to %s" % self.bdist_dir)
- self.run_command('install')
-
- # And make an archive relative to the root of the
- # pseudo-installation tree.
- archive_basename = "%s.%s" % (self.distribution.get_fullname(),
- self.plat_name)
-
- # OS/2 objects to any ":" characters in a filename (such as when
- # a timestamp is used in a version) so change them to hyphens.
- if os.name == "os2":
- archive_basename = archive_basename.replace(":", "-")
-
- pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
- if not self.relative:
- archive_root = self.bdist_dir
- else:
- if (self.distribution.has_ext_modules() and
- (install.install_base != install.install_platbase)):
- raise DistutilsPlatformError, \
- ("can't make a dumb built distribution where "
- "base and platbase are different (%s, %s)"
- % (repr(install.install_base),
- repr(install.install_platbase)))
- else:
- archive_root = os.path.join(self.bdist_dir,
- ensure_relative(install.install_base))
-
- # Make the archive
- filename = self.make_archive(pseudoinstall_root,
- self.format, root_dir=archive_root)
- if self.distribution.has_ext_modules():
- pyversion = get_python_version()
- else:
- pyversion = 'any'
- self.distribution.dist_files.append(('bdist_dumb', pyversion,
- filename))
-
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- # run()
-
-# class bdist_dumb
diff --git a/sys/lib/python/distutils/command/bdist_msi.py b/sys/lib/python/distutils/command/bdist_msi.py
deleted file mode 100644
index 75db8773f..000000000
--- a/sys/lib/python/distutils/command/bdist_msi.py
+++ /dev/null
@@ -1,639 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-# Copyright (C) 2005, 2006 Martin v. Löwis
-# Licensed to PSF under a Contributor Agreement.
-# The bdist_wininst command proper
-# based on bdist_wininst
-"""
-Implements the bdist_msi command.
-"""
-
-import sys, os, string
-from distutils.core import Command
-from distutils.util import get_platform
-from distutils.dir_util import remove_tree
-from distutils.sysconfig import get_python_version
-from distutils.version import StrictVersion
-from distutils.errors import DistutilsOptionError
-from distutils import log
-import msilib
-from msilib import schema, sequence, text
-from msilib import Directory, Feature, Dialog, add_data
-
-class PyDialog(Dialog):
- """Dialog class with a fixed layout: controls at the top, then a ruler,
- then a list of buttons: back, next, cancel. Optionally a bitmap at the
- left."""
- def __init__(self, *args, **kw):
- """Dialog(database, name, x, y, w, h, attributes, title, first,
- default, cancel, bitmap=true)"""
- Dialog.__init__(self, *args)
- ruler = self.h - 36
- bmwidth = 152*ruler/328
- #if kw.get("bitmap", True):
- # self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
- self.line("BottomLine", 0, ruler, self.w, 0)
-
- def title(self, title):
- "Set the title text of the dialog at the top."
- # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
- # text, in VerdanaBold10
- self.text("Title", 15, 10, 320, 60, 0x30003,
- r"{\VerdanaBold10}%s" % title)
-
- def back(self, title, next, name = "Back", active = 1):
- """Add a back button with a given title, the tab-next button,
- its name in the Control table, possibly initially disabled.
-
- Return the button, so that events can be associated"""
- if active:
- flags = 3 # Visible|Enabled
- else:
- flags = 1 # Visible
- return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)
-
- def cancel(self, title, next, name = "Cancel", active = 1):
- """Add a cancel button with a given title, the tab-next button,
- its name in the Control table, possibly initially disabled.
-
- Return the button, so that events can be associated"""
- if active:
- flags = 3 # Visible|Enabled
- else:
- flags = 1 # Visible
- return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
-
- def next(self, title, next, name = "Next", active = 1):
- """Add a Next button with a given title, the tab-next button,
- its name in the Control table, possibly initially disabled.
-
- Return the button, so that events can be associated"""
- if active:
- flags = 3 # Visible|Enabled
- else:
- flags = 1 # Visible
- return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
-
- def xbutton(self, name, title, next, xpos):
- """Add a button with a given title, the tab-next button,
- its name in the Control table, giving its x position; the
- y-position is aligned with the other buttons.
-
- Return the button, so that events can be associated"""
- return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
-
-class bdist_msi (Command):
-
- description = "create a Microsoft Installer (.msi) binary distribution"
-
- user_options = [('bdist-dir=', None,
- "temporary directory for creating the distribution"),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('target-version=', None,
- "require a specific python version" +
- " on the target system"),
- ('no-target-compile', 'c',
- "do not compile .py to .pyc on the target system"),
- ('no-target-optimize', 'o',
- "do not compile .py to .pyo (optimized)"
- "on the target system"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ('install-script=', None,
- "basename of installation script to be run after"
- "installation or before deinstallation"),
- ('pre-install-script=', None,
- "Fully qualified filename of a script to be run before "
- "any files are installed. This script need not be in the "
- "distribution"),
- ]
-
- boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
- 'skip-build']
-
- def initialize_options (self):
- self.bdist_dir = None
- self.keep_temp = 0
- self.no_target_compile = 0
- self.no_target_optimize = 0
- self.target_version = None
- self.dist_dir = None
- self.skip_build = 0
- self.install_script = None
- self.pre_install_script = None
-
- def finalize_options (self):
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'msi')
- short_version = get_python_version()
- if self.target_version:
- if not self.skip_build and self.distribution.has_ext_modules()\
- and self.target_version != short_version:
- raise DistutilsOptionError, \
- "target version can only be %s, or the '--skip_build'" \
- " option must be specified" % (short_version,)
- else:
- self.target_version = short_version
-
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- if self.pre_install_script:
- raise DistutilsOptionError, "the pre-install-script feature is not yet implemented"
-
- if self.install_script:
- for script in self.distribution.scripts:
- if self.install_script == os.path.basename(script):
- break
- else:
- raise DistutilsOptionError, \
- "install_script '%s' not found in scripts" % \
- self.install_script
- self.install_script_key = None
- # finalize_options()
-
-
- def run (self):
- if not self.skip_build:
- self.run_command('build')
-
- install = self.reinitialize_command('install', reinit_subcommands=1)
- install.prefix = self.bdist_dir
- install.skip_build = self.skip_build
- install.warn_dir = 0
-
- install_lib = self.reinitialize_command('install_lib')
- # we do not want to include pyc or pyo files
- install_lib.compile = 0
- install_lib.optimize = 0
-
- if self.distribution.has_ext_modules():
- # If we are building an installer for a Python version other
- # than the one we are currently running, then we need to ensure
- # our build_lib reflects the other Python version rather than ours.
- # Note that for target_version!=sys.version, we must have skipped the
- # build step, so there is no issue with enforcing the build of this
- # version.
- target_version = self.target_version
- if not target_version:
- assert self.skip_build, "Should have already checked this"
- target_version = sys.version[0:3]
- plat_specifier = ".%s-%s" % (get_platform(), target_version)
- build = self.get_finalized_command('build')
- build.build_lib = os.path.join(build.build_base,
- 'lib' + plat_specifier)
-
- log.info("installing to %s", self.bdist_dir)
- install.ensure_finalized()
-
- # avoid warning of 'install_lib' about installing
- # into a directory not in sys.path
- sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
-
- install.run()
-
- del sys.path[0]
-
- self.mkpath(self.dist_dir)
- fullname = self.distribution.get_fullname()
- installer_name = self.get_installer_filename(fullname)
- installer_name = os.path.abspath(installer_name)
- if os.path.exists(installer_name): os.unlink(installer_name)
-
- metadata = self.distribution.metadata
- author = metadata.author
- if not author:
- author = metadata.maintainer
- if not author:
- author = "UNKNOWN"
- version = metadata.get_version()
- # ProductVersion must be strictly numeric
- # XXX need to deal with prerelease versions
- sversion = "%d.%d.%d" % StrictVersion(version).version
- # Prefix ProductName with Python x.y, so that
- # it sorts together with the other Python packages
- # in Add-Remove-Programs (APR)
- product_name = "Python %s %s" % (self.target_version,
- self.distribution.get_fullname())
- self.db = msilib.init_database(installer_name, schema,
- product_name, msilib.gen_uuid(),
- sversion, author)
- msilib.add_tables(self.db, sequence)
- props = [('DistVersion', version)]
- email = metadata.author_email or metadata.maintainer_email
- if email:
- props.append(("ARPCONTACT", email))
- if metadata.url:
- props.append(("ARPURLINFOABOUT", metadata.url))
- if props:
- add_data(self.db, 'Property', props)
-
- self.add_find_python()
- self.add_files()
- self.add_scripts()
- self.add_ui()
- self.db.Commit()
-
- if hasattr(self.distribution, 'dist_files'):
- self.distribution.dist_files.append(('bdist_msi', self.target_version, fullname))
-
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- def add_files(self):
- db = self.db
- cab = msilib.CAB("distfiles")
- f = Feature(db, "default", "Default Feature", "Everything", 1, directory="TARGETDIR")
- f.set_current()
- rootdir = os.path.abspath(self.bdist_dir)
- root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
- db.Commit()
- todo = [root]
- while todo:
- dir = todo.pop()
- for file in os.listdir(dir.absolute):
- afile = os.path.join(dir.absolute, file)
- if os.path.isdir(afile):
- newdir = Directory(db, cab, dir, file, file, "%s|%s" % (dir.make_short(file), file))
- todo.append(newdir)
- else:
- key = dir.add_file(file)
- if file==self.install_script:
- if self.install_script_key:
- raise DistutilsOptionError, "Multiple files with name %s" % file
- self.install_script_key = '[#%s]' % key
-
- cab.commit(db)
-
- def add_find_python(self):
- """Adds code to the installer to compute the location of Python.
- Properties PYTHON.MACHINE, PYTHON.USER, PYTHONDIR and PYTHON will be set
- in both the execute and UI sequences; PYTHONDIR will be set from
- PYTHON.USER if defined, else from PYTHON.MACHINE.
- PYTHON is PYTHONDIR\python.exe"""
- install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % self.target_version
- add_data(self.db, "RegLocator",
- [("python.machine", 2, install_path, None, 2),
- ("python.user", 1, install_path, None, 2)])
- add_data(self.db, "AppSearch",
- [("PYTHON.MACHINE", "python.machine"),
- ("PYTHON.USER", "python.user")])
- add_data(self.db, "CustomAction",
- [("PythonFromMachine", 51+256, "PYTHONDIR", "[PYTHON.MACHINE]"),
- ("PythonFromUser", 51+256, "PYTHONDIR", "[PYTHON.USER]"),
- ("PythonExe", 51+256, "PYTHON", "[PYTHONDIR]\\python.exe"),
- ("InitialTargetDir", 51+256, "TARGETDIR", "[PYTHONDIR]")])
- add_data(self.db, "InstallExecuteSequence",
- [("PythonFromMachine", "PYTHON.MACHINE", 401),
- ("PythonFromUser", "PYTHON.USER", 402),
- ("PythonExe", None, 403),
- ("InitialTargetDir", 'TARGETDIR=""', 404),
- ])
- add_data(self.db, "InstallUISequence",
- [("PythonFromMachine", "PYTHON.MACHINE", 401),
- ("PythonFromUser", "PYTHON.USER", 402),
- ("PythonExe", None, 403),
- ("InitialTargetDir", 'TARGETDIR=""', 404),
- ])
-
- def add_scripts(self):
- if self.install_script:
- add_data(self.db, "CustomAction",
- [("install_script", 50, "PYTHON", self.install_script_key)])
- add_data(self.db, "InstallExecuteSequence",
- [("install_script", "NOT Installed", 6800)])
- if self.pre_install_script:
- scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
- f = open(scriptfn, "w")
- # The batch file will be executed with [PYTHON], so that %1
- # is the path to the Python interpreter; %0 will be the path
- # of the batch file.
- # rem ="""
- # %1 %0
- # exit
- # """
- # <actual script>
- f.write('rem ="""\n%1 %0\nexit\n"""\n')
- f.write(open(self.pre_install_script).read())
- f.close()
- add_data(self.db, "Binary",
- [("PreInstall", msilib.Binary(scriptfn))
- ])
- add_data(self.db, "CustomAction",
- [("PreInstall", 2, "PreInstall", None)
- ])
- add_data(self.db, "InstallExecuteSequence",
- [("PreInstall", "NOT Installed", 450)])
-
-
- def add_ui(self):
- db = self.db
- x = y = 50
- w = 370
- h = 300
- title = "[ProductName] Setup"
-
- # see "Dialog Style Bits"
- modal = 3 # visible | modal
- modeless = 1 # visible
- track_disk_space = 32
-
- # UI customization properties
- add_data(db, "Property",
- # See "DefaultUIFont Property"
- [("DefaultUIFont", "DlgFont8"),
- # See "ErrorDialog Style Bit"
- ("ErrorDialog", "ErrorDlg"),
- ("Progress1", "Install"), # modified in maintenance type dlg
- ("Progress2", "installs"),
- ("MaintenanceForm_Action", "Repair"),
- # possible values: ALL, JUSTME
- ("WhichUsers", "ALL")
- ])
-
- # Fonts, see "TextStyle Table"
- add_data(db, "TextStyle",
- [("DlgFont8", "Tahoma", 9, None, 0),
- ("DlgFontBold8", "Tahoma", 8, None, 1), #bold
- ("VerdanaBold10", "Verdana", 10, None, 1),
- ("VerdanaRed9", "Verdana", 9, 255, 0),
- ])
-
- # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
- # Numbers indicate sequence; see sequence.py for how these action integrate
- add_data(db, "InstallUISequence",
- [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
- ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
- # In the user interface, assume all-users installation if privileged.
- ("SelectDirectoryDlg", "Not Installed", 1230),
- # XXX no support for resume installations yet
- #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
- ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
- ("ProgressDlg", None, 1280)])
-
- add_data(db, 'ActionText', text.ActionText)
- add_data(db, 'UIText', text.UIText)
- #####################################################################
- # Standard dialogs: FatalError, UserExit, ExitDialog
- fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
- "Finish", "Finish", "Finish")
- fatal.title("[ProductName] Installer ended prematurely")
- fatal.back("< Back", "Finish", active = 0)
- fatal.cancel("Cancel", "Back", active = 0)
- fatal.text("Description1", 15, 70, 320, 80, 0x30003,
- "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
- fatal.text("Description2", 15, 155, 320, 20, 0x30003,
- "Click the Finish button to exit the Installer.")
- c=fatal.next("Finish", "Cancel", name="Finish")
- c.event("EndDialog", "Exit")
-
- user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
- "Finish", "Finish", "Finish")
- user_exit.title("[ProductName] Installer was interrupted")
- user_exit.back("< Back", "Finish", active = 0)
- user_exit.cancel("Cancel", "Back", active = 0)
- user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
- "[ProductName] setup was interrupted. Your system has not been modified. "
- "To install this program at a later time, please run the installation again.")
- user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
- "Click the Finish button to exit the Installer.")
- c = user_exit.next("Finish", "Cancel", name="Finish")
- c.event("EndDialog", "Exit")
-
- exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
- "Finish", "Finish", "Finish")
- exit_dialog.title("Completing the [ProductName] Installer")
- exit_dialog.back("< Back", "Finish", active = 0)
- exit_dialog.cancel("Cancel", "Back", active = 0)
- exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
- "Click the Finish button to exit the Installer.")
- c = exit_dialog.next("Finish", "Cancel", name="Finish")
- c.event("EndDialog", "Return")
-
- #####################################################################
- # Required dialog: FilesInUse, ErrorDlg
- inuse = PyDialog(db, "FilesInUse",
- x, y, w, h,
- 19, # KeepModeless|Modal|Visible
- title,
- "Retry", "Retry", "Retry", bitmap=False)
- inuse.text("Title", 15, 6, 200, 15, 0x30003,
- r"{\DlgFontBold8}Files in Use")
- inuse.text("Description", 20, 23, 280, 20, 0x30003,
- "Some files that need to be updated are currently in use.")
- inuse.text("Text", 20, 55, 330, 50, 3,
- "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
- inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
- None, None, None)
- c=inuse.back("Exit", "Ignore", name="Exit")
- c.event("EndDialog", "Exit")
- c=inuse.next("Ignore", "Retry", name="Ignore")
- c.event("EndDialog", "Ignore")
- c=inuse.cancel("Retry", "Exit", name="Retry")
- c.event("EndDialog","Retry")
-
- # See "Error Dialog". See "ICE20" for the required names of the controls.
- error = Dialog(db, "ErrorDlg",
- 50, 10, 330, 101,
- 65543, # Error|Minimize|Modal|Visible
- title,
- "ErrorText", None, None)
- error.text("ErrorText", 50,9,280,48,3, "")
- #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
- error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
- error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
- error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
- error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
- error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
- error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
- error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
-
- #####################################################################
- # Global "Query Cancel" dialog
- cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
- "No", "No", "No")
- cancel.text("Text", 48, 15, 194, 30, 3,
- "Are you sure you want to cancel [ProductName] installation?")
- #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
- # "py.ico", None, None)
- c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
- c.event("EndDialog", "Exit")
-
- c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
- c.event("EndDialog", "Return")
-
- #####################################################################
- # Global "Wait for costing" dialog
- costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
- "Return", "Return", "Return")
- costing.text("Text", 48, 15, 194, 30, 3,
- "Please wait while the installer finishes determining your disk space requirements.")
- c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
- c.event("EndDialog", "Exit")
-
- #####################################################################
- # Preparation dialog: no user input except cancellation
- prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
- "Cancel", "Cancel", "Cancel")
- prep.text("Description", 15, 70, 320, 40, 0x30003,
- "Please wait while the Installer prepares to guide you through the installation.")
- prep.title("Welcome to the [ProductName] Installer")
- c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
- c.mapping("ActionText", "Text")
- c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
- c.mapping("ActionData", "Text")
- prep.back("Back", None, active=0)
- prep.next("Next", None, active=0)
- c=prep.cancel("Cancel", None)
- c.event("SpawnDialog", "CancelDlg")
-
- #####################################################################
- # Target directory selection
- seldlg = PyDialog(db, "SelectDirectoryDlg", x, y, w, h, modal, title,
- "Next", "Next", "Cancel")
- seldlg.title("Select Destination Directory")
-
- version = sys.version[:3]+" "
- seldlg.text("Hint", 15, 30, 300, 40, 3,
- "The destination directory should contain a Python %sinstallation" % version)
-
- seldlg.back("< Back", None, active=0)
- c = seldlg.next("Next >", "Cancel")
- c.event("SetTargetPath", "TARGETDIR", ordering=1)
- c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=2)
- c.event("EndDialog", "Return", ordering=3)
-
- c = seldlg.cancel("Cancel", "DirectoryCombo")
- c.event("SpawnDialog", "CancelDlg")
-
- seldlg.control("DirectoryCombo", "DirectoryCombo", 15, 70, 272, 80, 393219,
- "TARGETDIR", None, "DirectoryList", None)
- seldlg.control("DirectoryList", "DirectoryList", 15, 90, 308, 136, 3, "TARGETDIR",
- None, "PathEdit", None)
- seldlg.control("PathEdit", "PathEdit", 15, 230, 306, 16, 3, "TARGETDIR", None, "Next", None)
- c = seldlg.pushbutton("Up", 306, 70, 18, 18, 3, "Up", None)
- c.event("DirectoryListUp", "0")
- c = seldlg.pushbutton("NewDir", 324, 70, 30, 18, 3, "New", None)
- c.event("DirectoryListNew", "0")
-
- #####################################################################
- # Disk cost
- cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
- "OK", "OK", "OK", bitmap=False)
- cost.text("Title", 15, 6, 200, 15, 0x30003,
- "{\DlgFontBold8}Disk Space Requirements")
- cost.text("Description", 20, 20, 280, 20, 0x30003,
- "The disk space required for the installation of the selected features.")
- cost.text("Text", 20, 53, 330, 60, 3,
- "The highlighted volumes (if any) do not have enough disk space "
- "available for the currently selected features. You can either "
- "remove some files from the highlighted volumes, or choose to "
- "install less features onto local drive(s), or select different "
- "destination drive(s).")
- cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
- None, "{120}{70}{70}{70}{70}", None, None)
- cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
-
- #####################################################################
- # WhichUsers Dialog. Only available on NT, and for privileged users.
- # This must be run before FindRelatedProducts, because that will
- # take into account whether the previous installation was per-user
- # or per-machine. We currently don't support going back to this
- # dialog after "Next" was selected; to support this, we would need to
- # find how to reset the ALLUSERS property, and how to re-run
- # FindRelatedProducts.
- # On Windows9x, the ALLUSERS property is ignored on the command line
- # and in the Property table, but installer fails according to the documentation
- # if a dialog attempts to set ALLUSERS.
- whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
- "AdminInstall", "Next", "Cancel")
- whichusers.title("Select whether to install [ProductName] for all users of this computer.")
- # A radio group with two options: allusers, justme
- g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
- "WhichUsers", "", "Next")
- g.add("ALL", 0, 5, 150, 20, "Install for all users")
- g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
-
- whichusers.back("Back", None, active=0)
-
- c = whichusers.next("Next >", "Cancel")
- c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
- c.event("EndDialog", "Return", ordering = 2)
-
- c = whichusers.cancel("Cancel", "AdminInstall")
- c.event("SpawnDialog", "CancelDlg")
-
- #####################################################################
- # Installation Progress dialog (modeless)
- progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
- "Cancel", "Cancel", "Cancel", bitmap=False)
- progress.text("Title", 20, 15, 200, 15, 0x30003,
- "{\DlgFontBold8}[Progress1] [ProductName]")
- progress.text("Text", 35, 65, 300, 30, 3,
- "Please wait while the Installer [Progress2] [ProductName]. "
- "This may take several minutes.")
- progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
-
- c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
- c.mapping("ActionText", "Text")
-
- #c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
- #c.mapping("ActionData", "Text")
-
- c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
- None, "Progress done", None, None)
- c.mapping("SetProgress", "Progress")
-
- progress.back("< Back", "Next", active=False)
- progress.next("Next >", "Cancel", active=False)
- progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
-
- ###################################################################
- # Maintenance type: repair/uninstall
- maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
- "Next", "Next", "Cancel")
- maint.title("Welcome to the [ProductName] Setup Wizard")
- maint.text("BodyText", 15, 63, 330, 42, 3,
- "Select whether you want to repair or remove [ProductName].")
- g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
- "MaintenanceForm_Action", "", "Next")
- #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
- g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
- g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
-
- maint.back("< Back", None, active=False)
- c=maint.next("Finish", "Cancel")
- # Change installation: Change progress dialog to "Change", then ask
- # for feature selection
- #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
- #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
-
- # Reinstall: Change progress dialog to "Repair", then invoke reinstall
- # Also set list of reinstalled features to "ALL"
- c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
- c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
- c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
- c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
-
- # Uninstall: Change progress to "Remove", then invoke uninstall
- # Also set list of removed features to "ALL"
- c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
- c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
- c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
- c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
-
- # Close dialog when maintenance action scheduled
- c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
- #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
-
- maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
-
- def get_installer_filename(self, fullname):
- # Factored out to allow overriding in subclasses
- installer_name = os.path.join(self.dist_dir,
- "%s.win32-py%s.msi" %
- (fullname, self.target_version))
- return installer_name
diff --git a/sys/lib/python/distutils/command/bdist_rpm.py b/sys/lib/python/distutils/command/bdist_rpm.py
deleted file mode 100644
index da3768ac5..000000000
--- a/sys/lib/python/distutils/command/bdist_rpm.py
+++ /dev/null
@@ -1,564 +0,0 @@
-"""distutils.command.bdist_rpm
-
-Implements the Distutils 'bdist_rpm' command (create RPM source and binary
-distributions)."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: bdist_rpm.py 52742 2006-11-12 18:56:18Z martin.v.loewis $"
-
-import sys, os, string
-import glob
-from types import *
-from distutils.core import Command
-from distutils.debug import DEBUG
-from distutils.util import get_platform
-from distutils.file_util import write_file
-from distutils.errors import *
-from distutils.sysconfig import get_python_version
-from distutils import log
-
-class bdist_rpm (Command):
-
- description = "create an RPM distribution"
-
- user_options = [
- ('bdist-base=', None,
- "base directory for creating built distributions"),
- ('rpm-base=', None,
- "base directory for creating RPMs (defaults to \"rpm\" under "
- "--bdist-base; must be specified for RPM 2)"),
- ('dist-dir=', 'd',
- "directory to put final RPM files in "
- "(and .spec files if --spec-only)"),
- ('python=', None,
- "path to Python interpreter to hard-code in the .spec file "
- "(default: \"python\")"),
- ('fix-python', None,
- "hard-code the exact path to the current Python interpreter in "
- "the .spec file"),
- ('spec-only', None,
- "only regenerate spec file"),
- ('source-only', None,
- "only generate source RPM"),
- ('binary-only', None,
- "only generate binary RPM"),
- ('use-bzip2', None,
- "use bzip2 instead of gzip to create source distribution"),
-
- # More meta-data: too RPM-specific to put in the setup script,
- # but needs to go in the .spec file -- so we make these options
- # to "bdist_rpm". The idea is that packagers would put this
- # info in setup.cfg, although they are of course free to
- # supply it on the command line.
- ('distribution-name=', None,
- "name of the (Linux) distribution to which this "
- "RPM applies (*not* the name of the module distribution!)"),
- ('group=', None,
- "package classification [default: \"Development/Libraries\"]"),
- ('release=', None,
- "RPM release number"),
- ('serial=', None,
- "RPM serial number"),
- ('vendor=', None,
- "RPM \"vendor\" (eg. \"Joe Blow <joe@example.com>\") "
- "[default: maintainer or author from setup script]"),
- ('packager=', None,
- "RPM packager (eg. \"Jane Doe <jane@example.net>\")"
- "[default: vendor]"),
- ('doc-files=', None,
- "list of documentation files (space or comma-separated)"),
- ('changelog=', None,
- "RPM changelog"),
- ('icon=', None,
- "name of icon file"),
- ('provides=', None,
- "capabilities provided by this package"),
- ('requires=', None,
- "capabilities required by this package"),
- ('conflicts=', None,
- "capabilities which conflict with this package"),
- ('build-requires=', None,
- "capabilities required to build this package"),
- ('obsoletes=', None,
- "capabilities made obsolete by this package"),
- ('no-autoreq', None,
- "do not automatically calculate dependencies"),
-
- # Actions to take when building RPM
- ('keep-temp', 'k',
- "don't clean up RPM build directory"),
- ('no-keep-temp', None,
- "clean up RPM build directory [default]"),
- ('use-rpm-opt-flags', None,
- "compile with RPM_OPT_FLAGS when building from source RPM"),
- ('no-rpm-opt-flags', None,
- "do not pass any RPM CFLAGS to compiler"),
- ('rpm3-mode', None,
- "RPM 3 compatibility mode (default)"),
- ('rpm2-mode', None,
- "RPM 2 compatibility mode"),
-
- # Add the hooks necessary for specifying custom scripts
- ('prep-script=', None,
- "Specify a script for the PREP phase of RPM building"),
- ('build-script=', None,
- "Specify a script for the BUILD phase of RPM building"),
-
- ('pre-install=', None,
- "Specify a script for the pre-INSTALL phase of RPM building"),
- ('install-script=', None,
- "Specify a script for the INSTALL phase of RPM building"),
- ('post-install=', None,
- "Specify a script for the post-INSTALL phase of RPM building"),
-
- ('pre-uninstall=', None,
- "Specify a script for the pre-UNINSTALL phase of RPM building"),
- ('post-uninstall=', None,
- "Specify a script for the post-UNINSTALL phase of RPM building"),
-
- ('clean-script=', None,
- "Specify a script for the CLEAN phase of RPM building"),
-
- ('verify-script=', None,
- "Specify a script for the VERIFY phase of the RPM build"),
-
- # Allow a packager to explicitly force an architecture
- ('force-arch=', None,
- "Force an architecture onto the RPM build process"),
- ]
-
- boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode',
- 'no-autoreq']
-
- negative_opt = {'no-keep-temp': 'keep-temp',
- 'no-rpm-opt-flags': 'use-rpm-opt-flags',
- 'rpm2-mode': 'rpm3-mode'}
-
-
- def initialize_options (self):
- self.bdist_base = None
- self.rpm_base = None
- self.dist_dir = None
- self.python = None
- self.fix_python = None
- self.spec_only = None
- self.binary_only = None
- self.source_only = None
- self.use_bzip2 = None
-
- self.distribution_name = None
- self.group = None
- self.release = None
- self.serial = None
- self.vendor = None
- self.packager = None
- self.doc_files = None
- self.changelog = None
- self.icon = None
-
- self.prep_script = None
- self.build_script = None
- self.install_script = None
- self.clean_script = None
- self.verify_script = None
- self.pre_install = None
- self.post_install = None
- self.pre_uninstall = None
- self.post_uninstall = None
- self.prep = None
- self.provides = None
- self.requires = None
- self.conflicts = None
- self.build_requires = None
- self.obsoletes = None
-
- self.keep_temp = 0
- self.use_rpm_opt_flags = 1
- self.rpm3_mode = 1
- self.no_autoreq = 0
-
- self.force_arch = None
-
- # initialize_options()
-
-
- def finalize_options (self):
- self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
- if self.rpm_base is None:
- if not self.rpm3_mode:
- raise DistutilsOptionError, \
- "you must specify --rpm-base in RPM 2 mode"
- self.rpm_base = os.path.join(self.bdist_base, "rpm")
-
- if self.python is None:
- if self.fix_python:
- self.python = sys.executable
- else:
- self.python = "python"
- elif self.fix_python:
- raise DistutilsOptionError, \
- "--python and --fix-python are mutually exclusive options"
-
- if os.name != 'posix':
- raise DistutilsPlatformError, \
- ("don't know how to create RPM "
- "distributions on platform %s" % os.name)
- if self.binary_only and self.source_only:
- raise DistutilsOptionError, \
- "cannot supply both '--source-only' and '--binary-only'"
-
- # don't pass CFLAGS to pure python distributions
- if not self.distribution.has_ext_modules():
- self.use_rpm_opt_flags = 0
-
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
- self.finalize_package_data()
-
- # finalize_options()
-
- def finalize_package_data (self):
- self.ensure_string('group', "Development/Libraries")
- self.ensure_string('vendor',
- "%s <%s>" % (self.distribution.get_contact(),
- self.distribution.get_contact_email()))
- self.ensure_string('packager')
- self.ensure_string_list('doc_files')
- if type(self.doc_files) is ListType:
- for readme in ('README', 'README.txt'):
- if os.path.exists(readme) and readme not in self.doc_files:
- self.doc_files.append(readme)
-
- self.ensure_string('release', "1")
- self.ensure_string('serial') # should it be an int?
-
- self.ensure_string('distribution_name')
-
- self.ensure_string('changelog')
- # Format changelog correctly
- self.changelog = self._format_changelog(self.changelog)
-
- self.ensure_filename('icon')
-
- self.ensure_filename('prep_script')
- self.ensure_filename('build_script')
- self.ensure_filename('install_script')
- self.ensure_filename('clean_script')
- self.ensure_filename('verify_script')
- self.ensure_filename('pre_install')
- self.ensure_filename('post_install')
- self.ensure_filename('pre_uninstall')
- self.ensure_filename('post_uninstall')
-
- # XXX don't forget we punted on summaries and descriptions -- they
- # should be handled here eventually!
-
- # Now *this* is some meta-data that belongs in the setup script...
- self.ensure_string_list('provides')
- self.ensure_string_list('requires')
- self.ensure_string_list('conflicts')
- self.ensure_string_list('build_requires')
- self.ensure_string_list('obsoletes')
-
- self.ensure_string('force_arch')
- # finalize_package_data ()
-
-
- def run (self):
-
- if DEBUG:
- print "before _get_package_data():"
- print "vendor =", self.vendor
- print "packager =", self.packager
- print "doc_files =", self.doc_files
- print "changelog =", self.changelog
-
- # make directories
- if self.spec_only:
- spec_dir = self.dist_dir
- self.mkpath(spec_dir)
- else:
- rpm_dir = {}
- for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
- rpm_dir[d] = os.path.join(self.rpm_base, d)
- self.mkpath(rpm_dir[d])
- spec_dir = rpm_dir['SPECS']
-
- # Spec file goes into 'dist_dir' if '--spec-only specified',
- # build/rpm.<plat> otherwise.
- spec_path = os.path.join(spec_dir,
- "%s.spec" % self.distribution.get_name())
- self.execute(write_file,
- (spec_path,
- self._make_spec_file()),
- "writing '%s'" % spec_path)
-
- if self.spec_only: # stop if requested
- return
-
- # Make a source distribution and copy to SOURCES directory with
- # optional icon.
- saved_dist_files = self.distribution.dist_files[:]
- sdist = self.reinitialize_command('sdist')
- if self.use_bzip2:
- sdist.formats = ['bztar']
- else:
- sdist.formats = ['gztar']
- self.run_command('sdist')
- self.distribution.dist_files = saved_dist_files
-
- source = sdist.get_archive_files()[0]
- source_dir = rpm_dir['SOURCES']
- self.copy_file(source, source_dir)
-
- if self.icon:
- if os.path.exists(self.icon):
- self.copy_file(self.icon, source_dir)
- else:
- raise DistutilsFileError, \
- "icon file '%s' does not exist" % self.icon
-
-
- # build package
- log.info("building RPMs")
- rpm_cmd = ['rpm']
- if os.path.exists('/usr/bin/rpmbuild') or \
- os.path.exists('/bin/rpmbuild'):
- rpm_cmd = ['rpmbuild']
- if self.source_only: # what kind of RPMs?
- rpm_cmd.append('-bs')
- elif self.binary_only:
- rpm_cmd.append('-bb')
- else:
- rpm_cmd.append('-ba')
- if self.rpm3_mode:
- rpm_cmd.extend(['--define',
- '_topdir %s' % os.path.abspath(self.rpm_base)])
- if not self.keep_temp:
- rpm_cmd.append('--clean')
- rpm_cmd.append(spec_path)
- # Determine the binary rpm names that should be built out of this spec
- # file
- # Note that some of these may not be really built (if the file
- # list is empty)
- nvr_string = "%{name}-%{version}-%{release}"
- src_rpm = nvr_string + ".src.rpm"
- non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm"
- q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % (
- src_rpm, non_src_rpm, spec_path)
-
- out = os.popen(q_cmd)
- binary_rpms = []
- source_rpm = None
- while 1:
- line = out.readline()
- if not line:
- break
- l = string.split(string.strip(line))
- assert(len(l) == 2)
- binary_rpms.append(l[1])
- # The source rpm is named after the first entry in the spec file
- if source_rpm is None:
- source_rpm = l[0]
-
- status = out.close()
- if status:
- raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd))
-
- self.spawn(rpm_cmd)
-
- if not self.dry_run:
- if not self.binary_only:
- srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
- assert(os.path.exists(srpm))
- self.move_file(srpm, self.dist_dir)
-
- if not self.source_only:
- for rpm in binary_rpms:
- rpm = os.path.join(rpm_dir['RPMS'], rpm)
- if os.path.exists(rpm):
- self.move_file(rpm, self.dist_dir)
- # run()
-
- def _dist_path(self, path):
- return os.path.join(self.dist_dir, os.path.basename(path))
-
- def _make_spec_file(self):
- """Generate the text of an RPM spec file and return it as a
- list of strings (one per line).
- """
- # definitions and headers
- spec_file = [
- '%define name ' + self.distribution.get_name(),
- '%define version ' + self.distribution.get_version().replace('-','_'),
- '%define unmangled_version ' + self.distribution.get_version(),
- '%define release ' + self.release.replace('-','_'),
- '',
- 'Summary: ' + self.distribution.get_description(),
- ]
-
- # put locale summaries into spec file
- # XXX not supported for now (hard to put a dictionary
- # in a config file -- arg!)
- #for locale in self.summaries.keys():
- # spec_file.append('Summary(%s): %s' % (locale,
- # self.summaries[locale]))
-
- spec_file.extend([
- 'Name: %{name}',
- 'Version: %{version}',
- 'Release: %{release}',])
-
- # XXX yuck! this filename is available from the "sdist" command,
- # but only after it has run: and we create the spec file before
- # running "sdist", in case of --spec-only.
- if self.use_bzip2:
- spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2')
- else:
- spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz')
-
- spec_file.extend([
- 'License: ' + self.distribution.get_license(),
- 'Group: ' + self.group,
- 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot',
- 'Prefix: %{_prefix}', ])
-
- if not self.force_arch:
- # noarch if no extension modules
- if not self.distribution.has_ext_modules():
- spec_file.append('BuildArch: noarch')
- else:
- spec_file.append( 'BuildArch: %s' % self.force_arch )
-
- for field in ('Vendor',
- 'Packager',
- 'Provides',
- 'Requires',
- 'Conflicts',
- 'Obsoletes',
- ):
- val = getattr(self, string.lower(field))
- if type(val) is ListType:
- spec_file.append('%s: %s' % (field, string.join(val)))
- elif val is not None:
- spec_file.append('%s: %s' % (field, val))
-
-
- if self.distribution.get_url() != 'UNKNOWN':
- spec_file.append('Url: ' + self.distribution.get_url())
-
- if self.distribution_name:
- spec_file.append('Distribution: ' + self.distribution_name)
-
- if self.build_requires:
- spec_file.append('BuildRequires: ' +
- string.join(self.build_requires))
-
- if self.icon:
- spec_file.append('Icon: ' + os.path.basename(self.icon))
-
- if self.no_autoreq:
- spec_file.append('AutoReq: 0')
-
- spec_file.extend([
- '',
- '%description',
- self.distribution.get_long_description()
- ])
-
- # put locale descriptions into spec file
- # XXX again, suppressed because config file syntax doesn't
- # easily support this ;-(
- #for locale in self.descriptions.keys():
- # spec_file.extend([
- # '',
- # '%description -l ' + locale,
- # self.descriptions[locale],
- # ])
-
- # rpm scripts
- # figure out default build script
- def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0]))
- def_build = "%s build" % def_setup_call
- if self.use_rpm_opt_flags:
- def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build
-
- # insert contents of files
-
- # XXX this is kind of misleading: user-supplied options are files
- # that we open and interpolate into the spec file, but the defaults
- # are just text that we drop in as-is. Hmmm.
-
- script_options = [
- ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"),
- ('build', 'build_script', def_build),
- ('install', 'install_script',
- ("%s install "
- "--root=$RPM_BUILD_ROOT "
- "--record=INSTALLED_FILES") % def_setup_call),
- ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
- ('verifyscript', 'verify_script', None),
- ('pre', 'pre_install', None),
- ('post', 'post_install', None),
- ('preun', 'pre_uninstall', None),
- ('postun', 'post_uninstall', None),
- ]
-
- for (rpm_opt, attr, default) in script_options:
- # Insert contents of file referred to, if no file is referred to
- # use 'default' as contents of script
- val = getattr(self, attr)
- if val or default:
- spec_file.extend([
- '',
- '%' + rpm_opt,])
- if val:
- spec_file.extend(string.split(open(val, 'r').read(), '\n'))
- else:
- spec_file.append(default)
-
-
- # files section
- spec_file.extend([
- '',
- '%files -f INSTALLED_FILES',
- '%defattr(-,root,root)',
- ])
-
- if self.doc_files:
- spec_file.append('%doc ' + string.join(self.doc_files))
-
- if self.changelog:
- spec_file.extend([
- '',
- '%changelog',])
- spec_file.extend(self.changelog)
-
- return spec_file
-
- # _make_spec_file ()
-
- def _format_changelog(self, changelog):
- """Format the changelog correctly and convert it to a list of strings
- """
- if not changelog:
- return changelog
- new_changelog = []
- for line in string.split(string.strip(changelog), '\n'):
- line = string.strip(line)
- if line[0] == '*':
- new_changelog.extend(['', line])
- elif line[0] == '-':
- new_changelog.append(line)
- else:
- new_changelog.append(' ' + line)
-
- # strip trailing newline inserted by first changelog entry
- if not new_changelog[0]:
- del new_changelog[0]
-
- return new_changelog
-
- # _format_changelog()
-
-# class bdist_rpm
diff --git a/sys/lib/python/distutils/command/bdist_wininst.py b/sys/lib/python/distutils/command/bdist_wininst.py
deleted file mode 100644
index bbe229447..000000000
--- a/sys/lib/python/distutils/command/bdist_wininst.py
+++ /dev/null
@@ -1,328 +0,0 @@
-"""distutils.command.bdist_wininst
-
-Implements the Distutils 'bdist_wininst' command: create a windows installer
-exe-program."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: bdist_wininst.py 38697 2005-03-23 18:54:36Z loewis $"
-
-import sys, os, string
-from distutils.core import Command
-from distutils.util import get_platform
-from distutils.dir_util import create_tree, remove_tree
-from distutils.errors import *
-from distutils.sysconfig import get_python_version
-from distutils import log
-
-class bdist_wininst (Command):
-
- description = "create an executable installer for MS Windows"
-
- user_options = [('bdist-dir=', None,
- "temporary directory for creating the distribution"),
- ('keep-temp', 'k',
- "keep the pseudo-installation tree around after " +
- "creating the distribution archive"),
- ('target-version=', None,
- "require a specific python version" +
- " on the target system"),
- ('no-target-compile', 'c',
- "do not compile .py to .pyc on the target system"),
- ('no-target-optimize', 'o',
- "do not compile .py to .pyo (optimized)"
- "on the target system"),
- ('dist-dir=', 'd',
- "directory to put final built distributions in"),
- ('bitmap=', 'b',
- "bitmap to use for the installer instead of python-powered logo"),
- ('title=', 't',
- "title to display on the installer background instead of default"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
- ('install-script=', None,
- "basename of installation script to be run after"
- "installation or before deinstallation"),
- ('pre-install-script=', None,
- "Fully qualified filename of a script to be run before "
- "any files are installed. This script need not be in the "
- "distribution"),
- ]
-
- boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
- 'skip-build']
-
- def initialize_options (self):
- self.bdist_dir = None
- self.keep_temp = 0
- self.no_target_compile = 0
- self.no_target_optimize = 0
- self.target_version = None
- self.dist_dir = None
- self.bitmap = None
- self.title = None
- self.skip_build = 0
- self.install_script = None
- self.pre_install_script = None
-
- # initialize_options()
-
-
- def finalize_options (self):
- if self.bdist_dir is None:
- bdist_base = self.get_finalized_command('bdist').bdist_base
- self.bdist_dir = os.path.join(bdist_base, 'wininst')
- if not self.target_version:
- self.target_version = ""
- if not self.skip_build and self.distribution.has_ext_modules():
- short_version = get_python_version()
- if self.target_version and self.target_version != short_version:
- raise DistutilsOptionError, \
- "target version can only be %s, or the '--skip_build'" \
- " option must be specified" % (short_version,)
- self.target_version = short_version
-
- self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
-
- if self.install_script:
- for script in self.distribution.scripts:
- if self.install_script == os.path.basename(script):
- break
- else:
- raise DistutilsOptionError, \
- "install_script '%s' not found in scripts" % \
- self.install_script
- # finalize_options()
-
-
- def run (self):
- if (sys.platform != "win32" and
- (self.distribution.has_ext_modules() or
- self.distribution.has_c_libraries())):
- raise DistutilsPlatformError \
- ("distribution contains extensions and/or C libraries; "
- "must be compiled on a Windows 32 platform")
-
- if not self.skip_build:
- self.run_command('build')
-
- install = self.reinitialize_command('install', reinit_subcommands=1)
- install.root = self.bdist_dir
- install.skip_build = self.skip_build
- install.warn_dir = 0
-
- install_lib = self.reinitialize_command('install_lib')
- # we do not want to include pyc or pyo files
- install_lib.compile = 0
- install_lib.optimize = 0
-
- if self.distribution.has_ext_modules():
- # If we are building an installer for a Python version other
- # than the one we are currently running, then we need to ensure
- # our build_lib reflects the other Python version rather than ours.
- # Note that for target_version!=sys.version, we must have skipped the
- # build step, so there is no issue with enforcing the build of this
- # version.
- target_version = self.target_version
- if not target_version:
- assert self.skip_build, "Should have already checked this"
- target_version = sys.version[0:3]
- plat_specifier = ".%s-%s" % (get_platform(), target_version)
- build = self.get_finalized_command('build')
- build.build_lib = os.path.join(build.build_base,
- 'lib' + plat_specifier)
-
- # Use a custom scheme for the zip-file, because we have to decide
- # at installation time which scheme to use.
- for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
- value = string.upper(key)
- if key == 'headers':
- value = value + '/Include/$dist_name'
- setattr(install,
- 'install_' + key,
- value)
-
- log.info("installing to %s", self.bdist_dir)
- install.ensure_finalized()
-
- # avoid warning of 'install_lib' about installing
- # into a directory not in sys.path
- sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
-
- install.run()
-
- del sys.path[0]
-
- # And make an archive relative to the root of the
- # pseudo-installation tree.
- from tempfile import mktemp
- archive_basename = mktemp()
- fullname = self.distribution.get_fullname()
- arcname = self.make_archive(archive_basename, "zip",
- root_dir=self.bdist_dir)
- # create an exe containing the zip-file
- self.create_exe(arcname, fullname, self.bitmap)
- if self.distribution.has_ext_modules():
- pyversion = get_python_version()
- else:
- pyversion = 'any'
- self.distribution.dist_files.append(('bdist_wininst', pyversion,
- self.get_installer_filename(fullname)))
- # remove the zip-file again
- log.debug("removing temporary file '%s'", arcname)
- os.remove(arcname)
-
- if not self.keep_temp:
- remove_tree(self.bdist_dir, dry_run=self.dry_run)
-
- # run()
-
- def get_inidata (self):
- # Return data describing the installation.
-
- lines = []
- metadata = self.distribution.metadata
-
- # Write the [metadata] section.
- lines.append("[metadata]")
-
- # 'info' will be displayed in the installer's dialog box,
- # describing the items to be installed.
- info = (metadata.long_description or '') + '\n'
-
- # Escape newline characters
- def escape(s):
- return string.replace(s, "\n", "\\n")
-
- for name in ["author", "author_email", "description", "maintainer",
- "maintainer_email", "name", "url", "version"]:
- data = getattr(metadata, name, "")
- if data:
- info = info + ("\n %s: %s" % \
- (string.capitalize(name), escape(data)))
- lines.append("%s=%s" % (name, escape(data)))
-
- # The [setup] section contains entries controlling
- # the installer runtime.
- lines.append("\n[Setup]")
- if self.install_script:
- lines.append("install_script=%s" % self.install_script)
- lines.append("info=%s" % escape(info))
- lines.append("target_compile=%d" % (not self.no_target_compile))
- lines.append("target_optimize=%d" % (not self.no_target_optimize))
- if self.target_version:
- lines.append("target_version=%s" % self.target_version)
-
- title = self.title or self.distribution.get_fullname()
- lines.append("title=%s" % escape(title))
- import time
- import distutils
- build_info = "Built %s with distutils-%s" % \
- (time.ctime(time.time()), distutils.__version__)
- lines.append("build_info=%s" % build_info)
- return string.join(lines, "\n")
-
- # get_inidata()
-
- def create_exe (self, arcname, fullname, bitmap=None):
- import struct
-
- self.mkpath(self.dist_dir)
-
- cfgdata = self.get_inidata()
-
- installer_name = self.get_installer_filename(fullname)
- self.announce("creating %s" % installer_name)
-
- if bitmap:
- bitmapdata = open(bitmap, "rb").read()
- bitmaplen = len(bitmapdata)
- else:
- bitmaplen = 0
-
- file = open(installer_name, "wb")
- file.write(self.get_exe_bytes())
- if bitmap:
- file.write(bitmapdata)
-
- # Convert cfgdata from unicode to ascii, mbcs encoded
- try:
- unicode
- except NameError:
- pass
- else:
- if isinstance(cfgdata, unicode):
- cfgdata = cfgdata.encode("mbcs")
-
- # Append the pre-install script
- cfgdata = cfgdata + "\0"
- if self.pre_install_script:
- script_data = open(self.pre_install_script, "r").read()
- cfgdata = cfgdata + script_data + "\n\0"
- else:
- # empty pre-install script
- cfgdata = cfgdata + "\0"
- file.write(cfgdata)
-
- # The 'magic number' 0x1234567B is used to make sure that the
- # binary layout of 'cfgdata' is what the wininst.exe binary
- # expects. If the layout changes, increment that number, make
- # the corresponding changes to the wininst.exe sources, and
- # recompile them.
- header = struct.pack("<iii",
- 0x1234567B, # tag
- len(cfgdata), # length
- bitmaplen, # number of bytes in bitmap
- )
- file.write(header)
- file.write(open(arcname, "rb").read())
-
- # create_exe()
-
- def get_installer_filename(self, fullname):
- # Factored out to allow overriding in subclasses
- if self.target_version:
- # if we create an installer for a specific python version,
- # it's better to include this in the name
- installer_name = os.path.join(self.dist_dir,
- "%s.win32-py%s.exe" %
- (fullname, self.target_version))
- else:
- installer_name = os.path.join(self.dist_dir,
- "%s.win32.exe" % fullname)
- return installer_name
- # get_installer_filename()
-
- def get_exe_bytes (self):
- from distutils.msvccompiler import get_build_version
- # If a target-version other than the current version has been
- # specified, then using the MSVC version from *this* build is no good.
- # Without actually finding and executing the target version and parsing
- # its sys.version, we just hard-code our knowledge of old versions.
- # NOTE: Possible alternative is to allow "--target-version" to
- # specify a Python executable rather than a simple version string.
- # We can then execute this program to obtain any info we need, such
- # as the real sys.version string for the build.
- cur_version = get_python_version()
- if self.target_version and self.target_version != cur_version:
- # If the target version is *later* than us, then we assume they
- # use what we use
- # string compares seem wrong, but are what sysconfig.py itself uses
- if self.target_version > cur_version:
- bv = get_build_version()
- else:
- if self.target_version < "2.4":
- bv = "6"
- else:
- bv = "7.1"
- else:
- # for current version - use authoritative check.
- bv = get_build_version()
-
- # wininst-x.y.exe is in the same directory as this file
- directory = os.path.dirname(__file__)
- # we must use a wininst-x.y.exe built with the same C compiler
- # used for python. XXX What about mingw, borland, and so on?
- filename = os.path.join(directory, "wininst-%s.exe" % bv)
- return open(filename, "rb").read()
-# class bdist_wininst
diff --git a/sys/lib/python/distutils/command/build.py b/sys/lib/python/distutils/command/build.py
deleted file mode 100644
index 8ada3bfd0..000000000
--- a/sys/lib/python/distutils/command/build.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""distutils.command.build
-
-Implements the Distutils 'build' command."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: build.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import sys, os
-from distutils.core import Command
-from distutils.util import get_platform
-
-
-def show_compilers ():
- from distutils.ccompiler import show_compilers
- show_compilers()
-
-
-class build (Command):
-
- description = "build everything needed to install"
-
- user_options = [
- ('build-base=', 'b',
- "base directory for build library"),
- ('build-purelib=', None,
- "build directory for platform-neutral distributions"),
- ('build-platlib=', None,
- "build directory for platform-specific distributions"),
- ('build-lib=', None,
- "build directory for all distribution (defaults to either " +
- "build-purelib or build-platlib"),
- ('build-scripts=', None,
- "build directory for scripts"),
- ('build-temp=', 't',
- "temporary build directory"),
- ('compiler=', 'c',
- "specify the compiler type"),
- ('debug', 'g',
- "compile extensions and libraries with debugging information"),
- ('force', 'f',
- "forcibly build everything (ignore file timestamps)"),
- ('executable=', 'e',
- "specify final destination interpreter path (build.py)"),
- ]
-
- boolean_options = ['debug', 'force']
-
- help_options = [
- ('help-compiler', None,
- "list available compilers", show_compilers),
- ]
-
- def initialize_options (self):
- self.build_base = 'build'
- # these are decided only after 'build_base' has its final value
- # (unless overridden by the user or client)
- self.build_purelib = None
- self.build_platlib = None
- self.build_lib = None
- self.build_temp = None
- self.build_scripts = None
- self.compiler = None
- self.debug = None
- self.force = 0
- self.executable = None
-
- def finalize_options (self):
-
- plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
-
- # 'build_purelib' and 'build_platlib' just default to 'lib' and
- # 'lib.<plat>' under the base build directory. We only use one of
- # them for a given distribution, though --
- if self.build_purelib is None:
- self.build_purelib = os.path.join(self.build_base, 'lib')
- if self.build_platlib is None:
- self.build_platlib = os.path.join(self.build_base,
- 'lib' + plat_specifier)
-
- # 'build_lib' is the actual directory that we will use for this
- # particular module distribution -- if user didn't supply it, pick
- # one of 'build_purelib' or 'build_platlib'.
- if self.build_lib is None:
- if self.distribution.ext_modules:
- self.build_lib = self.build_platlib
- else:
- self.build_lib = self.build_purelib
-
- # 'build_temp' -- temporary directory for compiler turds,
- # "build/temp.<plat>"
- if self.build_temp is None:
- self.build_temp = os.path.join(self.build_base,
- 'temp' + plat_specifier)
- if self.build_scripts is None:
- self.build_scripts = os.path.join(self.build_base,
- 'scripts-' + sys.version[0:3])
-
- if self.executable is None:
- self.executable = os.path.normpath(sys.executable)
- # finalize_options ()
-
-
- def run (self):
-
- # Run all relevant sub-commands. This will be some subset of:
- # - build_py - pure Python modules
- # - build_clib - standalone C libraries
- # - build_ext - Python extensions
- # - build_scripts - (Python) scripts
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
-
- # -- Predicates for the sub-command list ---------------------------
-
- def has_pure_modules (self):
- return self.distribution.has_pure_modules()
-
- def has_c_libraries (self):
- return self.distribution.has_c_libraries()
-
- def has_ext_modules (self):
- return self.distribution.has_ext_modules()
-
- def has_scripts (self):
- return self.distribution.has_scripts()
-
-
- sub_commands = [('build_py', has_pure_modules),
- ('build_clib', has_c_libraries),
- ('build_ext', has_ext_modules),
- ('build_scripts', has_scripts),
- ]
-
-# class build
diff --git a/sys/lib/python/distutils/command/build_clib.py b/sys/lib/python/distutils/command/build_clib.py
deleted file mode 100644
index 4591c5388..000000000
--- a/sys/lib/python/distutils/command/build_clib.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""distutils.command.build_clib
-
-Implements the Distutils 'build_clib' command, to build a C/C++ library
-that is included in the module distribution and needed by an extension
-module."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: build_clib.py 37828 2004-11-10 22:23:15Z loewis $"
-
-
-# XXX this module has *lots* of code ripped-off quite transparently from
-# build_ext.py -- not surprisingly really, as the work required to build
-# a static library from a collection of C source files is not really all
-# that different from what's required to build a shared object file from
-# a collection of C source files. Nevertheless, I haven't done the
-# necessary refactoring to account for the overlap in code between the
-# two modules, mainly because a number of subtle details changed in the
-# cut 'n paste. Sigh.
-
-import os, string
-from types import *
-from distutils.core import Command
-from distutils.errors import *
-from distutils.sysconfig import customize_compiler
-from distutils import log
-
-def show_compilers ():
- from distutils.ccompiler import show_compilers
- show_compilers()
-
-
-class build_clib (Command):
-
- description = "build C/C++ libraries used by Python extensions"
-
- user_options = [
- ('build-clib', 'b',
- "directory to build C/C++ libraries to"),
- ('build-temp', 't',
- "directory to put temporary build by-products"),
- ('debug', 'g',
- "compile with debugging information"),
- ('force', 'f',
- "forcibly build everything (ignore file timestamps)"),
- ('compiler=', 'c',
- "specify the compiler type"),
- ]
-
- boolean_options = ['debug', 'force']
-
- help_options = [
- ('help-compiler', None,
- "list available compilers", show_compilers),
- ]
-
- def initialize_options (self):
- self.build_clib = None
- self.build_temp = None
-
- # List of libraries to build
- self.libraries = None
-
- # Compilation options for all libraries
- self.include_dirs = None
- self.define = None
- self.undef = None
- self.debug = None
- self.force = 0
- self.compiler = None
-
- # initialize_options()
-
-
- def finalize_options (self):
-
- # This might be confusing: both build-clib and build-temp default
- # to build-temp as defined by the "build" command. This is because
- # I think that C libraries are really just temporary build
- # by-products, at least from the point of view of building Python
- # extensions -- but I want to keep my options open.
- self.set_undefined_options('build',
- ('build_temp', 'build_clib'),
- ('build_temp', 'build_temp'),
- ('compiler', 'compiler'),
- ('debug', 'debug'),
- ('force', 'force'))
-
- self.libraries = self.distribution.libraries
- if self.libraries:
- self.check_library_list(self.libraries)
-
- if self.include_dirs is None:
- self.include_dirs = self.distribution.include_dirs or []
- if type(self.include_dirs) is StringType:
- self.include_dirs = string.split(self.include_dirs,
- os.pathsep)
-
- # XXX same as for build_ext -- what about 'self.define' and
- # 'self.undef' ?
-
- # finalize_options()
-
-
- def run (self):
-
- if not self.libraries:
- return
-
- # Yech -- this is cut 'n pasted from build_ext.py!
- from distutils.ccompiler import new_compiler
- self.compiler = new_compiler(compiler=self.compiler,
- dry_run=self.dry_run,
- force=self.force)
- customize_compiler(self.compiler)
-
- if self.include_dirs is not None:
- self.compiler.set_include_dirs(self.include_dirs)
- if self.define is not None:
- # 'define' option is a list of (name,value) tuples
- for (name,value) in self.define:
- self.compiler.define_macro(name, value)
- if self.undef is not None:
- for macro in self.undef:
- self.compiler.undefine_macro(macro)
-
- self.build_libraries(self.libraries)
-
- # run()
-
-
- def check_library_list (self, libraries):
- """Ensure that the list of libraries (presumably provided as a
- command option 'libraries') is valid, i.e. it is a list of
- 2-tuples, where the tuples are (library_name, build_info_dict).
- Raise DistutilsSetupError if the structure is invalid anywhere;
- just returns otherwise."""
-
- # Yechh, blecch, ackk: this is ripped straight out of build_ext.py,
- # with only names changed to protect the innocent!
-
- if type(libraries) is not ListType:
- raise DistutilsSetupError, \
- "'libraries' option must be a list of tuples"
-
- for lib in libraries:
- if type(lib) is not TupleType and len(lib) != 2:
- raise DistutilsSetupError, \
- "each element of 'libraries' must a 2-tuple"
-
- if type(lib[0]) is not StringType:
- raise DistutilsSetupError, \
- "first element of each tuple in 'libraries' " + \
- "must be a string (the library name)"
- if '/' in lib[0] or (os.sep != '/' and os.sep in lib[0]):
- raise DistutilsSetupError, \
- ("bad library name '%s': " +
- "may not contain directory separators") % \
- lib[0]
-
- if type(lib[1]) is not DictionaryType:
- raise DistutilsSetupError, \
- "second element of each tuple in 'libraries' " + \
- "must be a dictionary (build info)"
- # for lib
-
- # check_library_list ()
-
-
- def get_library_names (self):
- # Assume the library list is valid -- 'check_library_list()' is
- # called from 'finalize_options()', so it should be!
-
- if not self.libraries:
- return None
-
- lib_names = []
- for (lib_name, build_info) in self.libraries:
- lib_names.append(lib_name)
- return lib_names
-
- # get_library_names ()
-
-
- def get_source_files (self):
- self.check_library_list(self.libraries)
- filenames = []
- for (lib_name, build_info) in self.libraries:
- sources = build_info.get('sources')
- if (sources is None or
- type(sources) not in (ListType, TupleType) ):
- raise DistutilsSetupError, \
- ("in 'libraries' option (library '%s'), "
- "'sources' must be present and must be "
- "a list of source filenames") % lib_name
-
- filenames.extend(sources)
-
- return filenames
- # get_source_files ()
-
-
- def build_libraries (self, libraries):
-
- for (lib_name, build_info) in libraries:
- sources = build_info.get('sources')
- if sources is None or type(sources) not in (ListType, TupleType):
- raise DistutilsSetupError, \
- ("in 'libraries' option (library '%s'), " +
- "'sources' must be present and must be " +
- "a list of source filenames") % lib_name
- sources = list(sources)
-
- log.info("building '%s' library", lib_name)
-
- # First, compile the source code to object files in the library
- # directory. (This should probably change to putting object
- # files in a temporary build directory.)
- macros = build_info.get('macros')
- include_dirs = build_info.get('include_dirs')
- objects = self.compiler.compile(sources,
- output_dir=self.build_temp,
- macros=macros,
- include_dirs=include_dirs,
- debug=self.debug)
-
- # Now "link" the object files together into a static library.
- # (On Unix at least, this isn't really linking -- it just
- # builds an archive. Whatever.)
- self.compiler.create_static_lib(objects, lib_name,
- output_dir=self.build_clib,
- debug=self.debug)
-
- # for libraries
-
- # build_libraries ()
-
-# class build_lib
diff --git a/sys/lib/python/distutils/command/build_ext.py b/sys/lib/python/distutils/command/build_ext.py
deleted file mode 100644
index a26fb9970..000000000
--- a/sys/lib/python/distutils/command/build_ext.py
+++ /dev/null
@@ -1,716 +0,0 @@
-"""distutils.command.build_ext
-
-Implements the Distutils 'build_ext' command, for building extension
-modules (currently limited to C extensions, should accommodate C++
-extensions ASAP)."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: build_ext.py 54332 2007-03-13 10:19:35Z georg.brandl $"
-
-import sys, os, string, re
-from types import *
-from distutils.core import Command
-from distutils.errors import *
-from distutils.sysconfig import customize_compiler, get_python_version
-from distutils.dep_util import newer_group
-from distutils.extension import Extension
-from distutils import log
-
-# An extension name is just a dot-separated list of Python NAMEs (ie.
-# the same as a fully-qualified module name).
-extension_name_re = re.compile \
- (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
-
-
-def show_compilers ():
- from distutils.ccompiler import show_compilers
- show_compilers()
-
-
-class build_ext (Command):
-
- description = "build C/C++ extensions (compile/link to build directory)"
-
- # XXX thoughts on how to deal with complex command-line options like
- # these, i.e. how to make it so fancy_getopt can suck them off the
- # command line and make it look like setup.py defined the appropriate
- # lists of tuples of what-have-you.
- # - each command needs a callback to process its command-line options
- # - Command.__init__() needs access to its share of the whole
- # command line (must ultimately come from
- # Distribution.parse_command_line())
- # - it then calls the current command class' option-parsing
- # callback to deal with weird options like -D, which have to
- # parse the option text and churn out some custom data
- # structure
- # - that data structure (in this case, a list of 2-tuples)
- # will then be present in the command object by the time
- # we get to finalize_options() (i.e. the constructor
- # takes care of both command-line and client options
- # in between initialize_options() and finalize_options())
-
- sep_by = " (separated by '%s')" % os.pathsep
- user_options = [
- ('build-lib=', 'b',
- "directory for compiled extension modules"),
- ('build-temp=', 't',
- "directory for temporary files (build by-products)"),
- ('inplace', 'i',
- "ignore build-lib and put compiled extensions into the source " +
- "directory alongside your pure Python modules"),
- ('include-dirs=', 'I',
- "list of directories to search for header files" + sep_by),
- ('define=', 'D',
- "C preprocessor macros to define"),
- ('undef=', 'U',
- "C preprocessor macros to undefine"),
- ('libraries=', 'l',
- "external C libraries to link with"),
- ('library-dirs=', 'L',
- "directories to search for external C libraries" + sep_by),
- ('rpath=', 'R',
- "directories to search for shared C libraries at runtime"),
- ('link-objects=', 'O',
- "extra explicit link objects to include in the link"),
- ('debug', 'g',
- "compile/link with debugging information"),
- ('force', 'f',
- "forcibly build everything (ignore file timestamps)"),
- ('compiler=', 'c',
- "specify the compiler type"),
- ('swig-cpp', None,
- "make SWIG create C++ files (default is C)"),
- ('swig-opts=', None,
- "list of SWIG command line options"),
- ('swig=', None,
- "path to the SWIG executable"),
- ]
-
- boolean_options = ['inplace', 'debug', 'force', 'swig-cpp']
-
- help_options = [
- ('help-compiler', None,
- "list available compilers", show_compilers),
- ]
-
- def initialize_options (self):
- self.extensions = None
- self.build_lib = None
- self.build_temp = None
- self.inplace = 0
- self.package = None
-
- self.include_dirs = None
- self.define = None
- self.undef = None
- self.libraries = None
- self.library_dirs = None
- self.rpath = None
- self.link_objects = None
- self.debug = None
- self.force = None
- self.compiler = None
- self.swig = None
- self.swig_cpp = None
- self.swig_opts = None
-
- def finalize_options (self):
- from distutils import sysconfig
-
- self.set_undefined_options('build',
- ('build_lib', 'build_lib'),
- ('build_temp', 'build_temp'),
- ('compiler', 'compiler'),
- ('debug', 'debug'),
- ('force', 'force'))
-
- if self.package is None:
- self.package = self.distribution.ext_package
-
- self.extensions = self.distribution.ext_modules
-
-
- # Make sure Python's include directories (for Python.h, pyconfig.h,
- # etc.) are in the include search path.
- py_include = sysconfig.get_python_inc()
- plat_py_include = sysconfig.get_python_inc(plat_specific=1)
- if self.include_dirs is None:
- self.include_dirs = self.distribution.include_dirs or []
- if type(self.include_dirs) is StringType:
- self.include_dirs = string.split(self.include_dirs, os.pathsep)
-
- # Put the Python "system" include dir at the end, so that
- # any local include dirs take precedence.
- self.include_dirs.append(py_include)
- if plat_py_include != py_include:
- self.include_dirs.append(plat_py_include)
-
- if type(self.libraries) is StringType:
- self.libraries = [self.libraries]
-
- # Life is easier if we're not forever checking for None, so
- # simplify these options to empty lists if unset
- if self.libraries is None:
- self.libraries = []
- if self.library_dirs is None:
- self.library_dirs = []
- elif type(self.library_dirs) is StringType:
- self.library_dirs = string.split(self.library_dirs, os.pathsep)
-
- if self.rpath is None:
- self.rpath = []
- elif type(self.rpath) is StringType:
- self.rpath = string.split(self.rpath, os.pathsep)
-
- # for extensions under windows use different directories
- # for Release and Debug builds.
- # also Python's library directory must be appended to library_dirs
- if os.name == 'nt':
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
- if self.debug:
- self.build_temp = os.path.join(self.build_temp, "Debug")
- else:
- self.build_temp = os.path.join(self.build_temp, "Release")
-
- # Append the source distribution include and library directories,
- # this allows distutils on windows to work in the source tree
- self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'PCBuild'))
-
- # OS/2 (EMX) doesn't support Debug vs Release builds, but has the
- # import libraries in its "Config" subdirectory
- if os.name == 'os2':
- self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))
-
- # for extensions under Cygwin and AtheOS Python's library directory must be
- # appended to library_dirs
- if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
- if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
- # building third party extensions
- self.library_dirs.append(os.path.join(sys.prefix, "lib",
- "python" + get_python_version(),
- "config"))
- else:
- # building python standard extensions
- self.library_dirs.append('.')
-
- # for extensions under Linux with a shared Python library,
- # Python's library directory must be appended to library_dirs
- if (sys.platform.startswith('linux') or sys.platform.startswith('gnu')) \
- and sysconfig.get_config_var('Py_ENABLE_SHARED'):
- if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
- # building third party extensions
- self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
- else:
- # building python standard extensions
- self.library_dirs.append('.')
-
- # The argument parsing will result in self.define being a string, but
- # it has to be a list of 2-tuples. All the preprocessor symbols
- # specified by the 'define' option will be set to '1'. Multiple
- # symbols can be separated with commas.
-
- if self.define:
- defines = string.split(self.define, ',')
- self.define = map(lambda symbol: (symbol, '1'), defines)
-
- # The option for macros to undefine is also a string from the
- # option parsing, but has to be a list. Multiple symbols can also
- # be separated with commas here.
- if self.undef:
- self.undef = string.split(self.undef, ',')
-
- if self.swig_opts is None:
- self.swig_opts = []
- else:
- self.swig_opts = self.swig_opts.split(' ')
-
- # finalize_options ()
-
-
- def run (self):
-
- from distutils.ccompiler import new_compiler
-
- # 'self.extensions', as supplied by setup.py, is a list of
- # Extension instances. See the documentation for Extension (in
- # distutils.extension) for details.
- #
- # For backwards compatibility with Distutils 0.8.2 and earlier, we
- # also allow the 'extensions' list to be a list of tuples:
- # (ext_name, build_info)
- # where build_info is a dictionary containing everything that
- # Extension instances do except the name, with a few things being
- # differently named. We convert these 2-tuples to Extension
- # instances as needed.
-
- if not self.extensions:
- return
-
- # If we were asked to build any C/C++ libraries, make sure that the
- # directory where we put them is in the library search path for
- # linking extensions.
- if self.distribution.has_c_libraries():
- build_clib = self.get_finalized_command('build_clib')
- self.libraries.extend(build_clib.get_library_names() or [])
- self.library_dirs.append(build_clib.build_clib)
-
- # Setup the CCompiler object that we'll use to do all the
- # compiling and linking
- self.compiler = new_compiler(compiler=self.compiler,
- verbose=self.verbose,
- dry_run=self.dry_run,
- force=self.force)
- customize_compiler(self.compiler)
-
- # And make sure that any compile/link-related options (which might
- # come from the command-line or from the setup script) are set in
- # that CCompiler object -- that way, they automatically apply to
- # all compiling and linking done here.
- if self.include_dirs is not None:
- self.compiler.set_include_dirs(self.include_dirs)
- if self.define is not None:
- # 'define' option is a list of (name,value) tuples
- for (name,value) in self.define:
- self.compiler.define_macro(name, value)
- if self.undef is not None:
- for macro in self.undef:
- self.compiler.undefine_macro(macro)
- if self.libraries is not None:
- self.compiler.set_libraries(self.libraries)
- if self.library_dirs is not None:
- self.compiler.set_library_dirs(self.library_dirs)
- if self.rpath is not None:
- self.compiler.set_runtime_library_dirs(self.rpath)
- if self.link_objects is not None:
- self.compiler.set_link_objects(self.link_objects)
-
- # Now actually compile and link everything.
- self.build_extensions()
-
- # run ()
-
-
- def check_extensions_list (self, extensions):
- """Ensure that the list of extensions (presumably provided as a
- command option 'extensions') is valid, i.e. it is a list of
- Extension objects. We also support the old-style list of 2-tuples,
- where the tuples are (ext_name, build_info), which are converted to
- Extension instances here.
-
- Raise DistutilsSetupError if the structure is invalid anywhere;
- just returns otherwise.
- """
- if type(extensions) is not ListType:
- raise DistutilsSetupError, \
- "'ext_modules' option must be a list of Extension instances"
-
- for i in range(len(extensions)):
- ext = extensions[i]
- if isinstance(ext, Extension):
- continue # OK! (assume type-checking done
- # by Extension constructor)
-
- (ext_name, build_info) = ext
- log.warn(("old-style (ext_name, build_info) tuple found in "
- "ext_modules for extension '%s'"
- "-- please convert to Extension instance" % ext_name))
- if type(ext) is not TupleType and len(ext) != 2:
- raise DistutilsSetupError, \
- ("each element of 'ext_modules' option must be an "
- "Extension instance or 2-tuple")
-
- if not (type(ext_name) is StringType and
- extension_name_re.match(ext_name)):
- raise DistutilsSetupError, \
- ("first element of each tuple in 'ext_modules' "
- "must be the extension name (a string)")
-
- if type(build_info) is not DictionaryType:
- raise DistutilsSetupError, \
- ("second element of each tuple in 'ext_modules' "
- "must be a dictionary (build info)")
-
- # OK, the (ext_name, build_info) dict is type-safe: convert it
- # to an Extension instance.
- ext = Extension(ext_name, build_info['sources'])
-
- # Easy stuff: one-to-one mapping from dict elements to
- # instance attributes.
- for key in ('include_dirs',
- 'library_dirs',
- 'libraries',
- 'extra_objects',
- 'extra_compile_args',
- 'extra_link_args'):
- val = build_info.get(key)
- if val is not None:
- setattr(ext, key, val)
-
- # Medium-easy stuff: same syntax/semantics, different names.
- ext.runtime_library_dirs = build_info.get('rpath')
- if build_info.has_key('def_file'):
- log.warn("'def_file' element of build info dict "
- "no longer supported")
-
- # Non-trivial stuff: 'macros' split into 'define_macros'
- # and 'undef_macros'.
- macros = build_info.get('macros')
- if macros:
- ext.define_macros = []
- ext.undef_macros = []
- for macro in macros:
- if not (type(macro) is TupleType and
- 1 <= len(macro) <= 2):
- raise DistutilsSetupError, \
- ("'macros' element of build info dict "
- "must be 1- or 2-tuple")
- if len(macro) == 1:
- ext.undef_macros.append(macro[0])
- elif len(macro) == 2:
- ext.define_macros.append(macro)
-
- extensions[i] = ext
-
- # for extensions
-
- # check_extensions_list ()
-
-
- def get_source_files (self):
- self.check_extensions_list(self.extensions)
- filenames = []
-
- # Wouldn't it be neat if we knew the names of header files too...
- for ext in self.extensions:
- filenames.extend(ext.sources)
-
- return filenames
-
-
- def get_outputs (self):
-
- # Sanity check the 'extensions' list -- can't assume this is being
- # done in the same run as a 'build_extensions()' call (in fact, we
- # can probably assume that it *isn't*!).
- self.check_extensions_list(self.extensions)
-
- # And build the list of output (built) filenames. Note that this
- # ignores the 'inplace' flag, and assumes everything goes in the
- # "build" tree.
- outputs = []
- for ext in self.extensions:
- fullname = self.get_ext_fullname(ext.name)
- outputs.append(os.path.join(self.build_lib,
- self.get_ext_filename(fullname)))
- return outputs
-
- # get_outputs ()
-
- def build_extensions(self):
- # First, sanity-check the 'extensions' list
- self.check_extensions_list(self.extensions)
-
- for ext in self.extensions:
- self.build_extension(ext)
-
- def build_extension(self, ext):
- sources = ext.sources
- if sources is None or type(sources) not in (ListType, TupleType):
- raise DistutilsSetupError, \
- ("in 'ext_modules' option (extension '%s'), " +
- "'sources' must be present and must be " +
- "a list of source filenames") % ext.name
- sources = list(sources)
-
- fullname = self.get_ext_fullname(ext.name)
- if self.inplace:
- # ignore build-lib -- put the compiled extension into
- # the source tree along with pure Python modules
-
- modpath = string.split(fullname, '.')
- package = string.join(modpath[0:-1], '.')
- base = modpath[-1]
-
- build_py = self.get_finalized_command('build_py')
- package_dir = build_py.get_package_dir(package)
- ext_filename = os.path.join(package_dir,
- self.get_ext_filename(base))
- else:
- ext_filename = os.path.join(self.build_lib,
- self.get_ext_filename(fullname))
- depends = sources + ext.depends
- if not (self.force or newer_group(depends, ext_filename, 'newer')):
- log.debug("skipping '%s' extension (up-to-date)", ext.name)
- return
- else:
- log.info("building '%s' extension", ext.name)
-
- # First, scan the sources for SWIG definition files (.i), run
- # SWIG on 'em to create .c files, and modify the sources list
- # accordingly.
- sources = self.swig_sources(sources, ext)
-
- # Next, compile the source code to object files.
-
- # XXX not honouring 'define_macros' or 'undef_macros' -- the
- # CCompiler API needs to change to accommodate this, and I
- # want to do one thing at a time!
-
- # Two possible sources for extra compiler arguments:
- # - 'extra_compile_args' in Extension object
- # - CFLAGS environment variable (not particularly
- # elegant, but people seem to expect it and I
- # guess it's useful)
- # The environment variable should take precedence, and
- # any sensible compiler will give precedence to later
- # command line args. Hence we combine them in order:
- extra_args = ext.extra_compile_args or []
-
- macros = ext.define_macros[:]
- for undef in ext.undef_macros:
- macros.append((undef,))
-
- objects = self.compiler.compile(sources,
- output_dir=self.build_temp,
- macros=macros,
- include_dirs=ext.include_dirs,
- debug=self.debug,
- extra_postargs=extra_args,
- depends=ext.depends)
-
- # XXX -- this is a Vile HACK!
- #
- # The setup.py script for Python on Unix needs to be able to
- # get this list so it can perform all the clean up needed to
- # avoid keeping object files around when cleaning out a failed
- # build of an extension module. Since Distutils does not
- # track dependencies, we have to get rid of intermediates to
- # ensure all the intermediates will be properly re-built.
- #
- self._built_objects = objects[:]
-
- # Now link the object files together into a "shared object" --
- # of course, first we have to figure out all the other things
- # that go into the mix.
- if ext.extra_objects:
- objects.extend(ext.extra_objects)
- extra_args = ext.extra_link_args or []
-
- # Detect target language, if not provided
- language = ext.language or self.compiler.detect_language(sources)
-
- self.compiler.link_shared_object(
- objects, ext_filename,
- libraries=self.get_libraries(ext),
- library_dirs=ext.library_dirs,
- runtime_library_dirs=ext.runtime_library_dirs,
- extra_postargs=extra_args,
- export_symbols=self.get_export_symbols(ext),
- debug=self.debug,
- build_temp=self.build_temp,
- target_lang=language)
-
-
- def swig_sources (self, sources, extension):
-
- """Walk the list of source files in 'sources', looking for SWIG
- interface (.i) files. Run SWIG on all that are found, and
- return a modified 'sources' list with SWIG source files replaced
- by the generated C (or C++) files.
- """
-
- new_sources = []
- swig_sources = []
- swig_targets = {}
-
- # XXX this drops generated C/C++ files into the source tree, which
- # is fine for developers who want to distribute the generated
- # source -- but there should be an option to put SWIG output in
- # the temp dir.
-
- if self.swig_cpp:
- log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
-
- if self.swig_cpp or ('-c++' in self.swig_opts):
- target_ext = '.cpp'
- else:
- target_ext = '.c'
-
- for source in sources:
- (base, ext) = os.path.splitext(source)
- if ext == ".i": # SWIG interface file
- new_sources.append(base + '_wrap' + target_ext)
- swig_sources.append(source)
- swig_targets[source] = new_sources[-1]
- else:
- new_sources.append(source)
-
- if not swig_sources:
- return new_sources
-
- swig = self.swig or self.find_swig()
- swig_cmd = [swig, "-python"]
- swig_cmd.extend(self.swig_opts)
- if self.swig_cpp:
- swig_cmd.append("-c++")
-
- # Do not override commandline arguments
- if not self.swig_opts:
- for o in extension.swig_opts:
- swig_cmd.append(o)
-
- for source in swig_sources:
- target = swig_targets[source]
- log.info("swigging %s to %s", source, target)
- self.spawn(swig_cmd + ["-o", target, source])
-
- return new_sources
-
- # swig_sources ()
-
- def find_swig (self):
- """Return the name of the SWIG executable. On Unix, this is
- just "swig" -- it should be in the PATH. Tries a bit harder on
- Windows.
- """
-
- if os.name == "posix":
- return "swig"
- elif os.name == "nt":
-
- # Look for SWIG in its standard installation directory on
- # Windows (or so I presume!). If we find it there, great;
- # if not, act like Unix and assume it's in the PATH.
- for vers in ("1.3", "1.2", "1.1"):
- fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
- if os.path.isfile(fn):
- return fn
- else:
- return "swig.exe"
-
- elif os.name == "os2":
- # assume swig available in the PATH.
- return "swig.exe"
-
- else:
- raise DistutilsPlatformError, \
- ("I don't know how to find (much less run) SWIG "
- "on platform '%s'") % os.name
-
- # find_swig ()
-
- # -- Name generators -----------------------------------------------
- # (extension names, filenames, whatever)
-
- def get_ext_fullname (self, ext_name):
- if self.package is None:
- return ext_name
- else:
- return self.package + '.' + ext_name
-
- def get_ext_filename (self, ext_name):
- r"""Convert the name of an extension (eg. "foo.bar") into the name
- of the file from which it will be loaded (eg. "foo/bar.so", or
- "foo\bar.pyd").
- """
-
- from distutils.sysconfig import get_config_var
- ext_path = string.split(ext_name, '.')
- # OS/2 has an 8 character module (extension) limit :-(
- if os.name == "os2":
- ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]
- # extensions in debug_mode are named 'module_d.pyd' under windows
- so_ext = get_config_var('SO')
- if os.name == 'nt' and self.debug:
- return apply(os.path.join, ext_path) + '_d' + so_ext
- return apply(os.path.join, ext_path) + so_ext
-
- def get_export_symbols (self, ext):
- """Return the list of symbols that a shared extension has to
- export. This either uses 'ext.export_symbols' or, if it's not
- provided, "init" + module_name. Only relevant on Windows, where
- the .pyd file (DLL) must export the module "init" function.
- """
-
- initfunc_name = "init" + string.split(ext.name,'.')[-1]
- if initfunc_name not in ext.export_symbols:
- ext.export_symbols.append(initfunc_name)
- return ext.export_symbols
-
- def get_libraries (self, ext):
- """Return the list of libraries to link against when building a
- shared extension. On most platforms, this is just 'ext.libraries';
- on Windows and OS/2, we add the Python library (eg. python20.dll).
- """
- # The python library is always needed on Windows. For MSVC, this
- # is redundant, since the library is mentioned in a pragma in
- # pyconfig.h that MSVC groks. The other Windows compilers all seem
- # to need it mentioned explicitly, though, so that's what we do.
- # Append '_d' to the python import library on debug builds.
- if sys.platform == "win32":
- from distutils.msvccompiler import MSVCCompiler
- if not isinstance(self.compiler, MSVCCompiler):
- template = "python%d%d"
- if self.debug:
- template = template + '_d'
- pythonlib = (template %
- (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
- # don't extend ext.libraries, it may be shared with other
- # extensions, it is a reference to the original list
- return ext.libraries + [pythonlib]
- else:
- return ext.libraries
- elif sys.platform == "os2emx":
- # EMX/GCC requires the python library explicitly, and I
- # believe VACPP does as well (though not confirmed) - AIM Apr01
- template = "python%d%d"
- # debug versions of the main DLL aren't supported, at least
- # not at this time - AIM Apr01
- #if self.debug:
- # template = template + '_d'
- pythonlib = (template %
- (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
- # don't extend ext.libraries, it may be shared with other
- # extensions, it is a reference to the original list
- return ext.libraries + [pythonlib]
- elif sys.platform[:6] == "cygwin":
- template = "python%d.%d"
- pythonlib = (template %
- (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
- # don't extend ext.libraries, it may be shared with other
- # extensions, it is a reference to the original list
- return ext.libraries + [pythonlib]
- elif sys.platform[:6] == "atheos":
- from distutils import sysconfig
-
- template = "python%d.%d"
- pythonlib = (template %
- (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
- # Get SHLIBS from Makefile
- extra = []
- for lib in sysconfig.get_config_var('SHLIBS').split():
- if lib.startswith('-l'):
- extra.append(lib[2:])
- else:
- extra.append(lib)
- # don't extend ext.libraries, it may be shared with other
- # extensions, it is a reference to the original list
- return ext.libraries + [pythonlib, "m"] + extra
-
- elif sys.platform == 'darwin':
- # Don't use the default code below
- return ext.libraries
-
- else:
- from distutils import sysconfig
- if sysconfig.get_config_var('Py_ENABLE_SHARED'):
- template = "python%d.%d"
- pythonlib = (template %
- (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
- return ext.libraries + [pythonlib]
- else:
- return ext.libraries
-
-# class build_ext
diff --git a/sys/lib/python/distutils/command/build_py.py b/sys/lib/python/distutils/command/build_py.py
deleted file mode 100644
index 7c2c9f9f2..000000000
--- a/sys/lib/python/distutils/command/build_py.py
+++ /dev/null
@@ -1,435 +0,0 @@
-"""distutils.command.build_py
-
-Implements the Distutils 'build_py' command."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: build_py.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import sys, string, os
-from types import *
-from glob import glob
-
-from distutils.core import Command
-from distutils.errors import *
-from distutils.util import convert_path
-from distutils import log
-
-class build_py (Command):
-
- description = "\"build\" pure Python modules (copy to build directory)"
-
- user_options = [
- ('build-lib=', 'd', "directory to \"build\" (copy) to"),
- ('compile', 'c', "compile .py to .pyc"),
- ('no-compile', None, "don't compile .py files [default]"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
- ('force', 'f', "forcibly build everything (ignore file timestamps)"),
- ]
-
- boolean_options = ['compile', 'force']
- negative_opt = {'no-compile' : 'compile'}
-
-
- def initialize_options (self):
- self.build_lib = None
- self.py_modules = None
- self.package = None
- self.package_data = None
- self.package_dir = None
- self.compile = 0
- self.optimize = 0
- self.force = None
-
- def finalize_options (self):
- self.set_undefined_options('build',
- ('build_lib', 'build_lib'),
- ('force', 'force'))
-
- # Get the distribution options that are aliases for build_py
- # options -- list of packages and list of modules.
- self.packages = self.distribution.packages
- self.py_modules = self.distribution.py_modules
- self.package_data = self.distribution.package_data
- self.package_dir = {}
- if self.distribution.package_dir:
- for name, path in self.distribution.package_dir.items():
- self.package_dir[name] = convert_path(path)
- self.data_files = self.get_data_files()
-
- # Ick, copied straight from install_lib.py (fancy_getopt needs a
- # type system! Hell, *everything* needs a type system!!!)
- if type(self.optimize) is not IntType:
- try:
- self.optimize = int(self.optimize)
- assert 0 <= self.optimize <= 2
- except (ValueError, AssertionError):
- raise DistutilsOptionError, "optimize must be 0, 1, or 2"
-
- def run (self):
-
- # XXX copy_file by default preserves atime and mtime. IMHO this is
- # the right thing to do, but perhaps it should be an option -- in
- # particular, a site administrator might want installed files to
- # reflect the time of installation rather than the last
- # modification time before the installed release.
-
- # XXX copy_file by default preserves mode, which appears to be the
- # wrong thing to do: if a file is read-only in the working
- # directory, we want it to be installed read/write so that the next
- # installation of the same module distribution can overwrite it
- # without problems. (This might be a Unix-specific issue.) Thus
- # we turn off 'preserve_mode' when copying to the build directory,
- # since the build directory is supposed to be exactly what the
- # installation will look like (ie. we preserve mode when
- # installing).
-
- # Two options control which modules will be installed: 'packages'
- # and 'py_modules'. The former lets us work with whole packages, not
- # specifying individual modules at all; the latter is for
- # specifying modules one-at-a-time.
-
- if self.py_modules:
- self.build_modules()
- if self.packages:
- self.build_packages()
- self.build_package_data()
-
- self.byte_compile(self.get_outputs(include_bytecode=0))
-
- # run ()
-
- def get_data_files (self):
- """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
- data = []
- if not self.packages:
- return data
- for package in self.packages:
- # Locate package source directory
- src_dir = self.get_package_dir(package)
-
- # Compute package build directory
- build_dir = os.path.join(*([self.build_lib] + package.split('.')))
-
- # Length of path to strip from found files
- plen = len(src_dir)+1
-
- # Strip directory from globbed filenames
- filenames = [
- file[plen:] for file in self.find_data_files(package, src_dir)
- ]
- data.append((package, src_dir, build_dir, filenames))
- return data
-
- def find_data_files (self, package, src_dir):
- """Return filenames for package's data files in 'src_dir'"""
- globs = (self.package_data.get('', [])
- + self.package_data.get(package, []))
- files = []
- for pattern in globs:
- # Each pattern has to be converted to a platform-specific path
- filelist = glob(os.path.join(src_dir, convert_path(pattern)))
- # Files that match more than one pattern are only added once
- files.extend([fn for fn in filelist if fn not in files])
- return files
-
- def build_package_data (self):
- """Copy data files into build directory"""
- lastdir = None
- for package, src_dir, build_dir, filenames in self.data_files:
- for filename in filenames:
- target = os.path.join(build_dir, filename)
- self.mkpath(os.path.dirname(target))
- self.copy_file(os.path.join(src_dir, filename), target,
- preserve_mode=False)
-
- def get_package_dir (self, package):
- """Return the directory, relative to the top of the source
- distribution, where package 'package' should be found
- (at least according to the 'package_dir' option, if any)."""
-
- path = string.split(package, '.')
-
- if not self.package_dir:
- if path:
- return apply(os.path.join, path)
- else:
- return ''
- else:
- tail = []
- while path:
- try:
- pdir = self.package_dir[string.join(path, '.')]
- except KeyError:
- tail.insert(0, path[-1])
- del path[-1]
- else:
- tail.insert(0, pdir)
- return apply(os.path.join, tail)
- else:
- # Oops, got all the way through 'path' without finding a
- # match in package_dir. If package_dir defines a directory
- # for the root (nameless) package, then fallback on it;
- # otherwise, we might as well have not consulted
- # package_dir at all, as we just use the directory implied
- # by 'tail' (which should be the same as the original value
- # of 'path' at this point).
- pdir = self.package_dir.get('')
- if pdir is not None:
- tail.insert(0, pdir)
-
- if tail:
- return apply(os.path.join, tail)
- else:
- return ''
-
- # get_package_dir ()
-
-
- def check_package (self, package, package_dir):
-
- # Empty dir name means current directory, which we can probably
- # assume exists. Also, os.path.exists and isdir don't know about
- # my "empty string means current dir" convention, so we have to
- # circumvent them.
- if package_dir != "":
- if not os.path.exists(package_dir):
- raise DistutilsFileError, \
- "package directory '%s' does not exist" % package_dir
- if not os.path.isdir(package_dir):
- raise DistutilsFileError, \
- ("supposed package directory '%s' exists, " +
- "but is not a directory") % package_dir
-
- # Require __init__.py for all but the "root package"
- if package:
- init_py = os.path.join(package_dir, "__init__.py")
- if os.path.isfile(init_py):
- return init_py
- else:
- log.warn(("package init file '%s' not found " +
- "(or not a regular file)"), init_py)
-
- # Either not in a package at all (__init__.py not expected), or
- # __init__.py doesn't exist -- so don't return the filename.
- return None
-
- # check_package ()
-
-
- def check_module (self, module, module_file):
- if not os.path.isfile(module_file):
- log.warn("file %s (for module %s) not found", module_file, module)
- return 0
- else:
- return 1
-
- # check_module ()
-
-
- def find_package_modules (self, package, package_dir):
- self.check_package(package, package_dir)
- module_files = glob(os.path.join(package_dir, "*.py"))
- modules = []
- setup_script = os.path.abspath(self.distribution.script_name)
-
- for f in module_files:
- abs_f = os.path.abspath(f)
- if abs_f != setup_script:
- module = os.path.splitext(os.path.basename(f))[0]
- modules.append((package, module, f))
- else:
- self.debug_print("excluding %s" % setup_script)
- return modules
-
-
- def find_modules (self):
- """Finds individually-specified Python modules, ie. those listed by
- module name in 'self.py_modules'. Returns a list of tuples (package,
- module_base, filename): 'package' is a tuple of the path through
- package-space to the module; 'module_base' is the bare (no
- packages, no dots) module name, and 'filename' is the path to the
- ".py" file (relative to the distribution root) that implements the
- module.
- """
-
- # Map package names to tuples of useful info about the package:
- # (package_dir, checked)
- # package_dir - the directory where we'll find source files for
- # this package
- # checked - true if we have checked that the package directory
- # is valid (exists, contains __init__.py, ... ?)
- packages = {}
-
- # List of (package, module, filename) tuples to return
- modules = []
-
- # We treat modules-in-packages almost the same as toplevel modules,
- # just the "package" for a toplevel is empty (either an empty
- # string or empty list, depending on context). Differences:
- # - don't check for __init__.py in directory for empty package
-
- for module in self.py_modules:
- path = string.split(module, '.')
- package = string.join(path[0:-1], '.')
- module_base = path[-1]
-
- try:
- (package_dir, checked) = packages[package]
- except KeyError:
- package_dir = self.get_package_dir(package)
- checked = 0
-
- if not checked:
- init_py = self.check_package(package, package_dir)
- packages[package] = (package_dir, 1)
- if init_py:
- modules.append((package, "__init__", init_py))
-
- # XXX perhaps we should also check for just .pyc files
- # (so greedy closed-source bastards can distribute Python
- # modules too)
- module_file = os.path.join(package_dir, module_base + ".py")
- if not self.check_module(module, module_file):
- continue
-
- modules.append((package, module_base, module_file))
-
- return modules
-
- # find_modules ()
-
-
- def find_all_modules (self):
- """Compute the list of all modules that will be built, whether
- they are specified one-module-at-a-time ('self.py_modules') or
- by whole packages ('self.packages'). Return a list of tuples
- (package, module, module_file), just like 'find_modules()' and
- 'find_package_modules()' do."""
-
- modules = []
- if self.py_modules:
- modules.extend(self.find_modules())
- if self.packages:
- for package in self.packages:
- package_dir = self.get_package_dir(package)
- m = self.find_package_modules(package, package_dir)
- modules.extend(m)
-
- return modules
-
- # find_all_modules ()
-
-
- def get_source_files (self):
-
- modules = self.find_all_modules()
- filenames = []
- for module in modules:
- filenames.append(module[-1])
-
- return filenames
-
-
- def get_module_outfile (self, build_dir, package, module):
- outfile_path = [build_dir] + list(package) + [module + ".py"]
- return apply(os.path.join, outfile_path)
-
-
- def get_outputs (self, include_bytecode=1):
- modules = self.find_all_modules()
- outputs = []
- for (package, module, module_file) in modules:
- package = string.split(package, '.')
- filename = self.get_module_outfile(self.build_lib, package, module)
- outputs.append(filename)
- if include_bytecode:
- if self.compile:
- outputs.append(filename + "c")
- if self.optimize > 0:
- outputs.append(filename + "o")
-
- outputs += [
- os.path.join(build_dir, filename)
- for package, src_dir, build_dir, filenames in self.data_files
- for filename in filenames
- ]
-
- return outputs
-
-
- def build_module (self, module, module_file, package):
- if type(package) is StringType:
- package = string.split(package, '.')
- elif type(package) not in (ListType, TupleType):
- raise TypeError, \
- "'package' must be a string (dot-separated), list, or tuple"
-
- # Now put the module source file into the "build" area -- this is
- # easy, we just copy it somewhere under self.build_lib (the build
- # directory for Python source).
- outfile = self.get_module_outfile(self.build_lib, package, module)
- dir = os.path.dirname(outfile)
- self.mkpath(dir)
- return self.copy_file(module_file, outfile, preserve_mode=0)
-
-
- def build_modules (self):
-
- modules = self.find_modules()
- for (package, module, module_file) in modules:
-
- # Now "build" the module -- ie. copy the source file to
- # self.build_lib (the build directory for Python source).
- # (Actually, it gets copied to the directory for this package
- # under self.build_lib.)
- self.build_module(module, module_file, package)
-
- # build_modules ()
-
-
- def build_packages (self):
-
- for package in self.packages:
-
- # Get list of (package, module, module_file) tuples based on
- # scanning the package directory. 'package' is only included
- # in the tuple so that 'find_modules()' and
- # 'find_package_tuples()' have a consistent interface; it's
- # ignored here (apart from a sanity check). Also, 'module' is
- # the *unqualified* module name (ie. no dots, no package -- we
- # already know its package!), and 'module_file' is the path to
- # the .py file, relative to the current directory
- # (ie. including 'package_dir').
- package_dir = self.get_package_dir(package)
- modules = self.find_package_modules(package, package_dir)
-
- # Now loop over the modules we found, "building" each one (just
- # copy it to self.build_lib).
- for (package_, module, module_file) in modules:
- assert package == package_
- self.build_module(module, module_file, package)
-
- # build_packages ()
-
-
- def byte_compile (self, files):
- from distutils.util import byte_compile
- prefix = self.build_lib
- if prefix[-1] != os.sep:
- prefix = prefix + os.sep
-
- # XXX this code is essentially the same as the 'byte_compile()
- # method of the "install_lib" command, except for the determination
- # of the 'prefix' string. Hmmm.
-
- if self.compile:
- byte_compile(files, optimize=0,
- force=self.force, prefix=prefix, dry_run=self.dry_run)
- if self.optimize > 0:
- byte_compile(files, optimize=self.optimize,
- force=self.force, prefix=prefix, dry_run=self.dry_run)
-
-# class build_py
diff --git a/sys/lib/python/distutils/command/build_scripts.py b/sys/lib/python/distutils/command/build_scripts.py
deleted file mode 100644
index acd824e8c..000000000
--- a/sys/lib/python/distutils/command/build_scripts.py
+++ /dev/null
@@ -1,131 +0,0 @@
-"""distutils.command.build_scripts
-
-Implements the Distutils 'build_scripts' command."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: build_scripts.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import sys, os, re
-from stat import ST_MODE
-from distutils import sysconfig
-from distutils.core import Command
-from distutils.dep_util import newer
-from distutils.util import convert_path
-from distutils import log
-
-# check if Python is called on the first line with this expression
-first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
-
-class build_scripts (Command):
-
- description = "\"build\" scripts (copy and fixup #! line)"
-
- user_options = [
- ('build-dir=', 'd', "directory to \"build\" (copy) to"),
- ('force', 'f', "forcibly build everything (ignore file timestamps"),
- ('executable=', 'e', "specify final destination interpreter path"),
- ]
-
- boolean_options = ['force']
-
-
- def initialize_options (self):
- self.build_dir = None
- self.scripts = None
- self.force = None
- self.executable = None
- self.outfiles = None
-
- def finalize_options (self):
- self.set_undefined_options('build',
- ('build_scripts', 'build_dir'),
- ('force', 'force'),
- ('executable', 'executable'))
- self.scripts = self.distribution.scripts
-
- def get_source_files(self):
- return self.scripts
-
- def run (self):
- if not self.scripts:
- return
- self.copy_scripts()
-
-
- def copy_scripts (self):
- """Copy each script listed in 'self.scripts'; if it's marked as a
- Python script in the Unix way (first line matches 'first_line_re',
- ie. starts with "\#!" and contains "python"), then adjust the first
- line to refer to the current Python interpreter as we copy.
- """
- self.mkpath(self.build_dir)
- outfiles = []
- for script in self.scripts:
- adjust = 0
- script = convert_path(script)
- outfile = os.path.join(self.build_dir, os.path.basename(script))
- outfiles.append(outfile)
-
- if not self.force and not newer(script, outfile):
- log.debug("not copying %s (up-to-date)", script)
- continue
-
- # Always open the file, but ignore failures in dry-run mode --
- # that way, we'll get accurate feedback if we can read the
- # script.
- try:
- f = open(script, "r")
- except IOError:
- if not self.dry_run:
- raise
- f = None
- else:
- first_line = f.readline()
- if not first_line:
- self.warn("%s is an empty file (skipping)" % script)
- continue
-
- match = first_line_re.match(first_line)
- if match:
- adjust = 1
- post_interp = match.group(1) or ''
-
- if adjust:
- log.info("copying and adjusting %s -> %s", script,
- self.build_dir)
- if not self.dry_run:
- outf = open(outfile, "w")
- if not sysconfig.python_build:
- outf.write("#!%s%s\n" %
- (self.executable,
- post_interp))
- else:
- outf.write("#!%s%s\n" %
- (os.path.join(
- sysconfig.get_config_var("BINDIR"),
- "python" + sysconfig.get_config_var("EXE")),
- post_interp))
- outf.writelines(f.readlines())
- outf.close()
- if f:
- f.close()
- else:
- f.close()
- self.copy_file(script, outfile)
-
- if os.name == 'posix':
- for file in outfiles:
- if self.dry_run:
- log.info("changing mode of %s", file)
- else:
- oldmode = os.stat(file)[ST_MODE] & 07777
- newmode = (oldmode | 0555) & 07777
- if newmode != oldmode:
- log.info("changing mode of %s from %o to %o",
- file, oldmode, newmode)
- os.chmod(file, newmode)
-
- # copy_scripts ()
-
-# class build_scripts
diff --git a/sys/lib/python/distutils/command/clean.py b/sys/lib/python/distutils/command/clean.py
deleted file mode 100644
index f0c35cfa2..000000000
--- a/sys/lib/python/distutils/command/clean.py
+++ /dev/null
@@ -1,82 +0,0 @@
-"""distutils.command.clean
-
-Implements the Distutils 'clean' command."""
-
-# contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: clean.py 38532 2005-03-03 08:12:27Z loewis $"
-
-import os
-from distutils.core import Command
-from distutils.dir_util import remove_tree
-from distutils import log
-
-class clean (Command):
-
- description = "clean up temporary files from 'build' command"
- user_options = [
- ('build-base=', 'b',
- "base build directory (default: 'build.build-base')"),
- ('build-lib=', None,
- "build directory for all modules (default: 'build.build-lib')"),
- ('build-temp=', 't',
- "temporary build directory (default: 'build.build-temp')"),
- ('build-scripts=', None,
- "build directory for scripts (default: 'build.build-scripts')"),
- ('bdist-base=', None,
- "temporary directory for built distributions"),
- ('all', 'a',
- "remove all build output, not just temporary by-products")
- ]
-
- boolean_options = ['all']
-
- def initialize_options(self):
- self.build_base = None
- self.build_lib = None
- self.build_temp = None
- self.build_scripts = None
- self.bdist_base = None
- self.all = None
-
- def finalize_options(self):
- self.set_undefined_options('build',
- ('build_base', 'build_base'),
- ('build_lib', 'build_lib'),
- ('build_scripts', 'build_scripts'),
- ('build_temp', 'build_temp'))
- self.set_undefined_options('bdist',
- ('bdist_base', 'bdist_base'))
-
- def run(self):
- # remove the build/temp.<plat> directory (unless it's already
- # gone)
- if os.path.exists(self.build_temp):
- remove_tree(self.build_temp, dry_run=self.dry_run)
- else:
- log.debug("'%s' does not exist -- can't clean it",
- self.build_temp)
-
- if self.all:
- # remove build directories
- for directory in (self.build_lib,
- self.bdist_base,
- self.build_scripts):
- if os.path.exists(directory):
- remove_tree(directory, dry_run=self.dry_run)
- else:
- log.warn("'%s' does not exist -- can't clean it",
- directory)
-
- # just for the heck of it, try to remove the base build directory:
- # we might have emptied it right now, but if not we don't care
- if not self.dry_run:
- try:
- os.rmdir(self.build_base)
- log.info("removing '%s'", self.build_base)
- except OSError:
- pass
-
-# class clean
diff --git a/sys/lib/python/distutils/command/command_template b/sys/lib/python/distutils/command/command_template
deleted file mode 100644
index 50bbab7b6..000000000
--- a/sys/lib/python/distutils/command/command_template
+++ /dev/null
@@ -1,45 +0,0 @@
-"""distutils.command.x
-
-Implements the Distutils 'x' command.
-"""
-
-# created 2000/mm/dd, John Doe
-
-__revision__ = "$Id$"
-
-from distutils.core import Command
-
-
-class x (Command):
-
- # Brief (40-50 characters) description of the command
- description = ""
-
- # List of option tuples: long name, short name (None if no short
- # name), and help string.
- user_options = [('', '',
- ""),
- ]
-
-
- def initialize_options (self):
- self. = None
- self. = None
- self. = None
-
- # initialize_options()
-
-
- def finalize_options (self):
- if self.x is None:
- self.x =
-
- # finalize_options()
-
-
- def run (self):
-
-
- # run()
-
-# class x
diff --git a/sys/lib/python/distutils/command/config.py b/sys/lib/python/distutils/command/config.py
deleted file mode 100644
index b9b54b023..000000000
--- a/sys/lib/python/distutils/command/config.py
+++ /dev/null
@@ -1,368 +0,0 @@
-"""distutils.command.config
-
-Implements the Distutils 'config' command, a (mostly) empty command class
-that exists mainly to be sub-classed by specific module distributions and
-applications. The idea is that while every "config" command is different,
-at least they're all named the same, and users always see "config" in the
-list of standard commands. Also, this is a good place to put common
-configure-like tasks: "try to compile this C code", or "figure out where
-this header file lives".
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: config.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import sys, os, string, re
-from types import *
-from distutils.core import Command
-from distutils.errors import DistutilsExecError
-from distutils.sysconfig import customize_compiler
-from distutils import log
-
-LANG_EXT = {'c': '.c',
- 'c++': '.cxx'}
-
-class config (Command):
-
- description = "prepare to build"
-
- user_options = [
- ('compiler=', None,
- "specify the compiler type"),
- ('cc=', None,
- "specify the compiler executable"),
- ('include-dirs=', 'I',
- "list of directories to search for header files"),
- ('define=', 'D',
- "C preprocessor macros to define"),
- ('undef=', 'U',
- "C preprocessor macros to undefine"),
- ('libraries=', 'l',
- "external C libraries to link with"),
- ('library-dirs=', 'L',
- "directories to search for external C libraries"),
-
- ('noisy', None,
- "show every action (compile, link, run, ...) taken"),
- ('dump-source', None,
- "dump generated source files before attempting to compile them"),
- ]
-
-
- # The three standard command methods: since the "config" command
- # does nothing by default, these are empty.
-
- def initialize_options (self):
- self.compiler = None
- self.cc = None
- self.include_dirs = None
- #self.define = None
- #self.undef = None
- self.libraries = None
- self.library_dirs = None
-
- # maximal output for now
- self.noisy = 1
- self.dump_source = 1
-
- # list of temporary files generated along-the-way that we have
- # to clean at some point
- self.temp_files = []
-
- def finalize_options (self):
- if self.include_dirs is None:
- self.include_dirs = self.distribution.include_dirs or []
- elif type(self.include_dirs) is StringType:
- self.include_dirs = string.split(self.include_dirs, os.pathsep)
-
- if self.libraries is None:
- self.libraries = []
- elif type(self.libraries) is StringType:
- self.libraries = [self.libraries]
-
- if self.library_dirs is None:
- self.library_dirs = []
- elif type(self.library_dirs) is StringType:
- self.library_dirs = string.split(self.library_dirs, os.pathsep)
-
-
- def run (self):
- pass
-
-
- # Utility methods for actual "config" commands. The interfaces are
- # loosely based on Autoconf macros of similar names. Sub-classes
- # may use these freely.
-
- def _check_compiler (self):
- """Check that 'self.compiler' really is a CCompiler object;
- if not, make it one.
- """
- # We do this late, and only on-demand, because this is an expensive
- # import.
- from distutils.ccompiler import CCompiler, new_compiler
- if not isinstance(self.compiler, CCompiler):
- self.compiler = new_compiler(compiler=self.compiler,
- dry_run=self.dry_run, force=1)
- customize_compiler(self.compiler)
- if self.include_dirs:
- self.compiler.set_include_dirs(self.include_dirs)
- if self.libraries:
- self.compiler.set_libraries(self.libraries)
- if self.library_dirs:
- self.compiler.set_library_dirs(self.library_dirs)
-
-
- def _gen_temp_sourcefile (self, body, headers, lang):
- filename = "_configtest" + LANG_EXT[lang]
- file = open(filename, "w")
- if headers:
- for header in headers:
- file.write("#include <%s>\n" % header)
- file.write("\n")
- file.write(body)
- if body[-1] != "\n":
- file.write("\n")
- file.close()
- return filename
-
- def _preprocess (self, body, headers, include_dirs, lang):
- src = self._gen_temp_sourcefile(body, headers, lang)
- out = "_configtest.i"
- self.temp_files.extend([src, out])
- self.compiler.preprocess(src, out, include_dirs=include_dirs)
- return (src, out)
-
- def _compile (self, body, headers, include_dirs, lang):
- src = self._gen_temp_sourcefile(body, headers, lang)
- if self.dump_source:
- dump_file(src, "compiling '%s':" % src)
- (obj,) = self.compiler.object_filenames([src])
- self.temp_files.extend([src, obj])
- self.compiler.compile([src], include_dirs=include_dirs)
- return (src, obj)
-
- def _link (self, body,
- headers, include_dirs,
- libraries, library_dirs, lang):
- (src, obj) = self._compile(body, headers, include_dirs, lang)
- prog = os.path.splitext(os.path.basename(src))[0]
- self.compiler.link_executable([obj], prog,
- libraries=libraries,
- library_dirs=library_dirs,
- target_lang=lang)
-
- if self.compiler.exe_extension is not None:
- prog = prog + self.compiler.exe_extension
- self.temp_files.append(prog)
-
- return (src, obj, prog)
-
- def _clean (self, *filenames):
- if not filenames:
- filenames = self.temp_files
- self.temp_files = []
- log.info("removing: %s", string.join(filenames))
- for filename in filenames:
- try:
- os.remove(filename)
- except OSError:
- pass
-
-
- # XXX these ignore the dry-run flag: what to do, what to do? even if
- # you want a dry-run build, you still need some sort of configuration
- # info. My inclination is to make it up to the real config command to
- # consult 'dry_run', and assume a default (minimal) configuration if
- # true. The problem with trying to do it here is that you'd have to
- # return either true or false from all the 'try' methods, neither of
- # which is correct.
-
- # XXX need access to the header search path and maybe default macros.
-
- def try_cpp (self, body=None, headers=None, include_dirs=None, lang="c"):
- """Construct a source file from 'body' (a string containing lines
- of C/C++ code) and 'headers' (a list of header files to include)
- and run it through the preprocessor. Return true if the
- preprocessor succeeded, false if there were any errors.
- ('body' probably isn't of much use, but what the heck.)
- """
- from distutils.ccompiler import CompileError
- self._check_compiler()
- ok = 1
- try:
- self._preprocess(body, headers, include_dirs, lang)
- except CompileError:
- ok = 0
-
- self._clean()
- return ok
-
- def search_cpp (self, pattern, body=None,
- headers=None, include_dirs=None, lang="c"):
- """Construct a source file (just like 'try_cpp()'), run it through
- the preprocessor, and return true if any line of the output matches
- 'pattern'. 'pattern' should either be a compiled regex object or a
- string containing a regex. If both 'body' and 'headers' are None,
- preprocesses an empty file -- which can be useful to determine the
- symbols the preprocessor and compiler set by default.
- """
-
- self._check_compiler()
- (src, out) = self._preprocess(body, headers, include_dirs, lang)
-
- if type(pattern) is StringType:
- pattern = re.compile(pattern)
-
- file = open(out)
- match = 0
- while 1:
- line = file.readline()
- if line == '':
- break
- if pattern.search(line):
- match = 1
- break
-
- file.close()
- self._clean()
- return match
-
- def try_compile (self, body, headers=None, include_dirs=None, lang="c"):
- """Try to compile a source file built from 'body' and 'headers'.
- Return true on success, false otherwise.
- """
- from distutils.ccompiler import CompileError
- self._check_compiler()
- try:
- self._compile(body, headers, include_dirs, lang)
- ok = 1
- except CompileError:
- ok = 0
-
- log.info(ok and "success!" or "failure.")
- self._clean()
- return ok
-
- def try_link (self, body,
- headers=None, include_dirs=None,
- libraries=None, library_dirs=None,
- lang="c"):
- """Try to compile and link a source file, built from 'body' and
- 'headers', to executable form. Return true on success, false
- otherwise.
- """
- from distutils.ccompiler import CompileError, LinkError
- self._check_compiler()
- try:
- self._link(body, headers, include_dirs,
- libraries, library_dirs, lang)
- ok = 1
- except (CompileError, LinkError):
- ok = 0
-
- log.info(ok and "success!" or "failure.")
- self._clean()
- return ok
-
- def try_run (self, body,
- headers=None, include_dirs=None,
- libraries=None, library_dirs=None,
- lang="c"):
- """Try to compile, link to an executable, and run a program
- built from 'body' and 'headers'. Return true on success, false
- otherwise.
- """
- from distutils.ccompiler import CompileError, LinkError
- self._check_compiler()
- try:
- src, obj, exe = self._link(body, headers, include_dirs,
- libraries, library_dirs, lang)
- self.spawn([exe])
- ok = 1
- except (CompileError, LinkError, DistutilsExecError):
- ok = 0
-
- log.info(ok and "success!" or "failure.")
- self._clean()
- return ok
-
-
- # -- High-level methods --------------------------------------------
- # (these are the ones that are actually likely to be useful
- # when implementing a real-world config command!)
-
- def check_func (self, func,
- headers=None, include_dirs=None,
- libraries=None, library_dirs=None,
- decl=0, call=0):
-
- """Determine if function 'func' is available by constructing a
- source file that refers to 'func', and compiles and links it.
- If everything succeeds, returns true; otherwise returns false.
-
- The constructed source file starts out by including the header
- files listed in 'headers'. If 'decl' is true, it then declares
- 'func' (as "int func()"); you probably shouldn't supply 'headers'
- and set 'decl' true in the same call, or you might get errors about
- a conflicting declarations for 'func'. Finally, the constructed
- 'main()' function either references 'func' or (if 'call' is true)
- calls it. 'libraries' and 'library_dirs' are used when
- linking.
- """
-
- self._check_compiler()
- body = []
- if decl:
- body.append("int %s ();" % func)
- body.append("int main () {")
- if call:
- body.append(" %s();" % func)
- else:
- body.append(" %s;" % func)
- body.append("}")
- body = string.join(body, "\n") + "\n"
-
- return self.try_link(body, headers, include_dirs,
- libraries, library_dirs)
-
- # check_func ()
-
- def check_lib (self, library, library_dirs=None,
- headers=None, include_dirs=None, other_libraries=[]):
- """Determine if 'library' is available to be linked against,
- without actually checking that any particular symbols are provided
- by it. 'headers' will be used in constructing the source file to
- be compiled, but the only effect of this is to check if all the
- header files listed are available. Any libraries listed in
- 'other_libraries' will be included in the link, in case 'library'
- has symbols that depend on other libraries.
- """
- self._check_compiler()
- return self.try_link("int main (void) { }",
- headers, include_dirs,
- [library]+other_libraries, library_dirs)
-
- def check_header (self, header, include_dirs=None,
- library_dirs=None, lang="c"):
- """Determine if the system header file named by 'header_file'
- exists and can be found by the preprocessor; return true if so,
- false otherwise.
- """
- return self.try_cpp(body="/* No body */", headers=[header],
- include_dirs=include_dirs)
-
-
-# class config
-
-
-def dump_file (filename, head=None):
- if head is None:
- print filename + ":"
- else:
- print head
-
- file = open(filename)
- sys.stdout.write(file.read())
- file.close()
diff --git a/sys/lib/python/distutils/command/install.py b/sys/lib/python/distutils/command/install.py
deleted file mode 100644
index 8e270021f..000000000
--- a/sys/lib/python/distutils/command/install.py
+++ /dev/null
@@ -1,607 +0,0 @@
-"""distutils.command.install
-
-Implements the Distutils 'install' command."""
-
-from distutils import log
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: install.py 43363 2006-03-27 21:55:21Z phillip.eby $"
-
-import sys, os, string
-from types import *
-from distutils.core import Command
-from distutils.debug import DEBUG
-from distutils.sysconfig import get_config_vars
-from distutils.errors import DistutilsPlatformError
-from distutils.file_util import write_file
-from distutils.util import convert_path, subst_vars, change_root
-from distutils.errors import DistutilsOptionError
-from glob import glob
-
-if sys.version < "2.2":
- WINDOWS_SCHEME = {
- 'purelib': '$base',
- 'platlib': '$base',
- 'headers': '$base/Include/$dist_name',
- 'scripts': '$base/Scripts',
- 'data' : '$base',
- }
-else:
- WINDOWS_SCHEME = {
- 'purelib': '$base/Lib/site-packages',
- 'platlib': '$base/Lib/site-packages',
- 'headers': '$base/Include/$dist_name',
- 'scripts': '$base/Scripts',
- 'data' : '$base',
- }
-
-INSTALL_SCHEMES = {
- 'unix_prefix': {
- 'purelib': '$base/lib/python$py_version_short/site-packages',
- 'platlib': '$platbase/lib/python$py_version_short/site-packages',
- 'headers': '$base/include/python$py_version_short/$dist_name',
- 'scripts': '$base/bin',
- 'data' : '$base',
- },
- 'unix_home': {
- 'purelib': '$base/lib/python',
- 'platlib': '$base/lib/python',
- 'headers': '$base/include/python/$dist_name',
- 'scripts': '$base/bin',
- 'data' : '$base',
- },
- 'nt': WINDOWS_SCHEME,
- 'mac': {
- 'purelib': '$base/Lib/site-packages',
- 'platlib': '$base/Lib/site-packages',
- 'headers': '$base/Include/$dist_name',
- 'scripts': '$base/Scripts',
- 'data' : '$base',
- },
- 'os2': {
- 'purelib': '$base/Lib/site-packages',
- 'platlib': '$base/Lib/site-packages',
- 'headers': '$base/Include/$dist_name',
- 'scripts': '$base/Scripts',
- 'data' : '$base',
- }
- }
-
-# The keys to an installation scheme; if any new types of files are to be
-# installed, be sure to add an entry to every installation scheme above,
-# and to SCHEME_KEYS here.
-SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
-
-
-class install (Command):
-
- description = "install everything from build directory"
-
- user_options = [
- # Select installation scheme and set base director(y|ies)
- ('prefix=', None,
- "installation prefix"),
- ('exec-prefix=', None,
- "(Unix only) prefix for platform-specific files"),
- ('home=', None,
- "(Unix only) home directory to install under"),
-
- # Or, just set the base director(y|ies)
- ('install-base=', None,
- "base installation directory (instead of --prefix or --home)"),
- ('install-platbase=', None,
- "base installation directory for platform-specific files " +
- "(instead of --exec-prefix or --home)"),
- ('root=', None,
- "install everything relative to this alternate root directory"),
-
- # Or, explicitly set the installation scheme
- ('install-purelib=', None,
- "installation directory for pure Python module distributions"),
- ('install-platlib=', None,
- "installation directory for non-pure module distributions"),
- ('install-lib=', None,
- "installation directory for all module distributions " +
- "(overrides --install-purelib and --install-platlib)"),
-
- ('install-headers=', None,
- "installation directory for C/C++ headers"),
- ('install-scripts=', None,
- "installation directory for Python scripts"),
- ('install-data=', None,
- "installation directory for data files"),
-
- # Byte-compilation options -- see install_lib.py for details, as
- # these are duplicated from there (but only install_lib does
- # anything with them).
- ('compile', 'c', "compile .py to .pyc [default]"),
- ('no-compile', None, "don't compile .py files"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
-
- # Miscellaneous control options
- ('force', 'f',
- "force installation (overwrite any existing files)"),
- ('skip-build', None,
- "skip rebuilding everything (for testing/debugging)"),
-
- # Where to install documentation (eventually!)
- #('doc-format=', None, "format of documentation to generate"),
- #('install-man=', None, "directory for Unix man pages"),
- #('install-html=', None, "directory for HTML documentation"),
- #('install-info=', None, "directory for GNU info files"),
-
- ('record=', None,
- "filename in which to record list of installed files"),
- ]
-
- boolean_options = ['compile', 'force', 'skip-build']
- negative_opt = {'no-compile' : 'compile'}
-
-
- def initialize_options (self):
-
- # High-level options: these select both an installation base
- # and scheme.
- self.prefix = None
- self.exec_prefix = None
- self.home = None
-
- # These select only the installation base; it's up to the user to
- # specify the installation scheme (currently, that means supplying
- # the --install-{platlib,purelib,scripts,data} options).
- self.install_base = None
- self.install_platbase = None
- self.root = None
-
- # These options are the actual installation directories; if not
- # supplied by the user, they are filled in using the installation
- # scheme implied by prefix/exec-prefix/home and the contents of
- # that installation scheme.
- self.install_purelib = None # for pure module distributions
- self.install_platlib = None # non-pure (dists w/ extensions)
- self.install_headers = None # for C/C++ headers
- self.install_lib = None # set to either purelib or platlib
- self.install_scripts = None
- self.install_data = None
-
- self.compile = None
- self.optimize = None
-
- # These two are for putting non-packagized distributions into their
- # own directory and creating a .pth file if it makes sense.
- # 'extra_path' comes from the setup file; 'install_path_file' can
- # be turned off if it makes no sense to install a .pth file. (But
- # better to install it uselessly than to guess wrong and not
- # install it when it's necessary and would be used!) Currently,
- # 'install_path_file' is always true unless some outsider meddles
- # with it.
- self.extra_path = None
- self.install_path_file = 1
-
- # 'force' forces installation, even if target files are not
- # out-of-date. 'skip_build' skips running the "build" command,
- # handy if you know it's not necessary. 'warn_dir' (which is *not*
- # a user option, it's just there so the bdist_* commands can turn
- # it off) determines whether we warn about installing to a
- # directory not in sys.path.
- self.force = 0
- self.skip_build = 0
- self.warn_dir = 1
-
- # These are only here as a conduit from the 'build' command to the
- # 'install_*' commands that do the real work. ('build_base' isn't
- # actually used anywhere, but it might be useful in future.) They
- # are not user options, because if the user told the install
- # command where the build directory is, that wouldn't affect the
- # build command.
- self.build_base = None
- self.build_lib = None
-
- # Not defined yet because we don't know anything about
- # documentation yet.
- #self.install_man = None
- #self.install_html = None
- #self.install_info = None
-
- self.record = None
-
-
- # -- Option finalizing methods -------------------------------------
- # (This is rather more involved than for most commands,
- # because this is where the policy for installing third-
- # party Python modules on various platforms given a wide
- # array of user input is decided. Yes, it's quite complex!)
-
- def finalize_options (self):
-
- # This method (and its pliant slaves, like 'finalize_unix()',
- # 'finalize_other()', and 'select_scheme()') is where the default
- # installation directories for modules, extension modules, and
- # anything else we care to install from a Python module
- # distribution. Thus, this code makes a pretty important policy
- # statement about how third-party stuff is added to a Python
- # installation! Note that the actual work of installation is done
- # by the relatively simple 'install_*' commands; they just take
- # their orders from the installation directory options determined
- # here.
-
- # Check for errors/inconsistencies in the options; first, stuff
- # that's wrong on any platform.
-
- if ((self.prefix or self.exec_prefix or self.home) and
- (self.install_base or self.install_platbase)):
- raise DistutilsOptionError, \
- ("must supply either prefix/exec-prefix/home or " +
- "install-base/install-platbase -- not both")
-
- if self.home and (self.prefix or self.exec_prefix):
- raise DistutilsOptionError, \
- "must supply either home or prefix/exec-prefix -- not both"
-
- # Next, stuff that's wrong (or dubious) only on certain platforms.
- if os.name != "posix":
- if self.exec_prefix:
- self.warn("exec-prefix option ignored on this platform")
- self.exec_prefix = None
-
- # Now the interesting logic -- so interesting that we farm it out
- # to other methods. The goal of these methods is to set the final
- # values for the install_{lib,scripts,data,...} options, using as
- # input a heady brew of prefix, exec_prefix, home, install_base,
- # install_platbase, user-supplied versions of
- # install_{purelib,platlib,lib,scripts,data,...}, and the
- # INSTALL_SCHEME dictionary above. Phew!
-
- self.dump_dirs("pre-finalize_{unix,other}")
-
- if os.name == 'posix':
- self.finalize_unix()
- else:
- self.finalize_other()
-
- self.dump_dirs("post-finalize_{unix,other}()")
-
- # Expand configuration variables, tilde, etc. in self.install_base
- # and self.install_platbase -- that way, we can use $base or
- # $platbase in the other installation directories and not worry
- # about needing recursive variable expansion (shudder).
-
- py_version = (string.split(sys.version))[0]
- (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
- self.config_vars = {'dist_name': self.distribution.get_name(),
- 'dist_version': self.distribution.get_version(),
- 'dist_fullname': self.distribution.get_fullname(),
- 'py_version': py_version,
- 'py_version_short': py_version[0:3],
- 'sys_prefix': prefix,
- 'prefix': prefix,
- 'sys_exec_prefix': exec_prefix,
- 'exec_prefix': exec_prefix,
- }
- self.expand_basedirs()
-
- self.dump_dirs("post-expand_basedirs()")
-
- # Now define config vars for the base directories so we can expand
- # everything else.
- self.config_vars['base'] = self.install_base
- self.config_vars['platbase'] = self.install_platbase
-
- if DEBUG:
- from pprint import pprint
- print "config vars:"
- pprint(self.config_vars)
-
- # Expand "~" and configuration variables in the installation
- # directories.
- self.expand_dirs()
-
- self.dump_dirs("post-expand_dirs()")
-
- # Pick the actual directory to install all modules to: either
- # install_purelib or install_platlib, depending on whether this
- # module distribution is pure or not. Of course, if the user
- # already specified install_lib, use their selection.
- if self.install_lib is None:
- if self.distribution.ext_modules: # has extensions: non-pure
- self.install_lib = self.install_platlib
- else:
- self.install_lib = self.install_purelib
-
-
- # Convert directories from Unix /-separated syntax to the local
- # convention.
- self.convert_paths('lib', 'purelib', 'platlib',
- 'scripts', 'data', 'headers')
-
- # Well, we're not actually fully completely finalized yet: we still
- # have to deal with 'extra_path', which is the hack for allowing
- # non-packagized module distributions (hello, Numerical Python!) to
- # get their own directories.
- self.handle_extra_path()
- self.install_libbase = self.install_lib # needed for .pth file
- self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
-
- # If a new root directory was supplied, make all the installation
- # dirs relative to it.
- if self.root is not None:
- self.change_roots('libbase', 'lib', 'purelib', 'platlib',
- 'scripts', 'data', 'headers')
-
- self.dump_dirs("after prepending root")
-
- # Find out the build directories, ie. where to install from.
- self.set_undefined_options('build',
- ('build_base', 'build_base'),
- ('build_lib', 'build_lib'))
-
- # Punt on doc directories for now -- after all, we're punting on
- # documentation completely!
-
- # finalize_options ()
-
-
- def dump_dirs (self, msg):
- if DEBUG:
- from distutils.fancy_getopt import longopt_xlate
- print msg + ":"
- for opt in self.user_options:
- opt_name = opt[0]
- if opt_name[-1] == "=":
- opt_name = opt_name[0:-1]
- if self.negative_opt.has_key(opt_name):
- opt_name = string.translate(self.negative_opt[opt_name],
- longopt_xlate)
- val = not getattr(self, opt_name)
- else:
- opt_name = string.translate(opt_name, longopt_xlate)
- val = getattr(self, opt_name)
- print " %s: %s" % (opt_name, val)
-
-
- def finalize_unix (self):
-
- if self.install_base is not None or self.install_platbase is not None:
- if ((self.install_lib is None and
- self.install_purelib is None and
- self.install_platlib is None) or
- self.install_headers is None or
- self.install_scripts is None or
- self.install_data is None):
- raise DistutilsOptionError, \
- ("install-base or install-platbase supplied, but "
- "installation scheme is incomplete")
- return
-
- if self.home is not None:
- self.install_base = self.install_platbase = self.home
- self.select_scheme("unix_home")
- else:
- if self.prefix is None:
- if self.exec_prefix is not None:
- raise DistutilsOptionError, \
- "must not supply exec-prefix without prefix"
-
- self.prefix = os.path.normpath(sys.prefix)
- self.exec_prefix = os.path.normpath(sys.exec_prefix)
-
- else:
- if self.exec_prefix is None:
- self.exec_prefix = self.prefix
-
- self.install_base = self.prefix
- self.install_platbase = self.exec_prefix
- self.select_scheme("unix_prefix")
-
- # finalize_unix ()
-
-
- def finalize_other (self): # Windows and Mac OS for now
-
- if self.home is not None:
- self.install_base = self.install_platbase = self.home
- self.select_scheme("unix_home")
- else:
- if self.prefix is None:
- self.prefix = os.path.normpath(sys.prefix)
-
- self.install_base = self.install_platbase = self.prefix
- try:
- self.select_scheme(os.name)
- except KeyError:
- raise DistutilsPlatformError, \
- "I don't know how to install stuff on '%s'" % os.name
-
- # finalize_other ()
-
-
- def select_scheme (self, name):
- # it's the caller's problem if they supply a bad name!
- scheme = INSTALL_SCHEMES[name]
- for key in SCHEME_KEYS:
- attrname = 'install_' + key
- if getattr(self, attrname) is None:
- setattr(self, attrname, scheme[key])
-
-
- def _expand_attrs (self, attrs):
- for attr in attrs:
- val = getattr(self, attr)
- if val is not None:
- if os.name == 'posix':
- val = os.path.expanduser(val)
- val = subst_vars(val, self.config_vars)
- setattr(self, attr, val)
-
-
- def expand_basedirs (self):
- self._expand_attrs(['install_base',
- 'install_platbase',
- 'root'])
-
- def expand_dirs (self):
- self._expand_attrs(['install_purelib',
- 'install_platlib',
- 'install_lib',
- 'install_headers',
- 'install_scripts',
- 'install_data',])
-
-
- def convert_paths (self, *names):
- for name in names:
- attr = "install_" + name
- setattr(self, attr, convert_path(getattr(self, attr)))
-
-
- def handle_extra_path (self):
-
- if self.extra_path is None:
- self.extra_path = self.distribution.extra_path
-
- if self.extra_path is not None:
- if type(self.extra_path) is StringType:
- self.extra_path = string.split(self.extra_path, ',')
-
- if len(self.extra_path) == 1:
- path_file = extra_dirs = self.extra_path[0]
- elif len(self.extra_path) == 2:
- (path_file, extra_dirs) = self.extra_path
- else:
- raise DistutilsOptionError, \
- ("'extra_path' option must be a list, tuple, or "
- "comma-separated string with 1 or 2 elements")
-
- # convert to local form in case Unix notation used (as it
- # should be in setup scripts)
- extra_dirs = convert_path(extra_dirs)
-
- else:
- path_file = None
- extra_dirs = ''
-
- # XXX should we warn if path_file and not extra_dirs? (in which
- # case the path file would be harmless but pointless)
- self.path_file = path_file
- self.extra_dirs = extra_dirs
-
- # handle_extra_path ()
-
-
- def change_roots (self, *names):
- for name in names:
- attr = "install_" + name
- setattr(self, attr, change_root(self.root, getattr(self, attr)))
-
-
- # -- Command execution methods -------------------------------------
-
- def run (self):
-
- # Obviously have to build before we can install
- if not self.skip_build:
- self.run_command('build')
-
- # Run all sub-commands (at least those that need to be run)
- for cmd_name in self.get_sub_commands():
- self.run_command(cmd_name)
-
- if self.path_file:
- self.create_path_file()
-
- # write list of installed files, if requested.
- if self.record:
- outputs = self.get_outputs()
- if self.root: # strip any package prefix
- root_len = len(self.root)
- for counter in xrange(len(outputs)):
- outputs[counter] = outputs[counter][root_len:]
- self.execute(write_file,
- (self.record, outputs),
- "writing list of installed files to '%s'" %
- self.record)
-
- sys_path = map(os.path.normpath, sys.path)
- sys_path = map(os.path.normcase, sys_path)
- install_lib = os.path.normcase(os.path.normpath(self.install_lib))
- if (self.warn_dir and
- not (self.path_file and self.install_path_file) and
- install_lib not in sys_path):
- log.debug(("modules installed to '%s', which is not in "
- "Python's module search path (sys.path) -- "
- "you'll have to change the search path yourself"),
- self.install_lib)
-
- # run ()
-
- def create_path_file (self):
- filename = os.path.join(self.install_libbase,
- self.path_file + ".pth")
- if self.install_path_file:
- self.execute(write_file,
- (filename, [self.extra_dirs]),
- "creating %s" % filename)
- else:
- self.warn("path file '%s' not created" % filename)
-
-
- # -- Reporting methods ---------------------------------------------
-
- def get_outputs (self):
- # Assemble the outputs of all the sub-commands.
- outputs = []
- for cmd_name in self.get_sub_commands():
- cmd = self.get_finalized_command(cmd_name)
- # Add the contents of cmd.get_outputs(), ensuring
- # that outputs doesn't contain duplicate entries
- for filename in cmd.get_outputs():
- if filename not in outputs:
- outputs.append(filename)
-
- if self.path_file and self.install_path_file:
- outputs.append(os.path.join(self.install_libbase,
- self.path_file + ".pth"))
-
- return outputs
-
- def get_inputs (self):
- # XXX gee, this looks familiar ;-(
- inputs = []
- for cmd_name in self.get_sub_commands():
- cmd = self.get_finalized_command(cmd_name)
- inputs.extend(cmd.get_inputs())
-
- return inputs
-
-
- # -- Predicates for sub-command list -------------------------------
-
- def has_lib (self):
- """Return true if the current distribution has any Python
- modules to install."""
- return (self.distribution.has_pure_modules() or
- self.distribution.has_ext_modules())
-
- def has_headers (self):
- return self.distribution.has_headers()
-
- def has_scripts (self):
- return self.distribution.has_scripts()
-
- def has_data (self):
- return self.distribution.has_data_files()
-
-
- # 'sub_commands': a list of commands this command might have to run to
- # get its work done. See cmd.py for more info.
- sub_commands = [('install_lib', has_lib),
- ('install_headers', has_headers),
- ('install_scripts', has_scripts),
- ('install_data', has_data),
- ('install_egg_info', lambda self:True),
- ]
-
-# class install
diff --git a/sys/lib/python/distutils/command/install_data.py b/sys/lib/python/distutils/command/install_data.py
deleted file mode 100644
index f5f087cac..000000000
--- a/sys/lib/python/distutils/command/install_data.py
+++ /dev/null
@@ -1,85 +0,0 @@
-"""distutils.command.install_data
-
-Implements the Distutils 'install_data' command, for installing
-platform-independent data files."""
-
-# contributed by Bastian Kleineidam
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: install_data.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os
-from types import StringType
-from distutils.core import Command
-from distutils.util import change_root, convert_path
-
-class install_data (Command):
-
- description = "install data files"
-
- user_options = [
- ('install-dir=', 'd',
- "base directory for installing data files "
- "(default: installation base dir)"),
- ('root=', None,
- "install everything relative to this alternate root directory"),
- ('force', 'f', "force installation (overwrite existing files)"),
- ]
-
- boolean_options = ['force']
-
- def initialize_options (self):
- self.install_dir = None
- self.outfiles = []
- self.root = None
- self.force = 0
-
- self.data_files = self.distribution.data_files
- self.warn_dir = 1
-
- def finalize_options (self):
- self.set_undefined_options('install',
- ('install_data', 'install_dir'),
- ('root', 'root'),
- ('force', 'force'),
- )
-
- def run (self):
- self.mkpath(self.install_dir)
- for f in self.data_files:
- if type(f) is StringType:
- # it's a simple file, so copy it
- f = convert_path(f)
- if self.warn_dir:
- self.warn("setup script did not provide a directory for "
- "'%s' -- installing right in '%s'" %
- (f, self.install_dir))
- (out, _) = self.copy_file(f, self.install_dir)
- self.outfiles.append(out)
- else:
- # it's a tuple with path to install to and a list of files
- dir = convert_path(f[0])
- if not os.path.isabs(dir):
- dir = os.path.join(self.install_dir, dir)
- elif self.root:
- dir = change_root(self.root, dir)
- self.mkpath(dir)
-
- if f[1] == []:
- # If there are no files listed, the user must be
- # trying to create an empty directory, so add the
- # directory to the list of output files.
- self.outfiles.append(dir)
- else:
- # Copy files, adding them to the list of output files.
- for data in f[1]:
- data = convert_path(data)
- (out, _) = self.copy_file(data, dir)
- self.outfiles.append(out)
-
- def get_inputs (self):
- return self.data_files or []
-
- def get_outputs (self):
- return self.outfiles
diff --git a/sys/lib/python/distutils/command/install_egg_info.py b/sys/lib/python/distutils/command/install_egg_info.py
deleted file mode 100644
index c8880310d..000000000
--- a/sys/lib/python/distutils/command/install_egg_info.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""distutils.command.install_egg_info
-
-Implements the Distutils 'install_egg_info' command, for installing
-a package's PKG-INFO metadata."""
-
-
-from distutils.cmd import Command
-from distutils import log, dir_util
-import os, sys, re
-
-class install_egg_info(Command):
- """Install an .egg-info file for the package"""
-
- description = "Install package's PKG-INFO metadata as an .egg-info file"
- user_options = [
- ('install-dir=', 'd', "directory to install to"),
- ]
-
- def initialize_options(self):
- self.install_dir = None
-
- def finalize_options(self):
- self.set_undefined_options('install_lib',('install_dir','install_dir'))
- basename = "%s-%s-py%s.egg-info" % (
- to_filename(safe_name(self.distribution.get_name())),
- to_filename(safe_version(self.distribution.get_version())),
- sys.version[:3]
- )
- self.target = os.path.join(self.install_dir, basename)
- self.outputs = [self.target]
-
- def run(self):
- target = self.target
- if os.path.isdir(target) and not os.path.islink(target):
- dir_util.remove_tree(target, dry_run=self.dry_run)
- elif os.path.exists(target):
- self.execute(os.unlink,(self.target,),"Removing "+target)
- elif not os.path.isdir(self.install_dir):
- self.execute(os.makedirs, (self.install_dir,),
- "Creating "+self.install_dir)
- log.info("Writing %s", target)
- if not self.dry_run:
- f = open(target, 'w')
- self.distribution.metadata.write_pkg_file(f)
- f.close()
-
- def get_outputs(self):
- return self.outputs
-
-
-# The following routines are taken from setuptools' pkg_resources module and
-# can be replaced by importing them from pkg_resources once it is included
-# in the stdlib.
-
-def safe_name(name):
- """Convert an arbitrary string to a standard distribution name
-
- Any runs of non-alphanumeric/. characters are replaced with a single '-'.
- """
- return re.sub('[^A-Za-z0-9.]+', '-', name)
-
-
-def safe_version(version):
- """Convert an arbitrary string to a standard version string
-
- Spaces become dots, and all other non-alphanumeric characters become
- dashes, with runs of multiple dashes condensed to a single dash.
- """
- version = version.replace(' ','.')
- return re.sub('[^A-Za-z0-9.]+', '-', version)
-
-
-def to_filename(name):
- """Convert a project or version name to its filename-escaped form
-
- Any '-' characters are currently replaced with '_'.
- """
- return name.replace('-','_')
diff --git a/sys/lib/python/distutils/command/install_headers.py b/sys/lib/python/distutils/command/install_headers.py
deleted file mode 100644
index eacc1ee35..000000000
--- a/sys/lib/python/distutils/command/install_headers.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""distutils.command.install_headers
-
-Implements the Distutils 'install_headers' command, to install C/C++ header
-files to the Python include directory."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: install_headers.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os
-from distutils.core import Command
-
-
-class install_headers (Command):
-
- description = "install C/C++ header files"
-
- user_options = [('install-dir=', 'd',
- "directory to install header files to"),
- ('force', 'f',
- "force installation (overwrite existing files)"),
- ]
-
- boolean_options = ['force']
-
- def initialize_options (self):
- self.install_dir = None
- self.force = 0
- self.outfiles = []
-
- def finalize_options (self):
- self.set_undefined_options('install',
- ('install_headers', 'install_dir'),
- ('force', 'force'))
-
-
- def run (self):
- headers = self.distribution.headers
- if not headers:
- return
-
- self.mkpath(self.install_dir)
- for header in headers:
- (out, _) = self.copy_file(header, self.install_dir)
- self.outfiles.append(out)
-
- def get_inputs (self):
- return self.distribution.headers or []
-
- def get_outputs (self):
- return self.outfiles
-
-# class install_headers
diff --git a/sys/lib/python/distutils/command/install_lib.py b/sys/lib/python/distutils/command/install_lib.py
deleted file mode 100644
index e73c146b6..000000000
--- a/sys/lib/python/distutils/command/install_lib.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: install_lib.py 37946 2004-12-02 20:14:16Z lemburg $"
-
-import sys, os, string
-from types import IntType
-from distutils.core import Command
-from distutils.errors import DistutilsOptionError
-
-
-# Extension for Python source files.
-if hasattr(os, 'extsep'):
- PYTHON_SOURCE_EXTENSION = os.extsep + "py"
-else:
- PYTHON_SOURCE_EXTENSION = ".py"
-
-class install_lib (Command):
-
- description = "install all Python modules (extensions and pure Python)"
-
- # The byte-compilation options are a tad confusing. Here are the
- # possible scenarios:
- # 1) no compilation at all (--no-compile --no-optimize)
- # 2) compile .pyc only (--compile --no-optimize; default)
- # 3) compile .pyc and "level 1" .pyo (--compile --optimize)
- # 4) compile "level 1" .pyo only (--no-compile --optimize)
- # 5) compile .pyc and "level 2" .pyo (--compile --optimize-more)
- # 6) compile "level 2" .pyo only (--no-compile --optimize-more)
- #
- # The UI for this is two option, 'compile' and 'optimize'.
- # 'compile' is strictly boolean, and only decides whether to
- # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and
- # decides both whether to generate .pyo files and what level of
- # optimization to use.
-
- user_options = [
- ('install-dir=', 'd', "directory to install to"),
- ('build-dir=','b', "build directory (where to install from)"),
- ('force', 'f', "force installation (overwrite existing files)"),
- ('compile', 'c', "compile .py to .pyc [default]"),
- ('no-compile', None, "don't compile .py files"),
- ('optimize=', 'O',
- "also compile with optimization: -O1 for \"python -O\", "
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
- ('skip-build', None, "skip the build steps"),
- ]
-
- boolean_options = ['force', 'compile', 'skip-build']
- negative_opt = {'no-compile' : 'compile'}
-
-
- def initialize_options (self):
- # let the 'install' command dictate our installation directory
- self.install_dir = None
- self.build_dir = None
- self.force = 0
- self.compile = None
- self.optimize = None
- self.skip_build = None
-
- def finalize_options (self):
-
- # Get all the information we need to install pure Python modules
- # from the umbrella 'install' command -- build (source) directory,
- # install (target) directory, and whether to compile .py files.
- self.set_undefined_options('install',
- ('build_lib', 'build_dir'),
- ('install_lib', 'install_dir'),
- ('force', 'force'),
- ('compile', 'compile'),
- ('optimize', 'optimize'),
- ('skip_build', 'skip_build'),
- )
-
- if self.compile is None:
- self.compile = 1
- if self.optimize is None:
- self.optimize = 0
-
- if type(self.optimize) is not IntType:
- try:
- self.optimize = int(self.optimize)
- assert 0 <= self.optimize <= 2
- except (ValueError, AssertionError):
- raise DistutilsOptionError, "optimize must be 0, 1, or 2"
-
- def run (self):
-
- # Make sure we have built everything we need first
- self.build()
-
- # Install everything: simply dump the entire contents of the build
- # directory to the installation directory (that's the beauty of
- # having a build directory!)
- outfiles = self.install()
-
- # (Optionally) compile .py to .pyc
- if outfiles is not None and self.distribution.has_pure_modules():
- self.byte_compile(outfiles)
-
- # run ()
-
-
- # -- Top-level worker functions ------------------------------------
- # (called from 'run()')
-
- def build (self):
- if not self.skip_build:
- if self.distribution.has_pure_modules():
- self.run_command('build_py')
- if self.distribution.has_ext_modules():
- self.run_command('build_ext')
-
- def install (self):
- if os.path.isdir(self.build_dir):
- outfiles = self.copy_tree(self.build_dir, self.install_dir)
- else:
- self.warn("'%s' does not exist -- no Python modules to install" %
- self.build_dir)
- return
- return outfiles
-
- def byte_compile (self, files):
- from distutils.util import byte_compile
-
- # Get the "--root" directory supplied to the "install" command,
- # and use it as a prefix to strip off the purported filename
- # encoded in bytecode files. This is far from complete, but it
- # should at least generate usable bytecode in RPM distributions.
- install_root = self.get_finalized_command('install').root
-
- if self.compile:
- byte_compile(files, optimize=0,
- force=self.force, prefix=install_root,
- dry_run=self.dry_run)
- if self.optimize > 0:
- byte_compile(files, optimize=self.optimize,
- force=self.force, prefix=install_root,
- verbose=self.verbose, dry_run=self.dry_run)
-
-
- # -- Utility methods -----------------------------------------------
-
- def _mutate_outputs (self, has_any, build_cmd, cmd_option, output_dir):
-
- if not has_any:
- return []
-
- build_cmd = self.get_finalized_command(build_cmd)
- build_files = build_cmd.get_outputs()
- build_dir = getattr(build_cmd, cmd_option)
-
- prefix_len = len(build_dir) + len(os.sep)
- outputs = []
- for file in build_files:
- outputs.append(os.path.join(output_dir, file[prefix_len:]))
-
- return outputs
-
- # _mutate_outputs ()
-
- def _bytecode_filenames (self, py_filenames):
- bytecode_files = []
- for py_file in py_filenames:
- # Since build_py handles package data installation, the
- # list of outputs can contain more than just .py files.
- # Make sure we only report bytecode for the .py files.
- ext = os.path.splitext(os.path.normcase(py_file))[1]
- if ext != PYTHON_SOURCE_EXTENSION:
- continue
- if self.compile:
- bytecode_files.append(py_file + "c")
- if self.optimize > 0:
- bytecode_files.append(py_file + "o")
-
- return bytecode_files
-
-
- # -- External interface --------------------------------------------
- # (called by outsiders)
-
- def get_outputs (self):
- """Return the list of files that would be installed if this command
- were actually run. Not affected by the "dry-run" flag or whether
- modules have actually been built yet.
- """
- pure_outputs = \
- self._mutate_outputs(self.distribution.has_pure_modules(),
- 'build_py', 'build_lib',
- self.install_dir)
- if self.compile:
- bytecode_outputs = self._bytecode_filenames(pure_outputs)
- else:
- bytecode_outputs = []
-
- ext_outputs = \
- self._mutate_outputs(self.distribution.has_ext_modules(),
- 'build_ext', 'build_lib',
- self.install_dir)
-
- return pure_outputs + bytecode_outputs + ext_outputs
-
- # get_outputs ()
-
- def get_inputs (self):
- """Get the list of files that are input to this command, ie. the
- files that get installed as they are named in the build tree.
- The files in this list correspond one-to-one to the output
- filenames returned by 'get_outputs()'.
- """
- inputs = []
-
- if self.distribution.has_pure_modules():
- build_py = self.get_finalized_command('build_py')
- inputs.extend(build_py.get_outputs())
-
- if self.distribution.has_ext_modules():
- build_ext = self.get_finalized_command('build_ext')
- inputs.extend(build_ext.get_outputs())
-
- return inputs
-
-# class install_lib
diff --git a/sys/lib/python/distutils/command/install_scripts.py b/sys/lib/python/distutils/command/install_scripts.py
deleted file mode 100644
index 025ca4063..000000000
--- a/sys/lib/python/distutils/command/install_scripts.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""distutils.command.install_scripts
-
-Implements the Distutils 'install_scripts' command, for installing
-Python scripts."""
-
-# contributed by Bastian Kleineidam
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: install_scripts.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os
-from distutils.core import Command
-from distutils import log
-from stat import ST_MODE
-
-class install_scripts (Command):
-
- description = "install scripts (Python or otherwise)"
-
- user_options = [
- ('install-dir=', 'd', "directory to install scripts to"),
- ('build-dir=','b', "build directory (where to install from)"),
- ('force', 'f', "force installation (overwrite existing files)"),
- ('skip-build', None, "skip the build steps"),
- ]
-
- boolean_options = ['force', 'skip-build']
-
-
- def initialize_options (self):
- self.install_dir = None
- self.force = 0
- self.build_dir = None
- self.skip_build = None
-
- def finalize_options (self):
- self.set_undefined_options('build', ('build_scripts', 'build_dir'))
- self.set_undefined_options('install',
- ('install_scripts', 'install_dir'),
- ('force', 'force'),
- ('skip_build', 'skip_build'),
- )
-
- def run (self):
- if not self.skip_build:
- self.run_command('build_scripts')
- self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
- if os.name == 'posix':
- # Set the executable bits (owner, group, and world) on
- # all the scripts we just installed.
- for file in self.get_outputs():
- if self.dry_run:
- log.info("changing mode of %s", file)
- else:
- mode = ((os.stat(file)[ST_MODE]) | 0555) & 07777
- log.info("changing mode of %s to %o", file, mode)
- os.chmod(file, mode)
-
- def get_inputs (self):
- return self.distribution.scripts or []
-
- def get_outputs(self):
- return self.outfiles or []
-
-# class install_scripts
diff --git a/sys/lib/python/distutils/command/register.py b/sys/lib/python/distutils/command/register.py
deleted file mode 100644
index 5b958a4bb..000000000
--- a/sys/lib/python/distutils/command/register.py
+++ /dev/null
@@ -1,294 +0,0 @@
-"""distutils.command.register
-
-Implements the Distutils 'register' command (register with the repository).
-"""
-
-# created 2002/10/21, Richard Jones
-
-__revision__ = "$Id: register.py 52243 2006-10-09 17:13:26Z andrew.kuchling $"
-
-import sys, os, string, urllib2, getpass, urlparse
-import StringIO, ConfigParser
-
-from distutils.core import Command
-from distutils.errors import *
-
-class register(Command):
-
- description = ("register the distribution with the Python package index")
-
- DEFAULT_REPOSITORY = 'http://www.python.org/pypi'
-
- user_options = [
- ('repository=', 'r',
- "url of repository [default: %s]"%DEFAULT_REPOSITORY),
- ('list-classifiers', None,
- 'list the valid Trove classifiers'),
- ('show-response', None,
- 'display full response text from server'),
- ]
- boolean_options = ['verify', 'show-response', 'list-classifiers']
-
- def initialize_options(self):
- self.repository = None
- self.show_response = 0
- self.list_classifiers = 0
-
- def finalize_options(self):
- if self.repository is None:
- self.repository = self.DEFAULT_REPOSITORY
-
- def run(self):
- self.check_metadata()
- if self.dry_run:
- self.verify_metadata()
- elif self.list_classifiers:
- self.classifiers()
- else:
- self.send_metadata()
-
- def check_metadata(self):
- """Ensure that all required elements of meta-data (name, version,
- URL, (author and author_email) or (maintainer and
- maintainer_email)) are supplied by the Distribution object; warn if
- any are missing.
- """
- metadata = self.distribution.metadata
-
- missing = []
- for attr in ('name', 'version', 'url'):
- if not (hasattr(metadata, attr) and getattr(metadata, attr)):
- missing.append(attr)
-
- if missing:
- self.warn("missing required meta-data: " +
- string.join(missing, ", "))
-
- if metadata.author:
- if not metadata.author_email:
- self.warn("missing meta-data: if 'author' supplied, " +
- "'author_email' must be supplied too")
- elif metadata.maintainer:
- if not metadata.maintainer_email:
- self.warn("missing meta-data: if 'maintainer' supplied, " +
- "'maintainer_email' must be supplied too")
- else:
- self.warn("missing meta-data: either (author and author_email) " +
- "or (maintainer and maintainer_email) " +
- "must be supplied")
-
- def classifiers(self):
- ''' Fetch the list of classifiers from the server.
- '''
- response = urllib2.urlopen(self.repository+'?:action=list_classifiers')
- print response.read()
-
- def verify_metadata(self):
- ''' Send the metadata to the package index server to be checked.
- '''
- # send the info to the server and report the result
- (code, result) = self.post_to_server(self.build_post_data('verify'))
- print 'Server response (%s): %s'%(code, result)
-
- def send_metadata(self):
- ''' Send the metadata to the package index server.
-
- Well, do the following:
- 1. figure who the user is, and then
- 2. send the data as a Basic auth'ed POST.
-
- First we try to read the username/password from $HOME/.pypirc,
- which is a ConfigParser-formatted file with a section
- [server-login] containing username and password entries (both
- in clear text). Eg:
-
- [server-login]
- username: fred
- password: sekrit
-
- Otherwise, to figure who the user is, we offer the user three
- choices:
-
- 1. use existing login,
- 2. register as a new user, or
- 3. set the password to a random string and email the user.
-
- '''
- choice = 'x'
- username = password = ''
-
- # see if we can short-cut and get the username/password from the
- # config
- config = None
- if os.environ.has_key('HOME'):
- rc = os.path.join(os.environ['HOME'], '.pypirc')
- if os.path.exists(rc):
- print 'Using PyPI login from %s'%rc
- config = ConfigParser.ConfigParser()
- config.read(rc)
- username = config.get('server-login', 'username')
- password = config.get('server-login', 'password')
- choice = '1'
-
- # get the user's login info
- choices = '1 2 3 4'.split()
- while choice not in choices:
- print '''We need to know who you are, so please choose either:
- 1. use your existing login,
- 2. register as a new user,
- 3. have the server generate a new password for you (and email it to you), or
- 4. quit
-Your selection [default 1]: ''',
- choice = raw_input()
- if not choice:
- choice = '1'
- elif choice not in choices:
- print 'Please choose one of the four options!'
-
- if choice == '1':
- # get the username and password
- while not username:
- username = raw_input('Username: ')
- while not password:
- password = getpass.getpass('Password: ')
-
- # set up the authentication
- auth = urllib2.HTTPPasswordMgr()
- host = urlparse.urlparse(self.repository)[1]
- auth.add_password('pypi', host, username, password)
-
- # send the info to the server and report the result
- code, result = self.post_to_server(self.build_post_data('submit'),
- auth)
- print 'Server response (%s): %s'%(code, result)
-
- # possibly save the login
- if os.environ.has_key('HOME') and config is None and code == 200:
- rc = os.path.join(os.environ['HOME'], '.pypirc')
- print 'I can store your PyPI login so future submissions will be faster.'
- print '(the login will be stored in %s)'%rc
- choice = 'X'
- while choice.lower() not in 'yn':
- choice = raw_input('Save your login (y/N)?')
- if not choice:
- choice = 'n'
- if choice.lower() == 'y':
- f = open(rc, 'w')
- f.write('[server-login]\nusername:%s\npassword:%s\n'%(
- username, password))
- f.close()
- try:
- os.chmod(rc, 0600)
- except:
- pass
- elif choice == '2':
- data = {':action': 'user'}
- data['name'] = data['password'] = data['email'] = ''
- data['confirm'] = None
- while not data['name']:
- data['name'] = raw_input('Username: ')
- while data['password'] != data['confirm']:
- while not data['password']:
- data['password'] = getpass.getpass('Password: ')
- while not data['confirm']:
- data['confirm'] = getpass.getpass(' Confirm: ')
- if data['password'] != data['confirm']:
- data['password'] = ''
- data['confirm'] = None
- print "Password and confirm don't match!"
- while not data['email']:
- data['email'] = raw_input(' EMail: ')
- code, result = self.post_to_server(data)
- if code != 200:
- print 'Server response (%s): %s'%(code, result)
- else:
- print 'You will receive an email shortly.'
- print 'Follow the instructions in it to complete registration.'
- elif choice == '3':
- data = {':action': 'password_reset'}
- data['email'] = ''
- while not data['email']:
- data['email'] = raw_input('Your email address: ')
- code, result = self.post_to_server(data)
- print 'Server response (%s): %s'%(code, result)
-
- def build_post_data(self, action):
- # figure the data to send - the metadata plus some additional
- # information used by the package server
- meta = self.distribution.metadata
- data = {
- ':action': action,
- 'metadata_version' : '1.0',
- 'name': meta.get_name(),
- 'version': meta.get_version(),
- 'summary': meta.get_description(),
- 'home_page': meta.get_url(),
- 'author': meta.get_contact(),
- 'author_email': meta.get_contact_email(),
- 'license': meta.get_licence(),
- 'description': meta.get_long_description(),
- 'keywords': meta.get_keywords(),
- 'platform': meta.get_platforms(),
- 'classifiers': meta.get_classifiers(),
- 'download_url': meta.get_download_url(),
- # PEP 314
- 'provides': meta.get_provides(),
- 'requires': meta.get_requires(),
- 'obsoletes': meta.get_obsoletes(),
- }
- if data['provides'] or data['requires'] or data['obsoletes']:
- data['metadata_version'] = '1.1'
- return data
-
- def post_to_server(self, data, auth=None):
- ''' Post a query to the server, and return a string response.
- '''
-
- # Build up the MIME payload for the urllib2 POST data
- boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
- sep_boundary = '\n--' + boundary
- end_boundary = sep_boundary + '--'
- body = StringIO.StringIO()
- for key, value in data.items():
- # handle multiple entries for the same name
- if type(value) not in (type([]), type( () )):
- value = [value]
- for value in value:
- value = unicode(value).encode("utf-8")
- body.write(sep_boundary)
- body.write('\nContent-Disposition: form-data; name="%s"'%key)
- body.write("\n\n")
- body.write(value)
- if value and value[-1] == '\r':
- body.write('\n') # write an extra newline (lurve Macs)
- body.write(end_boundary)
- body.write("\n")
- body = body.getvalue()
-
- # build the Request
- headers = {
- 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
- 'Content-length': str(len(body))
- }
- req = urllib2.Request(self.repository, body, headers)
-
- # handle HTTP and include the Basic Auth handler
- opener = urllib2.build_opener(
- urllib2.HTTPBasicAuthHandler(password_mgr=auth)
- )
- data = ''
- try:
- result = opener.open(req)
- except urllib2.HTTPError, e:
- if self.show_response:
- data = e.fp.read()
- result = e.code, e.msg
- except urllib2.URLError, e:
- result = 500, str(e)
- else:
- if self.show_response:
- data = result.read()
- result = 200, 'OK'
- if self.show_response:
- print '-'*75, data, '-'*75
- return result
diff --git a/sys/lib/python/distutils/command/sdist.py b/sys/lib/python/distutils/command/sdist.py
deleted file mode 100644
index 755f7f5cd..000000000
--- a/sys/lib/python/distutils/command/sdist.py
+++ /dev/null
@@ -1,465 +0,0 @@
-"""distutils.command.sdist
-
-Implements the Distutils 'sdist' command (create a source distribution)."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: sdist.py 38697 2005-03-23 18:54:36Z loewis $"
-
-import sys, os, string
-from types import *
-from glob import glob
-from distutils.core import Command
-from distutils import dir_util, dep_util, file_util, archive_util
-from distutils.text_file import TextFile
-from distutils.errors import *
-from distutils.filelist import FileList
-from distutils import log
-
-
-def show_formats ():
- """Print all possible values for the 'formats' option (used by
- the "--help-formats" command-line option).
- """
- from distutils.fancy_getopt import FancyGetopt
- from distutils.archive_util import ARCHIVE_FORMATS
- formats=[]
- for format in ARCHIVE_FORMATS.keys():
- formats.append(("formats=" + format, None,
- ARCHIVE_FORMATS[format][2]))
- formats.sort()
- pretty_printer = FancyGetopt(formats)
- pretty_printer.print_help(
- "List of available source distribution formats:")
-
-class sdist (Command):
-
- description = "create a source distribution (tarball, zip file, etc.)"
-
- user_options = [
- ('template=', 't',
- "name of manifest template file [default: MANIFEST.in]"),
- ('manifest=', 'm',
- "name of manifest file [default: MANIFEST]"),
- ('use-defaults', None,
- "include the default file set in the manifest "
- "[default; disable with --no-defaults]"),
- ('no-defaults', None,
- "don't include the default file set"),
- ('prune', None,
- "specifically exclude files/directories that should not be "
- "distributed (build tree, RCS/CVS dirs, etc.) "
- "[default; disable with --no-prune]"),
- ('no-prune', None,
- "don't automatically exclude anything"),
- ('manifest-only', 'o',
- "just regenerate the manifest and then stop "
- "(implies --force-manifest)"),
- ('force-manifest', 'f',
- "forcibly regenerate the manifest and carry on as usual"),
- ('formats=', None,
- "formats for source distribution (comma-separated list)"),
- ('keep-temp', 'k',
- "keep the distribution tree around after creating " +
- "archive file(s)"),
- ('dist-dir=', 'd',
- "directory to put the source distribution archive(s) in "
- "[default: dist]"),
- ]
-
- boolean_options = ['use-defaults', 'prune',
- 'manifest-only', 'force-manifest',
- 'keep-temp']
-
- help_options = [
- ('help-formats', None,
- "list available distribution formats", show_formats),
- ]
-
- negative_opt = {'no-defaults': 'use-defaults',
- 'no-prune': 'prune' }
-
- default_format = { 'posix': 'gztar',
- 'nt': 'zip' }
-
- def initialize_options (self):
- # 'template' and 'manifest' are, respectively, the names of
- # the manifest template and manifest file.
- self.template = None
- self.manifest = None
-
- # 'use_defaults': if true, we will include the default file set
- # in the manifest
- self.use_defaults = 1
- self.prune = 1
-
- self.manifest_only = 0
- self.force_manifest = 0
-
- self.formats = None
- self.keep_temp = 0
- self.dist_dir = None
-
- self.archive_files = None
-
-
- def finalize_options (self):
- if self.manifest is None:
- self.manifest = "MANIFEST"
- if self.template is None:
- self.template = "MANIFEST.in"
-
- self.ensure_string_list('formats')
- if self.formats is None:
- try:
- self.formats = [self.default_format[os.name]]
- except KeyError:
- raise DistutilsPlatformError, \
- "don't know how to create source distributions " + \
- "on platform %s" % os.name
-
- bad_format = archive_util.check_archive_formats(self.formats)
- if bad_format:
- raise DistutilsOptionError, \
- "unknown archive format '%s'" % bad_format
-
- if self.dist_dir is None:
- self.dist_dir = "dist"
-
-
- def run (self):
-
- # 'filelist' contains the list of files that will make up the
- # manifest
- self.filelist = FileList()
-
- # Ensure that all required meta-data is given; warn if not (but
- # don't die, it's not *that* serious!)
- self.check_metadata()
-
- # Do whatever it takes to get the list of files to process
- # (process the manifest template, read an existing manifest,
- # whatever). File list is accumulated in 'self.filelist'.
- self.get_file_list()
-
- # If user just wanted us to regenerate the manifest, stop now.
- if self.manifest_only:
- return
-
- # Otherwise, go ahead and create the source distribution tarball,
- # or zipfile, or whatever.
- self.make_distribution()
-
-
- def check_metadata (self):
- """Ensure that all required elements of meta-data (name, version,
- URL, (author and author_email) or (maintainer and
- maintainer_email)) are supplied by the Distribution object; warn if
- any are missing.
- """
- metadata = self.distribution.metadata
-
- missing = []
- for attr in ('name', 'version', 'url'):
- if not (hasattr(metadata, attr) and getattr(metadata, attr)):
- missing.append(attr)
-
- if missing:
- self.warn("missing required meta-data: " +
- string.join(missing, ", "))
-
- if metadata.author:
- if not metadata.author_email:
- self.warn("missing meta-data: if 'author' supplied, " +
- "'author_email' must be supplied too")
- elif metadata.maintainer:
- if not metadata.maintainer_email:
- self.warn("missing meta-data: if 'maintainer' supplied, " +
- "'maintainer_email' must be supplied too")
- else:
- self.warn("missing meta-data: either (author and author_email) " +
- "or (maintainer and maintainer_email) " +
- "must be supplied")
-
- # check_metadata ()
-
-
- def get_file_list (self):
- """Figure out the list of files to include in the source
- distribution, and put it in 'self.filelist'. This might involve
- reading the manifest template (and writing the manifest), or just
- reading the manifest, or just using the default file set -- it all
- depends on the user's options and the state of the filesystem.
- """
-
- # If we have a manifest template, see if it's newer than the
- # manifest; if so, we'll regenerate the manifest.
- template_exists = os.path.isfile(self.template)
- if template_exists:
- template_newer = dep_util.newer(self.template, self.manifest)
-
- # The contents of the manifest file almost certainly depend on the
- # setup script as well as the manifest template -- so if the setup
- # script is newer than the manifest, we'll regenerate the manifest
- # from the template. (Well, not quite: if we already have a
- # manifest, but there's no template -- which will happen if the
- # developer elects to generate a manifest some other way -- then we
- # can't regenerate the manifest, so we don't.)
- self.debug_print("checking if %s newer than %s" %
- (self.distribution.script_name, self.manifest))
- setup_newer = dep_util.newer(self.distribution.script_name,
- self.manifest)
-
- # cases:
- # 1) no manifest, template exists: generate manifest
- # (covered by 2a: no manifest == template newer)
- # 2) manifest & template exist:
- # 2a) template or setup script newer than manifest:
- # regenerate manifest
- # 2b) manifest newer than both:
- # do nothing (unless --force or --manifest-only)
- # 3) manifest exists, no template:
- # do nothing (unless --force or --manifest-only)
- # 4) no manifest, no template: generate w/ warning ("defaults only")
-
- manifest_outofdate = (template_exists and
- (template_newer or setup_newer))
- force_regen = self.force_manifest or self.manifest_only
- manifest_exists = os.path.isfile(self.manifest)
- neither_exists = (not template_exists and not manifest_exists)
-
- # Regenerate the manifest if necessary (or if explicitly told to)
- if manifest_outofdate or neither_exists or force_regen:
- if not template_exists:
- self.warn(("manifest template '%s' does not exist " +
- "(using default file list)") %
- self.template)
- self.filelist.findall()
-
- if self.use_defaults:
- self.add_defaults()
- if template_exists:
- self.read_template()
- if self.prune:
- self.prune_file_list()
-
- self.filelist.sort()
- self.filelist.remove_duplicates()
- self.write_manifest()
-
- # Don't regenerate the manifest, just read it in.
- else:
- self.read_manifest()
-
- # get_file_list ()
-
-
- def add_defaults (self):
- """Add all the default files to self.filelist:
- - README or README.txt
- - setup.py
- - test/test*.py
- - all pure Python modules mentioned in setup script
- - all C sources listed as part of extensions or C libraries
- in the setup script (doesn't catch C headers!)
- Warns if (README or README.txt) or setup.py are missing; everything
- else is optional.
- """
-
- standards = [('README', 'README.txt'), self.distribution.script_name]
- for fn in standards:
- if type(fn) is TupleType:
- alts = fn
- got_it = 0
- for fn in alts:
- if os.path.exists(fn):
- got_it = 1
- self.filelist.append(fn)
- break
-
- if not got_it:
- self.warn("standard file not found: should have one of " +
- string.join(alts, ', '))
- else:
- if os.path.exists(fn):
- self.filelist.append(fn)
- else:
- self.warn("standard file '%s' not found" % fn)
-
- optional = ['test/test*.py', 'setup.cfg']
- for pattern in optional:
- files = filter(os.path.isfile, glob(pattern))
- if files:
- self.filelist.extend(files)
-
- if self.distribution.has_pure_modules():
- build_py = self.get_finalized_command('build_py')
- self.filelist.extend(build_py.get_source_files())
-
- if self.distribution.has_ext_modules():
- build_ext = self.get_finalized_command('build_ext')
- self.filelist.extend(build_ext.get_source_files())
-
- if self.distribution.has_c_libraries():
- build_clib = self.get_finalized_command('build_clib')
- self.filelist.extend(build_clib.get_source_files())
-
- if self.distribution.has_scripts():
- build_scripts = self.get_finalized_command('build_scripts')
- self.filelist.extend(build_scripts.get_source_files())
-
- # add_defaults ()
-
-
- def read_template (self):
- """Read and parse manifest template file named by self.template.
-
- (usually "MANIFEST.in") The parsing and processing is done by
- 'self.filelist', which updates itself accordingly.
- """
- log.info("reading manifest template '%s'", self.template)
- template = TextFile(self.template,
- strip_comments=1,
- skip_blanks=1,
- join_lines=1,
- lstrip_ws=1,
- rstrip_ws=1,
- collapse_join=1)
-
- while 1:
- line = template.readline()
- if line is None: # end of file
- break
-
- try:
- self.filelist.process_template_line(line)
- except DistutilsTemplateError, msg:
- self.warn("%s, line %d: %s" % (template.filename,
- template.current_line,
- msg))
-
- # read_template ()
-
-
- def prune_file_list (self):
- """Prune off branches that might slip into the file list as created
- by 'read_template()', but really don't belong there:
- * the build tree (typically "build")
- * the release tree itself (only an issue if we ran "sdist"
- previously with --keep-temp, or it aborted)
- * any RCS, CVS and .svn directories
- """
- build = self.get_finalized_command('build')
- base_dir = self.distribution.get_fullname()
-
- self.filelist.exclude_pattern(None, prefix=build.build_base)
- self.filelist.exclude_pattern(None, prefix=base_dir)
- self.filelist.exclude_pattern(r'/(RCS|CVS|\.svn)/.*', is_regex=1)
-
-
- def write_manifest (self):
- """Write the file list in 'self.filelist' (presumably as filled in
- by 'add_defaults()' and 'read_template()') to the manifest file
- named by 'self.manifest'.
- """
- self.execute(file_util.write_file,
- (self.manifest, self.filelist.files),
- "writing manifest file '%s'" % self.manifest)
-
- # write_manifest ()
-
-
- def read_manifest (self):
- """Read the manifest file (named by 'self.manifest') and use it to
- fill in 'self.filelist', the list of files to include in the source
- distribution.
- """
- log.info("reading manifest file '%s'", self.manifest)
- manifest = open(self.manifest)
- while 1:
- line = manifest.readline()
- if line == '': # end of file
- break
- if line[-1] == '\n':
- line = line[0:-1]
- self.filelist.append(line)
-
- # read_manifest ()
-
-
- def make_release_tree (self, base_dir, files):
- """Create the directory tree that will become the source
- distribution archive. All directories implied by the filenames in
- 'files' are created under 'base_dir', and then we hard link or copy
- (if hard linking is unavailable) those files into place.
- Essentially, this duplicates the developer's source tree, but in a
- directory named after the distribution, containing only the files
- to be distributed.
- """
- # Create all the directories under 'base_dir' necessary to
- # put 'files' there; the 'mkpath()' is just so we don't die
- # if the manifest happens to be empty.
- self.mkpath(base_dir)
- dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
-
- # And walk over the list of files, either making a hard link (if
- # os.link exists) to each one that doesn't already exist in its
- # corresponding location under 'base_dir', or copying each file
- # that's out-of-date in 'base_dir'. (Usually, all files will be
- # out-of-date, because by default we blow away 'base_dir' when
- # we're done making the distribution archives.)
-
- if hasattr(os, 'link'): # can make hard links on this system
- link = 'hard'
- msg = "making hard links in %s..." % base_dir
- else: # nope, have to copy
- link = None
- msg = "copying files to %s..." % base_dir
-
- if not files:
- log.warn("no files to distribute -- empty manifest?")
- else:
- log.info(msg)
- for file in files:
- if not os.path.isfile(file):
- log.warn("'%s' not a regular file -- skipping" % file)
- else:
- dest = os.path.join(base_dir, file)
- self.copy_file(file, dest, link=link)
-
- self.distribution.metadata.write_pkg_info(base_dir)
-
- # make_release_tree ()
-
- def make_distribution (self):
- """Create the source distribution(s). First, we create the release
- tree with 'make_release_tree()'; then, we create all required
- archive files (according to 'self.formats') from the release tree.
- Finally, we clean up by blowing away the release tree (unless
- 'self.keep_temp' is true). The list of archive files created is
- stored so it can be retrieved later by 'get_archive_files()'.
- """
- # Don't warn about missing meta-data here -- should be (and is!)
- # done elsewhere.
- base_dir = self.distribution.get_fullname()
- base_name = os.path.join(self.dist_dir, base_dir)
-
- self.make_release_tree(base_dir, self.filelist.files)
- archive_files = [] # remember names of files we create
- for fmt in self.formats:
- file = self.make_archive(base_name, fmt, base_dir=base_dir)
- archive_files.append(file)
- self.distribution.dist_files.append(('sdist', '', file))
-
- self.archive_files = archive_files
-
- if not self.keep_temp:
- dir_util.remove_tree(base_dir, dry_run=self.dry_run)
-
- def get_archive_files (self):
- """Return the list of archive files created when the command
- was run, or None if the command hasn't run yet.
- """
- return self.archive_files
-
-# class sdist
diff --git a/sys/lib/python/distutils/command/upload.py b/sys/lib/python/distutils/command/upload.py
deleted file mode 100644
index 67ba08042..000000000
--- a/sys/lib/python/distutils/command/upload.py
+++ /dev/null
@@ -1,199 +0,0 @@
-"""distutils.command.upload
-
-Implements the Distutils 'upload' subcommand (upload package to PyPI)."""
-
-from distutils.errors import *
-from distutils.core import Command
-from distutils.spawn import spawn
-from distutils import log
-from hashlib import md5
-import os
-import socket
-import platform
-import ConfigParser
-import httplib
-import base64
-import urlparse
-import cStringIO as StringIO
-
-class upload(Command):
-
- description = "upload binary package to PyPI"
-
- DEFAULT_REPOSITORY = 'http://www.python.org/pypi'
-
- user_options = [
- ('repository=', 'r',
- "url of repository [default: %s]" % DEFAULT_REPOSITORY),
- ('show-response', None,
- 'display full response text from server'),
- ('sign', 's',
- 'sign files to upload using gpg'),
- ('identity=', 'i', 'GPG identity used to sign files'),
- ]
- boolean_options = ['show-response', 'sign']
-
- def initialize_options(self):
- self.username = ''
- self.password = ''
- self.repository = ''
- self.show_response = 0
- self.sign = False
- self.identity = None
-
- def finalize_options(self):
- if self.identity and not self.sign:
- raise DistutilsOptionError(
- "Must use --sign for --identity to have meaning"
- )
- if os.environ.has_key('HOME'):
- rc = os.path.join(os.environ['HOME'], '.pypirc')
- if os.path.exists(rc):
- self.announce('Using PyPI login from %s' % rc)
- config = ConfigParser.ConfigParser({
- 'username':'',
- 'password':'',
- 'repository':''})
- config.read(rc)
- if not self.repository:
- self.repository = config.get('server-login', 'repository')
- if not self.username:
- self.username = config.get('server-login', 'username')
- if not self.password:
- self.password = config.get('server-login', 'password')
- if not self.repository:
- self.repository = self.DEFAULT_REPOSITORY
-
- def run(self):
- if not self.distribution.dist_files:
- raise DistutilsOptionError("No dist file created in earlier command")
- for command, pyversion, filename in self.distribution.dist_files:
- self.upload_file(command, pyversion, filename)
-
- def upload_file(self, command, pyversion, filename):
- # Sign if requested
- if self.sign:
- gpg_args = ["gpg", "--detach-sign", "-a", filename]
- if self.identity:
- gpg_args[2:2] = ["--local-user", self.identity]
- spawn(gpg_args,
- dry_run=self.dry_run)
-
- # Fill in the data - send all the meta-data in case we need to
- # register a new release
- content = open(filename,'rb').read()
- meta = self.distribution.metadata
- data = {
- # action
- ':action': 'file_upload',
- 'protcol_version': '1',
-
- # identify release
- 'name': meta.get_name(),
- 'version': meta.get_version(),
-
- # file content
- 'content': (os.path.basename(filename),content),
- 'filetype': command,
- 'pyversion': pyversion,
- 'md5_digest': md5(content).hexdigest(),
-
- # additional meta-data
- 'metadata_version' : '1.0',
- 'summary': meta.get_description(),
- 'home_page': meta.get_url(),
- 'author': meta.get_contact(),
- 'author_email': meta.get_contact_email(),
- 'license': meta.get_licence(),
- 'description': meta.get_long_description(),
- 'keywords': meta.get_keywords(),
- 'platform': meta.get_platforms(),
- 'classifiers': meta.get_classifiers(),
- 'download_url': meta.get_download_url(),
- # PEP 314
- 'provides': meta.get_provides(),
- 'requires': meta.get_requires(),
- 'obsoletes': meta.get_obsoletes(),
- }
- comment = ''
- if command == 'bdist_rpm':
- dist, version, id = platform.dist()
- if dist:
- comment = 'built for %s %s' % (dist, version)
- elif command == 'bdist_dumb':
- comment = 'built for %s' % platform.platform(terse=1)
- data['comment'] = comment
-
- if self.sign:
- data['gpg_signature'] = (os.path.basename(filename) + ".asc",
- open(filename+".asc").read())
-
- # set up the authentication
- auth = "Basic " + base64.encodestring(self.username + ":" + self.password).strip()
-
- # Build up the MIME payload for the POST data
- boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
- sep_boundary = '\n--' + boundary
- end_boundary = sep_boundary + '--'
- body = StringIO.StringIO()
- for key, value in data.items():
- # handle multiple entries for the same name
- if type(value) != type([]):
- value = [value]
- for value in value:
- if type(value) is tuple:
- fn = ';filename="%s"' % value[0]
- value = value[1]
- else:
- fn = ""
- value = str(value)
- body.write(sep_boundary)
- body.write('\nContent-Disposition: form-data; name="%s"'%key)
- body.write(fn)
- body.write("\n\n")
- body.write(value)
- if value and value[-1] == '\r':
- body.write('\n') # write an extra newline (lurve Macs)
- body.write(end_boundary)
- body.write("\n")
- body = body.getvalue()
-
- self.announce("Submitting %s to %s" % (filename, self.repository), log.INFO)
-
- # build the Request
- # We can't use urllib2 since we need to send the Basic
- # auth right with the first request
- schema, netloc, url, params, query, fragments = \
- urlparse.urlparse(self.repository)
- assert not params and not query and not fragments
- if schema == 'http':
- http = httplib.HTTPConnection(netloc)
- elif schema == 'https':
- http = httplib.HTTPSConnection(netloc)
- else:
- raise AssertionError, "unsupported schema "+schema
-
- data = ''
- loglevel = log.INFO
- try:
- http.connect()
- http.putrequest("POST", url)
- http.putheader('Content-type',
- 'multipart/form-data; boundary=%s'%boundary)
- http.putheader('Content-length', str(len(body)))
- http.putheader('Authorization', auth)
- http.endheaders()
- http.send(body)
- except socket.error, e:
- self.announce(str(e), log.ERROR)
- return
-
- r = http.getresponse()
- if r.status == 200:
- self.announce('Server response (%s): %s' % (r.status, r.reason),
- log.INFO)
- else:
- self.announce('Upload failed (%s): %s' % (r.status, r.reason),
- log.ERROR)
- if self.show_response:
- print '-'*75, r.read(), '-'*75
diff --git a/sys/lib/python/distutils/core.py b/sys/lib/python/distutils/core.py
deleted file mode 100644
index 02c11af8b..000000000
--- a/sys/lib/python/distutils/core.py
+++ /dev/null
@@ -1,242 +0,0 @@
-"""distutils.core
-
-The only module that needs to be imported to use the Distutils; provides
-the 'setup' function (which is to be called from the setup script). Also
-indirectly provides the Distribution and Command classes, although they are
-really defined in distutils.dist and distutils.cmd.
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: core.py 38672 2005-03-20 22:19:47Z fdrake $"
-
-import sys, os
-from types import *
-
-from distutils.debug import DEBUG
-from distutils.errors import *
-from distutils.util import grok_environment_error
-
-# Mainly import these so setup scripts can "from distutils.core import" them.
-from distutils.dist import Distribution
-from distutils.cmd import Command
-from distutils.extension import Extension
-
-# This is a barebones help message generated displayed when the user
-# runs the setup script with no arguments at all. More useful help
-# is generated with various --help options: global help, list commands,
-# and per-command help.
-USAGE = """\
-usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
- or: %(script)s --help [cmd1 cmd2 ...]
- or: %(script)s --help-commands
- or: %(script)s cmd --help
-"""
-
-def gen_usage (script_name):
- script = os.path.basename(script_name)
- return USAGE % vars()
-
-
-# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
-_setup_stop_after = None
-_setup_distribution = None
-
-# Legal keyword arguments for the setup() function
-setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
- 'name', 'version', 'author', 'author_email',
- 'maintainer', 'maintainer_email', 'url', 'license',
- 'description', 'long_description', 'keywords',
- 'platforms', 'classifiers', 'download_url',
- 'requires', 'provides', 'obsoletes',
- )
-
-# Legal keyword arguments for the Extension constructor
-extension_keywords = ('name', 'sources', 'include_dirs',
- 'define_macros', 'undef_macros',
- 'library_dirs', 'libraries', 'runtime_library_dirs',
- 'extra_objects', 'extra_compile_args', 'extra_link_args',
- 'swig_opts', 'export_symbols', 'depends', 'language')
-
-def setup (**attrs):
- """The gateway to the Distutils: do everything your setup script needs
- to do, in a highly flexible and user-driven way. Briefly: create a
- Distribution instance; find and parse config files; parse the command
- line; run each Distutils command found there, customized by the options
- supplied to 'setup()' (as keyword arguments), in config files, and on
- the command line.
-
- The Distribution instance might be an instance of a class supplied via
- the 'distclass' keyword argument to 'setup'; if no such class is
- supplied, then the Distribution class (in dist.py) is instantiated.
- All other arguments to 'setup' (except for 'cmdclass') are used to set
- attributes of the Distribution instance.
-
- The 'cmdclass' argument, if supplied, is a dictionary mapping command
- names to command classes. Each command encountered on the command line
- will be turned into a command class, which is in turn instantiated; any
- class found in 'cmdclass' is used in place of the default, which is
- (for command 'foo_bar') class 'foo_bar' in module
- 'distutils.command.foo_bar'. The command class must provide a
- 'user_options' attribute which is a list of option specifiers for
- 'distutils.fancy_getopt'. Any command-line options between the current
- and the next command are used to set attributes of the current command
- object.
-
- When the entire command-line has been successfully parsed, calls the
- 'run()' method on each command object in turn. This method will be
- driven entirely by the Distribution object (which each command object
- has a reference to, thanks to its constructor), and the
- command-specific options that became attributes of each command
- object.
- """
-
- global _setup_stop_after, _setup_distribution
-
- # Determine the distribution class -- either caller-supplied or
- # our Distribution (see below).
- klass = attrs.get('distclass')
- if klass:
- del attrs['distclass']
- else:
- klass = Distribution
-
- if not attrs.has_key('script_name'):
- attrs['script_name'] = os.path.basename(sys.argv[0])
- if not attrs.has_key('script_args'):
- attrs['script_args'] = sys.argv[1:]
-
- # Create the Distribution instance, using the remaining arguments
- # (ie. everything except distclass) to initialize it
- try:
- _setup_distribution = dist = klass(attrs)
- except DistutilsSetupError, msg:
- if attrs.has_key('name'):
- raise SystemExit, "error in %s setup command: %s" % \
- (attrs['name'], msg)
- else:
- raise SystemExit, "error in setup command: %s" % msg
-
- if _setup_stop_after == "init":
- return dist
-
- # Find and parse the config file(s): they will override options from
- # the setup script, but be overridden by the command line.
- dist.parse_config_files()
-
- if DEBUG:
- print "options (after parsing config files):"
- dist.dump_option_dicts()
-
- if _setup_stop_after == "config":
- return dist
-
- # Parse the command line; any command-line errors are the end user's
- # fault, so turn them into SystemExit to suppress tracebacks.
- try:
- ok = dist.parse_command_line()
- except DistutilsArgError, msg:
- raise SystemExit, gen_usage(dist.script_name) + "\nerror: %s" % msg
-
- if DEBUG:
- print "options (after parsing command line):"
- dist.dump_option_dicts()
-
- if _setup_stop_after == "commandline":
- return dist
-
- # And finally, run all the commands found on the command line.
- if ok:
- try:
- dist.run_commands()
- except KeyboardInterrupt:
- raise SystemExit, "interrupted"
- except (IOError, os.error), exc:
- error = grok_environment_error(exc)
-
- if DEBUG:
- sys.stderr.write(error + "\n")
- raise
- else:
- raise SystemExit, error
-
- except (DistutilsError,
- CCompilerError), msg:
- if DEBUG:
- raise
- else:
- raise SystemExit, "error: " + str(msg)
-
- return dist
-
-# setup ()
-
-
-def run_setup (script_name, script_args=None, stop_after="run"):
- """Run a setup script in a somewhat controlled environment, and
- return the Distribution instance that drives things. This is useful
- if you need to find out the distribution meta-data (passed as
- keyword args from 'script' to 'setup()', or the contents of the
- config files or command-line.
-
- 'script_name' is a file that will be run with 'execfile()';
- 'sys.argv[0]' will be replaced with 'script' for the duration of the
- call. 'script_args' is a list of strings; if supplied,
- 'sys.argv[1:]' will be replaced by 'script_args' for the duration of
- the call.
-
- 'stop_after' tells 'setup()' when to stop processing; possible
- values:
- init
- stop after the Distribution instance has been created and
- populated with the keyword arguments to 'setup()'
- config
- stop after config files have been parsed (and their data
- stored in the Distribution instance)
- commandline
- stop after the command-line ('sys.argv[1:]' or 'script_args')
- have been parsed (and the data stored in the Distribution)
- run [default]
- stop after all commands have been run (the same as if 'setup()'
- had been called in the usual way
-
- Returns the Distribution instance, which provides all information
- used to drive the Distutils.
- """
- if stop_after not in ('init', 'config', 'commandline', 'run'):
- raise ValueError, "invalid value for 'stop_after': %r" % (stop_after,)
-
- global _setup_stop_after, _setup_distribution
- _setup_stop_after = stop_after
-
- save_argv = sys.argv
- g = {}
- l = {}
- try:
- try:
- sys.argv[0] = script_name
- if script_args is not None:
- sys.argv[1:] = script_args
- execfile(script_name, g, l)
- finally:
- sys.argv = save_argv
- _setup_stop_after = None
- except SystemExit:
- # Hmm, should we do something if exiting with a non-zero code
- # (ie. error)?
- pass
- except:
- raise
-
- if _setup_distribution is None:
- raise RuntimeError, \
- ("'distutils.core.setup()' was never called -- "
- "perhaps '%s' is not a Distutils setup script?") % \
- script_name
-
- # I wonder if the setup script's namespace -- g and l -- would be of
- # any interest to callers?
- #print "_setup_distribution:", _setup_distribution
- return _setup_distribution
-
-# run_setup ()
diff --git a/sys/lib/python/distutils/cygwinccompiler.py b/sys/lib/python/distutils/cygwinccompiler.py
deleted file mode 100644
index e836cc4d2..000000000
--- a/sys/lib/python/distutils/cygwinccompiler.py
+++ /dev/null
@@ -1,441 +0,0 @@
-"""distutils.cygwinccompiler
-
-Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
-handles the Cygwin port of the GNU C compiler to Windows. It also contains
-the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
-cygwin in no-cygwin mode).
-"""
-
-# problems:
-#
-# * if you use a msvc compiled python version (1.5.2)
-# 1. you have to insert a __GNUC__ section in its config.h
-# 2. you have to generate a import library for its dll
-# - create a def-file for python??.dll
-# - create a import library using
-# dlltool --dllname python15.dll --def python15.def \
-# --output-lib libpython15.a
-#
-# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
-#
-# * We put export_symbols in a def-file, and don't use
-# --export-all-symbols because it doesn't worked reliable in some
-# tested configurations. And because other windows compilers also
-# need their symbols specified this no serious problem.
-#
-# tested configurations:
-#
-# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
-# (after patching python's config.h and for C++ some other include files)
-# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
-# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
-# (ld doesn't support -shared, so we use dllwrap)
-# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
-# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
-# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
-# - using gcc -mdll instead dllwrap doesn't work without -static because
-# it tries to link against dlls instead their import libraries. (If
-# it finds the dll first.)
-# By specifying -static we force ld to link against the import libraries,
-# this is windows standard and there are normally not the necessary symbols
-# in the dlls.
-# *** only the version of June 2000 shows these problems
-# * cygwin gcc 3.2/ld 2.13.90 works
-# (ld supports -shared)
-# * mingw gcc 3.2/ld 2.13 works
-# (ld supports -shared)
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: cygwinccompiler.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os,sys,copy
-from distutils.ccompiler import gen_preprocess_options, gen_lib_options
-from distutils.unixccompiler import UnixCCompiler
-from distutils.file_util import write_file
-from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
-from distutils import log
-
-class CygwinCCompiler (UnixCCompiler):
-
- compiler_type = 'cygwin'
- obj_extension = ".o"
- static_lib_extension = ".a"
- shared_lib_extension = ".dll"
- static_lib_format = "lib%s%s"
- shared_lib_format = "%s%s"
- exe_extension = ".exe"
-
- def __init__ (self, verbose=0, dry_run=0, force=0):
-
- UnixCCompiler.__init__ (self, verbose, dry_run, force)
-
- (status, details) = check_config_h()
- self.debug_print("Python's GCC status: %s (details: %s)" %
- (status, details))
- if status is not CONFIG_H_OK:
- self.warn(
- "Python's pyconfig.h doesn't seem to support your compiler. "
- "Reason: %s. "
- "Compiling may fail because of undefined preprocessor macros."
- % details)
-
- self.gcc_version, self.ld_version, self.dllwrap_version = \
- get_versions()
- self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
- (self.gcc_version,
- self.ld_version,
- self.dllwrap_version) )
-
- # ld_version >= "2.10.90" and < "2.13" should also be able to use
- # gcc -mdll instead of dllwrap
- # Older dllwraps had own version numbers, newer ones use the
- # same as the rest of binutils ( also ld )
- # dllwrap 2.10.90 is buggy
- if self.ld_version >= "2.10.90":
- self.linker_dll = "gcc"
- else:
- self.linker_dll = "dllwrap"
-
- # ld_version >= "2.13" support -shared so use it instead of
- # -mdll -static
- if self.ld_version >= "2.13":
- shared_option = "-shared"
- else:
- shared_option = "-mdll -static"
-
- # Hard-code GCC because that's what this is all about.
- # XXX optimization, warnings etc. should be customizable.
- self.set_executables(compiler='gcc -mcygwin -O -Wall',
- compiler_so='gcc -mcygwin -mdll -O -Wall',
- compiler_cxx='g++ -mcygwin -O -Wall',
- linker_exe='gcc -mcygwin',
- linker_so=('%s -mcygwin %s' %
- (self.linker_dll, shared_option)))
-
- # cygwin and mingw32 need different sets of libraries
- if self.gcc_version == "2.91.57":
- # cygwin shouldn't need msvcrt, but without the dlls will crash
- # (gcc version 2.91.57) -- perhaps something about initialization
- self.dll_libraries=["msvcrt"]
- self.warn(
- "Consider upgrading to a newer version of gcc")
- else:
- self.dll_libraries=[]
- # Include the appropriate MSVC runtime library if Python was built
- # with MSVC 7.0 or 7.1.
- msc_pos = sys.version.find('MSC v.')
- if msc_pos != -1:
- msc_ver = sys.version[msc_pos+6:msc_pos+10]
- if msc_ver == '1300':
- # MSVC 7.0
- self.dll_libraries = ['msvcr70']
- elif msc_ver == '1310':
- # MSVC 7.1
- self.dll_libraries = ['msvcr71']
-
- # __init__ ()
-
-
- def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
- if ext == '.rc' or ext == '.res':
- # gcc needs '.res' and '.rc' compiled to object files !!!
- try:
- self.spawn(["windres", "-i", src, "-o", obj])
- except DistutilsExecError, msg:
- raise CompileError, msg
- else: # for other files use the C-compiler
- try:
- self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
- extra_postargs)
- except DistutilsExecError, msg:
- raise CompileError, msg
-
- def link (self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
-
- # use separate copies, so we can modify the lists
- extra_preargs = copy.copy(extra_preargs or [])
- libraries = copy.copy(libraries or [])
- objects = copy.copy(objects or [])
-
- # Additional libraries
- libraries.extend(self.dll_libraries)
-
- # handle export symbols by creating a def-file
- # with executables this only works with gcc/ld as linker
- if ((export_symbols is not None) and
- (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
- # (The linker doesn't do anything if output is up-to-date.
- # So it would probably better to check if we really need this,
- # but for this we had to insert some unchanged parts of
- # UnixCCompiler, and this is not what we want.)
-
- # we want to put some files in the same directory as the
- # object files are, build_temp doesn't help much
- # where are the object files
- temp_dir = os.path.dirname(objects[0])
- # name of dll to give the helper files the same base name
- (dll_name, dll_extension) = os.path.splitext(
- os.path.basename(output_filename))
-
- # generate the filenames for these files
- def_file = os.path.join(temp_dir, dll_name + ".def")
- lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
-
- # Generate .def file
- contents = [
- "LIBRARY %s" % os.path.basename(output_filename),
- "EXPORTS"]
- for sym in export_symbols:
- contents.append(sym)
- self.execute(write_file, (def_file, contents),
- "writing %s" % def_file)
-
- # next add options for def-file and to creating import libraries
-
- # dllwrap uses different options than gcc/ld
- if self.linker_dll == "dllwrap":
- extra_preargs.extend(["--output-lib", lib_file])
- # for dllwrap we have to use a special option
- extra_preargs.extend(["--def", def_file])
- # we use gcc/ld here and can be sure ld is >= 2.9.10
- else:
- # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
- #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
- # for gcc/ld the def-file is specified as any object files
- objects.append(def_file)
-
- #end: if ((export_symbols is not None) and
- # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
-
- # who wants symbols and a many times larger output file
- # should explicitly switch the debug mode on
- # otherwise we let dllwrap/ld strip the output file
- # (On my machine: 10KB < stripped_file < ??100KB
- # unstripped_file = stripped_file + XXX KB
- # ( XXX=254 for a typical python extension))
- if not debug:
- extra_preargs.append("-s")
-
- UnixCCompiler.link(self,
- target_desc,
- objects,
- output_filename,
- output_dir,
- libraries,
- library_dirs,
- runtime_library_dirs,
- None, # export_symbols, we do this in our def-file
- debug,
- extra_preargs,
- extra_postargs,
- build_temp,
- target_lang)
-
- # link ()
-
- # -- Miscellaneous methods -----------------------------------------
-
- # overwrite the one from CCompiler to support rc and res-files
- def object_filenames (self,
- source_filenames,
- strip_dir=0,
- output_dir=''):
- if output_dir is None: output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- # use normcase to make sure '.rc' is really '.rc' and not '.RC'
- (base, ext) = os.path.splitext (os.path.normcase(src_name))
- if ext not in (self.src_extensions + ['.rc','.res']):
- raise UnknownFileError, \
- "unknown file type '%s' (from '%s')" % \
- (ext, src_name)
- if strip_dir:
- base = os.path.basename (base)
- if ext == '.res' or ext == '.rc':
- # these need to be compiled to object files
- obj_names.append (os.path.join (output_dir,
- base + ext + self.obj_extension))
- else:
- obj_names.append (os.path.join (output_dir,
- base + self.obj_extension))
- return obj_names
-
- # object_filenames ()
-
-# class CygwinCCompiler
-
-
-# the same as cygwin plus some additional parameters
-class Mingw32CCompiler (CygwinCCompiler):
-
- compiler_type = 'mingw32'
-
- def __init__ (self,
- verbose=0,
- dry_run=0,
- force=0):
-
- CygwinCCompiler.__init__ (self, verbose, dry_run, force)
-
- # ld_version >= "2.13" support -shared so use it instead of
- # -mdll -static
- if self.ld_version >= "2.13":
- shared_option = "-shared"
- else:
- shared_option = "-mdll -static"
-
- # A real mingw32 doesn't need to specify a different entry point,
- # but cygwin 2.91.57 in no-cygwin-mode needs it.
- if self.gcc_version <= "2.91.57":
- entry_point = '--entry _DllMain@12'
- else:
- entry_point = ''
-
- self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
- compiler_so='gcc -mno-cygwin -mdll -O -Wall',
- compiler_cxx='g++ -mno-cygwin -O -Wall',
- linker_exe='gcc -mno-cygwin',
- linker_so='%s -mno-cygwin %s %s'
- % (self.linker_dll, shared_option,
- entry_point))
- # Maybe we should also append -mthreads, but then the finished
- # dlls need another dll (mingwm10.dll see Mingw32 docs)
- # (-mthreads: Support thread-safe exception handling on `Mingw32')
-
- # no additional libraries needed
- self.dll_libraries=[]
-
- # Include the appropriate MSVC runtime library if Python was built
- # with MSVC 7.0 or 7.1.
- msc_pos = sys.version.find('MSC v.')
- if msc_pos != -1:
- msc_ver = sys.version[msc_pos+6:msc_pos+10]
- if msc_ver == '1300':
- # MSVC 7.0
- self.dll_libraries = ['msvcr70']
- elif msc_ver == '1310':
- # MSVC 7.1
- self.dll_libraries = ['msvcr71']
-
- # __init__ ()
-
-# class Mingw32CCompiler
-
-# Because these compilers aren't configured in Python's pyconfig.h file by
-# default, we should at least warn the user if he is using a unmodified
-# version.
-
-CONFIG_H_OK = "ok"
-CONFIG_H_NOTOK = "not ok"
-CONFIG_H_UNCERTAIN = "uncertain"
-
-def check_config_h():
-
- """Check if the current Python installation (specifically, pyconfig.h)
- appears amenable to building extensions with GCC. Returns a tuple
- (status, details), where 'status' is one of the following constants:
- CONFIG_H_OK
- all is well, go ahead and compile
- CONFIG_H_NOTOK
- doesn't look good
- CONFIG_H_UNCERTAIN
- not sure -- unable to read pyconfig.h
- 'details' is a human-readable string explaining the situation.
-
- Note there are two ways to conclude "OK": either 'sys.version' contains
- the string "GCC" (implying that this Python was built with GCC), or the
- installed "pyconfig.h" contains the string "__GNUC__".
- """
-
- # XXX since this function also checks sys.version, it's not strictly a
- # "pyconfig.h" check -- should probably be renamed...
-
- from distutils import sysconfig
- import string
- # if sys.version contains GCC then python was compiled with
- # GCC, and the pyconfig.h file should be OK
- if string.find(sys.version,"GCC") >= 0:
- return (CONFIG_H_OK, "sys.version mentions 'GCC'")
-
- fn = sysconfig.get_config_h_filename()
- try:
- # It would probably better to read single lines to search.
- # But we do this only once, and it is fast enough
- f = open(fn)
- s = f.read()
- f.close()
-
- except IOError, exc:
- # if we can't read this file, we cannot say it is wrong
- # the compiler will complain later about this file as missing
- return (CONFIG_H_UNCERTAIN,
- "couldn't read '%s': %s" % (fn, exc.strerror))
-
- else:
- # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
- if string.find(s,"__GNUC__") >= 0:
- return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
- else:
- return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
-
-
-
-def get_versions():
- """ Try to find out the versions of gcc, ld and dllwrap.
- If not possible it returns None for it.
- """
- from distutils.version import StrictVersion
- from distutils.spawn import find_executable
- import re
-
- gcc_exe = find_executable('gcc')
- if gcc_exe:
- out = os.popen(gcc_exe + ' -dumpversion','r')
- out_string = out.read()
- out.close()
- result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
- if result:
- gcc_version = StrictVersion(result.group(1))
- else:
- gcc_version = None
- else:
- gcc_version = None
- ld_exe = find_executable('ld')
- if ld_exe:
- out = os.popen(ld_exe + ' -v','r')
- out_string = out.read()
- out.close()
- result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
- if result:
- ld_version = StrictVersion(result.group(1))
- else:
- ld_version = None
- else:
- ld_version = None
- dllwrap_exe = find_executable('dllwrap')
- if dllwrap_exe:
- out = os.popen(dllwrap_exe + ' --version','r')
- out_string = out.read()
- out.close()
- result = re.search(' (\d+\.\d+(\.\d+)*)',out_string)
- if result:
- dllwrap_version = StrictVersion(result.group(1))
- else:
- dllwrap_version = None
- else:
- dllwrap_version = None
- return (gcc_version, ld_version, dllwrap_version)
diff --git a/sys/lib/python/distutils/debug.py b/sys/lib/python/distutils/debug.py
deleted file mode 100644
index 1cd427eca..000000000
--- a/sys/lib/python/distutils/debug.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import os
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: debug.py 37828 2004-11-10 22:23:15Z loewis $"
-
-# If DISTUTILS_DEBUG is anything other than the empty string, we run in
-# debug mode.
-DEBUG = os.environ.get('DISTUTILS_DEBUG')
diff --git a/sys/lib/python/distutils/dep_util.py b/sys/lib/python/distutils/dep_util.py
deleted file mode 100644
index 9b1e762ea..000000000
--- a/sys/lib/python/distutils/dep_util.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""distutils.dep_util
-
-Utility functions for simple, timestamp-based dependency of files
-and groups of files; also, function based entirely on such
-timestamp dependency analysis."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: dep_util.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os
-from distutils.errors import DistutilsFileError
-
-
-def newer (source, target):
- """Return true if 'source' exists and is more recently modified than
- 'target', or if 'source' exists and 'target' doesn't. Return false if
- both exist and 'target' is the same age or younger than 'source'.
- Raise DistutilsFileError if 'source' does not exist.
- """
- if not os.path.exists(source):
- raise DistutilsFileError, "file '%s' does not exist" % source
- if not os.path.exists(target):
- return 1
-
- from stat import ST_MTIME
- mtime1 = os.stat(source)[ST_MTIME]
- mtime2 = os.stat(target)[ST_MTIME]
-
- return mtime1 > mtime2
-
-# newer ()
-
-
-def newer_pairwise (sources, targets):
- """Walk two filename lists in parallel, testing if each source is newer
- than its corresponding target. Return a pair of lists (sources,
- targets) where source is newer than target, according to the semantics
- of 'newer()'.
- """
- if len(sources) != len(targets):
- raise ValueError, "'sources' and 'targets' must be same length"
-
- # build a pair of lists (sources, targets) where source is newer
- n_sources = []
- n_targets = []
- for i in range(len(sources)):
- if newer(sources[i], targets[i]):
- n_sources.append(sources[i])
- n_targets.append(targets[i])
-
- return (n_sources, n_targets)
-
-# newer_pairwise ()
-
-
-def newer_group (sources, target, missing='error'):
- """Return true if 'target' is out-of-date with respect to any file
- listed in 'sources'. In other words, if 'target' exists and is newer
- than every file in 'sources', return false; otherwise return true.
- 'missing' controls what we do when a source file is missing; the
- default ("error") is to blow up with an OSError from inside 'stat()';
- if it is "ignore", we silently drop any missing source files; if it is
- "newer", any missing source files make us assume that 'target' is
- out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
- carry out commands that wouldn't work because inputs are missing, but
- that doesn't matter because you're not actually going to run the
- commands).
- """
- # If the target doesn't even exist, then it's definitely out-of-date.
- if not os.path.exists(target):
- return 1
-
- # Otherwise we have to find out the hard way: if *any* source file
- # is more recent than 'target', then 'target' is out-of-date and
- # we can immediately return true. If we fall through to the end
- # of the loop, then 'target' is up-to-date and we return false.
- from stat import ST_MTIME
- target_mtime = os.stat(target)[ST_MTIME]
- for source in sources:
- if not os.path.exists(source):
- if missing == 'error': # blow up when we stat() the file
- pass
- elif missing == 'ignore': # missing source dropped from
- continue # target's dependency list
- elif missing == 'newer': # missing source means target is
- return 1 # out-of-date
-
- source_mtime = os.stat(source)[ST_MTIME]
- if source_mtime > target_mtime:
- return 1
- else:
- return 0
-
-# newer_group ()
diff --git a/sys/lib/python/distutils/dir_util.py b/sys/lib/python/distutils/dir_util.py
deleted file mode 100644
index 8c79a9f66..000000000
--- a/sys/lib/python/distutils/dir_util.py
+++ /dev/null
@@ -1,227 +0,0 @@
-"""distutils.dir_util
-
-Utility functions for manipulating directories and directory trees."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: dir_util.py 39416 2005-08-26 15:20:46Z tim_one $"
-
-import os, sys
-from types import *
-from distutils.errors import DistutilsFileError, DistutilsInternalError
-from distutils import log
-
-# cache for by mkpath() -- in addition to cheapening redundant calls,
-# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
-_path_created = {}
-
-# I don't use os.makedirs because a) it's new to Python 1.5.2, and
-# b) it blows up if the directory already exists (I want to silently
-# succeed in that case).
-def mkpath (name, mode=0777, verbose=0, dry_run=0):
- """Create a directory and any missing ancestor directories. If the
- directory already exists (or if 'name' is the empty string, which
- means the current directory, which of course exists), then do
- nothing. Raise DistutilsFileError if unable to create some
- directory along the way (eg. some sub-path exists, but is a file
- rather than a directory). If 'verbose' is true, print a one-line
- summary of each mkdir to stdout. Return the list of directories
- actually created."""
-
- global _path_created
-
- # Detect a common bug -- name is None
- if not isinstance(name, StringTypes):
- raise DistutilsInternalError, \
- "mkpath: 'name' must be a string (got %r)" % (name,)
-
- # XXX what's the better way to handle verbosity? print as we create
- # each directory in the path (the current behaviour), or only announce
- # the creation of the whole path? (quite easy to do the latter since
- # we're not using a recursive algorithm)
-
- name = os.path.normpath(name)
- created_dirs = []
- if os.path.isdir(name) or name == '':
- return created_dirs
- if _path_created.get(os.path.abspath(name)):
- return created_dirs
-
- (head, tail) = os.path.split(name)
- tails = [tail] # stack of lone dirs to create
-
- while head and tail and not os.path.isdir(head):
- #print "splitting '%s': " % head,
- (head, tail) = os.path.split(head)
- #print "to ('%s','%s')" % (head, tail)
- tails.insert(0, tail) # push next higher dir onto stack
-
- #print "stack of tails:", tails
-
- # now 'head' contains the deepest directory that already exists
- # (that is, the child of 'head' in 'name' is the highest directory
- # that does *not* exist)
- for d in tails:
- #print "head = %s, d = %s: " % (head, d),
- head = os.path.join(head, d)
- abs_head = os.path.abspath(head)
-
- if _path_created.get(abs_head):
- continue
-
- log.info("creating %s", head)
-
- if not dry_run:
- try:
- os.mkdir(head)
- created_dirs.append(head)
- except OSError, exc:
- raise DistutilsFileError, \
- "could not create '%s': %s" % (head, exc[-1])
-
- _path_created[abs_head] = 1
- return created_dirs
-
-# mkpath ()
-
-
-def create_tree (base_dir, files, mode=0777, verbose=0, dry_run=0):
-
- """Create all the empty directories under 'base_dir' needed to
- put 'files' there. 'base_dir' is just the a name of a directory
- which doesn't necessarily exist yet; 'files' is a list of filenames
- to be interpreted relative to 'base_dir'. 'base_dir' + the
- directory portion of every file in 'files' will be created if it
- doesn't already exist. 'mode', 'verbose' and 'dry_run' flags are as
- for 'mkpath()'."""
-
- # First get the list of directories to create
- need_dir = {}
- for file in files:
- need_dir[os.path.join(base_dir, os.path.dirname(file))] = 1
- need_dirs = need_dir.keys()
- need_dirs.sort()
-
- # Now create them
- for dir in need_dirs:
- mkpath(dir, mode, dry_run=dry_run)
-
-# create_tree ()
-
-
-def copy_tree (src, dst,
- preserve_mode=1,
- preserve_times=1,
- preserve_symlinks=0,
- update=0,
- verbose=0,
- dry_run=0):
-
- """Copy an entire directory tree 'src' to a new location 'dst'. Both
- 'src' and 'dst' must be directory names. If 'src' is not a
- directory, raise DistutilsFileError. If 'dst' does not exist, it is
- created with 'mkpath()'. The end result of the copy is that every
- file in 'src' is copied to 'dst', and directories under 'src' are
- recursively copied to 'dst'. Return the list of files that were
- copied or might have been copied, using their output name. The
- return value is unaffected by 'update' or 'dry_run': it is simply
- the list of all files under 'src', with the names changed to be
- under 'dst'.
-
- 'preserve_mode' and 'preserve_times' are the same as for
- 'copy_file'; note that they only apply to regular files, not to
- directories. If 'preserve_symlinks' is true, symlinks will be
- copied as symlinks (on platforms that support them!); otherwise
- (the default), the destination of the symlink will be copied.
- 'update' and 'verbose' are the same as for 'copy_file'."""
-
- from distutils.file_util import copy_file
-
- if not dry_run and not os.path.isdir(src):
- raise DistutilsFileError, \
- "cannot copy tree '%s': not a directory" % src
- try:
- names = os.listdir(src)
- except os.error, (errno, errstr):
- if dry_run:
- names = []
- else:
- raise DistutilsFileError, \
- "error listing files in '%s': %s" % (src, errstr)
-
- if not dry_run:
- mkpath(dst)
-
- outputs = []
-
- for n in names:
- src_name = os.path.join(src, n)
- dst_name = os.path.join(dst, n)
-
- if preserve_symlinks and os.path.islink(src_name):
- link_dest = os.readlink(src_name)
- log.info("linking %s -> %s", dst_name, link_dest)
- if not dry_run:
- os.symlink(link_dest, dst_name)
- outputs.append(dst_name)
-
- elif os.path.isdir(src_name):
- outputs.extend(
- copy_tree(src_name, dst_name, preserve_mode,
- preserve_times, preserve_symlinks, update,
- dry_run=dry_run))
- else:
- copy_file(src_name, dst_name, preserve_mode,
- preserve_times, update, dry_run=dry_run)
- outputs.append(dst_name)
-
- return outputs
-
-# copy_tree ()
-
-# Helper for remove_tree()
-def _build_cmdtuple(path, cmdtuples):
- for f in os.listdir(path):
- real_f = os.path.join(path,f)
- if os.path.isdir(real_f) and not os.path.islink(real_f):
- _build_cmdtuple(real_f, cmdtuples)
- else:
- cmdtuples.append((os.remove, real_f))
- cmdtuples.append((os.rmdir, path))
-
-
-def remove_tree (directory, verbose=0, dry_run=0):
- """Recursively remove an entire directory tree. Any errors are ignored
- (apart from being reported to stdout if 'verbose' is true).
- """
- from distutils.util import grok_environment_error
- global _path_created
-
- log.info("removing '%s' (and everything under it)", directory)
- if dry_run:
- return
- cmdtuples = []
- _build_cmdtuple(directory, cmdtuples)
- for cmd in cmdtuples:
- try:
- apply(cmd[0], (cmd[1],))
- # remove dir from cache if it's already there
- abspath = os.path.abspath(cmd[1])
- if _path_created.has_key(abspath):
- del _path_created[abspath]
- except (IOError, OSError), exc:
- log.warn(grok_environment_error(
- exc, "error removing %s: " % directory))
-
-
-def ensure_relative (path):
- """Take the full path 'path', and make it a relative path so
- it can be the second argument to os.path.join().
- """
- drive, path = os.path.splitdrive(path)
- if sys.platform == 'mac':
- return os.sep + path
- else:
- if path[0:1] == os.sep:
- path = drive + path[1:]
- return path
diff --git a/sys/lib/python/distutils/dist.py b/sys/lib/python/distutils/dist.py
deleted file mode 100644
index 68a7ad09b..000000000
--- a/sys/lib/python/distutils/dist.py
+++ /dev/null
@@ -1,1222 +0,0 @@
-"""distutils.dist
-
-Provides the Distribution class, which represents the module distribution
-being built/installed/distributed.
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: dist.py 38697 2005-03-23 18:54:36Z loewis $"
-
-import sys, os, string, re
-from types import *
-from copy import copy
-
-try:
- import warnings
-except ImportError:
- warnings = None
-
-from distutils.errors import *
-from distutils.fancy_getopt import FancyGetopt, translate_longopt
-from distutils.util import check_environ, strtobool, rfc822_escape
-from distutils import log
-from distutils.debug import DEBUG
-
-# Regex to define acceptable Distutils command names. This is not *quite*
-# the same as a Python NAME -- I don't allow leading underscores. The fact
-# that they're very similar is no coincidence; the default naming scheme is
-# to look for a Python module named after the command.
-command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
-
-
-class Distribution:
- """The core of the Distutils. Most of the work hiding behind 'setup'
- is really done within a Distribution instance, which farms the work out
- to the Distutils commands specified on the command line.
-
- Setup scripts will almost never instantiate Distribution directly,
- unless the 'setup()' function is totally inadequate to their needs.
- However, it is conceivable that a setup script might wish to subclass
- Distribution for some specialized purpose, and then pass the subclass
- to 'setup()' as the 'distclass' keyword argument. If so, it is
- necessary to respect the expectations that 'setup' has of Distribution.
- See the code for 'setup()', in core.py, for details.
- """
-
-
- # 'global_options' describes the command-line options that may be
- # supplied to the setup script prior to any actual commands.
- # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
- # these global options. This list should be kept to a bare minimum,
- # since every global option is also valid as a command option -- and we
- # don't want to pollute the commands with too many options that they
- # have minimal control over.
- # The fourth entry for verbose means that it can be repeated.
- global_options = [('verbose', 'v', "run verbosely (default)", 1),
- ('quiet', 'q', "run quietly (turns verbosity off)"),
- ('dry-run', 'n', "don't actually do anything"),
- ('help', 'h', "show detailed help message"),
- ]
-
- # 'common_usage' is a short (2-3 line) string describing the common
- # usage of the setup script.
- common_usage = """\
-Common commands: (see '--help-commands' for more)
-
- setup.py build will build the package underneath 'build/'
- setup.py install will install the package
-"""
-
- # options that are not propagated to the commands
- display_options = [
- ('help-commands', None,
- "list all available commands"),
- ('name', None,
- "print package name"),
- ('version', 'V',
- "print package version"),
- ('fullname', None,
- "print <package name>-<version>"),
- ('author', None,
- "print the author's name"),
- ('author-email', None,
- "print the author's email address"),
- ('maintainer', None,
- "print the maintainer's name"),
- ('maintainer-email', None,
- "print the maintainer's email address"),
- ('contact', None,
- "print the maintainer's name if known, else the author's"),
- ('contact-email', None,
- "print the maintainer's email address if known, else the author's"),
- ('url', None,
- "print the URL for this package"),
- ('license', None,
- "print the license of the package"),
- ('licence', None,
- "alias for --license"),
- ('description', None,
- "print the package description"),
- ('long-description', None,
- "print the long package description"),
- ('platforms', None,
- "print the list of platforms"),
- ('classifiers', None,
- "print the list of classifiers"),
- ('keywords', None,
- "print the list of keywords"),
- ('provides', None,
- "print the list of packages/modules provided"),
- ('requires', None,
- "print the list of packages/modules required"),
- ('obsoletes', None,
- "print the list of packages/modules made obsolete")
- ]
- display_option_names = map(lambda x: translate_longopt(x[0]),
- display_options)
-
- # negative options are options that exclude other options
- negative_opt = {'quiet': 'verbose'}
-
-
- # -- Creation/initialization methods -------------------------------
-
- def __init__ (self, attrs=None):
- """Construct a new Distribution instance: initialize all the
- attributes of a Distribution, and then use 'attrs' (a dictionary
- mapping attribute names to values) to assign some of those
- attributes their "real" values. (Any attributes not mentioned in
- 'attrs' will be assigned to some null value: 0, None, an empty list
- or dictionary, etc.) Most importantly, initialize the
- 'command_obj' attribute to the empty dictionary; this will be
- filled in with real command objects by 'parse_command_line()'.
- """
-
- # Default values for our command-line options
- self.verbose = 1
- self.dry_run = 0
- self.help = 0
- for attr in self.display_option_names:
- setattr(self, attr, 0)
-
- # Store the distribution meta-data (name, version, author, and so
- # forth) in a separate object -- we're getting to have enough
- # information here (and enough command-line options) that it's
- # worth it. Also delegate 'get_XXX()' methods to the 'metadata'
- # object in a sneaky and underhanded (but efficient!) way.
- self.metadata = DistributionMetadata()
- for basename in self.metadata._METHOD_BASENAMES:
- method_name = "get_" + basename
- setattr(self, method_name, getattr(self.metadata, method_name))
-
- # 'cmdclass' maps command names to class objects, so we
- # can 1) quickly figure out which class to instantiate when
- # we need to create a new command object, and 2) have a way
- # for the setup script to override command classes
- self.cmdclass = {}
-
- # 'command_packages' is a list of packages in which commands
- # are searched for. The factory for command 'foo' is expected
- # to be named 'foo' in the module 'foo' in one of the packages
- # named here. This list is searched from the left; an error
- # is raised if no named package provides the command being
- # searched for. (Always access using get_command_packages().)
- self.command_packages = None
-
- # 'script_name' and 'script_args' are usually set to sys.argv[0]
- # and sys.argv[1:], but they can be overridden when the caller is
- # not necessarily a setup script run from the command-line.
- self.script_name = None
- self.script_args = None
-
- # 'command_options' is where we store command options between
- # parsing them (from config files, the command-line, etc.) and when
- # they are actually needed -- ie. when the command in question is
- # instantiated. It is a dictionary of dictionaries of 2-tuples:
- # command_options = { command_name : { option : (source, value) } }
- self.command_options = {}
-
- # 'dist_files' is the list of (command, pyversion, file) that
- # have been created by any dist commands run so far. This is
- # filled regardless of whether the run is dry or not. pyversion
- # gives sysconfig.get_python_version() if the dist file is
- # specific to a Python version, 'any' if it is good for all
- # Python versions on the target platform, and '' for a source
- # file. pyversion should not be used to specify minimum or
- # maximum required Python versions; use the metainfo for that
- # instead.
- self.dist_files = []
-
- # These options are really the business of various commands, rather
- # than of the Distribution itself. We provide aliases for them in
- # Distribution as a convenience to the developer.
- self.packages = None
- self.package_data = {}
- self.package_dir = None
- self.py_modules = None
- self.libraries = None
- self.headers = None
- self.ext_modules = None
- self.ext_package = None
- self.include_dirs = None
- self.extra_path = None
- self.scripts = None
- self.data_files = None
-
- # And now initialize bookkeeping stuff that can't be supplied by
- # the caller at all. 'command_obj' maps command names to
- # Command instances -- that's how we enforce that every command
- # class is a singleton.
- self.command_obj = {}
-
- # 'have_run' maps command names to boolean values; it keeps track
- # of whether we have actually run a particular command, to make it
- # cheap to "run" a command whenever we think we might need to -- if
- # it's already been done, no need for expensive filesystem
- # operations, we just check the 'have_run' dictionary and carry on.
- # It's only safe to query 'have_run' for a command class that has
- # been instantiated -- a false value will be inserted when the
- # command object is created, and replaced with a true value when
- # the command is successfully run. Thus it's probably best to use
- # '.get()' rather than a straight lookup.
- self.have_run = {}
-
- # Now we'll use the attrs dictionary (ultimately, keyword args from
- # the setup script) to possibly override any or all of these
- # distribution options.
-
- if attrs:
- # Pull out the set of command options and work on them
- # specifically. Note that this order guarantees that aliased
- # command options will override any supplied redundantly
- # through the general options dictionary.
- options = attrs.get('options')
- if options:
- del attrs['options']
- for (command, cmd_options) in options.items():
- opt_dict = self.get_option_dict(command)
- for (opt, val) in cmd_options.items():
- opt_dict[opt] = ("setup script", val)
-
- if attrs.has_key('licence'):
- attrs['license'] = attrs['licence']
- del attrs['licence']
- msg = "'licence' distribution option is deprecated; use 'license'"
- if warnings is not None:
- warnings.warn(msg)
- else:
- sys.stderr.write(msg + "\n")
-
- # Now work on the rest of the attributes. Any attribute that's
- # not already defined is invalid!
- for (key,val) in attrs.items():
- if hasattr(self.metadata, "set_" + key):
- getattr(self.metadata, "set_" + key)(val)
- elif hasattr(self.metadata, key):
- setattr(self.metadata, key, val)
- elif hasattr(self, key):
- setattr(self, key, val)
- else:
- msg = "Unknown distribution option: %s" % repr(key)
- if warnings is not None:
- warnings.warn(msg)
- else:
- sys.stderr.write(msg + "\n")
-
- self.finalize_options()
-
- # __init__ ()
-
-
- def get_option_dict (self, command):
- """Get the option dictionary for a given command. If that
- command's option dictionary hasn't been created yet, then create it
- and return the new dictionary; otherwise, return the existing
- option dictionary.
- """
-
- dict = self.command_options.get(command)
- if dict is None:
- dict = self.command_options[command] = {}
- return dict
-
-
- def dump_option_dicts (self, header=None, commands=None, indent=""):
- from pprint import pformat
-
- if commands is None: # dump all command option dicts
- commands = self.command_options.keys()
- commands.sort()
-
- if header is not None:
- print indent + header
- indent = indent + " "
-
- if not commands:
- print indent + "no commands known yet"
- return
-
- for cmd_name in commands:
- opt_dict = self.command_options.get(cmd_name)
- if opt_dict is None:
- print indent + "no option dict for '%s' command" % cmd_name
- else:
- print indent + "option dict for '%s' command:" % cmd_name
- out = pformat(opt_dict)
- for line in string.split(out, "\n"):
- print indent + " " + line
-
- # dump_option_dicts ()
-
-
-
- # -- Config file finding/parsing methods ---------------------------
-
- def find_config_files (self):
- """Find as many configuration files as should be processed for this
- platform, and return a list of filenames in the order in which they
- should be parsed. The filenames returned are guaranteed to exist
- (modulo nasty race conditions).
-
- There are three possible config files: distutils.cfg in the
- Distutils installation directory (ie. where the top-level
- Distutils __inst__.py file lives), a file in the user's home
- directory named .pydistutils.cfg on Unix and pydistutils.cfg
- on Windows/Mac, and setup.cfg in the current directory.
- """
- files = []
- check_environ()
-
- # Where to look for the system-wide Distutils config file
- sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
-
- # Look for the system config file
- sys_file = os.path.join(sys_dir, "distutils.cfg")
- if os.path.isfile(sys_file):
- files.append(sys_file)
-
- # What to call the per-user config file
- if os.name == 'posix':
- user_filename = ".pydistutils.cfg"
- else:
- user_filename = "pydistutils.cfg"
-
- # And look for the user config file
- if os.environ.has_key('HOME'):
- user_file = os.path.join(os.environ.get('HOME'), user_filename)
- if os.path.isfile(user_file):
- files.append(user_file)
-
- # All platforms support local setup.cfg
- local_file = "setup.cfg"
- if os.path.isfile(local_file):
- files.append(local_file)
-
- return files
-
- # find_config_files ()
-
-
- def parse_config_files (self, filenames=None):
-
- from ConfigParser import ConfigParser
-
- if filenames is None:
- filenames = self.find_config_files()
-
- if DEBUG: print "Distribution.parse_config_files():"
-
- parser = ConfigParser()
- for filename in filenames:
- if DEBUG: print " reading", filename
- parser.read(filename)
- for section in parser.sections():
- options = parser.options(section)
- opt_dict = self.get_option_dict(section)
-
- for opt in options:
- if opt != '__name__':
- val = parser.get(section,opt)
- opt = string.replace(opt, '-', '_')
- opt_dict[opt] = (filename, val)
-
- # Make the ConfigParser forget everything (so we retain
- # the original filenames that options come from)
- parser.__init__()
-
- # If there was a "global" section in the config file, use it
- # to set Distribution options.
-
- if self.command_options.has_key('global'):
- for (opt, (src, val)) in self.command_options['global'].items():
- alias = self.negative_opt.get(opt)
- try:
- if alias:
- setattr(self, alias, not strtobool(val))
- elif opt in ('verbose', 'dry_run'): # ugh!
- setattr(self, opt, strtobool(val))
- else:
- setattr(self, opt, val)
- except ValueError, msg:
- raise DistutilsOptionError, msg
-
- # parse_config_files ()
-
-
- # -- Command-line parsing methods ----------------------------------
-
- def parse_command_line (self):
- """Parse the setup script's command line, taken from the
- 'script_args' instance attribute (which defaults to 'sys.argv[1:]'
- -- see 'setup()' in core.py). This list is first processed for
- "global options" -- options that set attributes of the Distribution
- instance. Then, it is alternately scanned for Distutils commands
- and options for that command. Each new command terminates the
- options for the previous command. The allowed options for a
- command are determined by the 'user_options' attribute of the
- command class -- thus, we have to be able to load command classes
- in order to parse the command line. Any error in that 'options'
- attribute raises DistutilsGetoptError; any error on the
- command-line raises DistutilsArgError. If no Distutils commands
- were found on the command line, raises DistutilsArgError. Return
- true if command-line was successfully parsed and we should carry
- on with executing commands; false if no errors but we shouldn't
- execute commands (currently, this only happens if user asks for
- help).
- """
- #
- # We now have enough information to show the Macintosh dialog
- # that allows the user to interactively specify the "command line".
- #
- toplevel_options = self._get_toplevel_options()
- if sys.platform == 'mac':
- import EasyDialogs
- cmdlist = self.get_command_list()
- self.script_args = EasyDialogs.GetArgv(
- toplevel_options + self.display_options, cmdlist)
-
- # We have to parse the command line a bit at a time -- global
- # options, then the first command, then its options, and so on --
- # because each command will be handled by a different class, and
- # the options that are valid for a particular class aren't known
- # until we have loaded the command class, which doesn't happen
- # until we know what the command is.
-
- self.commands = []
- parser = FancyGetopt(toplevel_options + self.display_options)
- parser.set_negative_aliases(self.negative_opt)
- parser.set_aliases({'licence': 'license'})
- args = parser.getopt(args=self.script_args, object=self)
- option_order = parser.get_option_order()
- log.set_verbosity(self.verbose)
-
- # for display options we return immediately
- if self.handle_display_options(option_order):
- return
-
- while args:
- args = self._parse_command_opts(parser, args)
- if args is None: # user asked for help (and got it)
- return
-
- # Handle the cases of --help as a "global" option, ie.
- # "setup.py --help" and "setup.py --help command ...". For the
- # former, we show global options (--verbose, --dry-run, etc.)
- # and display-only options (--name, --version, etc.); for the
- # latter, we omit the display-only options and show help for
- # each command listed on the command line.
- if self.help:
- self._show_help(parser,
- display_options=len(self.commands) == 0,
- commands=self.commands)
- return
-
- # Oops, no commands found -- an end-user error
- if not self.commands:
- raise DistutilsArgError, "no commands supplied"
-
- # All is well: return true
- return 1
-
- # parse_command_line()
-
- def _get_toplevel_options (self):
- """Return the non-display options recognized at the top level.
-
- This includes options that are recognized *only* at the top
- level as well as options recognized for commands.
- """
- return self.global_options + [
- ("command-packages=", None,
- "list of packages that provide distutils commands"),
- ]
-
- def _parse_command_opts (self, parser, args):
- """Parse the command-line options for a single command.
- 'parser' must be a FancyGetopt instance; 'args' must be the list
- of arguments, starting with the current command (whose options
- we are about to parse). Returns a new version of 'args' with
- the next command at the front of the list; will be the empty
- list if there are no more commands on the command line. Returns
- None if the user asked for help on this command.
- """
- # late import because of mutual dependence between these modules
- from distutils.cmd import Command
-
- # Pull the current command from the head of the command line
- command = args[0]
- if not command_re.match(command):
- raise SystemExit, "invalid command name '%s'" % command
- self.commands.append(command)
-
- # Dig up the command class that implements this command, so we
- # 1) know that it's a valid command, and 2) know which options
- # it takes.
- try:
- cmd_class = self.get_command_class(command)
- except DistutilsModuleError, msg:
- raise DistutilsArgError, msg
-
- # Require that the command class be derived from Command -- want
- # to be sure that the basic "command" interface is implemented.
- if not issubclass(cmd_class, Command):
- raise DistutilsClassError, \
- "command class %s must subclass Command" % cmd_class
-
- # Also make sure that the command object provides a list of its
- # known options.
- if not (hasattr(cmd_class, 'user_options') and
- type(cmd_class.user_options) is ListType):
- raise DistutilsClassError, \
- ("command class %s must provide " +
- "'user_options' attribute (a list of tuples)") % \
- cmd_class
-
- # If the command class has a list of negative alias options,
- # merge it in with the global negative aliases.
- negative_opt = self.negative_opt
- if hasattr(cmd_class, 'negative_opt'):
- negative_opt = copy(negative_opt)
- negative_opt.update(cmd_class.negative_opt)
-
- # Check for help_options in command class. They have a different
- # format (tuple of four) so we need to preprocess them here.
- if (hasattr(cmd_class, 'help_options') and
- type(cmd_class.help_options) is ListType):
- help_options = fix_help_options(cmd_class.help_options)
- else:
- help_options = []
-
-
- # All commands support the global options too, just by adding
- # in 'global_options'.
- parser.set_option_table(self.global_options +
- cmd_class.user_options +
- help_options)
- parser.set_negative_aliases(negative_opt)
- (args, opts) = parser.getopt(args[1:])
- if hasattr(opts, 'help') and opts.help:
- self._show_help(parser, display_options=0, commands=[cmd_class])
- return
-
- if (hasattr(cmd_class, 'help_options') and
- type(cmd_class.help_options) is ListType):
- help_option_found=0
- for (help_option, short, desc, func) in cmd_class.help_options:
- if hasattr(opts, parser.get_attr_name(help_option)):
- help_option_found=1
- #print "showing help for option %s of command %s" % \
- # (help_option[0],cmd_class)
-
- if callable(func):
- func()
- else:
- raise DistutilsClassError(
- "invalid help function %r for help option '%s': "
- "must be a callable object (function, etc.)"
- % (func, help_option))
-
- if help_option_found:
- return
-
- # Put the options from the command-line into their official
- # holding pen, the 'command_options' dictionary.
- opt_dict = self.get_option_dict(command)
- for (name, value) in vars(opts).items():
- opt_dict[name] = ("command line", value)
-
- return args
-
- # _parse_command_opts ()
-
- def finalize_options (self):
- """Set final values for all the options on the Distribution
- instance, analogous to the .finalize_options() method of Command
- objects.
- """
-
- keywords = self.metadata.keywords
- if keywords is not None:
- if type(keywords) is StringType:
- keywordlist = string.split(keywords, ',')
- self.metadata.keywords = map(string.strip, keywordlist)
-
- platforms = self.metadata.platforms
- if platforms is not None:
- if type(platforms) is StringType:
- platformlist = string.split(platforms, ',')
- self.metadata.platforms = map(string.strip, platformlist)
-
- def _show_help (self,
- parser,
- global_options=1,
- display_options=1,
- commands=[]):
- """Show help for the setup script command-line in the form of
- several lists of command-line options. 'parser' should be a
- FancyGetopt instance; do not expect it to be returned in the
- same state, as its option table will be reset to make it
- generate the correct help text.
-
- If 'global_options' is true, lists the global options:
- --verbose, --dry-run, etc. If 'display_options' is true, lists
- the "display-only" options: --name, --version, etc. Finally,
- lists per-command help for every command name or command class
- in 'commands'.
- """
- # late import because of mutual dependence between these modules
- from distutils.core import gen_usage
- from distutils.cmd import Command
-
- if global_options:
- if display_options:
- options = self._get_toplevel_options()
- else:
- options = self.global_options
- parser.set_option_table(options)
- parser.print_help(self.common_usage + "\nGlobal options:")
- print
-
- if display_options:
- parser.set_option_table(self.display_options)
- parser.print_help(
- "Information display options (just display " +
- "information, ignore any commands)")
- print
-
- for command in self.commands:
- if type(command) is ClassType and issubclass(command, Command):
- klass = command
- else:
- klass = self.get_command_class(command)
- if (hasattr(klass, 'help_options') and
- type(klass.help_options) is ListType):
- parser.set_option_table(klass.user_options +
- fix_help_options(klass.help_options))
- else:
- parser.set_option_table(klass.user_options)
- parser.print_help("Options for '%s' command:" % klass.__name__)
- print
-
- print gen_usage(self.script_name)
- return
-
- # _show_help ()
-
-
- def handle_display_options (self, option_order):
- """If there were any non-global "display-only" options
- (--help-commands or the metadata display options) on the command
- line, display the requested info and return true; else return
- false.
- """
- from distutils.core import gen_usage
-
- # User just wants a list of commands -- we'll print it out and stop
- # processing now (ie. if they ran "setup --help-commands foo bar",
- # we ignore "foo bar").
- if self.help_commands:
- self.print_commands()
- print
- print gen_usage(self.script_name)
- return 1
-
- # If user supplied any of the "display metadata" options, then
- # display that metadata in the order in which the user supplied the
- # metadata options.
- any_display_options = 0
- is_display_option = {}
- for option in self.display_options:
- is_display_option[option[0]] = 1
-
- for (opt, val) in option_order:
- if val and is_display_option.get(opt):
- opt = translate_longopt(opt)
- value = getattr(self.metadata, "get_"+opt)()
- if opt in ['keywords', 'platforms']:
- print string.join(value, ',')
- elif opt in ('classifiers', 'provides', 'requires',
- 'obsoletes'):
- print string.join(value, '\n')
- else:
- print value
- any_display_options = 1
-
- return any_display_options
-
- # handle_display_options()
-
- def print_command_list (self, commands, header, max_length):
- """Print a subset of the list of all commands -- used by
- 'print_commands()'.
- """
-
- print header + ":"
-
- for cmd in commands:
- klass = self.cmdclass.get(cmd)
- if not klass:
- klass = self.get_command_class(cmd)
- try:
- description = klass.description
- except AttributeError:
- description = "(no description available)"
-
- print " %-*s %s" % (max_length, cmd, description)
-
- # print_command_list ()
-
-
- def print_commands (self):
- """Print out a help message listing all available commands with a
- description of each. The list is divided into "standard commands"
- (listed in distutils.command.__all__) and "extra commands"
- (mentioned in self.cmdclass, but not a standard command). The
- descriptions come from the command class attribute
- 'description'.
- """
-
- import distutils.command
- std_commands = distutils.command.__all__
- is_std = {}
- for cmd in std_commands:
- is_std[cmd] = 1
-
- extra_commands = []
- for cmd in self.cmdclass.keys():
- if not is_std.get(cmd):
- extra_commands.append(cmd)
-
- max_length = 0
- for cmd in (std_commands + extra_commands):
- if len(cmd) > max_length:
- max_length = len(cmd)
-
- self.print_command_list(std_commands,
- "Standard commands",
- max_length)
- if extra_commands:
- print
- self.print_command_list(extra_commands,
- "Extra commands",
- max_length)
-
- # print_commands ()
-
- def get_command_list (self):
- """Get a list of (command, description) tuples.
- The list is divided into "standard commands" (listed in
- distutils.command.__all__) and "extra commands" (mentioned in
- self.cmdclass, but not a standard command). The descriptions come
- from the command class attribute 'description'.
- """
- # Currently this is only used on Mac OS, for the Mac-only GUI
- # Distutils interface (by Jack Jansen)
-
- import distutils.command
- std_commands = distutils.command.__all__
- is_std = {}
- for cmd in std_commands:
- is_std[cmd] = 1
-
- extra_commands = []
- for cmd in self.cmdclass.keys():
- if not is_std.get(cmd):
- extra_commands.append(cmd)
-
- rv = []
- for cmd in (std_commands + extra_commands):
- klass = self.cmdclass.get(cmd)
- if not klass:
- klass = self.get_command_class(cmd)
- try:
- description = klass.description
- except AttributeError:
- description = "(no description available)"
- rv.append((cmd, description))
- return rv
-
- # -- Command class/object methods ----------------------------------
-
- def get_command_packages (self):
- """Return a list of packages from which commands are loaded."""
- pkgs = self.command_packages
- if not isinstance(pkgs, type([])):
- pkgs = string.split(pkgs or "", ",")
- for i in range(len(pkgs)):
- pkgs[i] = string.strip(pkgs[i])
- pkgs = filter(None, pkgs)
- if "distutils.command" not in pkgs:
- pkgs.insert(0, "distutils.command")
- self.command_packages = pkgs
- return pkgs
-
- def get_command_class (self, command):
- """Return the class that implements the Distutils command named by
- 'command'. First we check the 'cmdclass' dictionary; if the
- command is mentioned there, we fetch the class object from the
- dictionary and return it. Otherwise we load the command module
- ("distutils.command." + command) and fetch the command class from
- the module. The loaded class is also stored in 'cmdclass'
- to speed future calls to 'get_command_class()'.
-
- Raises DistutilsModuleError if the expected module could not be
- found, or if that module does not define the expected class.
- """
- klass = self.cmdclass.get(command)
- if klass:
- return klass
-
- for pkgname in self.get_command_packages():
- module_name = "%s.%s" % (pkgname, command)
- klass_name = command
-
- try:
- __import__ (module_name)
- module = sys.modules[module_name]
- except ImportError:
- continue
-
- try:
- klass = getattr(module, klass_name)
- except AttributeError:
- raise DistutilsModuleError, \
- "invalid command '%s' (no class '%s' in module '%s')" \
- % (command, klass_name, module_name)
-
- self.cmdclass[command] = klass
- return klass
-
- raise DistutilsModuleError("invalid command '%s'" % command)
-
-
- # get_command_class ()
-
- def get_command_obj (self, command, create=1):
- """Return the command object for 'command'. Normally this object
- is cached on a previous call to 'get_command_obj()'; if no command
- object for 'command' is in the cache, then we either create and
- return it (if 'create' is true) or return None.
- """
- cmd_obj = self.command_obj.get(command)
- if not cmd_obj and create:
- if DEBUG:
- print "Distribution.get_command_obj(): " \
- "creating '%s' command object" % command
-
- klass = self.get_command_class(command)
- cmd_obj = self.command_obj[command] = klass(self)
- self.have_run[command] = 0
-
- # Set any options that were supplied in config files
- # or on the command line. (NB. support for error
- # reporting is lame here: any errors aren't reported
- # until 'finalize_options()' is called, which means
- # we won't report the source of the error.)
- options = self.command_options.get(command)
- if options:
- self._set_command_options(cmd_obj, options)
-
- return cmd_obj
-
- def _set_command_options (self, command_obj, option_dict=None):
- """Set the options for 'command_obj' from 'option_dict'. Basically
- this means copying elements of a dictionary ('option_dict') to
- attributes of an instance ('command').
-
- 'command_obj' must be a Command instance. If 'option_dict' is not
- supplied, uses the standard option dictionary for this command
- (from 'self.command_options').
- """
- command_name = command_obj.get_command_name()
- if option_dict is None:
- option_dict = self.get_option_dict(command_name)
-
- if DEBUG: print " setting options for '%s' command:" % command_name
- for (option, (source, value)) in option_dict.items():
- if DEBUG: print " %s = %s (from %s)" % (option, value, source)
- try:
- bool_opts = map(translate_longopt, command_obj.boolean_options)
- except AttributeError:
- bool_opts = []
- try:
- neg_opt = command_obj.negative_opt
- except AttributeError:
- neg_opt = {}
-
- try:
- is_string = type(value) is StringType
- if neg_opt.has_key(option) and is_string:
- setattr(command_obj, neg_opt[option], not strtobool(value))
- elif option in bool_opts and is_string:
- setattr(command_obj, option, strtobool(value))
- elif hasattr(command_obj, option):
- setattr(command_obj, option, value)
- else:
- raise DistutilsOptionError, \
- ("error in %s: command '%s' has no such option '%s'"
- % (source, command_name, option))
- except ValueError, msg:
- raise DistutilsOptionError, msg
-
- def reinitialize_command (self, command, reinit_subcommands=0):
- """Reinitializes a command to the state it was in when first
- returned by 'get_command_obj()': ie., initialized but not yet
- finalized. This provides the opportunity to sneak option
- values in programmatically, overriding or supplementing
- user-supplied values from the config files and command line.
- You'll have to re-finalize the command object (by calling
- 'finalize_options()' or 'ensure_finalized()') before using it for
- real.
-
- 'command' should be a command name (string) or command object. If
- 'reinit_subcommands' is true, also reinitializes the command's
- sub-commands, as declared by the 'sub_commands' class attribute (if
- it has one). See the "install" command for an example. Only
- reinitializes the sub-commands that actually matter, ie. those
- whose test predicates return true.
-
- Returns the reinitialized command object.
- """
- from distutils.cmd import Command
- if not isinstance(command, Command):
- command_name = command
- command = self.get_command_obj(command_name)
- else:
- command_name = command.get_command_name()
-
- if not command.finalized:
- return command
- command.initialize_options()
- command.finalized = 0
- self.have_run[command_name] = 0
- self._set_command_options(command)
-
- if reinit_subcommands:
- for sub in command.get_sub_commands():
- self.reinitialize_command(sub, reinit_subcommands)
-
- return command
-
-
- # -- Methods that operate on the Distribution ----------------------
-
- def announce (self, msg, level=1):
- log.debug(msg)
-
- def run_commands (self):
- """Run each command that was seen on the setup script command line.
- Uses the list of commands found and cache of command objects
- created by 'get_command_obj()'.
- """
- for cmd in self.commands:
- self.run_command(cmd)
-
-
- # -- Methods that operate on its Commands --------------------------
-
- def run_command (self, command):
- """Do whatever it takes to run a command (including nothing at all,
- if the command has already been run). Specifically: if we have
- already created and run the command named by 'command', return
- silently without doing anything. If the command named by 'command'
- doesn't even have a command object yet, create one. Then invoke
- 'run()' on that command object (or an existing one).
- """
- # Already been here, done that? then return silently.
- if self.have_run.get(command):
- return
-
- log.info("running %s", command)
- cmd_obj = self.get_command_obj(command)
- cmd_obj.ensure_finalized()
- cmd_obj.run()
- self.have_run[command] = 1
-
-
- # -- Distribution query methods ------------------------------------
-
- def has_pure_modules (self):
- return len(self.packages or self.py_modules or []) > 0
-
- def has_ext_modules (self):
- return self.ext_modules and len(self.ext_modules) > 0
-
- def has_c_libraries (self):
- return self.libraries and len(self.libraries) > 0
-
- def has_modules (self):
- return self.has_pure_modules() or self.has_ext_modules()
-
- def has_headers (self):
- return self.headers and len(self.headers) > 0
-
- def has_scripts (self):
- return self.scripts and len(self.scripts) > 0
-
- def has_data_files (self):
- return self.data_files and len(self.data_files) > 0
-
- def is_pure (self):
- return (self.has_pure_modules() and
- not self.has_ext_modules() and
- not self.has_c_libraries())
-
- # -- Metadata query methods ----------------------------------------
-
- # If you're looking for 'get_name()', 'get_version()', and so forth,
- # they are defined in a sneaky way: the constructor binds self.get_XXX
- # to self.metadata.get_XXX. The actual code is in the
- # DistributionMetadata class, below.
-
-# class Distribution
-
-
-class DistributionMetadata:
- """Dummy class to hold the distribution meta-data: name, version,
- author, and so forth.
- """
-
- _METHOD_BASENAMES = ("name", "version", "author", "author_email",
- "maintainer", "maintainer_email", "url",
- "license", "description", "long_description",
- "keywords", "platforms", "fullname", "contact",
- "contact_email", "license", "classifiers",
- "download_url",
- # PEP 314
- "provides", "requires", "obsoletes",
- )
-
- def __init__ (self):
- self.name = None
- self.version = None
- self.author = None
- self.author_email = None
- self.maintainer = None
- self.maintainer_email = None
- self.url = None
- self.license = None
- self.description = None
- self.long_description = None
- self.keywords = None
- self.platforms = None
- self.classifiers = None
- self.download_url = None
- # PEP 314
- self.provides = None
- self.requires = None
- self.obsoletes = None
-
- def write_pkg_info (self, base_dir):
- """Write the PKG-INFO file into the release tree.
- """
- pkg_info = open( os.path.join(base_dir, 'PKG-INFO'), 'w')
-
- self.write_pkg_file(pkg_info)
-
- pkg_info.close()
-
- # write_pkg_info ()
-
- def write_pkg_file (self, file):
- """Write the PKG-INFO format data to a file object.
- """
- version = '1.0'
- if self.provides or self.requires or self.obsoletes:
- version = '1.1'
-
- file.write('Metadata-Version: %s\n' % version)
- file.write('Name: %s\n' % self.get_name() )
- file.write('Version: %s\n' % self.get_version() )
- file.write('Summary: %s\n' % self.get_description() )
- file.write('Home-page: %s\n' % self.get_url() )
- file.write('Author: %s\n' % self.get_contact() )
- file.write('Author-email: %s\n' % self.get_contact_email() )
- file.write('License: %s\n' % self.get_license() )
- if self.download_url:
- file.write('Download-URL: %s\n' % self.download_url)
-
- long_desc = rfc822_escape( self.get_long_description() )
- file.write('Description: %s\n' % long_desc)
-
- keywords = string.join( self.get_keywords(), ',')
- if keywords:
- file.write('Keywords: %s\n' % keywords )
-
- self._write_list(file, 'Platform', self.get_platforms())
- self._write_list(file, 'Classifier', self.get_classifiers())
-
- # PEP 314
- self._write_list(file, 'Requires', self.get_requires())
- self._write_list(file, 'Provides', self.get_provides())
- self._write_list(file, 'Obsoletes', self.get_obsoletes())
-
- def _write_list (self, file, name, values):
- for value in values:
- file.write('%s: %s\n' % (name, value))
-
- # -- Metadata query methods ----------------------------------------
-
- def get_name (self):
- return self.name or "UNKNOWN"
-
- def get_version(self):
- return self.version or "0.0.0"
-
- def get_fullname (self):
- return "%s-%s" % (self.get_name(), self.get_version())
-
- def get_author(self):
- return self.author or "UNKNOWN"
-
- def get_author_email(self):
- return self.author_email or "UNKNOWN"
-
- def get_maintainer(self):
- return self.maintainer or "UNKNOWN"
-
- def get_maintainer_email(self):
- return self.maintainer_email or "UNKNOWN"
-
- def get_contact(self):
- return (self.maintainer or
- self.author or
- "UNKNOWN")
-
- def get_contact_email(self):
- return (self.maintainer_email or
- self.author_email or
- "UNKNOWN")
-
- def get_url(self):
- return self.url or "UNKNOWN"
-
- def get_license(self):
- return self.license or "UNKNOWN"
- get_licence = get_license
-
- def get_description(self):
- return self.description or "UNKNOWN"
-
- def get_long_description(self):
- return self.long_description or "UNKNOWN"
-
- def get_keywords(self):
- return self.keywords or []
-
- def get_platforms(self):
- return self.platforms or ["UNKNOWN"]
-
- def get_classifiers(self):
- return self.classifiers or []
-
- def get_download_url(self):
- return self.download_url or "UNKNOWN"
-
- # PEP 314
-
- def get_requires(self):
- return self.requires or []
-
- def set_requires(self, value):
- import distutils.versionpredicate
- for v in value:
- distutils.versionpredicate.VersionPredicate(v)
- self.requires = value
-
- def get_provides(self):
- return self.provides or []
-
- def set_provides(self, value):
- value = [v.strip() for v in value]
- for v in value:
- import distutils.versionpredicate
- distutils.versionpredicate.split_provision(v)
- self.provides = value
-
- def get_obsoletes(self):
- return self.obsoletes or []
-
- def set_obsoletes(self, value):
- import distutils.versionpredicate
- for v in value:
- distutils.versionpredicate.VersionPredicate(v)
- self.obsoletes = value
-
-# class DistributionMetadata
-
-
-def fix_help_options (options):
- """Convert a 4-tuple 'help_options' list as found in various command
- classes to the 3-tuple form required by FancyGetopt.
- """
- new_options = []
- for help_tuple in options:
- new_options.append(help_tuple[0:3])
- return new_options
-
-
-if __name__ == "__main__":
- dist = Distribution()
- print "ok"
diff --git a/sys/lib/python/distutils/emxccompiler.py b/sys/lib/python/distutils/emxccompiler.py
deleted file mode 100644
index c3c24e732..000000000
--- a/sys/lib/python/distutils/emxccompiler.py
+++ /dev/null
@@ -1,315 +0,0 @@
-"""distutils.emxccompiler
-
-Provides the EMXCCompiler class, a subclass of UnixCCompiler that
-handles the EMX port of the GNU C compiler to OS/2.
-"""
-
-# issues:
-#
-# * OS/2 insists that DLLs can have names no longer than 8 characters
-# We put export_symbols in a def-file, as though the DLL can have
-# an arbitrary length name, but truncate the output filename.
-#
-# * only use OMF objects and use LINK386 as the linker (-Zomf)
-#
-# * always build for multithreading (-Zmt) as the accompanying OS/2 port
-# of Python is only distributed with threads enabled.
-#
-# tested configurations:
-#
-# * EMX gcc 2.81/EMX 0.9d fix03
-
-__revision__ = "$Id: emxccompiler.py 34786 2003-12-02 12:17:59Z aimacintyre $"
-
-import os,sys,copy
-from distutils.ccompiler import gen_preprocess_options, gen_lib_options
-from distutils.unixccompiler import UnixCCompiler
-from distutils.file_util import write_file
-from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
-from distutils import log
-
-class EMXCCompiler (UnixCCompiler):
-
- compiler_type = 'emx'
- obj_extension = ".obj"
- static_lib_extension = ".lib"
- shared_lib_extension = ".dll"
- static_lib_format = "%s%s"
- shared_lib_format = "%s%s"
- res_extension = ".res" # compiled resource file
- exe_extension = ".exe"
-
- def __init__ (self,
- verbose=0,
- dry_run=0,
- force=0):
-
- UnixCCompiler.__init__ (self, verbose, dry_run, force)
-
- (status, details) = check_config_h()
- self.debug_print("Python's GCC status: %s (details: %s)" %
- (status, details))
- if status is not CONFIG_H_OK:
- self.warn(
- "Python's pyconfig.h doesn't seem to support your compiler. " +
- ("Reason: %s." % details) +
- "Compiling may fail because of undefined preprocessor macros.")
-
- (self.gcc_version, self.ld_version) = \
- get_versions()
- self.debug_print(self.compiler_type + ": gcc %s, ld %s\n" %
- (self.gcc_version,
- self.ld_version) )
-
- # Hard-code GCC because that's what this is all about.
- # XXX optimization, warnings etc. should be customizable.
- self.set_executables(compiler='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
- compiler_so='gcc -Zomf -Zmt -O3 -fomit-frame-pointer -mprobe -Wall',
- linker_exe='gcc -Zomf -Zmt -Zcrtdll',
- linker_so='gcc -Zomf -Zmt -Zcrtdll -Zdll')
-
- # want the gcc library statically linked (so that we don't have
- # to distribute a version dependent on the compiler we have)
- self.dll_libraries=["gcc"]
-
- # __init__ ()
-
- def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
- if ext == '.rc':
- # gcc requires '.rc' compiled to binary ('.res') files !!!
- try:
- self.spawn(["rc", "-r", src])
- except DistutilsExecError, msg:
- raise CompileError, msg
- else: # for other files use the C-compiler
- try:
- self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
- extra_postargs)
- except DistutilsExecError, msg:
- raise CompileError, msg
-
- def link (self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
-
- # use separate copies, so we can modify the lists
- extra_preargs = copy.copy(extra_preargs or [])
- libraries = copy.copy(libraries or [])
- objects = copy.copy(objects or [])
-
- # Additional libraries
- libraries.extend(self.dll_libraries)
-
- # handle export symbols by creating a def-file
- # with executables this only works with gcc/ld as linker
- if ((export_symbols is not None) and
- (target_desc != self.EXECUTABLE)):
- # (The linker doesn't do anything if output is up-to-date.
- # So it would probably better to check if we really need this,
- # but for this we had to insert some unchanged parts of
- # UnixCCompiler, and this is not what we want.)
-
- # we want to put some files in the same directory as the
- # object files are, build_temp doesn't help much
- # where are the object files
- temp_dir = os.path.dirname(objects[0])
- # name of dll to give the helper files the same base name
- (dll_name, dll_extension) = os.path.splitext(
- os.path.basename(output_filename))
-
- # generate the filenames for these files
- def_file = os.path.join(temp_dir, dll_name + ".def")
-
- # Generate .def file
- contents = [
- "LIBRARY %s INITINSTANCE TERMINSTANCE" % \
- os.path.splitext(os.path.basename(output_filename))[0],
- "DATA MULTIPLE NONSHARED",
- "EXPORTS"]
- for sym in export_symbols:
- contents.append(' "%s"' % sym)
- self.execute(write_file, (def_file, contents),
- "writing %s" % def_file)
-
- # next add options for def-file and to creating import libraries
- # for gcc/ld the def-file is specified as any other object files
- objects.append(def_file)
-
- #end: if ((export_symbols is not None) and
- # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
-
- # who wants symbols and a many times larger output file
- # should explicitly switch the debug mode on
- # otherwise we let dllwrap/ld strip the output file
- # (On my machine: 10KB < stripped_file < ??100KB
- # unstripped_file = stripped_file + XXX KB
- # ( XXX=254 for a typical python extension))
- if not debug:
- extra_preargs.append("-s")
-
- UnixCCompiler.link(self,
- target_desc,
- objects,
- output_filename,
- output_dir,
- libraries,
- library_dirs,
- runtime_library_dirs,
- None, # export_symbols, we do this in our def-file
- debug,
- extra_preargs,
- extra_postargs,
- build_temp,
- target_lang)
-
- # link ()
-
- # -- Miscellaneous methods -----------------------------------------
-
- # override the object_filenames method from CCompiler to
- # support rc and res-files
- def object_filenames (self,
- source_filenames,
- strip_dir=0,
- output_dir=''):
- if output_dir is None: output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- # use normcase to make sure '.rc' is really '.rc' and not '.RC'
- (base, ext) = os.path.splitext (os.path.normcase(src_name))
- if ext not in (self.src_extensions + ['.rc']):
- raise UnknownFileError, \
- "unknown file type '%s' (from '%s')" % \
- (ext, src_name)
- if strip_dir:
- base = os.path.basename (base)
- if ext == '.rc':
- # these need to be compiled to object files
- obj_names.append (os.path.join (output_dir,
- base + self.res_extension))
- else:
- obj_names.append (os.path.join (output_dir,
- base + self.obj_extension))
- return obj_names
-
- # object_filenames ()
-
- # override the find_library_file method from UnixCCompiler
- # to deal with file naming/searching differences
- def find_library_file(self, dirs, lib, debug=0):
- shortlib = '%s.lib' % lib
- longlib = 'lib%s.lib' % lib # this form very rare
-
- # get EMX's default library directory search path
- try:
- emx_dirs = os.environ['LIBRARY_PATH'].split(';')
- except KeyError:
- emx_dirs = []
-
- for dir in dirs + emx_dirs:
- shortlibp = os.path.join(dir, shortlib)
- longlibp = os.path.join(dir, longlib)
- if os.path.exists(shortlibp):
- return shortlibp
- elif os.path.exists(longlibp):
- return longlibp
-
- # Oops, didn't find it in *any* of 'dirs'
- return None
-
-# class EMXCCompiler
-
-
-# Because these compilers aren't configured in Python's pyconfig.h file by
-# default, we should at least warn the user if he is using a unmodified
-# version.
-
-CONFIG_H_OK = "ok"
-CONFIG_H_NOTOK = "not ok"
-CONFIG_H_UNCERTAIN = "uncertain"
-
-def check_config_h():
-
- """Check if the current Python installation (specifically, pyconfig.h)
- appears amenable to building extensions with GCC. Returns a tuple
- (status, details), where 'status' is one of the following constants:
- CONFIG_H_OK
- all is well, go ahead and compile
- CONFIG_H_NOTOK
- doesn't look good
- CONFIG_H_UNCERTAIN
- not sure -- unable to read pyconfig.h
- 'details' is a human-readable string explaining the situation.
-
- Note there are two ways to conclude "OK": either 'sys.version' contains
- the string "GCC" (implying that this Python was built with GCC), or the
- installed "pyconfig.h" contains the string "__GNUC__".
- """
-
- # XXX since this function also checks sys.version, it's not strictly a
- # "pyconfig.h" check -- should probably be renamed...
-
- from distutils import sysconfig
- import string
- # if sys.version contains GCC then python was compiled with
- # GCC, and the pyconfig.h file should be OK
- if string.find(sys.version,"GCC") >= 0:
- return (CONFIG_H_OK, "sys.version mentions 'GCC'")
-
- fn = sysconfig.get_config_h_filename()
- try:
- # It would probably better to read single lines to search.
- # But we do this only once, and it is fast enough
- f = open(fn)
- s = f.read()
- f.close()
-
- except IOError, exc:
- # if we can't read this file, we cannot say it is wrong
- # the compiler will complain later about this file as missing
- return (CONFIG_H_UNCERTAIN,
- "couldn't read '%s': %s" % (fn, exc.strerror))
-
- else:
- # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
- if string.find(s,"__GNUC__") >= 0:
- return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
- else:
- return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
-
-
-def get_versions():
- """ Try to find out the versions of gcc and ld.
- If not possible it returns None for it.
- """
- from distutils.version import StrictVersion
- from distutils.spawn import find_executable
- import re
-
- gcc_exe = find_executable('gcc')
- if gcc_exe:
- out = os.popen(gcc_exe + ' -dumpversion','r')
- out_string = out.read()
- out.close()
- result = re.search('(\d+\.\d+\.\d+)',out_string)
- if result:
- gcc_version = StrictVersion(result.group(1))
- else:
- gcc_version = None
- else:
- gcc_version = None
- # EMX ld has no way of reporting version number, and we use GCC
- # anyway - so we can link OMF DLLs
- ld_version = None
- return (gcc_version, ld_version)
diff --git a/sys/lib/python/distutils/errors.py b/sys/lib/python/distutils/errors.py
deleted file mode 100644
index efe1f78fd..000000000
--- a/sys/lib/python/distutils/errors.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""distutils.errors
-
-Provides exceptions used by the Distutils modules. Note that Distutils
-modules may raise standard exceptions; in particular, SystemExit is
-usually raised for errors that are obviously the end-user's fault
-(eg. bad command-line arguments).
-
-This module is safe to use in "from ... import *" mode; it only exports
-symbols whose names start with "Distutils" and end with "Error"."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: errors.py 37828 2004-11-10 22:23:15Z loewis $"
-
-class DistutilsError (Exception):
- """The root of all Distutils evil."""
- pass
-
-class DistutilsModuleError (DistutilsError):
- """Unable to load an expected module, or to find an expected class
- within some module (in particular, command modules and classes)."""
- pass
-
-class DistutilsClassError (DistutilsError):
- """Some command class (or possibly distribution class, if anyone
- feels a need to subclass Distribution) is found not to be holding
- up its end of the bargain, ie. implementing some part of the
- "command "interface."""
- pass
-
-class DistutilsGetoptError (DistutilsError):
- """The option table provided to 'fancy_getopt()' is bogus."""
- pass
-
-class DistutilsArgError (DistutilsError):
- """Raised by fancy_getopt in response to getopt.error -- ie. an
- error in the command line usage."""
- pass
-
-class DistutilsFileError (DistutilsError):
- """Any problems in the filesystem: expected file not found, etc.
- Typically this is for problems that we detect before IOError or
- OSError could be raised."""
- pass
-
-class DistutilsOptionError (DistutilsError):
- """Syntactic/semantic errors in command options, such as use of
- mutually conflicting options, or inconsistent options,
- badly-spelled values, etc. No distinction is made between option
- values originating in the setup script, the command line, config
- files, or what-have-you -- but if we *know* something originated in
- the setup script, we'll raise DistutilsSetupError instead."""
- pass
-
-class DistutilsSetupError (DistutilsError):
- """For errors that can be definitely blamed on the setup script,
- such as invalid keyword arguments to 'setup()'."""
- pass
-
-class DistutilsPlatformError (DistutilsError):
- """We don't know how to do something on the current platform (but
- we do know how to do it on some platform) -- eg. trying to compile
- C files on a platform not supported by a CCompiler subclass."""
- pass
-
-class DistutilsExecError (DistutilsError):
- """Any problems executing an external program (such as the C
- compiler, when compiling C files)."""
- pass
-
-class DistutilsInternalError (DistutilsError):
- """Internal inconsistencies or impossibilities (obviously, this
- should never be seen if the code is working!)."""
- pass
-
-class DistutilsTemplateError (DistutilsError):
- """Syntax error in a file list template."""
-
-
-# Exception classes used by the CCompiler implementation classes
-class CCompilerError (Exception):
- """Some compile/link operation failed."""
-
-class PreprocessError (CCompilerError):
- """Failure to preprocess one or more C/C++ files."""
-
-class CompileError (CCompilerError):
- """Failure to compile one or more C/C++ source files."""
-
-class LibError (CCompilerError):
- """Failure to create a static library from one or more C/C++ object
- files."""
-
-class LinkError (CCompilerError):
- """Failure to link one or more C/C++ object files into an executable
- or shared library file."""
-
-class UnknownFileError (CCompilerError):
- """Attempt to process an unknown file type."""
diff --git a/sys/lib/python/distutils/extension.py b/sys/lib/python/distutils/extension.py
deleted file mode 100644
index b66083f55..000000000
--- a/sys/lib/python/distutils/extension.py
+++ /dev/null
@@ -1,246 +0,0 @@
-"""distutils.extension
-
-Provides the Extension class, used to describe C/C++ extension
-modules in setup scripts."""
-
-__revision__ = "$Id: extension.py 37623 2004-10-14 10:02:08Z anthonybaxter $"
-
-import os, string, sys
-from types import *
-
-try:
- import warnings
-except ImportError:
- warnings = None
-
-# This class is really only used by the "build_ext" command, so it might
-# make sense to put it in distutils.command.build_ext. However, that
-# module is already big enough, and I want to make this class a bit more
-# complex to simplify some common cases ("foo" module in "foo.c") and do
-# better error-checking ("foo.c" actually exists).
-#
-# Also, putting this in build_ext.py means every setup script would have to
-# import that large-ish module (indirectly, through distutils.core) in
-# order to do anything.
-
-class Extension:
- """Just a collection of attributes that describes an extension
- module and everything needed to build it (hopefully in a portable
- way, but there are hooks that let you be as unportable as you need).
-
- Instance attributes:
- name : string
- the full name of the extension, including any packages -- ie.
- *not* a filename or pathname, but Python dotted name
- sources : [string]
- list of source filenames, relative to the distribution root
- (where the setup script lives), in Unix form (slash-separated)
- for portability. Source files may be C, C++, SWIG (.i),
- platform-specific resource files, or whatever else is recognized
- by the "build_ext" command as source for a Python extension.
- include_dirs : [string]
- list of directories to search for C/C++ header files (in Unix
- form for portability)
- define_macros : [(name : string, value : string|None)]
- list of macros to define; each macro is defined using a 2-tuple,
- where 'value' is either the string to define it to or None to
- define it without a particular value (equivalent of "#define
- FOO" in source or -DFOO on Unix C compiler command line)
- undef_macros : [string]
- list of macros to undefine explicitly
- library_dirs : [string]
- list of directories to search for C/C++ libraries at link time
- libraries : [string]
- list of library names (not filenames or paths) to link against
- runtime_library_dirs : [string]
- list of directories to search for C/C++ libraries at run time
- (for shared extensions, this is when the extension is loaded)
- extra_objects : [string]
- list of extra files to link with (eg. object files not implied
- by 'sources', static library that must be explicitly specified,
- binary resource files, etc.)
- extra_compile_args : [string]
- any extra platform- and compiler-specific information to use
- when compiling the source files in 'sources'. For platforms and
- compilers where "command line" makes sense, this is typically a
- list of command-line arguments, but for other platforms it could
- be anything.
- extra_link_args : [string]
- any extra platform- and compiler-specific information to use
- when linking object files together to create the extension (or
- to create a new static Python interpreter). Similar
- interpretation as for 'extra_compile_args'.
- export_symbols : [string]
- list of symbols to be exported from a shared extension. Not
- used on all platforms, and not generally necessary for Python
- extensions, which typically export exactly one symbol: "init" +
- extension_name.
- swig_opts : [string]
- any extra options to pass to SWIG if a source file has the .i
- extension.
- depends : [string]
- list of files that the extension depends on
- language : string
- extension language (i.e. "c", "c++", "objc"). Will be detected
- from the source extensions if not provided.
- """
-
- # When adding arguments to this constructor, be sure to update
- # setup_keywords in core.py.
- def __init__ (self, name, sources,
- include_dirs=None,
- define_macros=None,
- undef_macros=None,
- library_dirs=None,
- libraries=None,
- runtime_library_dirs=None,
- extra_objects=None,
- extra_compile_args=None,
- extra_link_args=None,
- export_symbols=None,
- swig_opts = None,
- depends=None,
- language=None,
- **kw # To catch unknown keywords
- ):
- assert type(name) is StringType, "'name' must be a string"
- assert (type(sources) is ListType and
- map(type, sources) == [StringType]*len(sources)), \
- "'sources' must be a list of strings"
-
- self.name = name
- self.sources = sources
- self.include_dirs = include_dirs or []
- self.define_macros = define_macros or []
- self.undef_macros = undef_macros or []
- self.library_dirs = library_dirs or []
- self.libraries = libraries or []
- self.runtime_library_dirs = runtime_library_dirs or []
- self.extra_objects = extra_objects or []
- self.extra_compile_args = extra_compile_args or []
- self.extra_link_args = extra_link_args or []
- self.export_symbols = export_symbols or []
- self.swig_opts = swig_opts or []
- self.depends = depends or []
- self.language = language
-
- # If there are unknown keyword options, warn about them
- if len(kw):
- L = kw.keys() ; L.sort()
- L = map(repr, L)
- msg = "Unknown Extension options: " + string.join(L, ', ')
- if warnings is not None:
- warnings.warn(msg)
- else:
- sys.stderr.write(msg + '\n')
-# class Extension
-
-
-def read_setup_file (filename):
- from distutils.sysconfig import \
- parse_makefile, expand_makefile_vars, _variable_rx
- from distutils.text_file import TextFile
- from distutils.util import split_quoted
-
- # First pass over the file to gather "VAR = VALUE" assignments.
- vars = parse_makefile(filename)
-
- # Second pass to gobble up the real content: lines of the form
- # <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
- file = TextFile(filename,
- strip_comments=1, skip_blanks=1, join_lines=1,
- lstrip_ws=1, rstrip_ws=1)
- extensions = []
-
- while 1:
- line = file.readline()
- if line is None: # eof
- break
- if _variable_rx.match(line): # VAR=VALUE, handled in first pass
- continue
-
- if line[0] == line[-1] == "*":
- file.warn("'%s' lines not handled yet" % line)
- continue
-
- #print "original line: " + line
- line = expand_makefile_vars(line, vars)
- words = split_quoted(line)
- #print "expanded line: " + line
-
- # NB. this parses a slightly different syntax than the old
- # makesetup script: here, there must be exactly one extension per
- # line, and it must be the first word of the line. I have no idea
- # why the old syntax supported multiple extensions per line, as
- # they all wind up being the same.
-
- module = words[0]
- ext = Extension(module, [])
- append_next_word = None
-
- for word in words[1:]:
- if append_next_word is not None:
- append_next_word.append(word)
- append_next_word = None
- continue
-
- suffix = os.path.splitext(word)[1]
- switch = word[0:2] ; value = word[2:]
-
- if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
- # hmm, should we do something about C vs. C++ sources?
- # or leave it up to the CCompiler implementation to
- # worry about?
- ext.sources.append(word)
- elif switch == "-I":
- ext.include_dirs.append(value)
- elif switch == "-D":
- equals = string.find(value, "=")
- if equals == -1: # bare "-DFOO" -- no value
- ext.define_macros.append((value, None))
- else: # "-DFOO=blah"
- ext.define_macros.append((value[0:equals],
- value[equals+2:]))
- elif switch == "-U":
- ext.undef_macros.append(value)
- elif switch == "-C": # only here 'cause makesetup has it!
- ext.extra_compile_args.append(word)
- elif switch == "-l":
- ext.libraries.append(value)
- elif switch == "-L":
- ext.library_dirs.append(value)
- elif switch == "-R":
- ext.runtime_library_dirs.append(value)
- elif word == "-rpath":
- append_next_word = ext.runtime_library_dirs
- elif word == "-Xlinker":
- append_next_word = ext.extra_link_args
- elif word == "-Xcompiler":
- append_next_word = ext.extra_compile_args
- elif switch == "-u":
- ext.extra_link_args.append(word)
- if not value:
- append_next_word = ext.extra_link_args
- elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
- # NB. a really faithful emulation of makesetup would
- # append a .o file to extra_objects only if it
- # had a slash in it; otherwise, it would s/.o/.c/
- # and append it to sources. Hmmmm.
- ext.extra_objects.append(word)
- else:
- file.warn("unrecognized argument '%s'" % word)
-
- extensions.append(ext)
-
- #print "module:", module
- #print "source files:", source_files
- #print "cpp args:", cpp_args
- #print "lib args:", library_args
-
- #extensions[module] = { 'sources': source_files,
- # 'cpp_args': cpp_args,
- # 'lib_args': library_args }
-
- return extensions
-
-# read_setup_file ()
diff --git a/sys/lib/python/distutils/fancy_getopt.py b/sys/lib/python/distutils/fancy_getopt.py
deleted file mode 100644
index 9617ed1a5..000000000
--- a/sys/lib/python/distutils/fancy_getopt.py
+++ /dev/null
@@ -1,502 +0,0 @@
-"""distutils.fancy_getopt
-
-Wrapper around the standard getopt module that provides the following
-additional features:
- * short and long options are tied together
- * options have help strings, so fancy_getopt could potentially
- create a complete usage summary
- * options set attributes of a passed-in object
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: fancy_getopt.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import sys, string, re
-from types import *
-import getopt
-from distutils.errors import *
-
-# Much like command_re in distutils.core, this is close to but not quite
-# the same as a Python NAME -- except, in the spirit of most GNU
-# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
-# The similarities to NAME are again not a coincidence...
-longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
-longopt_re = re.compile(r'^%s$' % longopt_pat)
-
-# For recognizing "negative alias" options, eg. "quiet=!verbose"
-neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
-
-# This is used to translate long options to legitimate Python identifiers
-# (for use as attributes of some object).
-longopt_xlate = string.maketrans('-', '_')
-
-class FancyGetopt:
- """Wrapper around the standard 'getopt()' module that provides some
- handy extra functionality:
- * short and long options are tied together
- * options have help strings, and help text can be assembled
- from them
- * options set attributes of a passed-in object
- * boolean options can have "negative aliases" -- eg. if
- --quiet is the "negative alias" of --verbose, then "--quiet"
- on the command line sets 'verbose' to false
- """
-
- def __init__ (self, option_table=None):
-
- # The option table is (currently) a list of tuples. The
- # tuples may have 3 or four values:
- # (long_option, short_option, help_string [, repeatable])
- # if an option takes an argument, its long_option should have '='
- # appended; short_option should just be a single character, no ':'
- # in any case. If a long_option doesn't have a corresponding
- # short_option, short_option should be None. All option tuples
- # must have long options.
- self.option_table = option_table
-
- # 'option_index' maps long option names to entries in the option
- # table (ie. those 3-tuples).
- self.option_index = {}
- if self.option_table:
- self._build_index()
-
- # 'alias' records (duh) alias options; {'foo': 'bar'} means
- # --foo is an alias for --bar
- self.alias = {}
-
- # 'negative_alias' keeps track of options that are the boolean
- # opposite of some other option
- self.negative_alias = {}
-
- # These keep track of the information in the option table. We
- # don't actually populate these structures until we're ready to
- # parse the command-line, since the 'option_table' passed in here
- # isn't necessarily the final word.
- self.short_opts = []
- self.long_opts = []
- self.short2long = {}
- self.attr_name = {}
- self.takes_arg = {}
-
- # And 'option_order' is filled up in 'getopt()'; it records the
- # original order of options (and their values) on the command-line,
- # but expands short options, converts aliases, etc.
- self.option_order = []
-
- # __init__ ()
-
-
- def _build_index (self):
- self.option_index.clear()
- for option in self.option_table:
- self.option_index[option[0]] = option
-
- def set_option_table (self, option_table):
- self.option_table = option_table
- self._build_index()
-
- def add_option (self, long_option, short_option=None, help_string=None):
- if self.option_index.has_key(long_option):
- raise DistutilsGetoptError, \
- "option conflict: already an option '%s'" % long_option
- else:
- option = (long_option, short_option, help_string)
- self.option_table.append(option)
- self.option_index[long_option] = option
-
-
- def has_option (self, long_option):
- """Return true if the option table for this parser has an
- option with long name 'long_option'."""
- return self.option_index.has_key(long_option)
-
- def get_attr_name (self, long_option):
- """Translate long option name 'long_option' to the form it
- has as an attribute of some object: ie., translate hyphens
- to underscores."""
- return string.translate(long_option, longopt_xlate)
-
-
- def _check_alias_dict (self, aliases, what):
- assert type(aliases) is DictionaryType
- for (alias, opt) in aliases.items():
- if not self.option_index.has_key(alias):
- raise DistutilsGetoptError, \
- ("invalid %s '%s': "
- "option '%s' not defined") % (what, alias, alias)
- if not self.option_index.has_key(opt):
- raise DistutilsGetoptError, \
- ("invalid %s '%s': "
- "aliased option '%s' not defined") % (what, alias, opt)
-
- def set_aliases (self, alias):
- """Set the aliases for this option parser."""
- self._check_alias_dict(alias, "alias")
- self.alias = alias
-
- def set_negative_aliases (self, negative_alias):
- """Set the negative aliases for this option parser.
- 'negative_alias' should be a dictionary mapping option names to
- option names, both the key and value must already be defined
- in the option table."""
- self._check_alias_dict(negative_alias, "negative alias")
- self.negative_alias = negative_alias
-
-
- def _grok_option_table (self):
- """Populate the various data structures that keep tabs on the
- option table. Called by 'getopt()' before it can do anything
- worthwhile.
- """
- self.long_opts = []
- self.short_opts = []
- self.short2long.clear()
- self.repeat = {}
-
- for option in self.option_table:
- if len(option) == 3:
- long, short, help = option
- repeat = 0
- elif len(option) == 4:
- long, short, help, repeat = option
- else:
- # the option table is part of the code, so simply
- # assert that it is correct
- raise ValueError, "invalid option tuple: %r" % (option,)
-
- # Type- and value-check the option names
- if type(long) is not StringType or len(long) < 2:
- raise DistutilsGetoptError, \
- ("invalid long option '%s': "
- "must be a string of length >= 2") % long
-
- if (not ((short is None) or
- (type(short) is StringType and len(short) == 1))):
- raise DistutilsGetoptError, \
- ("invalid short option '%s': "
- "must a single character or None") % short
-
- self.repeat[long] = repeat
- self.long_opts.append(long)
-
- if long[-1] == '=': # option takes an argument?
- if short: short = short + ':'
- long = long[0:-1]
- self.takes_arg[long] = 1
- else:
-
- # Is option is a "negative alias" for some other option (eg.
- # "quiet" == "!verbose")?
- alias_to = self.negative_alias.get(long)
- if alias_to is not None:
- if self.takes_arg[alias_to]:
- raise DistutilsGetoptError, \
- ("invalid negative alias '%s': "
- "aliased option '%s' takes a value") % \
- (long, alias_to)
-
- self.long_opts[-1] = long # XXX redundant?!
- self.takes_arg[long] = 0
-
- else:
- self.takes_arg[long] = 0
-
- # If this is an alias option, make sure its "takes arg" flag is
- # the same as the option it's aliased to.
- alias_to = self.alias.get(long)
- if alias_to is not None:
- if self.takes_arg[long] != self.takes_arg[alias_to]:
- raise DistutilsGetoptError, \
- ("invalid alias '%s': inconsistent with "
- "aliased option '%s' (one of them takes a value, "
- "the other doesn't") % (long, alias_to)
-
-
- # Now enforce some bondage on the long option name, so we can
- # later translate it to an attribute name on some object. Have
- # to do this a bit late to make sure we've removed any trailing
- # '='.
- if not longopt_re.match(long):
- raise DistutilsGetoptError, \
- ("invalid long option name '%s' " +
- "(must be letters, numbers, hyphens only") % long
-
- self.attr_name[long] = self.get_attr_name(long)
- if short:
- self.short_opts.append(short)
- self.short2long[short[0]] = long
-
- # for option_table
-
- # _grok_option_table()
-
-
- def getopt (self, args=None, object=None):
- """Parse command-line options in args. Store as attributes on object.
-
- If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
- 'object' is None or not supplied, creates a new OptionDummy
- object, stores option values there, and returns a tuple (args,
- object). If 'object' is supplied, it is modified in place and
- 'getopt()' just returns 'args'; in both cases, the returned
- 'args' is a modified copy of the passed-in 'args' list, which
- is left untouched.
- """
- if args is None:
- args = sys.argv[1:]
- if object is None:
- object = OptionDummy()
- created_object = 1
- else:
- created_object = 0
-
- self._grok_option_table()
-
- short_opts = string.join(self.short_opts)
- try:
- opts, args = getopt.getopt(args, short_opts, self.long_opts)
- except getopt.error, msg:
- raise DistutilsArgError, msg
-
- for opt, val in opts:
- if len(opt) == 2 and opt[0] == '-': # it's a short option
- opt = self.short2long[opt[1]]
- else:
- assert len(opt) > 2 and opt[:2] == '--'
- opt = opt[2:]
-
- alias = self.alias.get(opt)
- if alias:
- opt = alias
-
- if not self.takes_arg[opt]: # boolean option?
- assert val == '', "boolean option can't have value"
- alias = self.negative_alias.get(opt)
- if alias:
- opt = alias
- val = 0
- else:
- val = 1
-
- attr = self.attr_name[opt]
- # The only repeating option at the moment is 'verbose'.
- # It has a negative option -q quiet, which should set verbose = 0.
- if val and self.repeat.get(attr) is not None:
- val = getattr(object, attr, 0) + 1
- setattr(object, attr, val)
- self.option_order.append((opt, val))
-
- # for opts
- if created_object:
- return args, object
- else:
- return args
-
- # getopt()
-
-
- def get_option_order (self):
- """Returns the list of (option, value) tuples processed by the
- previous run of 'getopt()'. Raises RuntimeError if
- 'getopt()' hasn't been called yet.
- """
- if self.option_order is None:
- raise RuntimeError, "'getopt()' hasn't been called yet"
- else:
- return self.option_order
-
-
- def generate_help (self, header=None):
- """Generate help text (a list of strings, one per suggested line of
- output) from the option table for this FancyGetopt object.
- """
- # Blithely assume the option table is good: probably wouldn't call
- # 'generate_help()' unless you've already called 'getopt()'.
-
- # First pass: determine maximum length of long option names
- max_opt = 0
- for option in self.option_table:
- long = option[0]
- short = option[1]
- l = len(long)
- if long[-1] == '=':
- l = l - 1
- if short is not None:
- l = l + 5 # " (-x)" where short == 'x'
- if l > max_opt:
- max_opt = l
-
- opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
-
- # Typical help block looks like this:
- # --foo controls foonabulation
- # Help block for longest option looks like this:
- # --flimflam set the flim-flam level
- # and with wrapped text:
- # --flimflam set the flim-flam level (must be between
- # 0 and 100, except on Tuesdays)
- # Options with short names will have the short name shown (but
- # it doesn't contribute to max_opt):
- # --foo (-f) controls foonabulation
- # If adding the short option would make the left column too wide,
- # we push the explanation off to the next line
- # --flimflam (-l)
- # set the flim-flam level
- # Important parameters:
- # - 2 spaces before option block start lines
- # - 2 dashes for each long option name
- # - min. 2 spaces between option and explanation (gutter)
- # - 5 characters (incl. space) for short option name
-
- # Now generate lines of help text. (If 80 columns were good enough
- # for Jesus, then 78 columns are good enough for me!)
- line_width = 78
- text_width = line_width - opt_width
- big_indent = ' ' * opt_width
- if header:
- lines = [header]
- else:
- lines = ['Option summary:']
-
- for option in self.option_table:
- long, short, help = option[:3]
- text = wrap_text(help, text_width)
- if long[-1] == '=':
- long = long[0:-1]
-
- # Case 1: no short option at all (makes life easy)
- if short is None:
- if text:
- lines.append(" --%-*s %s" % (max_opt, long, text[0]))
- else:
- lines.append(" --%-*s " % (max_opt, long))
-
- # Case 2: we have a short option, so we have to include it
- # just after the long option
- else:
- opt_names = "%s (-%s)" % (long, short)
- if text:
- lines.append(" --%-*s %s" %
- (max_opt, opt_names, text[0]))
- else:
- lines.append(" --%-*s" % opt_names)
-
- for l in text[1:]:
- lines.append(big_indent + l)
-
- # for self.option_table
-
- return lines
-
- # generate_help ()
-
- def print_help (self, header=None, file=None):
- if file is None:
- file = sys.stdout
- for line in self.generate_help(header):
- file.write(line + "\n")
-
-# class FancyGetopt
-
-
-def fancy_getopt (options, negative_opt, object, args):
- parser = FancyGetopt(options)
- parser.set_negative_aliases(negative_opt)
- return parser.getopt(args, object)
-
-
-WS_TRANS = string.maketrans(string.whitespace, ' ' * len(string.whitespace))
-
-def wrap_text (text, width):
- """wrap_text(text : string, width : int) -> [string]
-
- Split 'text' into multiple lines of no more than 'width' characters
- each, and return the list of strings that results.
- """
-
- if text is None:
- return []
- if len(text) <= width:
- return [text]
-
- text = string.expandtabs(text)
- text = string.translate(text, WS_TRANS)
- chunks = re.split(r'( +|-+)', text)
- chunks = filter(None, chunks) # ' - ' results in empty strings
- lines = []
-
- while chunks:
-
- cur_line = [] # list of chunks (to-be-joined)
- cur_len = 0 # length of current line
-
- while chunks:
- l = len(chunks[0])
- if cur_len + l <= width: # can squeeze (at least) this chunk in
- cur_line.append(chunks[0])
- del chunks[0]
- cur_len = cur_len + l
- else: # this line is full
- # drop last chunk if all space
- if cur_line and cur_line[-1][0] == ' ':
- del cur_line[-1]
- break
-
- if chunks: # any chunks left to process?
-
- # if the current line is still empty, then we had a single
- # chunk that's too big too fit on a line -- so we break
- # down and break it up at the line width
- if cur_len == 0:
- cur_line.append(chunks[0][0:width])
- chunks[0] = chunks[0][width:]
-
- # all-whitespace chunks at the end of a line can be discarded
- # (and we know from the re.split above that if a chunk has
- # *any* whitespace, it is *all* whitespace)
- if chunks[0][0] == ' ':
- del chunks[0]
-
- # and store this line in the list-of-all-lines -- as a single
- # string, of course!
- lines.append(string.join(cur_line, ''))
-
- # while chunks
-
- return lines
-
-# wrap_text ()
-
-
-def translate_longopt (opt):
- """Convert a long option name to a valid Python identifier by
- changing "-" to "_".
- """
- return string.translate(opt, longopt_xlate)
-
-
-class OptionDummy:
- """Dummy class just used as a place to hold command-line option
- values as instance attributes."""
-
- def __init__ (self, options=[]):
- """Create a new OptionDummy instance. The attributes listed in
- 'options' will be initialized to None."""
- for opt in options:
- setattr(self, opt, None)
-
-# class OptionDummy
-
-
-if __name__ == "__main__":
- text = """\
-Tra-la-la, supercalifragilisticexpialidocious.
-How *do* you spell that odd word, anyways?
-(Someone ask Mary -- she'll know [or she'll
-say, "How should I know?"].)"""
-
- for w in (10, 20, 30, 40):
- print "width: %d" % w
- print string.join(wrap_text(text, w), "\n")
- print
diff --git a/sys/lib/python/distutils/file_util.py b/sys/lib/python/distutils/file_util.py
deleted file mode 100644
index f961804de..000000000
--- a/sys/lib/python/distutils/file_util.py
+++ /dev/null
@@ -1,253 +0,0 @@
-"""distutils.file_util
-
-Utility functions for operating on single files.
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: file_util.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os
-from distutils.errors import DistutilsFileError
-from distutils import log
-
-# for generating verbose output in 'copy_file()'
-_copy_action = { None: 'copying',
- 'hard': 'hard linking',
- 'sym': 'symbolically linking' }
-
-
-def _copy_file_contents (src, dst, buffer_size=16*1024):
- """Copy the file 'src' to 'dst'; both must be filenames. Any error
- opening either file, reading from 'src', or writing to 'dst', raises
- DistutilsFileError. Data is read/written in chunks of 'buffer_size'
- bytes (default 16k). No attempt is made to handle anything apart from
- regular files.
- """
- # Stolen from shutil module in the standard library, but with
- # custom error-handling added.
-
- fsrc = None
- fdst = None
- try:
- try:
- fsrc = open(src, 'rb')
- except os.error, (errno, errstr):
- raise DistutilsFileError, \
- "could not open '%s': %s" % (src, errstr)
-
- if os.path.exists(dst):
- try:
- os.unlink(dst)
- except os.error, (errno, errstr):
- raise DistutilsFileError, \
- "could not delete '%s': %s" % (dst, errstr)
-
- try:
- fdst = open(dst, 'wb')
- except os.error, (errno, errstr):
- raise DistutilsFileError, \
- "could not create '%s': %s" % (dst, errstr)
-
- while 1:
- try:
- buf = fsrc.read(buffer_size)
- except os.error, (errno, errstr):
- raise DistutilsFileError, \
- "could not read from '%s': %s" % (src, errstr)
-
- if not buf:
- break
-
- try:
- fdst.write(buf)
- except os.error, (errno, errstr):
- raise DistutilsFileError, \
- "could not write to '%s': %s" % (dst, errstr)
-
- finally:
- if fdst:
- fdst.close()
- if fsrc:
- fsrc.close()
-
-# _copy_file_contents()
-
-def copy_file (src, dst,
- preserve_mode=1,
- preserve_times=1,
- update=0,
- link=None,
- verbose=0,
- dry_run=0):
-
- """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
- copied there with the same name; otherwise, it must be a filename. (If
- the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
- is true (the default), the file's mode (type and permission bits, or
- whatever is analogous on the current platform) is copied. If
- 'preserve_times' is true (the default), the last-modified and
- last-access times are copied as well. If 'update' is true, 'src' will
- only be copied if 'dst' does not exist, or if 'dst' does exist but is
- older than 'src'.
-
- 'link' allows you to make hard links (os.link) or symbolic links
- (os.symlink) instead of copying: set it to "hard" or "sym"; if it is
- None (the default), files are copied. Don't set 'link' on systems that
- don't support it: 'copy_file()' doesn't check if hard or symbolic
- linking is available.
-
- Under Mac OS, uses the native file copy function in macostools; on
- other systems, uses '_copy_file_contents()' to copy file contents.
-
- Return a tuple (dest_name, copied): 'dest_name' is the actual name of
- the output file, and 'copied' is true if the file was copied (or would
- have been copied, if 'dry_run' true).
- """
- # XXX if the destination file already exists, we clobber it if
- # copying, but blow up if linking. Hmmm. And I don't know what
- # macostools.copyfile() does. Should definitely be consistent, and
- # should probably blow up if destination exists and we would be
- # changing it (ie. it's not already a hard/soft link to src OR
- # (not update) and (src newer than dst).
-
- from distutils.dep_util import newer
- from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
-
- if not os.path.isfile(src):
- raise DistutilsFileError, \
- "can't copy '%s': doesn't exist or not a regular file" % src
-
- if os.path.isdir(dst):
- dir = dst
- dst = os.path.join(dst, os.path.basename(src))
- else:
- dir = os.path.dirname(dst)
-
- if update and not newer(src, dst):
- log.debug("not copying %s (output up-to-date)", src)
- return dst, 0
-
- try:
- action = _copy_action[link]
- except KeyError:
- raise ValueError, \
- "invalid value '%s' for 'link' argument" % link
- if os.path.basename(dst) == os.path.basename(src):
- log.info("%s %s -> %s", action, src, dir)
- else:
- log.info("%s %s -> %s", action, src, dst)
-
- if dry_run:
- return (dst, 1)
-
- # On Mac OS, use the native file copy routine
- if os.name == 'mac':
- import macostools
- try:
- macostools.copy(src, dst, 0, preserve_times)
- except os.error, exc:
- raise DistutilsFileError, \
- "could not copy '%s' to '%s': %s" % (src, dst, exc[-1])
-
- # If linking (hard or symbolic), use the appropriate system call
- # (Unix only, of course, but that's the caller's responsibility)
- elif link == 'hard':
- if not (os.path.exists(dst) and os.path.samefile(src, dst)):
- os.link(src, dst)
- elif link == 'sym':
- if not (os.path.exists(dst) and os.path.samefile(src, dst)):
- os.symlink(src, dst)
-
- # Otherwise (non-Mac, not linking), copy the file contents and
- # (optionally) copy the times and mode.
- else:
- _copy_file_contents(src, dst)
- if preserve_mode or preserve_times:
- st = os.stat(src)
-
- # According to David Ascher <da@ski.org>, utime() should be done
- # before chmod() (at least under NT).
- if preserve_times:
- os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
- if preserve_mode:
- os.chmod(dst, S_IMODE(st[ST_MODE]))
-
- return (dst, 1)
-
-# copy_file ()
-
-
-# XXX I suspect this is Unix-specific -- need porting help!
-def move_file (src, dst,
- verbose=0,
- dry_run=0):
-
- """Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
- be moved into it with the same name; otherwise, 'src' is just renamed
- to 'dst'. Return the new full name of the file.
-
- Handles cross-device moves on Unix using 'copy_file()'. What about
- other systems???
- """
- from os.path import exists, isfile, isdir, basename, dirname
- import errno
-
- log.info("moving %s -> %s", src, dst)
-
- if dry_run:
- return dst
-
- if not isfile(src):
- raise DistutilsFileError, \
- "can't move '%s': not a regular file" % src
-
- if isdir(dst):
- dst = os.path.join(dst, basename(src))
- elif exists(dst):
- raise DistutilsFileError, \
- "can't move '%s': destination '%s' already exists" % \
- (src, dst)
-
- if not isdir(dirname(dst)):
- raise DistutilsFileError, \
- "can't move '%s': destination '%s' not a valid path" % \
- (src, dst)
-
- copy_it = 0
- try:
- os.rename(src, dst)
- except os.error, (num, msg):
- if num == errno.EXDEV:
- copy_it = 1
- else:
- raise DistutilsFileError, \
- "couldn't move '%s' to '%s': %s" % (src, dst, msg)
-
- if copy_it:
- copy_file(src, dst)
- try:
- os.unlink(src)
- except os.error, (num, msg):
- try:
- os.unlink(dst)
- except os.error:
- pass
- raise DistutilsFileError, \
- ("couldn't move '%s' to '%s' by copy/delete: " +
- "delete '%s' failed: %s") % \
- (src, dst, src, msg)
-
- return dst
-
-# move_file ()
-
-
-def write_file (filename, contents):
- """Create a file with the specified name and write 'contents' (a
- sequence of strings without line terminators) to it.
- """
- f = open(filename, "w")
- for line in contents:
- f.write(line + "\n")
- f.close()
diff --git a/sys/lib/python/distutils/filelist.py b/sys/lib/python/distutils/filelist.py
deleted file mode 100644
index 04be0b34c..000000000
--- a/sys/lib/python/distutils/filelist.py
+++ /dev/null
@@ -1,355 +0,0 @@
-"""distutils.filelist
-
-Provides the FileList class, used for poking about the filesystem
-and building lists of files.
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: filelist.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import os, string, re
-import fnmatch
-from types import *
-from glob import glob
-from distutils.util import convert_path
-from distutils.errors import DistutilsTemplateError, DistutilsInternalError
-from distutils import log
-
-class FileList:
-
- """A list of files built by on exploring the filesystem and filtered by
- applying various patterns to what we find there.
-
- Instance attributes:
- dir
- directory from which files will be taken -- only used if
- 'allfiles' not supplied to constructor
- files
- list of filenames currently being built/filtered/manipulated
- allfiles
- complete list of files under consideration (ie. without any
- filtering applied)
- """
-
- def __init__(self,
- warn=None,
- debug_print=None):
- # ignore argument to FileList, but keep them for backwards
- # compatibility
-
- self.allfiles = None
- self.files = []
-
- def set_allfiles (self, allfiles):
- self.allfiles = allfiles
-
- def findall (self, dir=os.curdir):
- self.allfiles = findall(dir)
-
- def debug_print (self, msg):
- """Print 'msg' to stdout if the global DEBUG (taken from the
- DISTUTILS_DEBUG environment variable) flag is true.
- """
- from distutils.debug import DEBUG
- if DEBUG:
- print msg
-
- # -- List-like methods ---------------------------------------------
-
- def append (self, item):
- self.files.append(item)
-
- def extend (self, items):
- self.files.extend(items)
-
- def sort (self):
- # Not a strict lexical sort!
- sortable_files = map(os.path.split, self.files)
- sortable_files.sort()
- self.files = []
- for sort_tuple in sortable_files:
- self.files.append(apply(os.path.join, sort_tuple))
-
-
- # -- Other miscellaneous utility methods ---------------------------
-
- def remove_duplicates (self):
- # Assumes list has been sorted!
- for i in range(len(self.files) - 1, 0, -1):
- if self.files[i] == self.files[i - 1]:
- del self.files[i]
-
-
- # -- "File template" methods ---------------------------------------
-
- def _parse_template_line (self, line):
- words = string.split(line)
- action = words[0]
-
- patterns = dir = dir_pattern = None
-
- if action in ('include', 'exclude',
- 'global-include', 'global-exclude'):
- if len(words) < 2:
- raise DistutilsTemplateError, \
- "'%s' expects <pattern1> <pattern2> ..." % action
-
- patterns = map(convert_path, words[1:])
-
- elif action in ('recursive-include', 'recursive-exclude'):
- if len(words) < 3:
- raise DistutilsTemplateError, \
- "'%s' expects <dir> <pattern1> <pattern2> ..." % action
-
- dir = convert_path(words[1])
- patterns = map(convert_path, words[2:])
-
- elif action in ('graft', 'prune'):
- if len(words) != 2:
- raise DistutilsTemplateError, \
- "'%s' expects a single <dir_pattern>" % action
-
- dir_pattern = convert_path(words[1])
-
- else:
- raise DistutilsTemplateError, "unknown action '%s'" % action
-
- return (action, patterns, dir, dir_pattern)
-
- # _parse_template_line ()
-
-
- def process_template_line (self, line):
-
- # Parse the line: split it up, make sure the right number of words
- # is there, and return the relevant words. 'action' is always
- # defined: it's the first word of the line. Which of the other
- # three are defined depends on the action; it'll be either
- # patterns, (dir and patterns), or (dir_pattern).
- (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
-
- # OK, now we know that the action is valid and we have the
- # right number of words on the line for that action -- so we
- # can proceed with minimal error-checking.
- if action == 'include':
- self.debug_print("include " + string.join(patterns))
- for pattern in patterns:
- if not self.include_pattern(pattern, anchor=1):
- log.warn("warning: no files found matching '%s'",
- pattern)
-
- elif action == 'exclude':
- self.debug_print("exclude " + string.join(patterns))
- for pattern in patterns:
- if not self.exclude_pattern(pattern, anchor=1):
- log.warn(("warning: no previously-included files "
- "found matching '%s'"), pattern)
-
- elif action == 'global-include':
- self.debug_print("global-include " + string.join(patterns))
- for pattern in patterns:
- if not self.include_pattern(pattern, anchor=0):
- log.warn(("warning: no files found matching '%s' " +
- "anywhere in distribution"), pattern)
-
- elif action == 'global-exclude':
- self.debug_print("global-exclude " + string.join(patterns))
- for pattern in patterns:
- if not self.exclude_pattern(pattern, anchor=0):
- log.warn(("warning: no previously-included files matching "
- "'%s' found anywhere in distribution"),
- pattern)
-
- elif action == 'recursive-include':
- self.debug_print("recursive-include %s %s" %
- (dir, string.join(patterns)))
- for pattern in patterns:
- if not self.include_pattern(pattern, prefix=dir):
- log.warn(("warning: no files found matching '%s' " +
- "under directory '%s'"),
- pattern, dir)
-
- elif action == 'recursive-exclude':
- self.debug_print("recursive-exclude %s %s" %
- (dir, string.join(patterns)))
- for pattern in patterns:
- if not self.exclude_pattern(pattern, prefix=dir):
- log.warn(("warning: no previously-included files matching "
- "'%s' found under directory '%s'"),
- pattern, dir)
-
- elif action == 'graft':
- self.debug_print("graft " + dir_pattern)
- if not self.include_pattern(None, prefix=dir_pattern):
- log.warn("warning: no directories found matching '%s'",
- dir_pattern)
-
- elif action == 'prune':
- self.debug_print("prune " + dir_pattern)
- if not self.exclude_pattern(None, prefix=dir_pattern):
- log.warn(("no previously-included directories found " +
- "matching '%s'"), dir_pattern)
- else:
- raise DistutilsInternalError, \
- "this cannot happen: invalid action '%s'" % action
-
- # process_template_line ()
-
-
- # -- Filtering/selection methods -----------------------------------
-
- def include_pattern (self, pattern,
- anchor=1, prefix=None, is_regex=0):
- """Select strings (presumably filenames) from 'self.files' that
- match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
- are not quite the same as implemented by the 'fnmatch' module: '*'
- and '?' match non-special characters, where "special" is platform-
- dependent: slash on Unix; colon, slash, and backslash on
- DOS/Windows; and colon on Mac OS.
-
- If 'anchor' is true (the default), then the pattern match is more
- stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
- 'anchor' is false, both of these will match.
-
- If 'prefix' is supplied, then only filenames starting with 'prefix'
- (itself a pattern) and ending with 'pattern', with anything in between
- them, will match. 'anchor' is ignored in this case.
-
- If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
- 'pattern' is assumed to be either a string containing a regex or a
- regex object -- no translation is done, the regex is just compiled
- and used as-is.
-
- Selected strings will be added to self.files.
-
- Return 1 if files are found.
- """
- files_found = 0
- pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
- self.debug_print("include_pattern: applying regex r'%s'" %
- pattern_re.pattern)
-
- # delayed loading of allfiles list
- if self.allfiles is None:
- self.findall()
-
- for name in self.allfiles:
- if pattern_re.search(name):
- self.debug_print(" adding " + name)
- self.files.append(name)
- files_found = 1
-
- return files_found
-
- # include_pattern ()
-
-
- def exclude_pattern (self, pattern,
- anchor=1, prefix=None, is_regex=0):
- """Remove strings (presumably filenames) from 'files' that match
- 'pattern'. Other parameters are the same as for
- 'include_pattern()', above.
- The list 'self.files' is modified in place.
- Return 1 if files are found.
- """
- files_found = 0
- pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
- self.debug_print("exclude_pattern: applying regex r'%s'" %
- pattern_re.pattern)
- for i in range(len(self.files)-1, -1, -1):
- if pattern_re.search(self.files[i]):
- self.debug_print(" removing " + self.files[i])
- del self.files[i]
- files_found = 1
-
- return files_found
-
- # exclude_pattern ()
-
-# class FileList
-
-
-# ----------------------------------------------------------------------
-# Utility functions
-
-def findall (dir = os.curdir):
- """Find all files under 'dir' and return the list of full filenames
- (relative to 'dir').
- """
- from stat import ST_MODE, S_ISREG, S_ISDIR, S_ISLNK
-
- list = []
- stack = [dir]
- pop = stack.pop
- push = stack.append
-
- while stack:
- dir = pop()
- names = os.listdir(dir)
-
- for name in names:
- if dir != os.curdir: # avoid the dreaded "./" syndrome
- fullname = os.path.join(dir, name)
- else:
- fullname = name
-
- # Avoid excess stat calls -- just one will do, thank you!
- stat = os.stat(fullname)
- mode = stat[ST_MODE]
- if S_ISREG(mode):
- list.append(fullname)
- elif S_ISDIR(mode) and not S_ISLNK(mode):
- push(fullname)
-
- return list
-
-
-def glob_to_re (pattern):
- """Translate a shell-like glob pattern to a regular expression; return
- a string containing the regex. Differs from 'fnmatch.translate()' in
- that '*' does not match "special characters" (which are
- platform-specific).
- """
- pattern_re = fnmatch.translate(pattern)
-
- # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
- # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
- # and by extension they shouldn't match such "special characters" under
- # any OS. So change all non-escaped dots in the RE to match any
- # character except the special characters.
- # XXX currently the "special characters" are just slash -- i.e. this is
- # Unix-only.
- pattern_re = re.sub(r'(^|[^\\])\.', r'\1[^/]', pattern_re)
- return pattern_re
-
-# glob_to_re ()
-
-
-def translate_pattern (pattern, anchor=1, prefix=None, is_regex=0):
- """Translate a shell-like wildcard pattern to a compiled regular
- expression. Return the compiled regex. If 'is_regex' true,
- then 'pattern' is directly compiled to a regex (if it's a string)
- or just returned as-is (assumes it's a regex object).
- """
- if is_regex:
- if type(pattern) is StringType:
- return re.compile(pattern)
- else:
- return pattern
-
- if pattern:
- pattern_re = glob_to_re(pattern)
- else:
- pattern_re = ''
-
- if prefix is not None:
- prefix_re = (glob_to_re(prefix))[0:-1] # ditch trailing $
- pattern_re = "^" + os.path.join(prefix_re, ".*" + pattern_re)
- else: # no prefix -- respect anchor flag
- if anchor:
- pattern_re = "^" + pattern_re
-
- return re.compile(pattern_re)
-
-# translate_pattern ()
diff --git a/sys/lib/python/distutils/log.py b/sys/lib/python/distutils/log.py
deleted file mode 100644
index 95d4c1c5a..000000000
--- a/sys/lib/python/distutils/log.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""A simple log mechanism styled after PEP 282."""
-
-# This module should be kept compatible with Python 2.1.
-
-# The class here is styled after PEP 282 so that it could later be
-# replaced with a standard Python logging implementation.
-
-DEBUG = 1
-INFO = 2
-WARN = 3
-ERROR = 4
-FATAL = 5
-
-import sys
-
-class Log:
-
- def __init__(self, threshold=WARN):
- self.threshold = threshold
-
- def _log(self, level, msg, args):
- if level >= self.threshold:
- if not args:
- # msg may contain a '%'. If args is empty,
- # don't even try to string-format
- print msg
- else:
- print msg % args
- sys.stdout.flush()
-
- def log(self, level, msg, *args):
- self._log(level, msg, args)
-
- def debug(self, msg, *args):
- self._log(DEBUG, msg, args)
-
- def info(self, msg, *args):
- self._log(INFO, msg, args)
-
- def warn(self, msg, *args):
- self._log(WARN, msg, args)
-
- def error(self, msg, *args):
- self._log(ERROR, msg, args)
-
- def fatal(self, msg, *args):
- self._log(FATAL, msg, args)
-
-_global_log = Log()
-log = _global_log.log
-debug = _global_log.debug
-info = _global_log.info
-warn = _global_log.warn
-error = _global_log.error
-fatal = _global_log.fatal
-
-def set_threshold(level):
- # return the old threshold for use from tests
- old = _global_log.threshold
- _global_log.threshold = level
- return old
-
-def set_verbosity(v):
- if v <= 0:
- set_threshold(WARN)
- elif v == 1:
- set_threshold(INFO)
- elif v >= 2:
- set_threshold(DEBUG)
diff --git a/sys/lib/python/distutils/msvccompiler.py b/sys/lib/python/distutils/msvccompiler.py
deleted file mode 100644
index 631f054eb..000000000
--- a/sys/lib/python/distutils/msvccompiler.py
+++ /dev/null
@@ -1,652 +0,0 @@
-"""distutils.msvccompiler
-
-Contains MSVCCompiler, an implementation of the abstract CCompiler class
-for the Microsoft Visual Studio.
-"""
-
-# Written by Perry Stoll
-# hacked by Robin Becker and Thomas Heller to do a better job of
-# finding DevStudio (through the registry)
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: msvccompiler.py 54645 2007-04-01 18:29:47Z neal.norwitz $"
-
-import sys, os, string
-from distutils.errors import \
- DistutilsExecError, DistutilsPlatformError, \
- CompileError, LibError, LinkError
-from distutils.ccompiler import \
- CCompiler, gen_preprocess_options, gen_lib_options
-from distutils import log
-
-_can_read_reg = 0
-try:
- import _winreg
-
- _can_read_reg = 1
- hkey_mod = _winreg
-
- RegOpenKeyEx = _winreg.OpenKeyEx
- RegEnumKey = _winreg.EnumKey
- RegEnumValue = _winreg.EnumValue
- RegError = _winreg.error
-
-except ImportError:
- try:
- import win32api
- import win32con
- _can_read_reg = 1
- hkey_mod = win32con
-
- RegOpenKeyEx = win32api.RegOpenKeyEx
- RegEnumKey = win32api.RegEnumKey
- RegEnumValue = win32api.RegEnumValue
- RegError = win32api.error
-
- except ImportError:
- log.info("Warning: Can't read registry to find the "
- "necessary compiler setting\n"
- "Make sure that Python modules _winreg, "
- "win32api or win32con are installed.")
- pass
-
-if _can_read_reg:
- HKEYS = (hkey_mod.HKEY_USERS,
- hkey_mod.HKEY_CURRENT_USER,
- hkey_mod.HKEY_LOCAL_MACHINE,
- hkey_mod.HKEY_CLASSES_ROOT)
-
-def read_keys(base, key):
- """Return list of registry keys."""
-
- try:
- handle = RegOpenKeyEx(base, key)
- except RegError:
- return None
- L = []
- i = 0
- while 1:
- try:
- k = RegEnumKey(handle, i)
- except RegError:
- break
- L.append(k)
- i = i + 1
- return L
-
-def read_values(base, key):
- """Return dict of registry keys and values.
-
- All names are converted to lowercase.
- """
- try:
- handle = RegOpenKeyEx(base, key)
- except RegError:
- return None
- d = {}
- i = 0
- while 1:
- try:
- name, value, type = RegEnumValue(handle, i)
- except RegError:
- break
- name = name.lower()
- d[convert_mbcs(name)] = convert_mbcs(value)
- i = i + 1
- return d
-
-def convert_mbcs(s):
- enc = getattr(s, "encode", None)
- if enc is not None:
- try:
- s = enc("mbcs")
- except UnicodeError:
- pass
- return s
-
-class MacroExpander:
-
- def __init__(self, version):
- self.macros = {}
- self.load_macros(version)
-
- def set_macro(self, macro, path, key):
- for base in HKEYS:
- d = read_values(base, path)
- if d:
- self.macros["$(%s)" % macro] = d[key]
- break
-
- def load_macros(self, version):
- vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
- self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
- self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
- net = r"Software\Microsoft\.NETFramework"
- self.set_macro("FrameworkDir", net, "installroot")
- try:
- if version > 7.0:
- self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
- else:
- self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
- except KeyError, exc: #
- raise DistutilsPlatformError, \
- ("""Python was built with Visual Studio 2003;
-extensions must be built with a compiler than can generate compatible binaries.
-Visual Studio 2003 was not found on this system. If you have Cygwin installed,
-you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
-
- p = r"Software\Microsoft\NET Framework Setup\Product"
- for base in HKEYS:
- try:
- h = RegOpenKeyEx(base, p)
- except RegError:
- continue
- key = RegEnumKey(h, 0)
- d = read_values(base, r"%s\%s" % (p, key))
- self.macros["$(FrameworkVersion)"] = d["version"]
-
- def sub(self, s):
- for k, v in self.macros.items():
- s = string.replace(s, k, v)
- return s
-
-def get_build_version():
- """Return the version of MSVC that was used to build Python.
-
- For Python 2.3 and up, the version number is included in
- sys.version. For earlier versions, assume the compiler is MSVC 6.
- """
-
- prefix = "MSC v."
- i = string.find(sys.version, prefix)
- if i == -1:
- return 6
- i = i + len(prefix)
- s, rest = sys.version[i:].split(" ", 1)
- majorVersion = int(s[:-2]) - 6
- minorVersion = int(s[2:3]) / 10.0
- # I don't think paths are affected by minor version in version 6
- if majorVersion == 6:
- minorVersion = 0
- if majorVersion >= 6:
- return majorVersion + minorVersion
- # else we don't know what version of the compiler this is
- return None
-
-def get_build_architecture():
- """Return the processor architecture.
-
- Possible results are "Intel", "Itanium", or "AMD64".
- """
-
- prefix = " bit ("
- i = string.find(sys.version, prefix)
- if i == -1:
- return "Intel"
- j = string.find(sys.version, ")", i)
- return sys.version[i+len(prefix):j]
-
-def normalize_and_reduce_paths(paths):
- """Return a list of normalized paths with duplicates removed.
-
- The current order of paths is maintained.
- """
- # Paths are normalized so things like: /a and /a/ aren't both preserved.
- reduced_paths = []
- for p in paths:
- np = os.path.normpath(p)
- # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
- if np not in reduced_paths:
- reduced_paths.append(np)
- return reduced_paths
-
-
-class MSVCCompiler (CCompiler) :
- """Concrete class that implements an interface to Microsoft Visual C++,
- as defined by the CCompiler abstract class."""
-
- compiler_type = 'msvc'
-
- # Just set this so CCompiler's constructor doesn't barf. We currently
- # don't use the 'set_executables()' bureaucracy provided by CCompiler,
- # as it really isn't necessary for this sort of single-compiler class.
- # Would be nice to have a consistent interface with UnixCCompiler,
- # though, so it's worth thinking about.
- executables = {}
-
- # Private class data (need to distinguish C from C++ source for compiler)
- _c_extensions = ['.c']
- _cpp_extensions = ['.cc', '.cpp', '.cxx']
- _rc_extensions = ['.rc']
- _mc_extensions = ['.mc']
-
- # Needed for the filename generation methods provided by the
- # base class, CCompiler.
- src_extensions = (_c_extensions + _cpp_extensions +
- _rc_extensions + _mc_extensions)
- res_extension = '.res'
- obj_extension = '.obj'
- static_lib_extension = '.lib'
- shared_lib_extension = '.dll'
- static_lib_format = shared_lib_format = '%s%s'
- exe_extension = '.exe'
-
- def __init__ (self, verbose=0, dry_run=0, force=0):
- CCompiler.__init__ (self, verbose, dry_run, force)
- self.__version = get_build_version()
- self.__arch = get_build_architecture()
- if self.__arch == "Intel":
- # x86
- if self.__version >= 7:
- self.__root = r"Software\Microsoft\VisualStudio"
- self.__macros = MacroExpander(self.__version)
- else:
- self.__root = r"Software\Microsoft\Devstudio"
- self.__product = "Visual Studio version %s" % self.__version
- else:
- # Win64. Assume this was built with the platform SDK
- self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)
-
- self.initialized = False
-
- def initialize(self):
- self.__paths = []
- if os.environ.has_key("DISTUTILS_USE_SDK") and os.environ.has_key("MSSdk") and self.find_exe("cl.exe"):
- # Assume that the SDK set up everything alright; don't try to be
- # smarter
- self.cc = "cl.exe"
- self.linker = "link.exe"
- self.lib = "lib.exe"
- self.rc = "rc.exe"
- self.mc = "mc.exe"
- else:
- self.__paths = self.get_msvc_paths("path")
-
- if len (self.__paths) == 0:
- raise DistutilsPlatformError, \
- ("Python was built with %s, "
- "and extensions need to be built with the same "
- "version of the compiler, but it isn't installed." % self.__product)
-
- self.cc = self.find_exe("cl.exe")
- self.linker = self.find_exe("link.exe")
- self.lib = self.find_exe("lib.exe")
- self.rc = self.find_exe("rc.exe") # resource compiler
- self.mc = self.find_exe("mc.exe") # message compiler
- self.set_path_env_var('lib')
- self.set_path_env_var('include')
-
- # extend the MSVC path with the current path
- try:
- for p in string.split(os.environ['path'], ';'):
- self.__paths.append(p)
- except KeyError:
- pass
- self.__paths = normalize_and_reduce_paths(self.__paths)
- os.environ['path'] = string.join(self.__paths, ';')
-
- self.preprocess_options = None
- if self.__arch == "Intel":
- self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
- '/DNDEBUG']
- self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
- '/Z7', '/D_DEBUG']
- else:
- # Win64
- self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
- '/DNDEBUG']
- self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
- '/Z7', '/D_DEBUG']
-
- self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
- if self.__version >= 7:
- self.ldflags_shared_debug = [
- '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
- ]
- else:
- self.ldflags_shared_debug = [
- '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
- ]
- self.ldflags_static = [ '/nologo']
-
- self.initialized = True
-
- # -- Worker methods ------------------------------------------------
-
- def object_filenames (self,
- source_filenames,
- strip_dir=0,
- output_dir=''):
- # Copied from ccompiler.py, extended to return .res as 'object'-file
- # for .rc input file
- if output_dir is None: output_dir = ''
- obj_names = []
- for src_name in source_filenames:
- (base, ext) = os.path.splitext (src_name)
- base = os.path.splitdrive(base)[1] # Chop off the drive
- base = base[os.path.isabs(base):] # If abs, chop off leading /
- if ext not in self.src_extensions:
- # Better to raise an exception instead of silently continuing
- # and later complain about sources and targets having
- # different lengths
- raise CompileError ("Don't know how to compile %s" % src_name)
- if strip_dir:
- base = os.path.basename (base)
- if ext in self._rc_extensions:
- obj_names.append (os.path.join (output_dir,
- base + self.res_extension))
- elif ext in self._mc_extensions:
- obj_names.append (os.path.join (output_dir,
- base + self.res_extension))
- else:
- obj_names.append (os.path.join (output_dir,
- base + self.obj_extension))
- return obj_names
-
- # object_filenames ()
-
-
- def compile(self, sources,
- output_dir=None, macros=None, include_dirs=None, debug=0,
- extra_preargs=None, extra_postargs=None, depends=None):
-
- if not self.initialized: self.initialize()
- macros, objects, extra_postargs, pp_opts, build = \
- self._setup_compile(output_dir, macros, include_dirs, sources,
- depends, extra_postargs)
-
- compile_opts = extra_preargs or []
- compile_opts.append ('/c')
- if debug:
- compile_opts.extend(self.compile_options_debug)
- else:
- compile_opts.extend(self.compile_options)
-
- for obj in objects:
- try:
- src, ext = build[obj]
- except KeyError:
- continue
- if debug:
- # pass the full pathname to MSVC in debug mode,
- # this allows the debugger to find the source file
- # without asking the user to browse for it
- src = os.path.abspath(src)
-
- if ext in self._c_extensions:
- input_opt = "/Tc" + src
- elif ext in self._cpp_extensions:
- input_opt = "/Tp" + src
- elif ext in self._rc_extensions:
- # compile .RC to .RES file
- input_opt = src
- output_opt = "/fo" + obj
- try:
- self.spawn ([self.rc] + pp_opts +
- [output_opt] + [input_opt])
- except DistutilsExecError, msg:
- raise CompileError, msg
- continue
- elif ext in self._mc_extensions:
-
- # Compile .MC to .RC file to .RES file.
- # * '-h dir' specifies the directory for the
- # generated include file
- # * '-r dir' specifies the target directory of the
- # generated RC file and the binary message resource
- # it includes
- #
- # For now (since there are no options to change this),
- # we use the source-directory for the include file and
- # the build directory for the RC file and message
- # resources. This works at least for win32all.
-
- h_dir = os.path.dirname (src)
- rc_dir = os.path.dirname (obj)
- try:
- # first compile .MC to .RC and .H file
- self.spawn ([self.mc] +
- ['-h', h_dir, '-r', rc_dir] + [src])
- base, _ = os.path.splitext (os.path.basename (src))
- rc_file = os.path.join (rc_dir, base + '.rc')
- # then compile .RC to .RES file
- self.spawn ([self.rc] +
- ["/fo" + obj] + [rc_file])
-
- except DistutilsExecError, msg:
- raise CompileError, msg
- continue
- else:
- # how to handle this file?
- raise CompileError (
- "Don't know how to compile %s to %s" % \
- (src, obj))
-
- output_opt = "/Fo" + obj
- try:
- self.spawn ([self.cc] + compile_opts + pp_opts +
- [input_opt, output_opt] +
- extra_postargs)
- except DistutilsExecError, msg:
- raise CompileError, msg
-
- return objects
-
- # compile ()
-
-
- def create_static_lib (self,
- objects,
- output_libname,
- output_dir=None,
- debug=0,
- target_lang=None):
-
- if not self.initialized: self.initialize()
- (objects, output_dir) = self._fix_object_args (objects, output_dir)
- output_filename = \
- self.library_filename (output_libname, output_dir=output_dir)
-
- if self._need_link (objects, output_filename):
- lib_args = objects + ['/OUT:' + output_filename]
- if debug:
- pass # XXX what goes here?
- try:
- self.spawn ([self.lib] + lib_args)
- except DistutilsExecError, msg:
- raise LibError, msg
-
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- # create_static_lib ()
-
- def link (self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
-
- if not self.initialized: self.initialize()
- (objects, output_dir) = self._fix_object_args (objects, output_dir)
- (libraries, library_dirs, runtime_library_dirs) = \
- self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
-
- if runtime_library_dirs:
- self.warn ("I don't know what to do with 'runtime_library_dirs': "
- + str (runtime_library_dirs))
-
- lib_opts = gen_lib_options (self,
- library_dirs, runtime_library_dirs,
- libraries)
- if output_dir is not None:
- output_filename = os.path.join (output_dir, output_filename)
-
- if self._need_link (objects, output_filename):
-
- if target_desc == CCompiler.EXECUTABLE:
- if debug:
- ldflags = self.ldflags_shared_debug[1:]
- else:
- ldflags = self.ldflags_shared[1:]
- else:
- if debug:
- ldflags = self.ldflags_shared_debug
- else:
- ldflags = self.ldflags_shared
-
- export_opts = []
- for sym in (export_symbols or []):
- export_opts.append("/EXPORT:" + sym)
-
- ld_args = (ldflags + lib_opts + export_opts +
- objects + ['/OUT:' + output_filename])
-
- # The MSVC linker generates .lib and .exp files, which cannot be
- # suppressed by any linker switches. The .lib files may even be
- # needed! Make sure they are generated in the temporary build
- # directory. Since they have different names for debug and release
- # builds, they can go into the same directory.
- if export_symbols is not None:
- (dll_name, dll_ext) = os.path.splitext(
- os.path.basename(output_filename))
- implib_file = os.path.join(
- os.path.dirname(objects[0]),
- self.library_filename(dll_name))
- ld_args.append ('/IMPLIB:' + implib_file)
-
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
-
- self.mkpath (os.path.dirname (output_filename))
- try:
- self.spawn ([self.linker] + ld_args)
- except DistutilsExecError, msg:
- raise LinkError, msg
-
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- # link ()
-
-
- # -- Miscellaneous methods -----------------------------------------
- # These are all used by the 'gen_lib_options() function, in
- # ccompiler.py.
-
- def library_dir_option (self, dir):
- return "/LIBPATH:" + dir
-
- def runtime_library_dir_option (self, dir):
- raise DistutilsPlatformError, \
- "don't know how to set runtime library search path for MSVC++"
-
- def library_option (self, lib):
- return self.library_filename (lib)
-
-
- def find_library_file (self, dirs, lib, debug=0):
- # Prefer a debugging library if found (and requested), but deal
- # with it if we don't have one.
- if debug:
- try_names = [lib + "_d", lib]
- else:
- try_names = [lib]
- for dir in dirs:
- for name in try_names:
- libfile = os.path.join(dir, self.library_filename (name))
- if os.path.exists(libfile):
- return libfile
- else:
- # Oops, didn't find it in *any* of 'dirs'
- return None
-
- # find_library_file ()
-
- # Helper methods for using the MSVC registry settings
-
- def find_exe(self, exe):
- """Return path to an MSVC executable program.
-
- Tries to find the program in several places: first, one of the
- MSVC program search paths from the registry; next, the directories
- in the PATH environment variable. If any of those work, return an
- absolute path that is known to exist. If none of them work, just
- return the original program name, 'exe'.
- """
-
- for p in self.__paths:
- fn = os.path.join(os.path.abspath(p), exe)
- if os.path.isfile(fn):
- return fn
-
- # didn't find it; try existing path
- for p in string.split(os.environ['Path'],';'):
- fn = os.path.join(os.path.abspath(p),exe)
- if os.path.isfile(fn):
- return fn
-
- return exe
-
- def get_msvc_paths(self, path, platform='x86'):
- """Get a list of devstudio directories (include, lib or path).
-
- Return a list of strings. The list will be empty if unable to
- access the registry or appropriate registry keys not found.
- """
-
- if not _can_read_reg:
- return []
-
- path = path + " dirs"
- if self.__version >= 7:
- key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
- % (self.__root, self.__version))
- else:
- key = (r"%s\6.0\Build System\Components\Platforms"
- r"\Win32 (%s)\Directories" % (self.__root, platform))
-
- for base in HKEYS:
- d = read_values(base, key)
- if d:
- if self.__version >= 7:
- return string.split(self.__macros.sub(d[path]), ";")
- else:
- return string.split(d[path], ";")
- # MSVC 6 seems to create the registry entries we need only when
- # the GUI is run.
- if self.__version == 6:
- for base in HKEYS:
- if read_values(base, r"%s\6.0" % self.__root) is not None:
- self.warn("It seems you have Visual Studio 6 installed, "
- "but the expected registry settings are not present.\n"
- "You must at least run the Visual Studio GUI once "
- "so that these entries are created.")
- break
- return []
-
- def set_path_env_var(self, name):
- """Set environment variable 'name' to an MSVC path type value.
-
- This is equivalent to a SET command prior to execution of spawned
- commands.
- """
-
- if name == "lib":
- p = self.get_msvc_paths("library")
- else:
- p = self.get_msvc_paths(name)
- if p:
- os.environ[name] = string.join(p, ';')
diff --git a/sys/lib/python/distutils/mwerkscompiler.py b/sys/lib/python/distutils/mwerkscompiler.py
deleted file mode 100644
index aa5b476a1..000000000
--- a/sys/lib/python/distutils/mwerkscompiler.py
+++ /dev/null
@@ -1,248 +0,0 @@
-"""distutils.mwerkscompiler
-
-Contains MWerksCompiler, an implementation of the abstract CCompiler class
-for MetroWerks CodeWarrior on the Macintosh. Needs work to support CW on
-Windows."""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: mwerkscompiler.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import sys, os, string
-from types import *
-from distutils.errors import \
- DistutilsExecError, DistutilsPlatformError, \
- CompileError, LibError, LinkError
-from distutils.ccompiler import \
- CCompiler, gen_preprocess_options, gen_lib_options
-import distutils.util
-import distutils.dir_util
-from distutils import log
-import mkcwproject
-
-class MWerksCompiler (CCompiler) :
- """Concrete class that implements an interface to MetroWerks CodeWarrior,
- as defined by the CCompiler abstract class."""
-
- compiler_type = 'mwerks'
-
- # Just set this so CCompiler's constructor doesn't barf. We currently
- # don't use the 'set_executables()' bureaucracy provided by CCompiler,
- # as it really isn't necessary for this sort of single-compiler class.
- # Would be nice to have a consistent interface with UnixCCompiler,
- # though, so it's worth thinking about.
- executables = {}
-
- # Private class data (need to distinguish C from C++ source for compiler)
- _c_extensions = ['.c']
- _cpp_extensions = ['.cc', '.cpp', '.cxx']
- _rc_extensions = ['.r']
- _exp_extension = '.exp'
-
- # Needed for the filename generation methods provided by the
- # base class, CCompiler.
- src_extensions = (_c_extensions + _cpp_extensions +
- _rc_extensions)
- res_extension = '.rsrc'
- obj_extension = '.obj' # Not used, really
- static_lib_extension = '.lib'
- shared_lib_extension = '.slb'
- static_lib_format = shared_lib_format = '%s%s'
- exe_extension = ''
-
-
- def __init__ (self,
- verbose=0,
- dry_run=0,
- force=0):
-
- CCompiler.__init__ (self, verbose, dry_run, force)
-
-
- def compile (self,
- sources,
- output_dir=None,
- macros=None,
- include_dirs=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- depends=None):
- (output_dir, macros, include_dirs) = \
- self._fix_compile_args (output_dir, macros, include_dirs)
- self.__sources = sources
- self.__macros = macros
- self.__include_dirs = include_dirs
- # Don't need extra_preargs and extra_postargs for CW
- return []
-
- def link (self,
- target_desc,
- objects,
- output_filename,
- output_dir=None,
- libraries=None,
- library_dirs=None,
- runtime_library_dirs=None,
- export_symbols=None,
- debug=0,
- extra_preargs=None,
- extra_postargs=None,
- build_temp=None,
- target_lang=None):
- # First fixup.
- (objects, output_dir) = self._fix_object_args (objects, output_dir)
- (libraries, library_dirs, runtime_library_dirs) = \
- self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
-
- # First examine a couple of options for things that aren't implemented yet
- if not target_desc in (self.SHARED_LIBRARY, self.SHARED_OBJECT):
- raise DistutilsPlatformError, 'Can only make SHARED_LIBRARY or SHARED_OBJECT targets on the Mac'
- if runtime_library_dirs:
- raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
- if extra_preargs or extra_postargs:
- raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
- if len(export_symbols) != 1:
- raise DistutilsPlatformError, 'Need exactly one export symbol'
- # Next there are various things for which we need absolute pathnames.
- # This is because we (usually) create the project in a subdirectory of
- # where we are now, and keeping the paths relative is too much work right
- # now.
- sources = map(self._filename_to_abs, self.__sources)
- include_dirs = map(self._filename_to_abs, self.__include_dirs)
- if objects:
- objects = map(self._filename_to_abs, objects)
- else:
- objects = []
- if build_temp:
- build_temp = self._filename_to_abs(build_temp)
- else:
- build_temp = os.curdir()
- if output_dir:
- output_filename = os.path.join(output_dir, output_filename)
- # The output filename needs special handling: splitting it into dir and
- # filename part. Actually I'm not sure this is really needed, but it
- # can't hurt.
- output_filename = self._filename_to_abs(output_filename)
- output_dir, output_filename = os.path.split(output_filename)
- # Now we need the short names of a couple of things for putting them
- # into the project.
- if output_filename[-8:] == '.ppc.slb':
- basename = output_filename[:-8]
- elif output_filename[-11:] == '.carbon.slb':
- basename = output_filename[:-11]
- else:
- basename = os.path.strip(output_filename)[0]
- projectname = basename + '.mcp'
- targetname = basename
- xmlname = basename + '.xml'
- exportname = basename + '.mcp.exp'
- prefixname = 'mwerks_%s_config.h'%basename
- # Create the directories we need
- distutils.dir_util.mkpath(build_temp, dry_run=self.dry_run)
- distutils.dir_util.mkpath(output_dir, dry_run=self.dry_run)
- # And on to filling in the parameters for the project builder
- settings = {}
- settings['mac_exportname'] = exportname
- settings['mac_outputdir'] = output_dir
- settings['mac_dllname'] = output_filename
- settings['mac_targetname'] = targetname
- settings['sysprefix'] = sys.prefix
- settings['mac_sysprefixtype'] = 'Absolute'
- sourcefilenames = []
- sourcefiledirs = []
- for filename in sources + objects:
- dirname, filename = os.path.split(filename)
- sourcefilenames.append(filename)
- if not dirname in sourcefiledirs:
- sourcefiledirs.append(dirname)
- settings['sources'] = sourcefilenames
- settings['libraries'] = libraries
- settings['extrasearchdirs'] = sourcefiledirs + include_dirs + library_dirs
- if self.dry_run:
- print 'CALLING LINKER IN', os.getcwd()
- for key, value in settings.items():
- print '%20.20s %s'%(key, value)
- return
- # Build the export file
- exportfilename = os.path.join(build_temp, exportname)
- log.debug("\tCreate export file %s", exportfilename)
- fp = open(exportfilename, 'w')
- fp.write('%s\n'%export_symbols[0])
- fp.close()
- # Generate the prefix file, if needed, and put it in the settings
- if self.__macros:
- prefixfilename = os.path.join(os.getcwd(), os.path.join(build_temp, prefixname))
- fp = open(prefixfilename, 'w')
- fp.write('#include "mwerks_shcarbon_config.h"\n')
- for name, value in self.__macros:
- if value is None:
- fp.write('#define %s\n'%name)
- else:
- fp.write('#define %s %s\n'%(name, value))
- fp.close()
- settings['prefixname'] = prefixname
-
- # Build the XML file. We need the full pathname (only lateron, really)
- # because we pass this pathname to CodeWarrior in an AppleEvent, and CW
- # doesn't have a clue about our working directory.
- xmlfilename = os.path.join(os.getcwd(), os.path.join(build_temp, xmlname))
- log.debug("\tCreate XML file %s", xmlfilename)
- xmlbuilder = mkcwproject.cwxmlgen.ProjectBuilder(settings)
- xmlbuilder.generate()
- xmldata = settings['tmp_projectxmldata']
- fp = open(xmlfilename, 'w')
- fp.write(xmldata)
- fp.close()
- # Generate the project. Again a full pathname.
- projectfilename = os.path.join(os.getcwd(), os.path.join(build_temp, projectname))
- log.debug('\tCreate project file %s', projectfilename)
- mkcwproject.makeproject(xmlfilename, projectfilename)
- # And build it
- log.debug('\tBuild project')
- mkcwproject.buildproject(projectfilename)
-
- def _filename_to_abs(self, filename):
- # Some filenames seem to be unix-like. Convert to Mac names.
-## if '/' in filename and ':' in filename:
-## raise DistutilsPlatformError, 'Filename may be Unix or Mac style: %s'%filename
-## if '/' in filename:
-## filename = macurl2path(filename)
- filename = distutils.util.convert_path(filename)
- if not os.path.isabs(filename):
- curdir = os.getcwd()
- filename = os.path.join(curdir, filename)
- # Finally remove .. components
- components = string.split(filename, ':')
- for i in range(1, len(components)):
- if components[i] == '..':
- components[i] = ''
- return string.join(components, ':')
-
- def library_dir_option (self, dir):
- """Return the compiler option to add 'dir' to the list of
- directories searched for libraries.
- """
- return # XXXX Not correct...
-
- def runtime_library_dir_option (self, dir):
- """Return the compiler option to add 'dir' to the list of
- directories searched for runtime libraries.
- """
- # Nothing needed or Mwerks/Mac.
- return
-
- def library_option (self, lib):
- """Return the compiler option to add 'dir' to the list of libraries
- linked into the shared library or executable.
- """
- return
-
- def find_library_file (self, dirs, lib, debug=0):
- """Search the specified list of directories for a static or shared
- library file 'lib' and return the full path to that file. If
- 'debug' true, look for a debugging version (if that makes sense on
- the current platform). Return None if 'lib' wasn't found in any of
- the specified directories.
- """
- return 0
diff --git a/sys/lib/python/distutils/spawn.py b/sys/lib/python/distutils/spawn.py
deleted file mode 100644
index b16ca559c..000000000
--- a/sys/lib/python/distutils/spawn.py
+++ /dev/null
@@ -1,201 +0,0 @@
-"""distutils.spawn
-
-Provides the 'spawn()' function, a front-end to various platform-
-specific functions for launching another program in a sub-process.
-Also provides the 'find_executable()' to search the path for a given
-executable name.
-"""
-
-# This module should be kept compatible with Python 2.1.
-
-__revision__ = "$Id: spawn.py 37828 2004-11-10 22:23:15Z loewis $"
-
-import sys, os, string
-from distutils.errors import *
-from distutils import log
-
-def spawn (cmd,
- search_path=1,
- verbose=0,
- dry_run=0):
-
- """Run another program, specified as a command list 'cmd', in a new
- process. 'cmd' is just the argument list for the new process, ie.
- cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
- There is no way to run a program with a name different from that of its
- executable.
-
- If 'search_path' is true (the default), the system's executable
- search path will be used to find the program; otherwise, cmd[0]
- must be the exact path to the executable. If 'dry_run' is true,
- the command will not actually be run.
-
- Raise DistutilsExecError if running the program fails in any way; just
- return on success.
- """
- if os.name == 'posix':
- _spawn_posix(cmd, search_path, dry_run=dry_run)
- elif os.name == 'nt':
- _spawn_nt(cmd, search_path, dry_run=dry_run)
- elif os.name == 'os2':
- _spawn_os2(cmd, search_path, dry_run=dry_run)
- else:
- raise DistutilsPlatformError, \
- "don't know how to spawn programs on platform '%s'" % os.name
-
-# spawn ()
-
-
-def _nt_quote_args (args):
- """Quote command-line arguments for DOS/Windows conventions: just
- wraps every argument which contains blanks in double quotes, and
- returns a new argument list.
- """
-
- # XXX this doesn't seem very robust to me -- but if the Windows guys
- # say it'll work, I guess I'll have to accept it. (What if an arg
- # contains quotes? What other magic characters, other than spaces,
- # have to be escaped? Is there an escaping mechanism other than
- # quoting?)
-
- for i in range(len(args)):
- if string.find(args[i], ' ') != -1:
- args[i] = '"%s"' % args[i]
- return args
-
-def _spawn_nt (cmd,
- search_path=1,
- verbose=0,
- dry_run=0):
-
- executable = cmd[0]
- cmd = _nt_quote_args(cmd)
- if search_path:
- # either we find one or it stays the same
- executable = find_executable(executable) or executable
- log.info(string.join([executable] + cmd[1:], ' '))
- if not dry_run:
- # spawn for NT requires a full path to the .exe
- try:
- rc = os.spawnv(os.P_WAIT, executable, cmd)
- except OSError, exc:
- # this seems to happen when the command isn't found
- raise DistutilsExecError, \
- "command '%s' failed: %s" % (cmd[0], exc[-1])
- if rc != 0:
- # and this reflects the command running but failing
- raise DistutilsExecError, \
- "command '%s' failed with exit status %d" % (cmd[0], rc)
-
-
-def _spawn_os2 (cmd,
- search_path=1,
- verbose=0,
- dry_run=0):
-
- executable = cmd[0]
- #cmd = _nt_quote_args(cmd)
- if search_path:
- # either we find one or it stays the same
- executable = find_executable(executable) or executable
- log.info(string.join([executable] + cmd[1:], ' '))
- if not dry_run:
- # spawnv for OS/2 EMX requires a full path to the .exe
- try:
- rc = os.spawnv(os.P_WAIT, executable, cmd)
- except OSError, exc:
- # this seems to happen when the command isn't found
- raise DistutilsExecError, \
- "command '%s' failed: %s" % (cmd[0], exc[-1])
- if rc != 0:
- # and this reflects the command running but failing
- print "command '%s' failed with exit status %d" % (cmd[0], rc)
- raise DistutilsExecError, \
- "command '%s' failed with exit status %d" % (cmd[0], rc)
-
-
-def _spawn_posix (cmd,
- search_path=1,
- verbose=0,
- dry_run=0):
-
- log.info(string.join(cmd, ' '))
- if dry_run:
- return
- exec_fn = search_path and os.execvp or os.execv
-
- pid = os.fork()
-
- if pid == 0: # in the child
- try:
- #print "cmd[0] =", cmd[0]
- #print "cmd =", cmd
- exec_fn(cmd[0], cmd)
- except OSError, e:
- sys.stderr.write("unable to execute %s: %s\n" %
- (cmd[0], e.strerror))
- os._exit(1)
-
- sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
- os._exit(1)
-
-
- else: # in the parent
- # Loop until the child either exits or is terminated by a signal
- # (ie. keep waiting if it's merely stopped)
- while 1:
- try:
- (pid, status) = os.waitpid(pid, 0)
- except OSError, exc:
- import errno
- if exc.errno == errno.EINTR:
- continue
- raise DistutilsExecError, \
- "command '%s' failed: %s" % (cmd[0], exc[-1])
- if os.WIFSIGNALED(status):
- raise DistutilsExecError, \
- "command '%s' terminated by signal %d" % \
- (cmd[0], os.WTERMSIG(status))
-
- elif os.WIFEXITED(status):
- exit_status = os.WEXITSTATUS(status)
- if exit_status == 0:
- return # hey, it succeeded!
- else:
- raise DistutilsExecError, \
- "command '%s' failed with exit status %d" % \
- (cmd[0], exit_status)
-
- elif os.WIFSTOPPED(status):
- continue
-
- else:
- raise DistutilsExecError, \
- "unknown error executing '%s': termination status %d" % \
- (cmd[0], status)
-# _spawn_posix ()
-
-
-def find_executable(executable, path=None):
- """Try to find 'executable' in the directories listed in 'path' (a
- string listing directories separated by 'os.pathsep'; defaults to
- os.environ['PATH']). Returns the complete filename or None if not
- found.
- """
- if path is None:
- path = os.environ['PATH']
- paths = string.split(path, os.pathsep)
- (base, ext) = os.path.splitext(executable)
- if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
- executable = executable + '.exe'
- if not os.path.isfile(executable):
- for p in paths:
- f = os.path.join(p, executable)
- if os.path.isfile(f):
- # the file exists, we have a shot at spawn working
- return f
- return None
- else:
- return executable
-
-# find_executable()
diff --git a/sys/lib/python/distutils/sysconfig.py b/sys/lib/python/distutils/sysconfig.py
deleted file mode 100644
index 40e082769..000000000
--- a/sys/lib/python/distutils/sysconfig.py
+++ /dev/null
@@ -1,538 +0,0 @@
-"""Provide access to Python's configuration information. The specific
-configuration variables available depend heavily on the platform and
-configuration. The values may be retrieved using
-get_config_var(name), and the list of variables is available via
-get_config_vars().keys(). Additional convenience functions are also
-available.
-
-Written by: Fred L. Drake, Jr.
-Email: <fdrake@acm.org>
-"""
-
-__revision__ = "$Id: sysconfig.py 52234 2006-10-08 17:50:26Z ronald.oussoren $"
-
-import os
-import re
-import string
-import sys
-
-from distutils.errors import DistutilsPlatformError
-
-# These are needed in a couple of spots, so just compute them once.
-PREFIX = os.path.normpath(sys.prefix)
-EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
-
-# python_build: (Boolean) if true, we're either building Python or
-# building an extension with an un-installed Python, so we use
-# different (hard-wired) directories.
-
-argv0_path = os.path.dirname(os.path.abspath(sys.executable))
-landmark = os.path.join(argv0_path, "Modules", "Setup")
-
-python_build = os.path.isfile(landmark)
-
-del landmark
-
-
-def get_python_version():
- """Return a string containing the major and minor Python version,
- leaving off the patchlevel. Sample return values could be '1.5'
- or '2.2'.
- """
- return sys.version[:3]
-
-
-def get_python_inc(plat_specific=0, prefix=None):
- """Return the directory containing installed Python header files.
-
- If 'plat_specific' is false (the default), this is the path to the
- non-platform-specific header files, i.e. Python.h and so on;
- otherwise, this is the path to platform-specific header files
- (namely pyconfig.h).
-
- If 'prefix' is supplied, use it instead of sys.prefix or
- sys.exec_prefix -- i.e., ignore 'plat_specific'.
- """
- if prefix is None:
- prefix = plat_specific and EXEC_PREFIX or PREFIX
- if os.name == "posix":
- if python_build:
- base = os.path.dirname(os.path.abspath(sys.executable))
- if plat_specific:
- inc_dir = base
- else:
- inc_dir = os.path.join(base, "Include")
- if not os.path.exists(inc_dir):
- inc_dir = os.path.join(os.path.dirname(base), "Include")
- return inc_dir
-# return os.path.join(prefix, "include", "python" + get_python_version())
- return os.path.join(prefix, "include", "python")
- elif os.name == "nt":
- return os.path.join(prefix, "include")
- elif os.name == "mac":
- if plat_specific:
- return os.path.join(prefix, "Mac", "Include")
- else:
- return os.path.join(prefix, "Include")
- elif os.name == "os2":
- return os.path.join(prefix, "Include")
- else:
- raise DistutilsPlatformError(
- "I don't know where Python installs its C header files "
- "on platform '%s'" % os.name)
-
-
-def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
- """Return the directory containing the Python library (standard or
- site additions).
-
- If 'plat_specific' is true, return the directory containing
- platform-specific modules, i.e. any module from a non-pure-Python
- module distribution; otherwise, return the platform-shared library
- directory. If 'standard_lib' is true, return the directory
- containing standard Python library modules; otherwise, return the
- directory for site-specific modules.
-
- If 'prefix' is supplied, use it instead of sys.prefix or
- sys.exec_prefix -- i.e., ignore 'plat_specific'.
- """
- if prefix is None:
- prefix = plat_specific and EXEC_PREFIX or PREFIX
-
- if os.name == "posix":
-# libpython = os.path.join(prefix,
-# "lib", "python" + get_python_version())
- libpython = prefix
- if standard_lib:
- return libpython
- else:
- return os.path.join(libpython, "site-packages")
-
- elif os.name == "nt":
- if standard_lib:
- return os.path.join(prefix, "Lib")
- else:
- if get_python_version() < "2.2":
- return prefix
- else:
- return os.path.join(PREFIX, "Lib", "site-packages")
-
- elif os.name == "mac":
- if plat_specific:
- if standard_lib:
- return os.path.join(prefix, "Lib", "lib-dynload")
- else:
- return os.path.join(prefix, "Lib", "site-packages")
- else:
- if standard_lib:
- return os.path.join(prefix, "Lib")
- else:
- return os.path.join(prefix, "Lib", "site-packages")
-
- elif os.name == "os2":
- if standard_lib:
- return os.path.join(PREFIX, "Lib")
- else:
- return os.path.join(PREFIX, "Lib", "site-packages")
-
- else:
- raise DistutilsPlatformError(
- "I don't know where Python installs its library "
- "on platform '%s'" % os.name)
-
-
-def customize_compiler(compiler):
- """Do any platform-specific customization of a CCompiler instance.
-
- Mainly needed on Unix, so we can plug in the information that
- varies across Unices and is stored in Python's Makefile.
- """
- if compiler.compiler_type == "unix":
- (cc, cxx, opt, cflags, ccshared, ldshared, so_ext) = \
- get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
- 'CCSHARED', 'LDSHARED', 'SO')
-
- if os.environ.has_key('CC'):
- cc = os.environ['CC']
- if os.environ.has_key('CXX'):
- cxx = os.environ['CXX']
- if os.environ.has_key('LDSHARED'):
- ldshared = os.environ['LDSHARED']
- if os.environ.has_key('CPP'):
- cpp = os.environ['CPP']
- else:
- cpp = cc + " -E" # not always
- if os.environ.has_key('LDFLAGS'):
- ldshared = ldshared + ' ' + os.environ['LDFLAGS']
- if os.environ.has_key('CFLAGS'):
- cflags = opt + ' ' + os.environ['CFLAGS']
- ldshared = ldshared + ' ' + os.environ['CFLAGS']
- if os.environ.has_key('CPPFLAGS'):
- cpp = cpp + ' ' + os.environ['CPPFLAGS']
- cflags = cflags + ' ' + os.environ['CPPFLAGS']
- ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
-
- cc_cmd = cc + ' ' + cflags
- compiler.set_executables(
- preprocessor=cpp,
- compiler=cc_cmd,
- compiler_so=cc_cmd + ' ' + ccshared,
- compiler_cxx=cxx,
- linker_so=ldshared,
- linker_exe=cc)
-
- compiler.shared_lib_extension = so_ext
-
-
-def get_config_h_filename():
- """Return full pathname of installed pyconfig.h file."""
- if python_build:
- inc_dir = argv0_path
- else:
- inc_dir = get_python_inc(plat_specific=1)
- if get_python_version() < '2.2':
- config_h = 'config.h'
- else:
- # The name of the config.h file changed in 2.2
- config_h = 'pyconfig.h'
- return os.path.join(inc_dir, config_h)
-
-
-def get_makefile_filename():
- """Return full pathname of installed Makefile from the Python build."""
- if python_build:
- return os.path.join(os.path.dirname(sys.executable), "Makefile")
- lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
- return os.path.join(lib_dir, "config", "Makefile")
-
-
-def parse_config_h(fp, g=None):
- """Parse a config.h-style file.
-
- A dictionary containing name/value pairs is returned. If an
- optional dictionary is passed in as the second argument, it is
- used instead of a new dictionary.
- """
- if g is None:
- g = {}
- define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
- undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
- #
- while 1:
- line = fp.readline()
- if not line:
- break
- m = define_rx.match(line)
- if m:
- n, v = m.group(1, 2)
- try: v = int(v)
- except ValueError: pass
- g[n] = v
- else:
- m = undef_rx.match(line)
- if m:
- g[m.group(1)] = 0
- return g
-
-
-# Regexes needed for parsing Makefile (and similar syntaxes,
-# like old-style Setup files).
-_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
-_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
-_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
-
-def parse_makefile(fn, g=None):
- """Parse a Makefile-style file.
-
- A dictionary containing name/value pairs is returned. If an
- optional dictionary is passed in as the second argument, it is
- used instead of a new dictionary.
- """
- from distutils.text_file import TextFile
- fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1)
-
- if g is None:
- g = {}
- done = {}
- notdone = {}
-
- while 1:
- line = fp.readline()
- if line is None: # eof
- break
- m = _variable_rx.match(line)
- if m:
- n, v = m.group(1, 2)
- v = string.strip(v)
- if "$" in v:
- notdone[n] = v
- else:
- try: v = int(v)
- except ValueError: pass
- done[n] = v
-
- # do variable interpolation here
- while notdone:
- for name in notdone.keys():
- value = notdone[name]
- m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
- if m:
- n = m.group(1)
- found = True
- if done.has_key(n):
- item = str(done[n])
- elif notdone.has_key(n):
- # get it on a subsequent round
- found = False
- elif os.environ.has_key(n):
- # do it like make: fall back to environment
- item = os.environ[n]
- else:
- done[n] = item = ""
- if found:
- after = value[m.end():]
- value = value[:m.start()] + item + after
- if "$" in after:
- notdone[name] = value
- else:
- try: value = int(value)
- except ValueError:
- done[name] = string.strip(value)
- else:
- done[name] = value
- del notdone[name]
- else:
- # bogus variable reference; just drop it since we can't deal
- del notdone[name]
-
- fp.close()
-
- # save the results in the global dictionary
- g.update(done)
- return g
-
-
-def expand_makefile_vars(s, vars):
- """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
- 'string' according to 'vars' (a dictionary mapping variable names to
- values). Variables not present in 'vars' are silently expanded to the
- empty string. The variable values in 'vars' should not contain further
- variable expansions; if 'vars' is the output of 'parse_makefile()',
- you're fine. Returns a variable-expanded version of 's'.
- """
-
- # This algorithm does multiple expansion, so if vars['foo'] contains
- # "${bar}", it will expand ${foo} to ${bar}, and then expand
- # ${bar}... and so forth. This is fine as long as 'vars' comes from
- # 'parse_makefile()', which takes care of such expansions eagerly,
- # according to make's variable expansion semantics.
-
- while 1:
- m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
- if m:
- (beg, end) = m.span()
- s = s[0:beg] + vars.get(m.group(1)) + s[end:]
- else:
- break
- return s
-
-
-_config_vars = None
-
-def _init_posix():
- """Initialize the module as appropriate for POSIX systems."""
- g = {}
- # load the installed Makefile:
- try:
- filename = get_makefile_filename()
- parse_makefile(filename, g)
- except IOError, msg:
- my_msg = "invalid Python installation: unable to open %s" % filename
- if hasattr(msg, "strerror"):
- my_msg = my_msg + " (%s)" % msg.strerror
-
- raise DistutilsPlatformError(my_msg)
-
- # load the installed pyconfig.h:
- try:
-# filename = get_config_h_filename()
- filename = "/sys/src/cmd/python/pyconfig.h"
- parse_config_h(file(filename), g)
- except IOError, msg:
- my_msg = "invalid Python installation: unable to open %s" % filename
- if hasattr(msg, "strerror"):
- my_msg = my_msg + " (%s)" % msg.strerror
-
- raise DistutilsPlatformError(my_msg)
-
- # On MacOSX we need to check the setting of the environment variable
- # MACOSX_DEPLOYMENT_TARGET: configure bases some choices on it so
- # it needs to be compatible.
- # If it isn't set we set it to the configure-time value
- if sys.platform == 'darwin' and g.has_key('MACOSX_DEPLOYMENT_TARGET'):
- cfg_target = g['MACOSX_DEPLOYMENT_TARGET']
- cur_target = os.getenv('MACOSX_DEPLOYMENT_TARGET', '')
- if cur_target == '':
- cur_target = cfg_target
- os.putenv('MACOSX_DEPLOYMENT_TARGET', cfg_target)
- elif map(int, cfg_target.split('.')) > map(int, cur_target.split('.')):
- my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: now "%s" but "%s" during configure'
- % (cur_target, cfg_target))
- raise DistutilsPlatformError(my_msg)
-
- # On AIX, there are wrong paths to the linker scripts in the Makefile
- # -- these paths are relative to the Python source, but when installed
- # the scripts are in another directory.
- if python_build:
- g['LDSHARED'] = g['BLDSHARED']
-
- elif get_python_version() < '2.1':
- # The following two branches are for 1.5.2 compatibility.
- if sys.platform == 'aix4': # what about AIX 3.x ?
- # Linker script is in the config directory, not in Modules as the
- # Makefile says.
- python_lib = get_python_lib(standard_lib=1)
- ld_so_aix = os.path.join(python_lib, 'config', 'ld_so_aix')
- python_exp = os.path.join(python_lib, 'config', 'python.exp')
-
- g['LDSHARED'] = "%s %s -bI:%s" % (ld_so_aix, g['CC'], python_exp)
-
- elif sys.platform == 'beos':
- # Linker script is in the config directory. In the Makefile it is
- # relative to the srcdir, which after installation no longer makes
- # sense.
- python_lib = get_python_lib(standard_lib=1)
- linkerscript_path = string.split(g['LDSHARED'])[0]
- linkerscript_name = os.path.basename(linkerscript_path)
- linkerscript = os.path.join(python_lib, 'config',
- linkerscript_name)
-
- # XXX this isn't the right place to do this: adding the Python
- # library to the link, if needed, should be in the "build_ext"
- # command. (It's also needed for non-MS compilers on Windows, and
- # it's taken care of for them by the 'build_ext.get_libraries()'
- # method.)
- g['LDSHARED'] = ("%s -L%s/lib -lpython%s" %
- (linkerscript, PREFIX, get_python_version()))
-
- global _config_vars
- _config_vars = g
-
-
-def _init_nt():
- """Initialize the module as appropriate for NT"""
- g = {}
- # set basic install directories
- g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
- g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
-
- # XXX hmmm.. a normal install puts include files here
- g['INCLUDEPY'] = get_python_inc(plat_specific=0)
-
- g['SO'] = '.pyd'
- g['EXE'] = ".exe"
-
- global _config_vars
- _config_vars = g
-
-
-def _init_mac():
- """Initialize the module as appropriate for Macintosh systems"""
- g = {}
- # set basic install directories
- g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
- g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
-
- # XXX hmmm.. a normal install puts include files here
- g['INCLUDEPY'] = get_python_inc(plat_specific=0)
-
- import MacOS
- if not hasattr(MacOS, 'runtimemodel'):
- g['SO'] = '.ppc.slb'
- else:
- g['SO'] = '.%s.slb' % MacOS.runtimemodel
-
- # XXX are these used anywhere?
- g['install_lib'] = os.path.join(EXEC_PREFIX, "Lib")
- g['install_platlib'] = os.path.join(EXEC_PREFIX, "Mac", "Lib")
-
- # These are used by the extension module build
- g['srcdir'] = ':'
- global _config_vars
- _config_vars = g
-
-
-def _init_os2():
- """Initialize the module as appropriate for OS/2"""
- g = {}
- # set basic install directories
- g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
- g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
-
- # XXX hmmm.. a normal install puts include files here
- g['INCLUDEPY'] = get_python_inc(plat_specific=0)
-
- g['SO'] = '.pyd'
- g['EXE'] = ".exe"
-
- global _config_vars
- _config_vars = g
-
-
-def get_config_vars(*args):
- """With no arguments, return a dictionary of all configuration
- variables relevant for the current platform. Generally this includes
- everything needed to build extensions and install both pure modules and
- extensions. On Unix, this means every variable defined in Python's
- installed Makefile; on Windows and Mac OS it's a much smaller set.
-
- With arguments, return a list of values that result from looking up
- each argument in the configuration variable dictionary.
- """
- global _config_vars
- if _config_vars is None:
- func = globals().get("_init_" + os.name)
- if func:
- func()
- else:
- _config_vars = {}
-
- # Normalized versions of prefix and exec_prefix are handy to have;
- # in fact, these are the standard versions used most places in the
- # Distutils.
- _config_vars['prefix'] = PREFIX
- _config_vars['exec_prefix'] = EXEC_PREFIX
-
- if sys.platform == 'darwin':
- kernel_version = os.uname()[2] # Kernel version (8.4.3)
- major_version = int(kernel_version.split('.')[0])
-
- if major_version < 8:
- # On Mac OS X before 10.4, check if -arch and -isysroot
- # are in CFLAGS or LDFLAGS and remove them if they are.
- # This is needed when building extensions on a 10.3 system
- # using a universal build of python.
- for key in ('LDFLAGS', 'BASECFLAGS',
- # a number of derived variables. These need to be
- # patched up as well.
- 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
-
- flags = _config_vars[key]
- flags = re.sub('-arch\s+\w+\s', ' ', flags)
- flags = re.sub('-isysroot [^ \t]*', ' ', flags)
- _config_vars[key] = flags
-
- if args:
- vals = []
- for name in args:
- vals.append(_config_vars.get(name))
- return vals
- else:
- return _config_vars
-
-def get_config_var(name):
- """Return the value of a single variable using the dictionary
- returned by 'get_config_vars()'. Equivalent to
- get_config_vars().get(name)
- """
- return get_config_vars().get(name)
diff --git a/sys/lib/python/distutils/tests/__init__.py b/sys/lib/python/distutils/tests/__init__.py
deleted file mode 100644
index 7bdb91246..000000000
--- a/sys/lib/python/distutils/tests/__init__.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""Test suite for distutils.
-
-This test suite consists of a collection of test modules in the
-distutils.tests package. Each test module has a name starting with
-'test' and contains a function test_suite(). The function is expected
-to return an initialized unittest.TestSuite instance.
-
-Tests for the command classes in the distutils.command package are
-included in distutils.tests as well, instead of using a separate
-distutils.command.tests package, since command identification is done
-by import rather than matching pre-defined names.
-
-"""
-
-import os
-import sys
-import unittest
-
-
-here = os.path.dirname(__file__)
-
-
-def test_suite():
- suite = unittest.TestSuite()
- for fn in os.listdir(here):
- if fn.startswith("test") and fn.endswith(".py"):
- modname = "distutils.tests." + fn[:-3]
- __import__(modname)
- module = sys.modules[modname]
- suite.addTest(module.test_suite())
- return suite
-
-
-if __name__ == "__main__":
- unittest.main(defaultTest="test_suite")
diff --git a/sys/lib/python/distutils/tests/support.py b/sys/lib/python/distutils/tests/support.py
deleted file mode 100644
index 475ceee59..000000000
--- a/sys/lib/python/distutils/tests/support.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""Support code for distutils test cases."""
-
-import shutil
-import tempfile
-
-from distutils import log
-
-
-class LoggingSilencer(object):
-
- def setUp(self):
- super(LoggingSilencer, self).setUp()
- self.threshold = log.set_threshold(log.FATAL)
-
- def tearDown(self):
- log.set_threshold(self.threshold)
- super(LoggingSilencer, self).tearDown()
-
-
-class TempdirManager(object):
- """Mix-in class that handles temporary directories for test cases.
-
- This is intended to be used with unittest.TestCase.
- """
-
- def setUp(self):
- super(TempdirManager, self).setUp()
- self.tempdirs = []
-
- def tearDown(self):
- super(TempdirManager, self).tearDown()
- while self.tempdirs:
- d = self.tempdirs.pop()
- shutil.rmtree(d)
-
- def mkdtemp(self):
- """Create a temporary directory that will be cleaned up.
-
- Returns the path of the directory.
- """
- d = tempfile.mkdtemp()
- self.tempdirs.append(d)
- return d
-
-
-class DummyCommand:
- """Class to store options for retrieval via set_undefined_options()."""
-
- def __init__(self, **kwargs):
- for kw, val in kwargs.items():
- setattr(self, kw, val)
-
- def ensure_finalized(self):
- pass
diff --git a/sys/lib/python/distutils/tests/test_build_py.py b/sys/lib/python/distutils/tests/test_build_py.py
deleted file mode 100644
index 78e4c55ed..000000000
--- a/sys/lib/python/distutils/tests/test_build_py.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""Tests for distutils.command.build_py."""
-
-import os
-import unittest
-
-from distutils.command.build_py import build_py
-from distutils.core import Distribution
-
-from distutils.tests import support
-
-
-class BuildPyTestCase(support.TempdirManager,
- support.LoggingSilencer,
- unittest.TestCase):
-
- def test_package_data(self):
- sources = self.mkdtemp()
- f = open(os.path.join(sources, "__init__.py"), "w")
- f.write("# Pretend this is a package.")
- f.close()
- f = open(os.path.join(sources, "README.txt"), "w")
- f.write("Info about this package")
- f.close()
-
- destination = self.mkdtemp()
-
- dist = Distribution({"packages": ["pkg"],
- "package_dir": {"pkg": sources}})
- # script_name need not exist, it just need to be initialized
- dist.script_name = os.path.join(sources, "setup.py")
- dist.command_obj["build"] = support.DummyCommand(
- force=0,
- build_lib=destination)
- dist.packages = ["pkg"]
- dist.package_data = {"pkg": ["README.txt"]}
- dist.package_dir = {"pkg": sources}
-
- cmd = build_py(dist)
- cmd.compile = 1
- cmd.ensure_finalized()
- self.assertEqual(cmd.package_data, dist.package_data)
-
- cmd.run()
-
- # This makes sure the list of outputs includes byte-compiled
- # files for Python modules but not for package data files
- # (there shouldn't *be* byte-code files for those!).
- #
- self.assertEqual(len(cmd.get_outputs()), 3)
- pkgdest = os.path.join(destination, "pkg")
- files = os.listdir(pkgdest)
- self.assert_("__init__.py" in files)
- self.assert_("__init__.pyc" in files)
- self.assert_("README.txt" in files)
-
-
-def test_suite():
- return unittest.makeSuite(BuildPyTestCase)
-
-if __name__ == "__main__":
- unittest.main(defaultTest="test_suite")
diff --git a/sys/lib/python/distutils/tests/test_build_scripts.py b/sys/lib/python/distutils/tests/test_build_scripts.py
deleted file mode 100644
index 666ca44c1..000000000
--- a/sys/lib/python/distutils/tests/test_build_scripts.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""Tests for distutils.command.build_scripts."""
-
-import os
-import unittest
-
-from distutils.command.build_scripts import build_scripts
-from distutils.core import Distribution
-
-from distutils.tests import support
-
-
-class BuildScriptsTestCase(support.TempdirManager,
- support.LoggingSilencer,
- unittest.TestCase):
-
- def test_default_settings(self):
- cmd = self.get_build_scripts_cmd("/foo/bar", [])
- self.assert_(not cmd.force)
- self.assert_(cmd.build_dir is None)
-
- cmd.finalize_options()
-
- self.assert_(cmd.force)
- self.assertEqual(cmd.build_dir, "/foo/bar")
-
- def test_build(self):
- source = self.mkdtemp()
- target = self.mkdtemp()
- expected = self.write_sample_scripts(source)
-
- cmd = self.get_build_scripts_cmd(target,
- [os.path.join(source, fn)
- for fn in expected])
- cmd.finalize_options()
- cmd.run()
-
- built = os.listdir(target)
- for name in expected:
- self.assert_(name in built)
-
- def get_build_scripts_cmd(self, target, scripts):
- import sys
- dist = Distribution()
- dist.scripts = scripts
- dist.command_obj["build"] = support.DummyCommand(
- build_scripts=target,
- force=1,
- executable=sys.executable
- )
- return build_scripts(dist)
-
- def write_sample_scripts(self, dir):
- expected = []
- expected.append("script1.py")
- self.write_script(dir, "script1.py",
- ("#! /usr/bin/env python2.3\n"
- "# bogus script w/ Python sh-bang\n"
- "pass\n"))
- expected.append("script2.py")
- self.write_script(dir, "script2.py",
- ("#!/usr/bin/python\n"
- "# bogus script w/ Python sh-bang\n"
- "pass\n"))
- expected.append("shell.sh")
- self.write_script(dir, "shell.sh",
- ("#!/bin/sh\n"
- "# bogus shell script w/ sh-bang\n"
- "exit 0\n"))
- return expected
-
- def write_script(self, dir, name, text):
- f = open(os.path.join(dir, name), "w")
- f.write(text)
- f.close()
-
-
-def test_suite():
- return unittest.makeSuite(BuildScriptsTestCase)
-
-if __name__ == "__main__":
- unittest.main(defaultTest="test_suite")
diff --git a/sys/lib/python/distutils/tests/test_dist.py b/sys/lib/python/distutils/tests/test_dist.py
deleted file mode 100644
index 4d2a7cdf1..000000000
--- a/sys/lib/python/distutils/tests/test_dist.py
+++ /dev/null
@@ -1,189 +0,0 @@
-"""Tests for distutils.dist."""
-
-import distutils.cmd
-import distutils.dist
-import os
-import shutil
-import StringIO
-import sys
-import tempfile
-import unittest
-
-from test.test_support import TESTFN
-
-
-class test_dist(distutils.cmd.Command):
- """Sample distutils extension command."""
-
- user_options = [
- ("sample-option=", "S", "help text"),
- ]
-
- def initialize_options(self):
- self.sample_option = None
-
-
-class TestDistribution(distutils.dist.Distribution):
- """Distribution subclasses that avoids the default search for
- configuration files.
-
- The ._config_files attribute must be set before
- .parse_config_files() is called.
- """
-
- def find_config_files(self):
- return self._config_files
-
-
-class DistributionTestCase(unittest.TestCase):
-
- def setUp(self):
- self.argv = sys.argv[:]
- del sys.argv[1:]
-
- def tearDown(self):
- sys.argv[:] = self.argv
-
- def create_distribution(self, configfiles=()):
- d = TestDistribution()
- d._config_files = configfiles
- d.parse_config_files()
- d.parse_command_line()
- return d
-
- def test_command_packages_unspecified(self):
- sys.argv.append("build")
- d = self.create_distribution()
- self.assertEqual(d.get_command_packages(), ["distutils.command"])
-
- def test_command_packages_cmdline(self):
- sys.argv.extend(["--command-packages",
- "foo.bar,distutils.tests",
- "test_dist",
- "-Ssometext",
- ])
- d = self.create_distribution()
- # let's actually try to load our test command:
- self.assertEqual(d.get_command_packages(),
- ["distutils.command", "foo.bar", "distutils.tests"])
- cmd = d.get_command_obj("test_dist")
- self.assert_(isinstance(cmd, test_dist))
- self.assertEqual(cmd.sample_option, "sometext")
-
- def test_command_packages_configfile(self):
- sys.argv.append("build")
- f = open(TESTFN, "w")
- try:
- print >>f, "[global]"
- print >>f, "command_packages = foo.bar, splat"
- f.close()
- d = self.create_distribution([TESTFN])
- self.assertEqual(d.get_command_packages(),
- ["distutils.command", "foo.bar", "splat"])
-
- # ensure command line overrides config:
- sys.argv[1:] = ["--command-packages", "spork", "build"]
- d = self.create_distribution([TESTFN])
- self.assertEqual(d.get_command_packages(),
- ["distutils.command", "spork"])
-
- # Setting --command-packages to '' should cause the default to
- # be used even if a config file specified something else:
- sys.argv[1:] = ["--command-packages", "", "build"]
- d = self.create_distribution([TESTFN])
- self.assertEqual(d.get_command_packages(), ["distutils.command"])
-
- finally:
- os.unlink(TESTFN)
-
-
-class MetadataTestCase(unittest.TestCase):
-
- def test_simple_metadata(self):
- attrs = {"name": "package",
- "version": "1.0"}
- dist = distutils.dist.Distribution(attrs)
- meta = self.format_metadata(dist)
- self.assert_("Metadata-Version: 1.0" in meta)
- self.assert_("provides:" not in meta.lower())
- self.assert_("requires:" not in meta.lower())
- self.assert_("obsoletes:" not in meta.lower())
-
- def test_provides(self):
- attrs = {"name": "package",
- "version": "1.0",
- "provides": ["package", "package.sub"]}
- dist = distutils.dist.Distribution(attrs)
- self.assertEqual(dist.metadata.get_provides(),
- ["package", "package.sub"])
- self.assertEqual(dist.get_provides(),
- ["package", "package.sub"])
- meta = self.format_metadata(dist)
- self.assert_("Metadata-Version: 1.1" in meta)
- self.assert_("requires:" not in meta.lower())
- self.assert_("obsoletes:" not in meta.lower())
-
- def test_provides_illegal(self):
- self.assertRaises(ValueError,
- distutils.dist.Distribution,
- {"name": "package",
- "version": "1.0",
- "provides": ["my.pkg (splat)"]})
-
- def test_requires(self):
- attrs = {"name": "package",
- "version": "1.0",
- "requires": ["other", "another (==1.0)"]}
- dist = distutils.dist.Distribution(attrs)
- self.assertEqual(dist.metadata.get_requires(),
- ["other", "another (==1.0)"])
- self.assertEqual(dist.get_requires(),
- ["other", "another (==1.0)"])
- meta = self.format_metadata(dist)
- self.assert_("Metadata-Version: 1.1" in meta)
- self.assert_("provides:" not in meta.lower())
- self.assert_("Requires: other" in meta)
- self.assert_("Requires: another (==1.0)" in meta)
- self.assert_("obsoletes:" not in meta.lower())
-
- def test_requires_illegal(self):
- self.assertRaises(ValueError,
- distutils.dist.Distribution,
- {"name": "package",
- "version": "1.0",
- "requires": ["my.pkg (splat)"]})
-
- def test_obsoletes(self):
- attrs = {"name": "package",
- "version": "1.0",
- "obsoletes": ["other", "another (<1.0)"]}
- dist = distutils.dist.Distribution(attrs)
- self.assertEqual(dist.metadata.get_obsoletes(),
- ["other", "another (<1.0)"])
- self.assertEqual(dist.get_obsoletes(),
- ["other", "another (<1.0)"])
- meta = self.format_metadata(dist)
- self.assert_("Metadata-Version: 1.1" in meta)
- self.assert_("provides:" not in meta.lower())
- self.assert_("requires:" not in meta.lower())
- self.assert_("Obsoletes: other" in meta)
- self.assert_("Obsoletes: another (<1.0)" in meta)
-
- def test_obsoletes_illegal(self):
- self.assertRaises(ValueError,
- distutils.dist.Distribution,
- {"name": "package",
- "version": "1.0",
- "obsoletes": ["my.pkg (splat)"]})
-
- def format_metadata(self, dist):
- sio = StringIO.StringIO()
- dist.metadata.write_pkg_file(sio)
- return sio.getvalue()
-
-
-def test_suite():
- suite = unittest.TestSuite()
- suite.addTest(unittest.makeSuite(DistributionTestCase))
- suite.addTest(unittest.makeSuite(MetadataTestCase))
- return suite
diff --git a/sys/lib/python/distutils/tests/test_install.py b/sys/lib/python/distutils/tests/test_install.py
deleted file mode 100644
index c834b91b3..000000000
--- a/sys/lib/python/distutils/tests/test_install.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""Tests for distutils.command.install."""
-
-import os
-import unittest
-
-from distutils.command.install import install
-from distutils.core import Distribution
-
-from distutils.tests import support
-
-
-class InstallTestCase(support.TempdirManager, unittest.TestCase):
-
- def test_home_installation_scheme(self):
- # This ensure two things:
- # - that --home generates the desired set of directory names
- # - test --home is supported on all platforms
- builddir = self.mkdtemp()
- destination = os.path.join(builddir, "installation")
-
- dist = Distribution({"name": "foopkg"})
- # script_name need not exist, it just need to be initialized
- dist.script_name = os.path.join(builddir, "setup.py")
- dist.command_obj["build"] = support.DummyCommand(
- build_base=builddir,
- build_lib=os.path.join(builddir, "lib"),
- )
-
- cmd = install(dist)
- cmd.home = destination
- cmd.ensure_finalized()
-
- self.assertEqual(cmd.install_base, destination)
- self.assertEqual(cmd.install_platbase, destination)
-
- def check_path(got, expected):
- got = os.path.normpath(got)
- expected = os.path.normpath(expected)
- self.assertEqual(got, expected)
-
- libdir = os.path.join(destination, "lib", "python")
- check_path(cmd.install_lib, libdir)
- check_path(cmd.install_platlib, libdir)
- check_path(cmd.install_purelib, libdir)
- check_path(cmd.install_headers,
- os.path.join(destination, "include", "python", "foopkg"))
- check_path(cmd.install_scripts, os.path.join(destination, "bin"))
- check_path(cmd.install_data, destination)
-
-
-def test_suite():
- return unittest.makeSuite(InstallTestCase)
-
-if __name__ == "__main__":
- unittest.main(defaultTest="test_suite")
diff --git a/sys/lib/python/distutils/tests/test_install_scripts.py b/sys/lib/python/distutils/tests/test_install_scripts.py
deleted file mode 100644
index fffa6ef2c..000000000
--- a/sys/lib/python/distutils/tests/test_install_scripts.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""Tests for distutils.command.install_scripts."""
-
-import os
-import unittest
-
-from distutils.command.install_scripts import install_scripts
-from distutils.core import Distribution
-
-from distutils.tests import support
-
-
-class InstallScriptsTestCase(support.TempdirManager,
- support.LoggingSilencer,
- unittest.TestCase):
-
- def test_default_settings(self):
- dist = Distribution()
- dist.command_obj["build"] = support.DummyCommand(
- build_scripts="/foo/bar")
- dist.command_obj["install"] = support.DummyCommand(
- install_scripts="/splat/funk",
- force=1,
- skip_build=1,
- )
- cmd = install_scripts(dist)
- self.assert_(not cmd.force)
- self.assert_(not cmd.skip_build)
- self.assert_(cmd.build_dir is None)
- self.assert_(cmd.install_dir is None)
-
- cmd.finalize_options()
-
- self.assert_(cmd.force)
- self.assert_(cmd.skip_build)
- self.assertEqual(cmd.build_dir, "/foo/bar")
- self.assertEqual(cmd.install_dir, "/splat/funk")
-
- def test_installation(self):
- source = self.mkdtemp()
- expected = []
-
- def write_script(name, text):
- expected.append(name)
- f = open(os.path.join(source, name), "w")
- f.write(text)
- f.close()
-
- write_script("script1.py", ("#! /usr/bin/env python2.3\n"
- "# bogus script w/ Python sh-bang\n"
- "pass\n"))
- write_script("script2.py", ("#!/usr/bin/python\n"
- "# bogus script w/ Python sh-bang\n"
- "pass\n"))
- write_script("shell.sh", ("#!/bin/sh\n"
- "# bogus shell script w/ sh-bang\n"
- "exit 0\n"))
-
- target = self.mkdtemp()
- dist = Distribution()
- dist.command_obj["build"] = support.DummyCommand(build_scripts=source)
- dist.command_obj["install"] = support.DummyCommand(
- install_scripts=target,
- force=1,
- skip_build=1,
- )
- cmd = install_scripts(dist)
- cmd.finalize_options()
- cmd.run()
-
- installed = os.listdir(target)
- for name in expected:
- self.assert_(name in installed)
-
-
-def test_suite():
- return unittest.makeSuite(InstallScriptsTestCase)
-
-if __name__ == "__main__":
- unittest.main(defaultTest="test_suite")
diff --git a/sys/lib/python/distutils/tests/test_versionpredicate.py b/sys/lib/python/distutils/tests/test_versionpredicate.py
deleted file mode 100644
index 8a60dbe80..000000000
--- a/sys/lib/python/distutils/tests/test_versionpredicate.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Tests harness for distutils.versionpredicate.
-
-"""
-
-import distutils.versionpredicate
-import doctest
-
-def test_suite():
- return doctest.DocTestSuite(distutils.versionpredicate)
diff --git a/sys/lib/python/distutils/text_file.py b/sys/lib/python/distutils/text_file.py
deleted file mode 100644
index a1eb022d4..000000000
--- a/sys/lib/python/distutils/text_file.py
+++ /dev/null
@@ -1,382 +0,0 @@
-"""text_file
-
-provides the TextFile class, which gives an interface to text files
-that (optionally) takes care of stripping comments, ignoring blank
-lines, and joining lines with backslashes."""
-
-__revision__ = "$Id: text_file.py 29687 2002-11-14 02:25:42Z akuchling $"
-
-from types import *
-import sys, os, string
-
-
-class TextFile:
-
- """Provides a file-like object that takes care of all the things you
- commonly want to do when processing a text file that has some
- line-by-line syntax: strip comments (as long as "#" is your
- comment character), skip blank lines, join adjacent lines by
- escaping the newline (ie. backslash at end of line), strip
- leading and/or trailing whitespace. All of these are optional
- and independently controllable.
-
- Provides a 'warn()' method so you can generate warning messages that
- report physical line number, even if the logical line in question
- spans multiple physical lines. Also provides 'unreadline()' for
- implementing line-at-a-time lookahead.
-
- Constructor is called as:
-
- TextFile (filename=None, file=None, **options)
-
- It bombs (RuntimeError) if both 'filename' and 'file' are None;
- 'filename' should be a string, and 'file' a file object (or
- something that provides 'readline()' and 'close()' methods). It is
- recommended that you supply at least 'filename', so that TextFile
- can include it in warning messages. If 'file' is not supplied,
- TextFile creates its own using the 'open()' builtin.
-
- The options are all boolean, and affect the value returned by
- 'readline()':
- strip_comments [default: true]
- strip from "#" to end-of-line, as well as any whitespace
- leading up to the "#" -- unless it is escaped by a backslash
- lstrip_ws [default: false]
- strip leading whitespace from each line before returning it
- rstrip_ws [default: true]
- strip trailing whitespace (including line terminator!) from
- each line before returning it
- skip_blanks [default: true}
- skip lines that are empty *after* stripping comments and
- whitespace. (If both lstrip_ws and rstrip_ws are false,
- then some lines may consist of solely whitespace: these will
- *not* be skipped, even if 'skip_blanks' is true.)
- join_lines [default: false]
- if a backslash is the last non-newline character on a line
- after stripping comments and whitespace, join the following line
- to it to form one "logical line"; if N consecutive lines end
- with a backslash, then N+1 physical lines will be joined to
- form one logical line.
- collapse_join [default: false]
- strip leading whitespace from lines that are joined to their
- predecessor; only matters if (join_lines and not lstrip_ws)
-
- Note that since 'rstrip_ws' can strip the trailing newline, the
- semantics of 'readline()' must differ from those of the builtin file
- object's 'readline()' method! In particular, 'readline()' returns
- None for end-of-file: an empty string might just be a blank line (or
- an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
- not."""
-
- default_options = { 'strip_comments': 1,
- 'skip_blanks': 1,
- 'lstrip_ws': 0,
- 'rstrip_ws': 1,
- 'join_lines': 0,
- 'collapse_join': 0,
- }
-
- def __init__ (self, filename=None, file=None, **options):
- """Construct a new TextFile object. At least one of 'filename'
- (a string) and 'file' (a file-like object) must be supplied.
- They keyword argument options are described above and affect
- the values returned by 'readline()'."""
-
- if filename is None and file is None:
- raise RuntimeError, \
- "you must supply either or both of 'filename' and 'file'"
-
- # set values for all options -- either from client option hash
- # or fallback to default_options
- for opt in self.default_options.keys():
- if options.has_key (opt):
- setattr (self, opt, options[opt])
-
- else:
- setattr (self, opt, self.default_options[opt])
-
- # sanity check client option hash
- for opt in options.keys():
- if not self.default_options.has_key (opt):
- raise KeyError, "invalid TextFile option '%s'" % opt
-
- if file is None:
- self.open (filename)
- else:
- self.filename = filename
- self.file = file
- self.current_line = 0 # assuming that file is at BOF!
-
- # 'linebuf' is a stack of lines that will be emptied before we
- # actually read from the file; it's only populated by an
- # 'unreadline()' operation
- self.linebuf = []
-
-
- def open (self, filename):
- """Open a new file named 'filename'. This overrides both the
- 'filename' and 'file' arguments to the constructor."""
-
- self.filename = filename
- self.file = open (self.filename, 'r')
- self.current_line = 0
-
-
- def close (self):
- """Close the current file and forget everything we know about it
- (filename, current line number)."""
-
- self.file.close ()
- self.file = None
- self.filename = None
- self.current_line = None
-
-
- def gen_error (self, msg, line=None):
- outmsg = []
- if line is None:
- line = self.current_line
- outmsg.append(self.filename + ", ")
- if type (line) in (ListType, TupleType):
- outmsg.append("lines %d-%d: " % tuple (line))
- else:
- outmsg.append("line %d: " % line)
- outmsg.append(str(msg))
- return string.join(outmsg, "")
-
-
- def error (self, msg, line=None):
- raise ValueError, "error: " + self.gen_error(msg, line)
-
- def warn (self, msg, line=None):
- """Print (to stderr) a warning message tied to the current logical
- line in the current file. If the current logical line in the
- file spans multiple physical lines, the warning refers to the
- whole range, eg. "lines 3-5". If 'line' supplied, it overrides
- the current line number; it may be a list or tuple to indicate a
- range of physical lines, or an integer for a single physical
- line."""
- sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
-
-
- def readline (self):
- """Read and return a single logical line from the current file (or
- from an internal buffer if lines have previously been "unread"
- with 'unreadline()'). If the 'join_lines' option is true, this
- may involve reading multiple physical lines concatenated into a
- single string. Updates the current line number, so calling
- 'warn()' after 'readline()' emits a warning about the physical
- line(s) just read. Returns None on end-of-file, since the empty
- string can occur if 'rstrip_ws' is true but 'strip_blanks' is
- not."""
-
- # If any "unread" lines waiting in 'linebuf', return the top
- # one. (We don't actually buffer read-ahead data -- lines only
- # get put in 'linebuf' if the client explicitly does an
- # 'unreadline()'.
- if self.linebuf:
- line = self.linebuf[-1]
- del self.linebuf[-1]
- return line
-
- buildup_line = ''
-
- while 1:
- # read the line, make it None if EOF
- line = self.file.readline()
- if line == '': line = None
-
- if self.strip_comments and line:
-
- # Look for the first "#" in the line. If none, never
- # mind. If we find one and it's the first character, or
- # is not preceded by "\", then it starts a comment --
- # strip the comment, strip whitespace before it, and
- # carry on. Otherwise, it's just an escaped "#", so
- # unescape it (and any other escaped "#"'s that might be
- # lurking in there) and otherwise leave the line alone.
-
- pos = string.find (line, "#")
- if pos == -1: # no "#" -- no comments
- pass
-
- # It's definitely a comment -- either "#" is the first
- # character, or it's elsewhere and unescaped.
- elif pos == 0 or line[pos-1] != "\\":
- # Have to preserve the trailing newline, because it's
- # the job of a later step (rstrip_ws) to remove it --
- # and if rstrip_ws is false, we'd better preserve it!
- # (NB. this means that if the final line is all comment
- # and has no trailing newline, we will think that it's
- # EOF; I think that's OK.)
- eol = (line[-1] == '\n') and '\n' or ''
- line = line[0:pos] + eol
-
- # If all that's left is whitespace, then skip line
- # *now*, before we try to join it to 'buildup_line' --
- # that way constructs like
- # hello \\
- # # comment that should be ignored
- # there
- # result in "hello there".
- if string.strip(line) == "":
- continue
-
- else: # it's an escaped "#"
- line = string.replace (line, "\\#", "#")
-
-
- # did previous line end with a backslash? then accumulate
- if self.join_lines and buildup_line:
- # oops: end of file
- if line is None:
- self.warn ("continuation line immediately precedes "
- "end-of-file")
- return buildup_line
-
- if self.collapse_join:
- line = string.lstrip (line)
- line = buildup_line + line
-
- # careful: pay attention to line number when incrementing it
- if type (self.current_line) is ListType:
- self.current_line[1] = self.current_line[1] + 1
- else:
- self.current_line = [self.current_line,
- self.current_line+1]
- # just an ordinary line, read it as usual
- else:
- if line is None: # eof
- return None
-
- # still have to be careful about incrementing the line number!
- if type (self.current_line) is ListType:
- self.current_line = self.current_line[1] + 1
- else:
- self.current_line = self.current_line + 1
-
-
- # strip whitespace however the client wants (leading and
- # trailing, or one or the other, or neither)
- if self.lstrip_ws and self.rstrip_ws:
- line = string.strip (line)
- elif self.lstrip_ws:
- line = string.lstrip (line)
- elif self.rstrip_ws:
- line = string.rstrip (line)
-
- # blank line (whether we rstrip'ed or not)? skip to next line
- # if appropriate
- if (line == '' or line == '\n') and self.skip_blanks:
- continue
-
- if self.join_lines:
- if line[-1] == '\\':
- buildup_line = line[:-1]
- continue
-
- if line[-2:] == '\\\n':
- buildup_line = line[0:-2] + '\n'
- continue
-
- # well, I guess there's some actual content there: return it
- return line
-
- # readline ()
-
-
- def readlines (self):
- """Read and return the list of all logical lines remaining in the
- current file."""
-
- lines = []
- while 1:
- line = self.readline()
- if line is None:
- return lines
- lines.append (line)
-
-
- def unreadline (self, line):
- """Push 'line' (a string) onto an internal buffer that will be
- checked by future 'readline()' calls. Handy for implementing
- a parser with line-at-a-time lookahead."""
-
- self.linebuf.append (line)
-
-
-if __name__ == "__main__":
- test_data = """# test file
-
-line 3 \\
-# intervening comment
- continues on next line
-"""
- # result 1: no fancy options
- result1 = map (lambda x: x + "\n", string.split (test_data, "\n")[0:-1])
-
- # result 2: just strip comments
- result2 = ["\n",
- "line 3 \\\n",
- " continues on next line\n"]
-
- # result 3: just strip blank lines
- result3 = ["# test file\n",
- "line 3 \\\n",
- "# intervening comment\n",
- " continues on next line\n"]
-
- # result 4: default, strip comments, blank lines, and trailing whitespace
- result4 = ["line 3 \\",
- " continues on next line"]
-
- # result 5: strip comments and blanks, plus join lines (but don't
- # "collapse" joined lines
- result5 = ["line 3 continues on next line"]
-
- # result 6: strip comments and blanks, plus join lines (and
- # "collapse" joined lines
- result6 = ["line 3 continues on next line"]
-
- def test_input (count, description, file, expected_result):
- result = file.readlines ()
- # result = string.join (result, '')
- if result == expected_result:
- print "ok %d (%s)" % (count, description)
- else:
- print "not ok %d (%s):" % (count, description)
- print "** expected:"
- print expected_result
- print "** received:"
- print result
-
-
- filename = "test.txt"
- out_file = open (filename, "w")
- out_file.write (test_data)
- out_file.close ()
-
- in_file = TextFile (filename, strip_comments=0, skip_blanks=0,
- lstrip_ws=0, rstrip_ws=0)
- test_input (1, "no processing", in_file, result1)
-
- in_file = TextFile (filename, strip_comments=1, skip_blanks=0,
- lstrip_ws=0, rstrip_ws=0)
- test_input (2, "strip comments", in_file, result2)
-
- in_file = TextFile (filename, strip_comments=0, skip_blanks=1,
- lstrip_ws=0, rstrip_ws=0)
- test_input (3, "strip blanks", in_file, result3)
-
- in_file = TextFile (filename)
- test_input (4, "default processing", in_file, result4)
-
- in_file = TextFile (filename, strip_comments=1, skip_blanks=1,
- join_lines=1, rstrip_ws=1)
- test_input (5, "join lines without collapsing", in_file, result5)
-
- in_file = TextFile (filename, strip_comments=1, skip_blanks=1,
- join_lines=1, rstrip_ws=1, collapse_join=1)
- test_input (6, "join lines with collapsing", in_file, result6)
-
- os.remove (filename)
diff --git a/sys/lib/python/distutils/unixccompiler.py b/sys/lib/python/distutils/unixccompiler.py
deleted file mode 100644
index 93fa567ad..000000000
--- a/sys/lib/python/distutils/unixccompiler.py
+++ /dev/null
@@ -1,315 +0,0 @@
-"""distutils.unixccompiler
-
-Contains the UnixCCompiler class, a subclass of CCompiler that handles
-the "typical" Unix-style command-line C compiler:
- * macros defined with -Dname[=value]
- * macros undefined with -Uname
- * include search directories specified with -Idir
- * libraries specified with -lllib
- * library search directories specified with -Ldir
- * compile handled by 'cc' (or similar) executable with -c option:
- compiles .c to .o
- * link static library handled by 'ar' command (possibly with 'ranlib')
- * link shared library handled by 'cc -shared'
-"""
-
-__revision__ = "$Id: unixccompiler.py 52237 2006-10-08 17:52:37Z ronald.oussoren $"
-
-import os, sys
-from types import StringType, NoneType
-from copy import copy
-
-from distutils import sysconfig
-from distutils.dep_util import newer
-from distutils.ccompiler import \
- CCompiler, gen_preprocess_options, gen_lib_options
-from distutils.errors import \
- DistutilsExecError, CompileError, LibError, LinkError
-from distutils import log
-
-# XXX Things not currently handled:
-# * optimization/debug/warning flags; we just use whatever's in Python's
-# Makefile and live with it. Is this adequate? If not, we might
-# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
-# SunCCompiler, and I suspect down that road lies madness.
-# * even if we don't know a warning flag from an optimization flag,
-# we need some way for outsiders to feed preprocessor/compiler/linker
-# flags in to us -- eg. a sysadmin might want to mandate certain flags
-# via a site config file, or a user might want to set something for
-# compiling this module distribution only via the setup.py command
-# line, whatever. As long as these options come from something on the
-# current system, they can be as system-dependent as they like, and we
-# should just happily stuff them into the preprocessor/compiler/linker
-# options and carry on.
-
-def _darwin_compiler_fixup(compiler_so, cc_args):
- """
- This function will strip '-isysroot PATH' and '-arch ARCH' from the
- compile flags if the user has specified one them in extra_compile_flags.
-
- This is needed because '-arch ARCH' adds another architecture to the
- build, without a way to remove an architecture. Furthermore GCC will
- barf if multiple '-isysroot' arguments are present.
- """
- stripArch = stripSysroot = 0
-
- compiler_so = list(compiler_so)
- kernel_version = os.uname()[2] # 8.4.3
- major_version = int(kernel_version.split('.')[0])
-
- if major_version < 8:
- # OSX before 10.4.0, these don't support -arch and -isysroot at
- # all.
- stripArch = stripSysroot = True
- else:
- stripArch = '-arch' in cc_args
- stripSysroot = '-isysroot' in cc_args
-
- if stripArch:
- while 1:
- try:
- index = compiler_so.index('-arch')
- # Strip this argument and the next one:
- del compiler_so[index:index+2]
- except ValueError:
- break
-
- if stripSysroot:
- try:
- index = compiler_so.index('-isysroot')
- # Strip this argument and the next one:
- del compiler_so[index:index+2]
- except ValueError:
- pass
-
- # Check if the SDK that is used during compilation actually exists,
- # the universal build requires the usage of a universal SDK and not all
- # users have that installed by default.
- sysroot = None
- if '-isysroot' in cc_args:
- idx = cc_args.index('-isysroot')
- sysroot = cc_args[idx+1]
- elif '-isysroot' in compiler_so:
- idx = compiler_so.index('-isysroot')
- sysroot = compiler_so[idx+1]
-
- if sysroot and not os.path.isdir(sysroot):
- log.warn("Compiling with an SDK that doesn't seem to exist: %s",
- sysroot)
- log.warn("Please check your Xcode installation")
-
- return compiler_so
-
-class UnixCCompiler(CCompiler):
-
- compiler_type = 'unix'
-
- # These are used by CCompiler in two places: the constructor sets
- # instance attributes 'preprocessor', 'compiler', etc. from them, and
- # 'set_executable()' allows any of these to be set. The defaults here
- # are pretty generic; they will probably have to be set by an outsider
- # (eg. using information discovered by the sysconfig about building
- # Python extensions).
- executables = {'preprocessor' : None,
- 'compiler' : ["cc"],
- 'compiler_so' : ["cc"],
- 'compiler_cxx' : ["cc"],
- 'linker_so' : ["cc", "-shared"],
- 'linker_exe' : ["cc"],
- 'archiver' : ["ar", "-cr"],
- 'ranlib' : None,
- }
-
- if sys.platform[:6] == "darwin":
- executables['ranlib'] = ["ranlib"]
-
- # Needed for the filename generation methods provided by the base
- # class, CCompiler. NB. whoever instantiates/uses a particular
- # UnixCCompiler instance should set 'shared_lib_ext' -- we set a
- # reasonable common default here, but it's not necessarily used on all
- # Unices!
-
- src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
- obj_extension = ".o"
- static_lib_extension = ".a"
- shared_lib_extension = ".so"
- dylib_lib_extension = ".dylib"
- static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
- if sys.platform == "cygwin":
- exe_extension = ".exe"
-
- def preprocess(self, source,
- output_file=None, macros=None, include_dirs=None,
- extra_preargs=None, extra_postargs=None):
- ignore, macros, include_dirs = \
- self._fix_compile_args(None, macros, include_dirs)
- pp_opts = gen_preprocess_options(macros, include_dirs)
- pp_args = self.preprocessor + pp_opts
- if output_file:
- pp_args.extend(['-o', output_file])
- if extra_preargs:
- pp_args[:0] = extra_preargs
- if extra_postargs:
- pp_args.extend(extra_postargs)
- pp_args.append(source)
-
- # We need to preprocess: either we're being forced to, or we're
- # generating output to stdout, or there's a target output file and
- # the source file is newer than the target (or the target doesn't
- # exist).
- if self.force or output_file is None or newer(source, output_file):
- if output_file:
- self.mkpath(os.path.dirname(output_file))
- try:
- self.spawn(pp_args)
- except DistutilsExecError, msg:
- raise CompileError, msg
-
- def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
- compiler_so = self.compiler_so
- if sys.platform == 'darwin':
- compiler_so = _darwin_compiler_fixup(compiler_so, cc_args + extra_postargs)
- try:
- self.spawn(compiler_so + cc_args + [src, '-o', obj] +
- extra_postargs)
- except DistutilsExecError, msg:
- raise CompileError, msg
-
- def create_static_lib(self, objects, output_libname,
- output_dir=None, debug=0, target_lang=None):
- objects, output_dir = self._fix_object_args(objects, output_dir)
-
- output_filename = \
- self.library_filename(output_libname, output_dir=output_dir)
-
- if self._need_link(objects, output_filename):
- self.mkpath(os.path.dirname(output_filename))
- self.spawn(self.archiver +
- [output_filename] +
- objects + self.objects)
-
- # Not many Unices required ranlib anymore -- SunOS 4.x is, I
- # think the only major Unix that does. Maybe we need some
- # platform intelligence here to skip ranlib if it's not
- # needed -- or maybe Python's configure script took care of
- # it for us, hence the check for leading colon.
- if self.ranlib:
- try:
- self.spawn(self.ranlib + [output_filename])
- except DistutilsExecError, msg:
- raise LibError, msg
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- def link(self, target_desc, objects,
- output_filename, output_dir=None, libraries=None,
- library_dirs=None, runtime_library_dirs=None,
- export_symbols=None, debug=0, extra_preargs=None,
- extra_postargs=None, build_temp=None, target_lang=None):
- objects, output_dir = self._fix_object_args(objects, output_dir)
- libraries, library_dirs, runtime_library_dirs = \
- self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
-
- lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
- libraries)
- if type(output_dir) not in (StringType, NoneType):
- raise TypeError, "'output_dir' must be a string or None"
- if output_dir is not None:
- output_filename = os.path.join(output_dir, output_filename)
-
- if self._need_link(objects, output_filename):
- ld_args = (objects + self.objects +
- lib_opts + ['-o', output_filename])
- if debug:
- ld_args[:0] = ['-g']
- if extra_preargs:
- ld_args[:0] = extra_preargs
- if extra_postargs:
- ld_args.extend(extra_postargs)
- self.mkpath(os.path.dirname(output_filename))
- try:
- if target_desc == CCompiler.EXECUTABLE:
- linker = self.linker_exe[:]
- else:
- linker = self.linker_so[:]
- if target_lang == "c++" and self.compiler_cxx:
- # skip over environment variable settings if /usr/bin/env
- # is used to set up the linker's environment.
- # This is needed on OSX. Note: this assumes that the
- # normal and C++ compiler have the same environment
- # settings.
- i = 0
- if os.path.basename(linker[0]) == "env":
- i = 1
- while '=' in linker[i]:
- i = i + 1
-
- linker[i] = self.compiler_cxx[i]
-
- if sys.platform == 'darwin':
- linker = _darwin_compiler_fixup(linker, ld_args)
-
- self.spawn(linker + ld_args)
- except DistutilsExecError, msg:
- raise LinkError, msg
- else:
- log.debug("skipping %s (up-to-date)", output_filename)
-
- # -- Miscellaneous methods -----------------------------------------
- # These are all used by the 'gen_lib_options() function, in
- # ccompiler.py.
-
- def library_dir_option(self, dir):
- return "-L" + dir
-
- def runtime_library_dir_option(self, dir):
- # XXX Hackish, at the very least. See Python bug #445902:
- # http://sourceforge.net/tracker/index.php
- # ?func=detail&aid=445902&group_id=5470&atid=105470
- # Linkers on different platforms need different options to
- # specify that directories need to be added to the list of
- # directories searched for dependencies when a dynamic library
- # is sought. GCC has to be told to pass the -R option through
- # to the linker, whereas other compilers just know this.
- # Other compilers may need something slightly different. At
- # this time, there's no way to determine this information from
- # the configuration data stored in the Python installation, so
- # we use this hack.
- compiler = os.path.basename(sysconfig.get_config_var("CC"))
- if sys.platform[:6] == "darwin":
- # MacOSX's linker doesn't understand the -R flag at all
- return "-L" + dir
- elif sys.platform[:5] == "hp-ux":
- return "+s -L" + dir
- elif sys.platform[:7] == "irix646" or sys.platform[:6] == "osf1V5":
- return ["-rpath", dir]
- elif compiler[:3] == "gcc" or compiler[:3] == "g++":
- return "-Wl,-R" + dir
- else:
- return "-R" + dir
-
- def library_option(self, lib):
- return "-l" + lib
-
- def find_library_file(self, dirs, lib, debug=0):
- shared_f = self.library_filename(lib, lib_type='shared')
- dylib_f = self.library_filename(lib, lib_type='dylib')
- static_f = self.library_filename(lib, lib_type='static')
-
- for dir in dirs:
- shared = os.path.join(dir, shared_f)
- dylib = os.path.join(dir, dylib_f)
- static = os.path.join(dir, static_f)
- # We're second-guessing the linker here, with not much hard
- # data to go on: GCC seems to prefer the shared library, so I'm
- # assuming that *all* Unix C compilers do. And of course I'm
- # ignoring even GCC's "-static" option. So sue me.
- if os.path.exists(dylib):
- return dylib
- elif os.path.exists(shared):
- return shared
- elif os.path.exists(static):
- return static
-
- # Oops, didn't find it in *any* of 'dirs'
- return None
diff --git a/sys/lib/python/distutils/util.py b/sys/lib/python/distutils/util.py
deleted file mode 100644
index 7db5fec49..000000000
--- a/sys/lib/python/distutils/util.py
+++ /dev/null
@@ -1,513 +0,0 @@
-"""distutils.util
-
-Miscellaneous utility functions -- anything that doesn't fit into
-one of the other *util.py modules.
-"""
-
-__revision__ = "$Id: util.py 46157 2006-05-23 21:54:23Z tim.peters $"
-
-import sys, os, string, re
-from distutils.errors import DistutilsPlatformError
-from distutils.dep_util import newer
-from distutils.spawn import spawn
-from distutils import log
-
-def get_platform ():
- """Return a string that identifies the current platform. This is used
- mainly to distinguish platform-specific build directories and
- platform-specific built distributions. Typically includes the OS name
- and version and the architecture (as supplied by 'os.uname()'),
- although the exact information included depends on the OS; eg. for IRIX
- the architecture isn't particularly important (IRIX only runs on SGI
- hardware), but for Linux the kernel version isn't particularly
- important.
-
- Examples of returned values:
- linux-i586
- linux-alpha (?)
- solaris-2.6-sun4u
- irix-5.3
- irix64-6.2
-
- For non-POSIX platforms, currently just returns 'sys.platform'.
- """
- if os.name != "posix" or not hasattr(os, 'uname'):
- # XXX what about the architecture? NT is Intel or Alpha,
- # Mac OS is M68k or PPC, etc.
- return sys.platform
-
- # Try to distinguish various flavours of Unix
-
- (osname, host, release, version, machine) = os.uname()
-
- # Convert the OS name to lowercase, remove '/' characters
- # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
- osname = string.lower(osname)
- osname = string.replace(osname, '/', '')
- machine = string.replace(machine, ' ', '_')
- machine = string.replace(machine, '/', '-')
-
- if osname[:5] == "linux":
- # At least on Linux/Intel, 'machine' is the processor --
- # i386, etc.
- # XXX what about Alpha, SPARC, etc?
- return "%s-%s" % (osname, machine)
- elif osname[:5] == "sunos":
- if release[0] >= "5": # SunOS 5 == Solaris 2
- osname = "solaris"
- release = "%d.%s" % (int(release[0]) - 3, release[2:])
- # fall through to standard osname-release-machine representation
- elif osname[:4] == "irix": # could be "irix64"!
- return "%s-%s" % (osname, release)
- elif osname[:3] == "aix":
- return "%s-%s.%s" % (osname, version, release)
- elif osname[:6] == "cygwin":
- osname = "cygwin"
- rel_re = re.compile (r'[\d.]+')
- m = rel_re.match(release)
- if m:
- release = m.group()
- elif osname[:6] == "darwin":
- #
- # For our purposes, we'll assume that the system version from
- # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
- # to. This makes the compatibility story a bit more sane because the
- # machine is going to compile and link as if it were
- # MACOSX_DEPLOYMENT_TARGET.
- from distutils.sysconfig import get_config_vars
- cfgvars = get_config_vars()
-
- macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
- if not macver:
- macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
-
- if not macver:
- # Get the system version. Reading this plist is a documented
- # way to get the system version (see the documentation for
- # the Gestalt Manager)
- try:
- f = open('/System/Library/CoreServices/SystemVersion.plist')
- except IOError:
- # We're on a plain darwin box, fall back to the default
- # behaviour.
- pass
- else:
- m = re.search(
- r'<key>ProductUserVisibleVersion</key>\s*' +
- r'<string>(.*?)</string>', f.read())
- f.close()
- if m is not None:
- macver = '.'.join(m.group(1).split('.')[:2])
- # else: fall back to the default behaviour
-
- if macver:
- from distutils.sysconfig import get_config_vars
- release = macver
- osname = "macosx"
-
-
- if (release + '.') < '10.4.' and \
- get_config_vars().get('UNIVERSALSDK', '').strip():
- # The universal build will build fat binaries, but not on
- # systems before 10.4
- machine = 'fat'
-
- elif machine in ('PowerPC', 'Power_Macintosh'):
- # Pick a sane name for the PPC architecture.
- machine = 'ppc'
-
- return "%s-%s-%s" % (osname, release, machine)
-
-# get_platform ()
-
-
-def convert_path (pathname):
- """Return 'pathname' as a name that will work on the native filesystem,
- i.e. split it on '/' and put it back together again using the current
- directory separator. Needed because filenames in the setup script are
- always supplied in Unix style, and have to be converted to the local
- convention before we can actually use them in the filesystem. Raises
- ValueError on non-Unix-ish systems if 'pathname' either starts or
- ends with a slash.
- """
- if os.sep == '/':
- return pathname
- if not pathname:
- return pathname
- if pathname[0] == '/':
- raise ValueError, "path '%s' cannot be absolute" % pathname
- if pathname[-1] == '/':
- raise ValueError, "path '%s' cannot end with '/'" % pathname
-
- paths = string.split(pathname, '/')
- while '.' in paths:
- paths.remove('.')
- if not paths:
- return os.curdir
- return apply(os.path.join, paths)
-
-# convert_path ()
-
-
-def change_root (new_root, pathname):
- """Return 'pathname' with 'new_root' prepended. If 'pathname' is
- relative, this is equivalent to "os.path.join(new_root,pathname)".
- Otherwise, it requires making 'pathname' relative and then joining the
- two, which is tricky on DOS/Windows and Mac OS.
- """
- if os.name == 'posix':
- if not os.path.isabs(pathname):
- return os.path.join(new_root, pathname)
- else:
- return os.path.join(new_root, pathname[1:])
-
- elif os.name == 'nt':
- (drive, path) = os.path.splitdrive(pathname)
- if path[0] == '\\':
- path = path[1:]
- return os.path.join(new_root, path)
-
- elif os.name == 'os2':
- (drive, path) = os.path.splitdrive(pathname)
- if path[0] == os.sep:
- path = path[1:]
- return os.path.join(new_root, path)
-
- elif os.name == 'mac':
- if not os.path.isabs(pathname):
- return os.path.join(new_root, pathname)
- else:
- # Chop off volume name from start of path
- elements = string.split(pathname, ":", 1)
- pathname = ":" + elements[1]
- return os.path.join(new_root, pathname)
-
- else:
- raise DistutilsPlatformError, \
- "nothing known about platform '%s'" % os.name
-
-
-_environ_checked = 0
-def check_environ ():
- """Ensure that 'os.environ' has all the environment variables we
- guarantee that users can use in config files, command-line options,
- etc. Currently this includes:
- HOME - user's home directory (Unix only)
- PLAT - description of the current platform, including hardware
- and OS (see 'get_platform()')
- """
- global _environ_checked
- if _environ_checked:
- return
-
- if os.name == 'posix' and not os.environ.has_key('HOME'):
- import pwd
- os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
-
- if not os.environ.has_key('PLAT'):
- os.environ['PLAT'] = get_platform()
-
- _environ_checked = 1
-
-
-def subst_vars (s, local_vars):
- """Perform shell/Perl-style variable substitution on 'string'. Every
- occurrence of '$' followed by a name is considered a variable, and
- variable is substituted by the value found in the 'local_vars'
- dictionary, or in 'os.environ' if it's not in 'local_vars'.
- 'os.environ' is first checked/augmented to guarantee that it contains
- certain values: see 'check_environ()'. Raise ValueError for any
- variables not found in either 'local_vars' or 'os.environ'.
- """
- check_environ()
- def _subst (match, local_vars=local_vars):
- var_name = match.group(1)
- if local_vars.has_key(var_name):
- return str(local_vars[var_name])
- else:
- return os.environ[var_name]
-
- try:
- return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
- except KeyError, var:
- raise ValueError, "invalid variable '$%s'" % var
-
-# subst_vars ()
-
-
-def grok_environment_error (exc, prefix="error: "):
- """Generate a useful error message from an EnvironmentError (IOError or
- OSError) exception object. Handles Python 1.5.1 and 1.5.2 styles, and
- does what it can to deal with exception objects that don't have a
- filename (which happens when the error is due to a two-file operation,
- such as 'rename()' or 'link()'. Returns the error message as a string
- prefixed with 'prefix'.
- """
- # check for Python 1.5.2-style {IO,OS}Error exception objects
- if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
- if exc.filename:
- error = prefix + "%s: %s" % (exc.filename, exc.strerror)
- else:
- # two-argument functions in posix module don't
- # include the filename in the exception object!
- error = prefix + "%s" % exc.strerror
- else:
- error = prefix + str(exc[-1])
-
- return error
-
-
-# Needed by 'split_quoted()'
-_wordchars_re = _squote_re = _dquote_re = None
-def _init_regex():
- global _wordchars_re, _squote_re, _dquote_re
- _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
- _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
- _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
-
-def split_quoted (s):
- """Split a string up according to Unix shell-like rules for quotes and
- backslashes. In short: words are delimited by spaces, as long as those
- spaces are not escaped by a backslash, or inside a quoted string.
- Single and double quotes are equivalent, and the quote characters can
- be backslash-escaped. The backslash is stripped from any two-character
- escape sequence, leaving only the escaped character. The quote
- characters are stripped from any quoted string. Returns a list of
- words.
- """
-
- # This is a nice algorithm for splitting up a single string, since it
- # doesn't require character-by-character examination. It was a little
- # bit of a brain-bender to get it working right, though...
- if _wordchars_re is None: _init_regex()
-
- s = string.strip(s)
- words = []
- pos = 0
-
- while s:
- m = _wordchars_re.match(s, pos)
- end = m.end()
- if end == len(s):
- words.append(s[:end])
- break
-
- if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
- words.append(s[:end]) # we definitely have a word delimiter
- s = string.lstrip(s[end:])
- pos = 0
-
- elif s[end] == '\\': # preserve whatever is being escaped;
- # will become part of the current word
- s = s[:end] + s[end+1:]
- pos = end+1
-
- else:
- if s[end] == "'": # slurp singly-quoted string
- m = _squote_re.match(s, end)
- elif s[end] == '"': # slurp doubly-quoted string
- m = _dquote_re.match(s, end)
- else:
- raise RuntimeError, \
- "this can't happen (bad char '%c')" % s[end]
-
- if m is None:
- raise ValueError, \
- "bad string (mismatched %s quotes?)" % s[end]
-
- (beg, end) = m.span()
- s = s[:beg] + s[beg+1:end-1] + s[end:]
- pos = m.end() - 2
-
- if pos >= len(s):
- words.append(s)
- break
-
- return words
-
-# split_quoted ()
-
-
-def execute (func, args, msg=None, verbose=0, dry_run=0):
- """Perform some action that affects the outside world (eg. by
- writing to the filesystem). Such actions are special because they
- are disabled by the 'dry_run' flag. This method takes care of all
- that bureaucracy for you; all you have to do is supply the
- function to call and an argument tuple for it (to embody the
- "external action" being performed), and an optional message to
- print.
- """
- if msg is None:
- msg = "%s%r" % (func.__name__, args)
- if msg[-2:] == ',)': # correct for singleton tuple
- msg = msg[0:-2] + ')'
-
- log.info(msg)
- if not dry_run:
- apply(func, args)
-
-
-def strtobool (val):
- """Convert a string representation of truth to true (1) or false (0).
-
- True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
- are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
- 'val' is anything else.
- """
- val = string.lower(val)
- if val in ('y', 'yes', 't', 'true', 'on', '1'):
- return 1
- elif val in ('n', 'no', 'f', 'false', 'off', '0'):
- return 0
- else:
- raise ValueError, "invalid truth value %r" % (val,)
-
-
-def byte_compile (py_files,
- optimize=0, force=0,
- prefix=None, base_dir=None,
- verbose=1, dry_run=0,
- direct=None):
- """Byte-compile a collection of Python source files to either .pyc
- or .pyo files in the same directory. 'py_files' is a list of files
- to compile; any files that don't end in ".py" are silently skipped.
- 'optimize' must be one of the following:
- 0 - don't optimize (generate .pyc)
- 1 - normal optimization (like "python -O")
- 2 - extra optimization (like "python -OO")
- If 'force' is true, all files are recompiled regardless of
- timestamps.
-
- The source filename encoded in each bytecode file defaults to the
- filenames listed in 'py_files'; you can modify these with 'prefix' and
- 'basedir'. 'prefix' is a string that will be stripped off of each
- source filename, and 'base_dir' is a directory name that will be
- prepended (after 'prefix' is stripped). You can supply either or both
- (or neither) of 'prefix' and 'base_dir', as you wish.
-
- If 'dry_run' is true, doesn't actually do anything that would
- affect the filesystem.
-
- Byte-compilation is either done directly in this interpreter process
- with the standard py_compile module, or indirectly by writing a
- temporary script and executing it. Normally, you should let
- 'byte_compile()' figure out to use direct compilation or not (see
- the source for details). The 'direct' flag is used by the script
- generated in indirect mode; unless you know what you're doing, leave
- it set to None.
- """
-
- # First, if the caller didn't force us into direct or indirect mode,
- # figure out which mode we should be in. We take a conservative
- # approach: choose direct mode *only* if the current interpreter is
- # in debug mode and optimize is 0. If we're not in debug mode (-O
- # or -OO), we don't know which level of optimization this
- # interpreter is running with, so we can't do direct
- # byte-compilation and be certain that it's the right thing. Thus,
- # always compile indirectly if the current interpreter is in either
- # optimize mode, or if either optimization level was requested by
- # the caller.
- if direct is None:
- direct = (__debug__ and optimize == 0)
-
- # "Indirect" byte-compilation: write a temporary script and then
- # run it with the appropriate flags.
- if not direct:
- try:
- from tempfile import mkstemp
- (script_fd, script_name) = mkstemp(".py")
- except ImportError:
- from tempfile import mktemp
- (script_fd, script_name) = None, mktemp(".py")
- log.info("writing byte-compilation script '%s'", script_name)
- if not dry_run:
- if script_fd is not None:
- script = os.fdopen(script_fd, "w")
- else:
- script = open(script_name, "w")
-
- script.write("""\
-from distutils.util import byte_compile
-files = [
-""")
-
- # XXX would be nice to write absolute filenames, just for
- # safety's sake (script should be more robust in the face of
- # chdir'ing before running it). But this requires abspath'ing
- # 'prefix' as well, and that breaks the hack in build_lib's
- # 'byte_compile()' method that carefully tacks on a trailing
- # slash (os.sep really) to make sure the prefix here is "just
- # right". This whole prefix business is rather delicate -- the
- # problem is that it's really a directory, but I'm treating it
- # as a dumb string, so trailing slashes and so forth matter.
-
- #py_files = map(os.path.abspath, py_files)
- #if prefix:
- # prefix = os.path.abspath(prefix)
-
- script.write(string.join(map(repr, py_files), ",\n") + "]\n")
- script.write("""
-byte_compile(files, optimize=%r, force=%r,
- prefix=%r, base_dir=%r,
- verbose=%r, dry_run=0,
- direct=1)
-""" % (optimize, force, prefix, base_dir, verbose))
-
- script.close()
-
- cmd = [sys.executable, script_name]
- if optimize == 1:
- cmd.insert(1, "-O")
- elif optimize == 2:
- cmd.insert(1, "-OO")
- spawn(cmd, dry_run=dry_run)
- execute(os.remove, (script_name,), "removing %s" % script_name,
- dry_run=dry_run)
-
- # "Direct" byte-compilation: use the py_compile module to compile
- # right here, right now. Note that the script generated in indirect
- # mode simply calls 'byte_compile()' in direct mode, a weird sort of
- # cross-process recursion. Hey, it works!
- else:
- from py_compile import compile
-
- for file in py_files:
- if file[-3:] != ".py":
- # This lets us be lazy and not filter filenames in
- # the "install_lib" command.
- continue
-
- # Terminology from the py_compile module:
- # cfile - byte-compiled file
- # dfile - purported source filename (same as 'file' by default)
- cfile = file + (__debug__ and "c" or "o")
- dfile = file
- if prefix:
- if file[:len(prefix)] != prefix:
- raise ValueError, \
- ("invalid prefix: filename %r doesn't start with %r"
- % (file, prefix))
- dfile = dfile[len(prefix):]
- if base_dir:
- dfile = os.path.join(base_dir, dfile)
-
- cfile_base = os.path.basename(cfile)
- if direct:
- if force or newer(file, cfile):
- log.info("byte-compiling %s to %s", file, cfile_base)
- if not dry_run:
- compile(file, cfile, dfile)
- else:
- log.debug("skipping byte-compilation of %s to %s",
- file, cfile_base)
-
-# byte_compile ()
-
-def rfc822_escape (header):
- """Return a version of the string escaped for inclusion in an
- RFC-822 header, by ensuring there are 8 spaces space after each newline.
- """
- lines = string.split(header, '\n')
- lines = map(string.strip, lines)
- header = string.join(lines, '\n' + 8*' ')
- return header
diff --git a/sys/lib/python/distutils/version.py b/sys/lib/python/distutils/version.py
deleted file mode 100644
index 7689097fe..000000000
--- a/sys/lib/python/distutils/version.py
+++ /dev/null
@@ -1,299 +0,0 @@
-#
-# distutils/version.py
-#
-# Implements multiple version numbering conventions for the
-# Python Module Distribution Utilities.
-#
-# $Id: version.py 29687 2002-11-14 02:25:42Z akuchling $
-#
-
-"""Provides classes to represent module version numbers (one class for
-each style of version numbering). There are currently two such classes
-implemented: StrictVersion and LooseVersion.
-
-Every version number class implements the following interface:
- * the 'parse' method takes a string and parses it to some internal
- representation; if the string is an invalid version number,
- 'parse' raises a ValueError exception
- * the class constructor takes an optional string argument which,
- if supplied, is passed to 'parse'
- * __str__ reconstructs the string that was passed to 'parse' (or
- an equivalent string -- ie. one that will generate an equivalent
- version number instance)
- * __repr__ generates Python code to recreate the version number instance
- * __cmp__ compares the current instance with either another instance
- of the same class or a string (which will be parsed to an instance
- of the same class, thus must follow the same rules)
-"""
-
-import string, re
-from types import StringType
-
-class Version:
- """Abstract base class for version numbering classes. Just provides
- constructor (__init__) and reproducer (__repr__), because those
- seem to be the same for all version numbering classes.
- """
-
- def __init__ (self, vstring=None):
- if vstring:
- self.parse(vstring)
-
- def __repr__ (self):
- return "%s ('%s')" % (self.__class__.__name__, str(self))
-
-
-# Interface for version-number classes -- must be implemented
-# by the following classes (the concrete ones -- Version should
-# be treated as an abstract class).
-# __init__ (string) - create and take same action as 'parse'
-# (string parameter is optional)
-# parse (string) - convert a string representation to whatever
-# internal representation is appropriate for
-# this style of version numbering
-# __str__ (self) - convert back to a string; should be very similar
-# (if not identical to) the string supplied to parse
-# __repr__ (self) - generate Python code to recreate
-# the instance
-# __cmp__ (self, other) - compare two version numbers ('other' may
-# be an unparsed version string, or another
-# instance of your version class)
-
-
-class StrictVersion (Version):
-
- """Version numbering for anal retentives and software idealists.
- Implements the standard interface for version number classes as
- described above. A version number consists of two or three
- dot-separated numeric components, with an optional "pre-release" tag
- on the end. The pre-release tag consists of the letter 'a' or 'b'
- followed by a number. If the numeric components of two version
- numbers are equal, then one with a pre-release tag will always
- be deemed earlier (lesser) than one without.
-
- The following are valid version numbers (shown in the order that
- would be obtained by sorting according to the supplied cmp function):
-
- 0.4 0.4.0 (these two are equivalent)
- 0.4.1
- 0.5a1
- 0.5b3
- 0.5
- 0.9.6
- 1.0
- 1.0.4a3
- 1.0.4b1
- 1.0.4
-
- The following are examples of invalid version numbers:
-
- 1
- 2.7.2.2
- 1.3.a4
- 1.3pl1
- 1.3c4
-
- The rationale for this version numbering system will be explained
- in the distutils documentation.
- """
-
- version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
- re.VERBOSE)
-
-
- def parse (self, vstring):
- match = self.version_re.match(vstring)
- if not match:
- raise ValueError, "invalid version number '%s'" % vstring
-
- (major, minor, patch, prerelease, prerelease_num) = \
- match.group(1, 2, 4, 5, 6)
-
- if patch:
- self.version = tuple(map(string.atoi, [major, minor, patch]))
- else:
- self.version = tuple(map(string.atoi, [major, minor]) + [0])
-
- if prerelease:
- self.prerelease = (prerelease[0], string.atoi(prerelease_num))
- else:
- self.prerelease = None
-
-
- def __str__ (self):
-
- if self.version[2] == 0:
- vstring = string.join(map(str, self.version[0:2]), '.')
- else:
- vstring = string.join(map(str, self.version), '.')
-
- if self.prerelease:
- vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
-
- return vstring
-
-
- def __cmp__ (self, other):
- if isinstance(other, StringType):
- other = StrictVersion(other)
-
- compare = cmp(self.version, other.version)
- if (compare == 0): # have to compare prerelease
-
- # case 1: neither has prerelease; they're equal
- # case 2: self has prerelease, other doesn't; other is greater
- # case 3: self doesn't have prerelease, other does: self is greater
- # case 4: both have prerelease: must compare them!
-
- if (not self.prerelease and not other.prerelease):
- return 0
- elif (self.prerelease and not other.prerelease):
- return -1
- elif (not self.prerelease and other.prerelease):
- return 1
- elif (self.prerelease and other.prerelease):
- return cmp(self.prerelease, other.prerelease)
-
- else: # numeric versions don't match --
- return compare # prerelease stuff doesn't matter
-
-
-# end class StrictVersion
-
-
-# The rules according to Greg Stein:
-# 1) a version number has 1 or more numbers separate by a period or by
-# sequences of letters. If only periods, then these are compared
-# left-to-right to determine an ordering.
-# 2) sequences of letters are part of the tuple for comparison and are
-# compared lexicographically
-# 3) recognize the numeric components may have leading zeroes
-#
-# The LooseVersion class below implements these rules: a version number
-# string is split up into a tuple of integer and string components, and
-# comparison is a simple tuple comparison. This means that version
-# numbers behave in a predictable and obvious way, but a way that might
-# not necessarily be how people *want* version numbers to behave. There
-# wouldn't be a problem if people could stick to purely numeric version
-# numbers: just split on period and compare the numbers as tuples.
-# However, people insist on putting letters into their version numbers;
-# the most common purpose seems to be:
-# - indicating a "pre-release" version
-# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
-# - indicating a post-release patch ('p', 'pl', 'patch')
-# but of course this can't cover all version number schemes, and there's
-# no way to know what a programmer means without asking him.
-#
-# The problem is what to do with letters (and other non-numeric
-# characters) in a version number. The current implementation does the
-# obvious and predictable thing: keep them as strings and compare
-# lexically within a tuple comparison. This has the desired effect if
-# an appended letter sequence implies something "post-release":
-# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
-#
-# However, if letters in a version number imply a pre-release version,
-# the "obvious" thing isn't correct. Eg. you would expect that
-# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
-# implemented here, this just isn't so.
-#
-# Two possible solutions come to mind. The first is to tie the
-# comparison algorithm to a particular set of semantic rules, as has
-# been done in the StrictVersion class above. This works great as long
-# as everyone can go along with bondage and discipline. Hopefully a
-# (large) subset of Python module programmers will agree that the
-# particular flavour of bondage and discipline provided by StrictVersion
-# provides enough benefit to be worth using, and will submit their
-# version numbering scheme to its domination. The free-thinking
-# anarchists in the lot will never give in, though, and something needs
-# to be done to accommodate them.
-#
-# Perhaps a "moderately strict" version class could be implemented that
-# lets almost anything slide (syntactically), and makes some heuristic
-# assumptions about non-digits in version number strings. This could
-# sink into special-case-hell, though; if I was as talented and
-# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
-# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
-# just as happy dealing with things like "2g6" and "1.13++". I don't
-# think I'm smart enough to do it right though.
-#
-# In any case, I've coded the test suite for this module (see
-# ../test/test_version.py) specifically to fail on things like comparing
-# "1.2a2" and "1.2". That's not because the *code* is doing anything
-# wrong, it's because the simple, obvious design doesn't match my
-# complicated, hairy expectations for real-world version numbers. It
-# would be a snap to fix the test suite to say, "Yep, LooseVersion does
-# the Right Thing" (ie. the code matches the conception). But I'd rather
-# have a conception that matches common notions about version numbers.
-
-class LooseVersion (Version):
-
- """Version numbering for anarchists and software realists.
- Implements the standard interface for version number classes as
- described above. A version number consists of a series of numbers,
- separated by either periods or strings of letters. When comparing
- version numbers, the numeric components will be compared
- numerically, and the alphabetic components lexically. The following
- are all valid version numbers, in no particular order:
-
- 1.5.1
- 1.5.2b2
- 161
- 3.10a
- 8.02
- 3.4j
- 1996.07.12
- 3.2.pl0
- 3.1.1.6
- 2g6
- 11g
- 0.960923
- 2.2beta29
- 1.13++
- 5.5.kw
- 2.0b1pl0
-
- In fact, there is no such thing as an invalid version number under
- this scheme; the rules for comparison are simple and predictable,
- but may not always give the results you want (for some definition
- of "want").
- """
-
- component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
-
- def __init__ (self, vstring=None):
- if vstring:
- self.parse(vstring)
-
-
- def parse (self, vstring):
- # I've given up on thinking I can reconstruct the version string
- # from the parsed tuple -- so I just store the string here for
- # use by __str__
- self.vstring = vstring
- components = filter(lambda x: x and x != '.',
- self.component_re.split(vstring))
- for i in range(len(components)):
- try:
- components[i] = int(components[i])
- except ValueError:
- pass
-
- self.version = components
-
-
- def __str__ (self):
- return self.vstring
-
-
- def __repr__ (self):
- return "LooseVersion ('%s')" % str(self)
-
-
- def __cmp__ (self, other):
- if isinstance(other, StringType):
- other = LooseVersion(other)
-
- return cmp(self.version, other.version)
-
-
-# end class LooseVersion
diff --git a/sys/lib/python/distutils/versionpredicate.py b/sys/lib/python/distutils/versionpredicate.py
deleted file mode 100644
index ba8b6c021..000000000
--- a/sys/lib/python/distutils/versionpredicate.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""Module for parsing and testing package version predicate strings.
-"""
-import re
-import distutils.version
-import operator
-
-
-re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)")
-# (package) (rest)
-
-re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses
-re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$")
-# (comp) (version)
-
-
-def splitUp(pred):
- """Parse a single version comparison.
-
- Return (comparison string, StrictVersion)
- """
- res = re_splitComparison.match(pred)
- if not res:
- raise ValueError("bad package restriction syntax: %r" % pred)
- comp, verStr = res.groups()
- return (comp, distutils.version.StrictVersion(verStr))
-
-compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
- ">": operator.gt, ">=": operator.ge, "!=": operator.ne}
-
-class VersionPredicate:
- """Parse and test package version predicates.
-
- >>> v = VersionPredicate('pyepat.abc (>1.0, <3333.3a1, !=1555.1b3)')
-
- The `name` attribute provides the full dotted name that is given::
-
- >>> v.name
- 'pyepat.abc'
-
- The str() of a `VersionPredicate` provides a normalized
- human-readable version of the expression::
-
- >>> print v
- pyepat.abc (> 1.0, < 3333.3a1, != 1555.1b3)
-
- The `satisfied_by()` method can be used to determine with a given
- version number is included in the set described by the version
- restrictions::
-
- >>> v.satisfied_by('1.1')
- True
- >>> v.satisfied_by('1.4')
- True
- >>> v.satisfied_by('1.0')
- False
- >>> v.satisfied_by('4444.4')
- False
- >>> v.satisfied_by('1555.1b3')
- False
-
- `VersionPredicate` is flexible in accepting extra whitespace::
-
- >>> v = VersionPredicate(' pat( == 0.1 ) ')
- >>> v.name
- 'pat'
- >>> v.satisfied_by('0.1')
- True
- >>> v.satisfied_by('0.2')
- False
-
- If any version numbers passed in do not conform to the
- restrictions of `StrictVersion`, a `ValueError` is raised::
-
- >>> v = VersionPredicate('p1.p2.p3.p4(>=1.0, <=1.3a1, !=1.2zb3)')
- Traceback (most recent call last):
- ...
- ValueError: invalid version number '1.2zb3'
-
- It the module or package name given does not conform to what's
- allowed as a legal module or package name, `ValueError` is
- raised::
-
- >>> v = VersionPredicate('foo-bar')
- Traceback (most recent call last):
- ...
- ValueError: expected parenthesized list: '-bar'
-
- >>> v = VersionPredicate('foo bar (12.21)')
- Traceback (most recent call last):
- ...
- ValueError: expected parenthesized list: 'bar (12.21)'
-
- """
-
- def __init__(self, versionPredicateStr):
- """Parse a version predicate string.
- """
- # Fields:
- # name: package name
- # pred: list of (comparison string, StrictVersion)
-
- versionPredicateStr = versionPredicateStr.strip()
- if not versionPredicateStr:
- raise ValueError("empty package restriction")
- match = re_validPackage.match(versionPredicateStr)
- if not match:
- raise ValueError("bad package name in %r" % versionPredicateStr)
- self.name, paren = match.groups()
- paren = paren.strip()
- if paren:
- match = re_paren.match(paren)
- if not match:
- raise ValueError("expected parenthesized list: %r" % paren)
- str = match.groups()[0]
- self.pred = [splitUp(aPred) for aPred in str.split(",")]
- if not self.pred:
- raise ValueError("empty parenthesized list in %r"
- % versionPredicateStr)
- else:
- self.pred = []
-
- def __str__(self):
- if self.pred:
- seq = [cond + " " + str(ver) for cond, ver in self.pred]
- return self.name + " (" + ", ".join(seq) + ")"
- else:
- return self.name
-
- def satisfied_by(self, version):
- """True if version is compatible with all the predicates in self.
- The parameter version must be acceptable to the StrictVersion
- constructor. It may be either a string or StrictVersion.
- """
- for cond, ver in self.pred:
- if not compmap[cond](version, ver):
- return False
- return True
-
-
-_provision_rx = None
-
-def split_provision(value):
- """Return the name and optional version number of a provision.
-
- The version number, if given, will be returned as a `StrictVersion`
- instance, otherwise it will be `None`.
-
- >>> split_provision('mypkg')
- ('mypkg', None)
- >>> split_provision(' mypkg( 1.2 ) ')
- ('mypkg', StrictVersion ('1.2'))
- """
- global _provision_rx
- if _provision_rx is None:
- _provision_rx = re.compile(
- "([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$")
- value = value.strip()
- m = _provision_rx.match(value)
- if not m:
- raise ValueError("illegal provides specification: %r" % value)
- ver = m.group(2) or None
- if ver:
- ver = distutils.version.StrictVersion(ver)
- return m.group(1), ver
diff --git a/sys/lib/python/doctest.py b/sys/lib/python/doctest.py
deleted file mode 100644
index 32d076aa7..000000000
--- a/sys/lib/python/doctest.py
+++ /dev/null
@@ -1,2637 +0,0 @@
-# Module doctest.
-# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
-# Major enhancements and refactoring by:
-# Jim Fulton
-# Edward Loper
-
-# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
-
-r"""Module doctest -- a framework for running examples in docstrings.
-
-In simplest use, end each module M to be tested with:
-
-def _test():
- import doctest
- doctest.testmod()
-
-if __name__ == "__main__":
- _test()
-
-Then running the module as a script will cause the examples in the
-docstrings to get executed and verified:
-
-python M.py
-
-This won't display anything unless an example fails, in which case the
-failing example(s) and the cause(s) of the failure(s) are printed to stdout
-(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
-line of output is "Test failed.".
-
-Run it with the -v switch instead:
-
-python M.py -v
-
-and a detailed report of all examples tried is printed to stdout, along
-with assorted summaries at the end.
-
-You can force verbose mode by passing "verbose=True" to testmod, or prohibit
-it by passing "verbose=False". In either of those cases, sys.argv is not
-examined by testmod.
-
-There are a variety of other ways to run doctests, including integration
-with the unittest framework, and support for running non-Python text
-files containing doctests. There are also many ways to override parts
-of doctest's default behaviors. See the Library Reference Manual for
-details.
-"""
-
-__docformat__ = 'reStructuredText en'
-
-__all__ = [
- # 0, Option Flags
- 'register_optionflag',
- 'DONT_ACCEPT_TRUE_FOR_1',
- 'DONT_ACCEPT_BLANKLINE',
- 'NORMALIZE_WHITESPACE',
- 'ELLIPSIS',
- 'SKIP',
- 'IGNORE_EXCEPTION_DETAIL',
- 'COMPARISON_FLAGS',
- 'REPORT_UDIFF',
- 'REPORT_CDIFF',
- 'REPORT_NDIFF',
- 'REPORT_ONLY_FIRST_FAILURE',
- 'REPORTING_FLAGS',
- # 1. Utility Functions
- # 2. Example & DocTest
- 'Example',
- 'DocTest',
- # 3. Doctest Parser
- 'DocTestParser',
- # 4. Doctest Finder
- 'DocTestFinder',
- # 5. Doctest Runner
- 'DocTestRunner',
- 'OutputChecker',
- 'DocTestFailure',
- 'UnexpectedException',
- 'DebugRunner',
- # 6. Test Functions
- 'testmod',
- 'testfile',
- 'run_docstring_examples',
- # 7. Tester
- 'Tester',
- # 8. Unittest Support
- 'DocTestSuite',
- 'DocFileSuite',
- 'set_unittest_reportflags',
- # 9. Debugging Support
- 'script_from_examples',
- 'testsource',
- 'debug_src',
- 'debug',
-]
-
-import __future__
-
-import sys, traceback, inspect, linecache, os, re
-import unittest, difflib, pdb, tempfile
-import warnings
-from StringIO import StringIO
-
-# There are 4 basic classes:
-# - Example: a <source, want> pair, plus an intra-docstring line number.
-# - DocTest: a collection of examples, parsed from a docstring, plus
-# info about where the docstring came from (name, filename, lineno).
-# - DocTestFinder: extracts DocTests from a given object's docstring and
-# its contained objects' docstrings.
-# - DocTestRunner: runs DocTest cases, and accumulates statistics.
-#
-# So the basic picture is:
-#
-# list of:
-# +------+ +---------+ +-------+
-# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
-# +------+ +---------+ +-------+
-# | Example |
-# | ... |
-# | Example |
-# +---------+
-
-# Option constants.
-
-OPTIONFLAGS_BY_NAME = {}
-def register_optionflag(name):
- # Create a new flag unless `name` is already known.
- return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
-
-DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
-DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
-NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
-ELLIPSIS = register_optionflag('ELLIPSIS')
-SKIP = register_optionflag('SKIP')
-IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
-
-COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
- DONT_ACCEPT_BLANKLINE |
- NORMALIZE_WHITESPACE |
- ELLIPSIS |
- SKIP |
- IGNORE_EXCEPTION_DETAIL)
-
-REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
-REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
-REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
-REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
-
-REPORTING_FLAGS = (REPORT_UDIFF |
- REPORT_CDIFF |
- REPORT_NDIFF |
- REPORT_ONLY_FIRST_FAILURE)
-
-# Special string markers for use in `want` strings:
-BLANKLINE_MARKER = '<BLANKLINE>'
-ELLIPSIS_MARKER = '...'
-
-######################################################################
-## Table of Contents
-######################################################################
-# 1. Utility Functions
-# 2. Example & DocTest -- store test cases
-# 3. DocTest Parser -- extracts examples from strings
-# 4. DocTest Finder -- extracts test cases from objects
-# 5. DocTest Runner -- runs test cases
-# 6. Test Functions -- convenient wrappers for testing
-# 7. Tester Class -- for backwards compatibility
-# 8. Unittest Support
-# 9. Debugging Support
-# 10. Example Usage
-
-######################################################################
-## 1. Utility Functions
-######################################################################
-
-def _extract_future_flags(globs):
- """
- Return the compiler-flags associated with the future features that
- have been imported into the given namespace (globs).
- """
- flags = 0
- for fname in __future__.all_feature_names:
- feature = globs.get(fname, None)
- if feature is getattr(__future__, fname):
- flags |= feature.compiler_flag
- return flags
-
-def _normalize_module(module, depth=2):
- """
- Return the module specified by `module`. In particular:
- - If `module` is a module, then return module.
- - If `module` is a string, then import and return the
- module with that name.
- - If `module` is None, then return the calling module.
- The calling module is assumed to be the module of
- the stack frame at the given depth in the call stack.
- """
- if inspect.ismodule(module):
- return module
- elif isinstance(module, (str, unicode)):
- return __import__(module, globals(), locals(), ["*"])
- elif module is None:
- return sys.modules[sys._getframe(depth).f_globals['__name__']]
- else:
- raise TypeError("Expected a module, string, or None")
-
-def _load_testfile(filename, package, module_relative):
- if module_relative:
- package = _normalize_module(package, 3)
- filename = _module_relative_path(package, filename)
- if hasattr(package, '__loader__'):
- if hasattr(package.__loader__, 'get_data'):
- return package.__loader__.get_data(filename), filename
- return open(filename).read(), filename
-
-def _indent(s, indent=4):
- """
- Add the given number of space characters to the beginning every
- non-blank line in `s`, and return the result.
- """
- # This regexp matches the start of non-blank lines:
- return re.sub('(?m)^(?!$)', indent*' ', s)
-
-def _exception_traceback(exc_info):
- """
- Return a string containing a traceback message for the given
- exc_info tuple (as returned by sys.exc_info()).
- """
- # Get a traceback message.
- excout = StringIO()
- exc_type, exc_val, exc_tb = exc_info
- traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
- return excout.getvalue()
-
-# Override some StringIO methods.
-class _SpoofOut(StringIO):
- def getvalue(self):
- result = StringIO.getvalue(self)
- # If anything at all was written, make sure there's a trailing
- # newline. There's no way for the expected output to indicate
- # that a trailing newline is missing.
- if result and not result.endswith("\n"):
- result += "\n"
- # Prevent softspace from screwing up the next test case, in
- # case they used print with a trailing comma in an example.
- if hasattr(self, "softspace"):
- del self.softspace
- return result
-
- def truncate(self, size=None):
- StringIO.truncate(self, size)
- if hasattr(self, "softspace"):
- del self.softspace
-
-# Worst-case linear-time ellipsis matching.
-def _ellipsis_match(want, got):
- """
- Essentially the only subtle case:
- >>> _ellipsis_match('aa...aa', 'aaa')
- False
- """
- if ELLIPSIS_MARKER not in want:
- return want == got
-
- # Find "the real" strings.
- ws = want.split(ELLIPSIS_MARKER)
- assert len(ws) >= 2
-
- # Deal with exact matches possibly needed at one or both ends.
- startpos, endpos = 0, len(got)
- w = ws[0]
- if w: # starts with exact match
- if got.startswith(w):
- startpos = len(w)
- del ws[0]
- else:
- return False
- w = ws[-1]
- if w: # ends with exact match
- if got.endswith(w):
- endpos -= len(w)
- del ws[-1]
- else:
- return False
-
- if startpos > endpos:
- # Exact end matches required more characters than we have, as in
- # _ellipsis_match('aa...aa', 'aaa')
- return False
-
- # For the rest, we only need to find the leftmost non-overlapping
- # match for each piece. If there's no overall match that way alone,
- # there's no overall match period.
- for w in ws:
- # w may be '' at times, if there are consecutive ellipses, or
- # due to an ellipsis at the start or end of `want`. That's OK.
- # Search for an empty string succeeds, and doesn't change startpos.
- startpos = got.find(w, startpos, endpos)
- if startpos < 0:
- return False
- startpos += len(w)
-
- return True
-
-def _comment_line(line):
- "Return a commented form of the given line"
- line = line.rstrip()
- if line:
- return '# '+line
- else:
- return '#'
-
-class _OutputRedirectingPdb(pdb.Pdb):
- """
- A specialized version of the python debugger that redirects stdout
- to a given stream when interacting with the user. Stdout is *not*
- redirected when traced code is executed.
- """
- def __init__(self, out):
- self.__out = out
- pdb.Pdb.__init__(self, stdout=out)
-
- def trace_dispatch(self, *args):
- # Redirect stdout to the given stream.
- save_stdout = sys.stdout
- sys.stdout = self.__out
- # Call Pdb's trace dispatch method.
- try:
- return pdb.Pdb.trace_dispatch(self, *args)
- finally:
- sys.stdout = save_stdout
-
-# [XX] Normalize with respect to os.path.pardir?
-def _module_relative_path(module, path):
- if not inspect.ismodule(module):
- raise TypeError, 'Expected a module: %r' % module
- if path.startswith('/'):
- raise ValueError, 'Module-relative files may not have absolute paths'
-
- # Find the base directory for the path.
- if hasattr(module, '__file__'):
- # A normal module/package
- basedir = os.path.split(module.__file__)[0]
- elif module.__name__ == '__main__':
- # An interactive session.
- if len(sys.argv)>0 and sys.argv[0] != '':
- basedir = os.path.split(sys.argv[0])[0]
- else:
- basedir = os.curdir
- else:
- # A module w/o __file__ (this includes builtins)
- raise ValueError("Can't resolve paths relative to the module " +
- module + " (it has no __file__)")
-
- # Combine the base directory and the path.
- return os.path.join(basedir, *(path.split('/')))
-
-######################################################################
-## 2. Example & DocTest
-######################################################################
-## - An "example" is a <source, want> pair, where "source" is a
-## fragment of source code, and "want" is the expected output for
-## "source." The Example class also includes information about
-## where the example was extracted from.
-##
-## - A "doctest" is a collection of examples, typically extracted from
-## a string (such as an object's docstring). The DocTest class also
-## includes information about where the string was extracted from.
-
-class Example:
- """
- A single doctest example, consisting of source code and expected
- output. `Example` defines the following attributes:
-
- - source: A single Python statement, always ending with a newline.
- The constructor adds a newline if needed.
-
- - want: The expected output from running the source code (either
- from stdout, or a traceback in case of exception). `want` ends
- with a newline unless it's empty, in which case it's an empty
- string. The constructor adds a newline if needed.
-
- - exc_msg: The exception message generated by the example, if
- the example is expected to generate an exception; or `None` if
- it is not expected to generate an exception. This exception
- message is compared against the return value of
- `traceback.format_exception_only()`. `exc_msg` ends with a
- newline unless it's `None`. The constructor adds a newline
- if needed.
-
- - lineno: The line number within the DocTest string containing
- this Example where the Example begins. This line number is
- zero-based, with respect to the beginning of the DocTest.
-
- - indent: The example's indentation in the DocTest string.
- I.e., the number of space characters that preceed the
- example's first prompt.
-
- - options: A dictionary mapping from option flags to True or
- False, which is used to override default options for this
- example. Any option flags not contained in this dictionary
- are left at their default value (as specified by the
- DocTestRunner's optionflags). By default, no options are set.
- """
- def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
- options=None):
- # Normalize inputs.
- if not source.endswith('\n'):
- source += '\n'
- if want and not want.endswith('\n'):
- want += '\n'
- if exc_msg is not None and not exc_msg.endswith('\n'):
- exc_msg += '\n'
- # Store properties.
- self.source = source
- self.want = want
- self.lineno = lineno
- self.indent = indent
- if options is None: options = {}
- self.options = options
- self.exc_msg = exc_msg
-
-class DocTest:
- """
- A collection of doctest examples that should be run in a single
- namespace. Each `DocTest` defines the following attributes:
-
- - examples: the list of examples.
-
- - globs: The namespace (aka globals) that the examples should
- be run in.
-
- - name: A name identifying the DocTest (typically, the name of
- the object whose docstring this DocTest was extracted from).
-
- - filename: The name of the file that this DocTest was extracted
- from, or `None` if the filename is unknown.
-
- - lineno: The line number within filename where this DocTest
- begins, or `None` if the line number is unavailable. This
- line number is zero-based, with respect to the beginning of
- the file.
-
- - docstring: The string that the examples were extracted from,
- or `None` if the string is unavailable.
- """
- def __init__(self, examples, globs, name, filename, lineno, docstring):
- """
- Create a new DocTest containing the given examples. The
- DocTest's globals are initialized with a copy of `globs`.
- """
- assert not isinstance(examples, basestring), \
- "DocTest no longer accepts str; use DocTestParser instead"
- self.examples = examples
- self.docstring = docstring
- self.globs = globs.copy()
- self.name = name
- self.filename = filename
- self.lineno = lineno
-
- def __repr__(self):
- if len(self.examples) == 0:
- examples = 'no examples'
- elif len(self.examples) == 1:
- examples = '1 example'
- else:
- examples = '%d examples' % len(self.examples)
- return ('<DocTest %s from %s:%s (%s)>' %
- (self.name, self.filename, self.lineno, examples))
-
-
- # This lets us sort tests by name:
- def __cmp__(self, other):
- if not isinstance(other, DocTest):
- return -1
- return cmp((self.name, self.filename, self.lineno, id(self)),
- (other.name, other.filename, other.lineno, id(other)))
-
-######################################################################
-## 3. DocTestParser
-######################################################################
-
-class DocTestParser:
- """
- A class used to parse strings containing doctest examples.
- """
- # This regular expression is used to find doctest examples in a
- # string. It defines three groups: `source` is the source code
- # (including leading indentation and prompts); `indent` is the
- # indentation of the first (PS1) line of the source code; and
- # `want` is the expected output (including leading indentation).
- _EXAMPLE_RE = re.compile(r'''
- # Source consists of a PS1 line followed by zero or more PS2 lines.
- (?P<source>
- (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
- (?:\n [ ]* \.\.\. .*)*) # PS2 lines
- \n?
- # Want consists of any non-blank lines that do not start with PS1.
- (?P<want> (?:(?![ ]*$) # Not a blank line
- (?![ ]*>>>) # Not a line starting with PS1
- .*$\n? # But any other line
- )*)
- ''', re.MULTILINE | re.VERBOSE)
-
- # A regular expression for handling `want` strings that contain
- # expected exceptions. It divides `want` into three pieces:
- # - the traceback header line (`hdr`)
- # - the traceback stack (`stack`)
- # - the exception message (`msg`), as generated by
- # traceback.format_exception_only()
- # `msg` may have multiple lines. We assume/require that the
- # exception message is the first non-indented line starting with a word
- # character following the traceback header line.
- _EXCEPTION_RE = re.compile(r"""
- # Grab the traceback header. Different versions of Python have
- # said different things on the first traceback line.
- ^(?P<hdr> Traceback\ \(
- (?: most\ recent\ call\ last
- | innermost\ last
- ) \) :
- )
- \s* $ # toss trailing whitespace on the header.
- (?P<stack> .*?) # don't blink: absorb stuff until...
- ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
- """, re.VERBOSE | re.MULTILINE | re.DOTALL)
-
- # A callable returning a true value iff its argument is a blank line
- # or contains a single comment.
- _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
-
- def parse(self, string, name='<string>'):
- """
- Divide the given string into examples and intervening text,
- and return them as a list of alternating Examples and strings.
- Line numbers for the Examples are 0-based. The optional
- argument `name` is a name identifying this string, and is only
- used for error messages.
- """
- string = string.expandtabs()
- # If all lines begin with the same indentation, then strip it.
- min_indent = self._min_indent(string)
- if min_indent > 0:
- string = '\n'.join([l[min_indent:] for l in string.split('\n')])
-
- output = []
- charno, lineno = 0, 0
- # Find all doctest examples in the string:
- for m in self._EXAMPLE_RE.finditer(string):
- # Add the pre-example text to `output`.
- output.append(string[charno:m.start()])
- # Update lineno (lines before this example)
- lineno += string.count('\n', charno, m.start())
- # Extract info from the regexp match.
- (source, options, want, exc_msg) = \
- self._parse_example(m, name, lineno)
- # Create an Example, and add it to the list.
- if not self._IS_BLANK_OR_COMMENT(source):
- output.append( Example(source, want, exc_msg,
- lineno=lineno,
- indent=min_indent+len(m.group('indent')),
- options=options) )
- # Update lineno (lines inside this example)
- lineno += string.count('\n', m.start(), m.end())
- # Update charno.
- charno = m.end()
- # Add any remaining post-example text to `output`.
- output.append(string[charno:])
- return output
-
- def get_doctest(self, string, globs, name, filename, lineno):
- """
- Extract all doctest examples from the given string, and
- collect them into a `DocTest` object.
-
- `globs`, `name`, `filename`, and `lineno` are attributes for
- the new `DocTest` object. See the documentation for `DocTest`
- for more information.
- """
- return DocTest(self.get_examples(string, name), globs,
- name, filename, lineno, string)
-
- def get_examples(self, string, name='<string>'):
- """
- Extract all doctest examples from the given string, and return
- them as a list of `Example` objects. Line numbers are
- 0-based, because it's most common in doctests that nothing
- interesting appears on the same line as opening triple-quote,
- and so the first interesting line is called \"line 1\" then.
-
- The optional argument `name` is a name identifying this
- string, and is only used for error messages.
- """
- return [x for x in self.parse(string, name)
- if isinstance(x, Example)]
-
- def _parse_example(self, m, name, lineno):
- """
- Given a regular expression match from `_EXAMPLE_RE` (`m`),
- return a pair `(source, want)`, where `source` is the matched
- example's source code (with prompts and indentation stripped);
- and `want` is the example's expected output (with indentation
- stripped).
-
- `name` is the string's name, and `lineno` is the line number
- where the example starts; both are used for error messages.
- """
- # Get the example's indentation level.
- indent = len(m.group('indent'))
-
- # Divide source into lines; check that they're properly
- # indented; and then strip their indentation & prompts.
- source_lines = m.group('source').split('\n')
- self._check_prompt_blank(source_lines, indent, name, lineno)
- self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
- source = '\n'.join([sl[indent+4:] for sl in source_lines])
-
- # Divide want into lines; check that it's properly indented; and
- # then strip the indentation. Spaces before the last newline should
- # be preserved, so plain rstrip() isn't good enough.
- want = m.group('want')
- want_lines = want.split('\n')
- if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
- del want_lines[-1] # forget final newline & spaces after it
- self._check_prefix(want_lines, ' '*indent, name,
- lineno + len(source_lines))
- want = '\n'.join([wl[indent:] for wl in want_lines])
-
- # If `want` contains a traceback message, then extract it.
- m = self._EXCEPTION_RE.match(want)
- if m:
- exc_msg = m.group('msg')
- else:
- exc_msg = None
-
- # Extract options from the source.
- options = self._find_options(source, name, lineno)
-
- return source, options, want, exc_msg
-
- # This regular expression looks for option directives in the
- # source code of an example. Option directives are comments
- # starting with "doctest:". Warning: this may give false
- # positives for string-literals that contain the string
- # "#doctest:". Eliminating these false positives would require
- # actually parsing the string; but we limit them by ignoring any
- # line containing "#doctest:" that is *followed* by a quote mark.
- _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
- re.MULTILINE)
-
- def _find_options(self, source, name, lineno):
- """
- Return a dictionary containing option overrides extracted from
- option directives in the given source string.
-
- `name` is the string's name, and `lineno` is the line number
- where the example starts; both are used for error messages.
- """
- options = {}
- # (note: with the current regexp, this will match at most once:)
- for m in self._OPTION_DIRECTIVE_RE.finditer(source):
- option_strings = m.group(1).replace(',', ' ').split()
- for option in option_strings:
- if (option[0] not in '+-' or
- option[1:] not in OPTIONFLAGS_BY_NAME):
- raise ValueError('line %r of the doctest for %s '
- 'has an invalid option: %r' %
- (lineno+1, name, option))
- flag = OPTIONFLAGS_BY_NAME[option[1:]]
- options[flag] = (option[0] == '+')
- if options and self._IS_BLANK_OR_COMMENT(source):
- raise ValueError('line %r of the doctest for %s has an option '
- 'directive on a line with no example: %r' %
- (lineno, name, source))
- return options
-
- # This regular expression finds the indentation of every non-blank
- # line in a string.
- _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
-
- def _min_indent(self, s):
- "Return the minimum indentation of any non-blank line in `s`"
- indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
- if len(indents) > 0:
- return min(indents)
- else:
- return 0
-
- def _check_prompt_blank(self, lines, indent, name, lineno):
- """
- Given the lines of a source string (including prompts and
- leading indentation), check to make sure that every prompt is
- followed by a space character. If any line is not followed by
- a space character, then raise ValueError.
- """
- for i, line in enumerate(lines):
- if len(line) >= indent+4 and line[indent+3] != ' ':
- raise ValueError('line %r of the docstring for %s '
- 'lacks blank after %s: %r' %
- (lineno+i+1, name,
- line[indent:indent+3], line))
-
- def _check_prefix(self, lines, prefix, name, lineno):
- """
- Check that every line in the given list starts with the given
- prefix; if any line does not, then raise a ValueError.
- """
- for i, line in enumerate(lines):
- if line and not line.startswith(prefix):
- raise ValueError('line %r of the docstring for %s has '
- 'inconsistent leading whitespace: %r' %
- (lineno+i+1, name, line))
-
-
-######################################################################
-## 4. DocTest Finder
-######################################################################
-
-class DocTestFinder:
- """
- A class used to extract the DocTests that are relevant to a given
- object, from its docstring and the docstrings of its contained
- objects. Doctests can currently be extracted from the following
- object types: modules, functions, classes, methods, staticmethods,
- classmethods, and properties.
- """
-
- def __init__(self, verbose=False, parser=DocTestParser(),
- recurse=True, exclude_empty=True):
- """
- Create a new doctest finder.
-
- The optional argument `parser` specifies a class or
- function that should be used to create new DocTest objects (or
- objects that implement the same interface as DocTest). The
- signature for this factory function should match the signature
- of the DocTest constructor.
-
- If the optional argument `recurse` is false, then `find` will
- only examine the given object, and not any contained objects.
-
- If the optional argument `exclude_empty` is false, then `find`
- will include tests for objects with empty docstrings.
- """
- self._parser = parser
- self._verbose = verbose
- self._recurse = recurse
- self._exclude_empty = exclude_empty
-
- def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
- """
- Return a list of the DocTests that are defined by the given
- object's docstring, or by any of its contained objects'
- docstrings.
-
- The optional parameter `module` is the module that contains
- the given object. If the module is not specified or is None, then
- the test finder will attempt to automatically determine the
- correct module. The object's module is used:
-
- - As a default namespace, if `globs` is not specified.
- - To prevent the DocTestFinder from extracting DocTests
- from objects that are imported from other modules.
- - To find the name of the file containing the object.
- - To help find the line number of the object within its
- file.
-
- Contained objects whose module does not match `module` are ignored.
-
- If `module` is False, no attempt to find the module will be made.
- This is obscure, of use mostly in tests: if `module` is False, or
- is None but cannot be found automatically, then all objects are
- considered to belong to the (non-existent) module, so all contained
- objects will (recursively) be searched for doctests.
-
- The globals for each DocTest is formed by combining `globs`
- and `extraglobs` (bindings in `extraglobs` override bindings
- in `globs`). A new copy of the globals dictionary is created
- for each DocTest. If `globs` is not specified, then it
- defaults to the module's `__dict__`, if specified, or {}
- otherwise. If `extraglobs` is not specified, then it defaults
- to {}.
-
- """
- # If name was not specified, then extract it from the object.
- if name is None:
- name = getattr(obj, '__name__', None)
- if name is None:
- raise ValueError("DocTestFinder.find: name must be given "
- "when obj.__name__ doesn't exist: %r" %
- (type(obj),))
-
- # Find the module that contains the given object (if obj is
- # a module, then module=obj.). Note: this may fail, in which
- # case module will be None.
- if module is False:
- module = None
- elif module is None:
- module = inspect.getmodule(obj)
-
- # Read the module's source code. This is used by
- # DocTestFinder._find_lineno to find the line number for a
- # given object's docstring.
- try:
- file = inspect.getsourcefile(obj) or inspect.getfile(obj)
- source_lines = linecache.getlines(file)
- if not source_lines:
- source_lines = None
- except TypeError:
- source_lines = None
-
- # Initialize globals, and merge in extraglobs.
- if globs is None:
- if module is None:
- globs = {}
- else:
- globs = module.__dict__.copy()
- else:
- globs = globs.copy()
- if extraglobs is not None:
- globs.update(extraglobs)
-
- # Recursively expore `obj`, extracting DocTests.
- tests = []
- self._find(tests, obj, name, module, source_lines, globs, {})
- # Sort the tests by alpha order of names, for consistency in
- # verbose-mode output. This was a feature of doctest in Pythons
- # <= 2.3 that got lost by accident in 2.4. It was repaired in
- # 2.4.4 and 2.5.
- tests.sort()
- return tests
-
- def _from_module(self, module, object):
- """
- Return true if the given object is defined in the given
- module.
- """
- if module is None:
- return True
- elif inspect.isfunction(object):
- return module.__dict__ is object.func_globals
- elif inspect.isclass(object):
- return module.__name__ == object.__module__
- elif inspect.getmodule(object) is not None:
- return module is inspect.getmodule(object)
- elif hasattr(object, '__module__'):
- return module.__name__ == object.__module__
- elif isinstance(object, property):
- return True # [XX] no way not be sure.
- else:
- raise ValueError("object must be a class or function")
-
- def _find(self, tests, obj, name, module, source_lines, globs, seen):
- """
- Find tests for the given object and any contained objects, and
- add them to `tests`.
- """
- if self._verbose:
- print 'Finding tests in %s' % name
-
- # If we've already processed this object, then ignore it.
- if id(obj) in seen:
- return
- seen[id(obj)] = 1
-
- # Find a test for this object, and add it to the list of tests.
- test = self._get_test(obj, name, module, globs, source_lines)
- if test is not None:
- tests.append(test)
-
- # Look for tests in a module's contained objects.
- if inspect.ismodule(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- valname = '%s.%s' % (name, valname)
- # Recurse to functions & classes.
- if ((inspect.isfunction(val) or inspect.isclass(val)) and
- self._from_module(module, val)):
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
- # Look for tests in a module's __test__ dictionary.
- if inspect.ismodule(obj) and self._recurse:
- for valname, val in getattr(obj, '__test__', {}).items():
- if not isinstance(valname, basestring):
- raise ValueError("DocTestFinder.find: __test__ keys "
- "must be strings: %r" %
- (type(valname),))
- if not (inspect.isfunction(val) or inspect.isclass(val) or
- inspect.ismethod(val) or inspect.ismodule(val) or
- isinstance(val, basestring)):
- raise ValueError("DocTestFinder.find: __test__ values "
- "must be strings, functions, methods, "
- "classes, or modules: %r" %
- (type(val),))
- valname = '%s.__test__.%s' % (name, valname)
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
- # Look for tests in a class's contained objects.
- if inspect.isclass(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- # Special handling for staticmethod/classmethod.
- if isinstance(val, staticmethod):
- val = getattr(obj, valname)
- if isinstance(val, classmethod):
- val = getattr(obj, valname).im_func
-
- # Recurse to methods, properties, and nested classes.
- if ((inspect.isfunction(val) or inspect.isclass(val) or
- isinstance(val, property)) and
- self._from_module(module, val)):
- valname = '%s.%s' % (name, valname)
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
- def _get_test(self, obj, name, module, globs, source_lines):
- """
- Return a DocTest for the given object, if it defines a docstring;
- otherwise, return None.
- """
- # Extract the object's docstring. If it doesn't have one,
- # then return None (no test for this object).
- if isinstance(obj, basestring):
- docstring = obj
- else:
- try:
- if obj.__doc__ is None:
- docstring = ''
- else:
- docstring = obj.__doc__
- if not isinstance(docstring, basestring):
- docstring = str(docstring)
- except (TypeError, AttributeError):
- docstring = ''
-
- # Find the docstring's location in the file.
- lineno = self._find_lineno(obj, source_lines)
-
- # Don't bother if the docstring is empty.
- if self._exclude_empty and not docstring:
- return None
-
- # Return a DocTest for this object.
- if module is None:
- filename = None
- else:
- filename = getattr(module, '__file__', module.__name__)
- if filename[-4:] in (".pyc", ".pyo"):
- filename = filename[:-1]
- return self._parser.get_doctest(docstring, globs, name,
- filename, lineno)
-
- def _find_lineno(self, obj, source_lines):
- """
- Return a line number of the given object's docstring. Note:
- this method assumes that the object has a docstring.
- """
- lineno = None
-
- # Find the line number for modules.
- if inspect.ismodule(obj):
- lineno = 0
-
- # Find the line number for classes.
- # Note: this could be fooled if a class is defined multiple
- # times in a single file.
- if inspect.isclass(obj):
- if source_lines is None:
- return None
- pat = re.compile(r'^\s*class\s*%s\b' %
- getattr(obj, '__name__', '-'))
- for i, line in enumerate(source_lines):
- if pat.match(line):
- lineno = i
- break
-
- # Find the line number for functions & methods.
- if inspect.ismethod(obj): obj = obj.im_func
- if inspect.isfunction(obj): obj = obj.func_code
- if inspect.istraceback(obj): obj = obj.tb_frame
- if inspect.isframe(obj): obj = obj.f_code
- if inspect.iscode(obj):
- lineno = getattr(obj, 'co_firstlineno', None)-1
-
- # Find the line number where the docstring starts. Assume
- # that it's the first line that begins with a quote mark.
- # Note: this could be fooled by a multiline function
- # signature, where a continuation line begins with a quote
- # mark.
- if lineno is not None:
- if source_lines is None:
- return lineno+1
- pat = re.compile('(^|.*:)\s*\w*("|\')')
- for lineno in range(lineno, len(source_lines)):
- if pat.match(source_lines[lineno]):
- return lineno
-
- # We couldn't find the line number.
- return None
-
-######################################################################
-## 5. DocTest Runner
-######################################################################
-
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case. It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

    >>> tests = DocTestFinder().find(_TestClass)
    >>> runner = DocTestRunner(verbose=False)
    >>> tests.sort(key = lambda test: test.name)
    >>> for test in tests:
    ...     print test.name, '->', runner.run(test)
    _TestClass -> (0, 2)
    _TestClass.__init__ -> (0, 2)
    _TestClass.get -> (0, 2)
    _TestClass.square -> (0, 1)

    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:

    >>> runner.summarize(verbose=1)
    4 items passed all tests:
       2 tests in _TestClass
       2 tests in _TestClass.__init__
       2 tests in _TestClass.get
       1 tests in _TestClass.square
    7 tests in 4 items.
    7 passed and 0 failed.
    Test passed.
    (0, 7)

    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

    >>> runner.tries
    7
    >>> runner.failures
    0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`. This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information. If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.

    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed. It defaults to `sys.stdout.write`. If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures. See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example. (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully. (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        # Build the common "File ..., line ..., in ..." banner shown
        # before every failure report.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`. Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`. `compileflags` is the set of compiler
        flags that should be used to execute examples. Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed. The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Run the example in the given context (globs), and record
            # any exception that gets raised. (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink! This is where the user's code gets run.
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====

            got = self._fakeout.getvalue() # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception: check if it was expected.
            else:
                exc_info = sys.exc_info()
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception: see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # If `filename` is the synthetic '<doctest name[num]>' name for an
        # example of the test currently being run, serve that example's
        # source directly; otherwise fall back to the real linecache
        # implementation saved in run().
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename, module_globals)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`. If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection. If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples. If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        try:
            return self.__run(test, compileflags, out)
        finally:
            # Always undo the stdout/pdb/linecache monkey-patching,
            # even if __run raised.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is. If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print "   ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return totalf, totalt

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold another runner's per-test (failures, tries) counts into
        # this runner's table, summing entries recorded under the same
        # test name.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
-
class OutputChecker:
    """
    A class used to check whether the actual output from a doctest
    example matches the expected output. `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`). These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible. See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings. Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False

        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not. In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False

        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True

        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`). `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))

        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
-
class DocTestFailure(Exception):
    """Raised in debugging mode when a doctest example's actual output
    does not match its expected output.

    Instance attributes:

      test:    the DocTest object being run

      example: the Example object that failed

      got:     the actual output
    """
    def __init__(self, test, example, got):
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        return str(self.test)
-
class UnexpectedException(Exception):
    """Raised in debugging mode when a doctest example raises an
    exception that was not expected.

    Instance attributes:

      test:     the DocTest object being run

      example:  the Example object that failed

      exc_info: the exception info
    """
    def __init__(self, test, example, exc_info):
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        return str(self.test)
-
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.

       If an unexpected exception occurs, an UnexpectedException is raised.
       It contains the test, the example, and the original exception:

         >>> runner = DebugRunner(verbose=False)
         >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
         ...                                    {}, 'foo', 'foo.py', 0)
         >>> try:
         ...     runner.run(test)
         ... except UnexpectedException, failure:
         ...     pass

         >>> failure.test is test
         True

         >>> failure.example.want
         '42\n'

         >>> exc_info = failure.exc_info
         >>> raise exc_info[0], exc_info[1], exc_info[2]
         Traceback (most recent call last):
         ...
         KeyError

       We wrap the original exception to give the calling application
       access to the test and example information.

       If the output doesn't match, then a DocTestFailure is raised:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 1
         ...      >>> x
         ...      2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> try:
         ...    runner.run(test)
         ... except DocTestFailure, failure:
         ...    pass

       DocTestFailure objects provide access to the test:

         >>> failure.test is test
         True

       As well as to the example:

         >>> failure.example.want
         '2\n'

       and the actual output:

         >>> failure.got
         '1\n'

       If a failure or error occurs, the globals are left intact:

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 1}

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      >>> raise KeyError
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         Traceback (most recent call last):
         ...
         UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>

         >>> del test.globs['__builtins__']
         >>> test.globs
         {'x': 2}

       But the globals are cleared if there is no error:

         >>> test = DocTestParser().get_doctest('''
         ...      >>> x = 2
         ...      ''', {}, 'foo', 'foo.py', 0)

         >>> runner.run(test)
         (0, 1)

         >>> test.globs
         {}

       """

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Delegate with clear_globs=False so that, if a report_* method
        # below raises, `test.globs` is left intact for post-mortem
        # inspection; clear the globals only after a clean run.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run on the first unexpected exception, wrapping it so
        # the caller can reach the test and example objects.
        raise UnexpectedException(test, example, exc_info)

    def report_failure(self, out, test, example, got):
        # Abort the run on the first output mismatch.
        raise DocTestFailure(test, example, got)
-
-######################################################################
-## 6. Test Functions
-######################################################################
-# These should be backwards compatible.
-
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod (and by testfile).
master = None
-
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
       optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None. m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See doctest.__doc__ for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__. A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used. This is new in 2.4.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. This is new in 2.3. Possible values (see the
    docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.

    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')

    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__

    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(exclude_empty=exclude_empty)

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Fold this run's results into the global master runner (backward
    # compatibility with the old doctest.master interface).
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
-
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file. Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path. By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package. To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path. The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename. If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames. It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}. A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.

    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path
    text, filename = _load_testfile(filename, package, module_relative)

    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    if encoding is not None:
        text = text.decode(encoding)

    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Fold this run's results into the global master runner (backward
    # compatibility with the old doctest.master interface).
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
-
-def run_docstring_examples(f, globs, verbose=False, name="NoName",
- compileflags=None, optionflags=0):
- """
- Test examples in the given object's docstring (`f`), using `globs`
- as globals. Optional argument `name` is used in failure messages.
- If the optional argument `verbose` is true, then generate output
- even if there are no failures.
-
- `compileflags` gives the set of flags that should be used by the
- Python compiler when running the examples. If not specified, then
- it will default to the set of future-import flags that apply to
- `globs`.
-
- Optional keyword arg `optionflags` specifies options for the
- testing and output. See the documentation for `testmod` for more
- information.
- """
- # Find, parse, and run all tests in the given module.
- finder = DocTestFinder(verbose=verbose, recurse=False)
- runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
- for test in finder.find(f, name, globs=globs):
- runner.run(test, compileflags=compileflags)
-
-######################################################################
-## 7. Tester
-######################################################################
-# This is provided only for backwards compatibility. It's not
-# actually used in any way.
-
-class Tester:
- def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
-
- warnings.warn("class Tester is deprecated; "
- "use class doctest.DocTestRunner instead",
- DeprecationWarning, stacklevel=2)
- if mod is None and globs is None:
- raise TypeError("Tester.__init__: must specify mod or globs")
- if mod is not None and not inspect.ismodule(mod):
- raise TypeError("Tester.__init__: mod must be a module; %r" %
- (mod,))
- if globs is None:
- globs = mod.__dict__
- self.globs = globs
-
- self.verbose = verbose
- self.optionflags = optionflags
- self.testfinder = DocTestFinder()
- self.testrunner = DocTestRunner(verbose=verbose,
- optionflags=optionflags)
-
- def runstring(self, s, name):
- test = DocTestParser().get_doctest(s, self.globs, name, None, None)
- if self.verbose:
- print "Running string", name
- (f,t) = self.testrunner.run(test)
- if self.verbose:
- print f, "of", t, "examples failed in string", name
- return (f,t)
-
- def rundoc(self, object, name=None, module=None):
- f = t = 0
- tests = self.testfinder.find(object, name, module=module,
- globs=self.globs)
- for test in tests:
- (f2, t2) = self.testrunner.run(test)
- (f,t) = (f+f2, t+t2)
- return (f,t)
-
- def rundict(self, d, name, module=None):
- import new
- m = new.module(name)
- m.__dict__.update(d)
- if module is None:
- module = False
- return self.rundoc(m, name, module)
-
- def run__test__(self, d, name):
- import new
- m = new.module(name)
- m.__test__ = d
- return self.rundoc(m, name)
-
- def summarize(self, verbose=None):
- return self.testrunner.summarize(verbose)
-
- def merge(self, other):
- self.testrunner.merge(other.testrunner)
-
-######################################################################
-## 8. Unittest Support
-######################################################################
-
-_unittest_reportflags = 0
-
-def set_unittest_reportflags(flags):
- """Sets the unittest option flags.
-
- The old flag is returned so that a runner could restore the old
- value if it wished to:
-
- >>> import doctest
- >>> old = doctest._unittest_reportflags
- >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
- ... REPORT_ONLY_FIRST_FAILURE) == old
- True
-
- >>> doctest._unittest_reportflags == (REPORT_NDIFF |
- ... REPORT_ONLY_FIRST_FAILURE)
- True
-
- Only reporting flags can be set:
-
- >>> doctest.set_unittest_reportflags(ELLIPSIS)
- Traceback (most recent call last):
- ...
- ValueError: ('Only reporting flags allowed', 8)
-
- >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
- ... REPORT_ONLY_FIRST_FAILURE)
- True
- """
- global _unittest_reportflags
-
- if (flags & REPORTING_FLAGS) != flags:
- raise ValueError("Only reporting flags allowed", flags)
- old = _unittest_reportflags
- _unittest_reportflags = flags
- return old
-
-
-class DocTestCase(unittest.TestCase):
-
- def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
- checker=None):
-
- unittest.TestCase.__init__(self)
- self._dt_optionflags = optionflags
- self._dt_checker = checker
- self._dt_test = test
- self._dt_setUp = setUp
- self._dt_tearDown = tearDown
-
- def setUp(self):
- test = self._dt_test
-
- if self._dt_setUp is not None:
- self._dt_setUp(test)
-
- def tearDown(self):
- test = self._dt_test
-
- if self._dt_tearDown is not None:
- self._dt_tearDown(test)
-
- test.globs.clear()
-
- def runTest(self):
- test = self._dt_test
- old = sys.stdout
- new = StringIO()
- optionflags = self._dt_optionflags
-
- if not (optionflags & REPORTING_FLAGS):
- # The option flags don't include any reporting flags,
- # so add the default reporting flags
- optionflags |= _unittest_reportflags
-
- runner = DocTestRunner(optionflags=optionflags,
- checker=self._dt_checker, verbose=False)
-
- try:
- runner.DIVIDER = "-"*70
- failures, tries = runner.run(
- test, out=new.write, clear_globs=False)
- finally:
- sys.stdout = old
-
- if failures:
- raise self.failureException(self.format_failure(new.getvalue()))
-
- def format_failure(self, err):
- test = self._dt_test
- if test.lineno is None:
- lineno = 'unknown line number'
- else:
- lineno = '%s' % test.lineno
- lname = '.'.join(test.name.split('.')[-1:])
- return ('Failed doctest test for %s\n'
- ' File "%s", line %s, in %s\n\n%s'
- % (test.name, test.filename, lineno, lname, err)
- )
-
- def debug(self):
- r"""Run the test case without results and without catching exceptions
-
- The unit test framework includes a debug method on test cases
- and test suites to support post-mortem debugging. The test code
- is run in such a way that errors are not caught. This way a
- caller can catch the errors and initiate post-mortem debugging.
-
- The DocTestCase provides a debug method that raises
- UnexpectedException errors if there is an unexepcted
- exception:
-
- >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
- ... {}, 'foo', 'foo.py', 0)
- >>> case = DocTestCase(test)
- >>> try:
- ... case.debug()
- ... except UnexpectedException, failure:
- ... pass
-
- The UnexpectedException contains the test, the example, and
- the original exception:
-
- >>> failure.test is test
- True
-
- >>> failure.example.want
- '42\n'
-
- >>> exc_info = failure.exc_info
- >>> raise exc_info[0], exc_info[1], exc_info[2]
- Traceback (most recent call last):
- ...
- KeyError
-
- If the output doesn't match, then a DocTestFailure is raised:
-
- >>> test = DocTestParser().get_doctest('''
- ... >>> x = 1
- ... >>> x
- ... 2
- ... ''', {}, 'foo', 'foo.py', 0)
- >>> case = DocTestCase(test)
-
- >>> try:
- ... case.debug()
- ... except DocTestFailure, failure:
- ... pass
-
- DocTestFailure objects provide access to the test:
-
- >>> failure.test is test
- True
-
- As well as to the example:
-
- >>> failure.example.want
- '2\n'
-
- and the actual output:
-
- >>> failure.got
- '1\n'
-
- """
-
- self.setUp()
- runner = DebugRunner(optionflags=self._dt_optionflags,
- checker=self._dt_checker, verbose=False)
- runner.run(self._dt_test)
- self.tearDown()
-
- def id(self):
- return self._dt_test.name
-
- def __repr__(self):
- name = self._dt_test.name.split('.')
- return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
-
- __str__ = __repr__
-
- def shortDescription(self):
- return "Doctest: " + self._dt_test.name
-
-def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
- **options):
- """
- Convert doctest tests for a module to a unittest test suite.
-
- This converts each documentation string in a module that
- contains doctest tests to a unittest test case. If any of the
- tests in a doc string fail, then the test case fails. An exception
- is raised showing the name of the file containing the test and a
- (sometimes approximate) line number.
-
- The `module` argument provides the module to be tested. The argument
- can be either a module or a module name.
-
- If no argument is given, the calling module is used.
-
- A number of options may be provided as keyword arguments:
-
- setUp
- A set-up function. This is called before running the
- tests in each file. The setUp function will be passed a DocTest
- object. The setUp function can access the test globals as the
- globs attribute of the test passed.
-
- tearDown
- A tear-down function. This is called after running the
- tests in each file. The tearDown function will be passed a DocTest
- object. The tearDown function can access the test globals as the
- globs attribute of the test passed.
-
- globs
- A dictionary containing initial global variables for the tests.
-
- optionflags
- A set of doctest option flags expressed as an integer.
- """
-
- if test_finder is None:
- test_finder = DocTestFinder()
-
- module = _normalize_module(module)
- tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
- if globs is None:
- globs = module.__dict__
- if not tests:
- # Why do we want to do this? Because it reveals a bug that might
- # otherwise be hidden.
- raise ValueError(module, "has no tests")
-
- tests.sort()
- suite = unittest.TestSuite()
- for test in tests:
- if len(test.examples) == 0:
- continue
- if not test.filename:
- filename = module.__file__
- if filename[-4:] in (".pyc", ".pyo"):
- filename = filename[:-1]
- test.filename = filename
- suite.addTest(DocTestCase(test, **options))
-
- return suite
-
-class DocFileCase(DocTestCase):
-
- def id(self):
- return '_'.join(self._dt_test.name.split('.'))
-
- def __repr__(self):
- return self._dt_test.filename
- __str__ = __repr__
-
- def format_failure(self, err):
- return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
- % (self._dt_test.name, self._dt_test.filename, err)
- )
-
-def DocFileTest(path, module_relative=True, package=None,
- globs=None, parser=DocTestParser(),
- encoding=None, **options):
- if globs is None:
- globs = {}
- else:
- globs = globs.copy()
-
- if package and not module_relative:
- raise ValueError("Package may only be specified for module-"
- "relative paths.")
-
- # Relativize the path.
- doc, path = _load_testfile(path, package, module_relative)
-
- if "__file__" not in globs:
- globs["__file__"] = path
-
- # Find the file and read it.
- name = os.path.basename(path)
-
- # If an encoding is specified, use it to convert the file to unicode
- if encoding is not None:
- doc = doc.decode(encoding)
-
- # Convert it to a test, and wrap it in a DocFileCase.
- test = parser.get_doctest(doc, globs, name, path, 0)
- return DocFileCase(test, **options)
-
-def DocFileSuite(*paths, **kw):
- """A unittest suite for one or more doctest files.
-
- The path to each doctest file is given as a string; the
- interpretation of that string depends on the keyword argument
- "module_relative".
-
- A number of options may be provided as keyword arguments:
-
- module_relative
- If "module_relative" is True, then the given file paths are
- interpreted as os-independent module-relative paths. By
- default, these paths are relative to the calling module's
- directory; but if the "package" argument is specified, then
- they are relative to that package. To ensure os-independence,
- "filename" should use "/" characters to separate path
- segments, and may not be an absolute path (i.e., it may not
- begin with "/").
-
- If "module_relative" is False, then the given file paths are
- interpreted as os-specific paths. These paths may be absolute
- or relative (to the current working directory).
-
- package
- A Python package or the name of a Python package whose directory
- should be used as the base directory for module relative paths.
- If "package" is not specified, then the calling module's
- directory is used as the base directory for module relative
- filenames. It is an error to specify "package" if
- "module_relative" is False.
-
- setUp
- A set-up function. This is called before running the
- tests in each file. The setUp function will be passed a DocTest
- object. The setUp function can access the test globals as the
- globs attribute of the test passed.
-
- tearDown
- A tear-down function. This is called after running the
- tests in each file. The tearDown function will be passed a DocTest
- object. The tearDown function can access the test globals as the
- globs attribute of the test passed.
-
- globs
- A dictionary containing initial global variables for the tests.
-
- optionflags
- A set of doctest option flags expressed as an integer.
-
- parser
- A DocTestParser (or subclass) that should be used to extract
- tests from the files.
-
- encoding
- An encoding that will be used to convert the files to unicode.
- """
- suite = unittest.TestSuite()
-
- # We do this here so that _normalize_module is called at the right
- # level. If it were called in DocFileTest, then this function
- # would be the caller and we might guess the package incorrectly.
- if kw.get('module_relative', True):
- kw['package'] = _normalize_module(kw.get('package'))
-
- for path in paths:
- suite.addTest(DocFileTest(path, **kw))
-
- return suite
-
-######################################################################
-## 9. Debugging Support
-######################################################################
-
-def script_from_examples(s):
- r"""Extract script from text with examples.
-
- Converts text with examples to a Python script. Example input is
- converted to regular code. Example output and all other words
- are converted to comments:
-
- >>> text = '''
- ... Here are examples of simple math.
- ...
- ... Python has super accurate integer addition
- ...
- ... >>> 2 + 2
- ... 5
- ...
- ... And very friendly error messages:
- ...
- ... >>> 1/0
- ... To Infinity
- ... And
- ... Beyond
- ...
- ... You can use logic if you want:
- ...
- ... >>> if 0:
- ... ... blah
- ... ... blah
- ... ...
- ...
- ... Ho hum
- ... '''
-
- >>> print script_from_examples(text)
- # Here are examples of simple math.
- #
- # Python has super accurate integer addition
- #
- 2 + 2
- # Expected:
- ## 5
- #
- # And very friendly error messages:
- #
- 1/0
- # Expected:
- ## To Infinity
- ## And
- ## Beyond
- #
- # You can use logic if you want:
- #
- if 0:
- blah
- blah
- #
- # Ho hum
- <BLANKLINE>
- """
- output = []
- for piece in DocTestParser().parse(s):
- if isinstance(piece, Example):
- # Add the example's source code (strip trailing NL)
- output.append(piece.source[:-1])
- # Add the expected output:
- want = piece.want
- if want:
- output.append('# Expected:')
- output += ['## '+l for l in want.split('\n')[:-1]]
- else:
- # Add non-example text.
- output += [_comment_line(l)
- for l in piece.split('\n')[:-1]]
-
- # Trim junk on both ends.
- while output and output[-1] == '#':
- output.pop()
- while output and output[0] == '#':
- output.pop(0)
- # Combine the output, and return it.
- # Add a courtesy newline to prevent exec from choking (see bug #1172785)
- return '\n'.join(output) + '\n'
-
-def testsource(module, name):
- """Extract the test sources from a doctest docstring as a script.
-
- Provide the module (or dotted name of the module) containing the
- test to be debugged and the name (within the module) of the object
- with the doc string with tests to be debugged.
- """
- module = _normalize_module(module)
- tests = DocTestFinder().find(module)
- test = [t for t in tests if t.name == name]
- if not test:
- raise ValueError(name, "not found in tests")
- test = test[0]
- testsrc = script_from_examples(test.docstring)
- return testsrc
-
-def debug_src(src, pm=False, globs=None):
- """Debug a single doctest docstring, in argument `src`'"""
- testsrc = script_from_examples(src)
- debug_script(testsrc, pm, globs)
-
-def debug_script(src, pm=False, globs=None):
- "Debug a test script. `src` is the script, as a string."
- import pdb
-
- # Note that tempfile.NameTemporaryFile() cannot be used. As the
- # docs say, a file so created cannot be opened by name a second time
- # on modern Windows boxes, and execfile() needs to open it.
- srcfilename = tempfile.mktemp(".py", "doctestdebug")
- f = open(srcfilename, 'w')
- f.write(src)
- f.close()
-
- try:
- if globs:
- globs = globs.copy()
- else:
- globs = {}
-
- if pm:
- try:
- execfile(srcfilename, globs, globs)
- except:
- print sys.exc_info()[1]
- pdb.post_mortem(sys.exc_info()[2])
- else:
- # Note that %r is vital here. '%s' instead can, e.g., cause
- # backslashes to get treated as metacharacters on Windows.
- pdb.run("execfile(%r)" % srcfilename, globs, globs)
-
- finally:
- os.remove(srcfilename)
-
-def debug(module, name, pm=False):
- """Debug a single doctest docstring.
-
- Provide the module (or dotted name of the module) containing the
- test to be debugged and the name (within the module) of the object
- with the docstring with tests to be debugged.
- """
- module = _normalize_module(module)
- testsrc = testsource(module, name)
- debug_script(testsrc, pm, module.__dict__)
-
-######################################################################
-## 10. Example Usage
-######################################################################
-class _TestClass:
- """
- A pointless class, for sanity-checking of docstring testing.
-
- Methods:
- square()
- get()
-
- >>> _TestClass(13).get() + _TestClass(-12).get()
- 1
- >>> hex(_TestClass(13).square().get())
- '0xa9'
- """
-
- def __init__(self, val):
- """val -> _TestClass object with associated value val.
-
- >>> t = _TestClass(123)
- >>> print t.get()
- 123
- """
-
- self.val = val
-
- def square(self):
- """square() -> square TestClass's associated value
-
- >>> _TestClass(13).square().get()
- 169
- """
-
- self.val = self.val ** 2
- return self
-
- def get(self):
- """get() -> return TestClass's associated value.
-
- >>> x = _TestClass(-42)
- >>> print x.get()
- -42
- """
-
- return self.val
-
-__test__ = {"_TestClass": _TestClass,
- "string": r"""
- Example of a string object, searched as-is.
- >>> x = 1; y = 2
- >>> x + y, x * y
- (3, 2)
- """,
-
- "bool-int equivalence": r"""
- In 2.2, boolean expressions displayed
- 0 or 1. By default, we still accept
- them. This can be disabled by passing
- DONT_ACCEPT_TRUE_FOR_1 to the new
- optionflags argument.
- >>> 4 == 4
- 1
- >>> 4 == 4
- True
- >>> 4 > 4
- 0
- >>> 4 > 4
- False
- """,
-
- "blank lines": r"""
- Blank lines can be marked with <BLANKLINE>:
- >>> print 'foo\n\nbar\n'
- foo
- <BLANKLINE>
- bar
- <BLANKLINE>
- """,
-
- "ellipsis": r"""
- If the ellipsis flag is used, then '...' can be used to
- elide substrings in the desired output:
- >>> print range(1000) #doctest: +ELLIPSIS
- [0, 1, 2, ..., 999]
- """,
-
- "whitespace normalization": r"""
- If the whitespace normalization flag is used, then
- differences in whitespace are ignored.
- >>> print range(30) #doctest: +NORMALIZE_WHITESPACE
- [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
- 27, 28, 29]
- """,
- }
-
-def _test():
- r = unittest.TextTestRunner()
- r.run(DocTestSuite())
-
-if __name__ == "__main__":
- _test()
diff --git a/sys/lib/python/dumbdbm.py b/sys/lib/python/dumbdbm.py
deleted file mode 100644
index 84a766589..000000000
--- a/sys/lib/python/dumbdbm.py
+++ /dev/null
@@ -1,233 +0,0 @@
-"""A dumb and slow but simple dbm clone.
-
-For database spam, spam.dir contains the index (a text file),
-spam.bak *may* contain a backup of the index (also a text file),
-while spam.dat contains the data (a binary file).
-
-XXX TO DO:
-
-- seems to contain a bug when updating...
-
-- reclaim free space (currently, space once occupied by deleted or expanded
-items is never reused)
-
-- support concurrent access (currently, if two processes take turns making
-updates, they can mess up the index)
-
-- support efficient access to large databases (currently, the whole index
-is read when the database is opened, and some updates rewrite the whole index)
-
-- support opening for read-only (flag = 'm')
-
-"""
-
-import os as _os
-import __builtin__
-import UserDict
-
-_open = __builtin__.open
-
-_BLOCKSIZE = 512
-
-error = IOError # For anydbm
-
-class _Database(UserDict.DictMixin):
-
- # The on-disk directory and data files can remain in mutually
- # inconsistent states for an arbitrarily long time (see comments
- # at the end of __setitem__). This is only repaired when _commit()
- # gets called. One place _commit() gets called is from __del__(),
- # and if that occurs at program shutdown time, module globals may
- # already have gotten rebound to None. Since it's crucial that
- # _commit() finish successfully, we can't ignore shutdown races
- # here, and _commit() must not reference any globals.
- _os = _os # for _commit()
- _open = _open # for _commit()
-
- def __init__(self, filebasename, mode):
- self._mode = mode
-
- # The directory file is a text file. Each line looks like
- # "%r, (%d, %d)\n" % (key, pos, siz)
- # where key is the string key, pos is the offset into the dat
- # file of the associated value's first byte, and siz is the number
- # of bytes in the associated value.
- self._dirfile = filebasename + _os.extsep + 'dir'
-
- # The data file is a binary file pointed into by the directory
- # file, and holds the values associated with keys. Each value
- # begins at a _BLOCKSIZE-aligned byte offset, and is a raw
- # binary 8-bit string value.
- self._datfile = filebasename + _os.extsep + 'dat'
- self._bakfile = filebasename + _os.extsep + 'bak'
-
- # The index is an in-memory dict, mirroring the directory file.
- self._index = None # maps keys to (pos, siz) pairs
-
- # Mod by Jack: create data file if needed
- try:
- f = _open(self._datfile, 'r')
- except IOError:
- f = _open(self._datfile, 'w', self._mode)
- f.close()
- self._update()
-
- # Read directory file into the in-memory index dict.
- def _update(self):
- self._index = {}
- try:
- f = _open(self._dirfile)
- except IOError:
- pass
- else:
- for line in f:
- line = line.rstrip()
- key, pos_and_siz_pair = eval(line)
- self._index[key] = pos_and_siz_pair
- f.close()
-
- # Write the index dict to the directory file. The original directory
- # file (if any) is renamed with a .bak extension first. If a .bak
- # file currently exists, it's deleted.
- def _commit(self):
- # CAUTION: It's vital that _commit() succeed, and _commit() can
- # be called from __del__(). Therefore we must never reference a
- # global in this routine.
- if self._index is None:
- return # nothing to do
-
- try:
- self._os.unlink(self._bakfile)
- except self._os.error:
- pass
-
- try:
- self._os.rename(self._dirfile, self._bakfile)
- except self._os.error:
- pass
-
- f = self._open(self._dirfile, 'w', self._mode)
- for key, pos_and_siz_pair in self._index.iteritems():
- f.write("%r, %r\n" % (key, pos_and_siz_pair))
- f.close()
-
- sync = _commit
-
- def __getitem__(self, key):
- pos, siz = self._index[key] # may raise KeyError
- f = _open(self._datfile, 'rb')
- f.seek(pos)
- dat = f.read(siz)
- f.close()
- return dat
-
- # Append val to the data file, starting at a _BLOCKSIZE-aligned
- # offset. The data file is first padded with NUL bytes (if needed)
- # to get to an aligned offset. Return pair
- # (starting offset of val, len(val))
- def _addval(self, val):
- f = _open(self._datfile, 'rb+')
- f.seek(0, 2)
- pos = int(f.tell())
- npos = ((pos + _BLOCKSIZE - 1) // _BLOCKSIZE) * _BLOCKSIZE
- f.write('\0'*(npos-pos))
- pos = npos
- f.write(val)
- f.close()
- return (pos, len(val))
-
- # Write val to the data file, starting at offset pos. The caller
- # is responsible for ensuring that there's enough room starting at
- # pos to hold val, without overwriting some other value. Return
- # pair (pos, len(val)).
- def _setval(self, pos, val):
- f = _open(self._datfile, 'rb+')
- f.seek(pos)
- f.write(val)
- f.close()
- return (pos, len(val))
-
- # key is a new key whose associated value starts in the data file
- # at offset pos and with length siz. Add an index record to
- # the in-memory index dict, and append one to the directory file.
- def _addkey(self, key, pos_and_siz_pair):
- self._index[key] = pos_and_siz_pair
- f = _open(self._dirfile, 'a', self._mode)
- f.write("%r, %r\n" % (key, pos_and_siz_pair))
- f.close()
-
- def __setitem__(self, key, val):
- if not type(key) == type('') == type(val):
- raise TypeError, "keys and values must be strings"
- if key not in self._index:
- self._addkey(key, self._addval(val))
- else:
- # See whether the new value is small enough to fit in the
- # (padded) space currently occupied by the old value.
- pos, siz = self._index[key]
- oldblocks = (siz + _BLOCKSIZE - 1) // _BLOCKSIZE
- newblocks = (len(val) + _BLOCKSIZE - 1) // _BLOCKSIZE
- if newblocks <= oldblocks:
- self._index[key] = self._setval(pos, val)
- else:
- # The new value doesn't fit in the (padded) space used
- # by the old value. The blocks used by the old value are
- # forever lost.
- self._index[key] = self._addval(val)
-
- # Note that _index may be out of synch with the directory
- # file now: _setval() and _addval() don't update the directory
- # file. This also means that the on-disk directory and data
- # files are in a mutually inconsistent state, and they'll
- # remain that way until _commit() is called. Note that this
- # is a disaster (for the database) if the program crashes
- # (so that _commit() never gets called).
-
- def __delitem__(self, key):
- # The blocks used by the associated value are lost.
- del self._index[key]
- # XXX It's unclear why we do a _commit() here (the code always
- # XXX has, so I'm not changing it). _setitem__ doesn't try to
- # XXX keep the directory file in synch. Why should we? Or
- # XXX why shouldn't __setitem__?
- self._commit()
-
- def keys(self):
- return self._index.keys()
-
- def has_key(self, key):
- return key in self._index
-
- def __contains__(self, key):
- return key in self._index
-
- def iterkeys(self):
- return self._index.iterkeys()
- __iter__ = iterkeys
-
- def __len__(self):
- return len(self._index)
-
- def close(self):
- self._commit()
- self._index = self._datfile = self._dirfile = self._bakfile = None
-
- __del__ = close
-
-
-
-def open(file, flag=None, mode=0666):
- """Open the database file, filename, and return corresponding object.
-
- The flag argument, used to control how the database is opened in the
- other DBM implementations, is ignored in the dumbdbm module; the
- database is always opened for update, and will be created if it does
- not exist.
-
- The optional mode argument is the UNIX mode of the file, used only when
- the database has to be created. It defaults to octal code 0666 (and
- will be modified by the prevailing umask).
-
- """
- # flag argument is currently ignored
- return _Database(file, mode)
diff --git a/sys/lib/python/dummy_thread.py b/sys/lib/python/dummy_thread.py
deleted file mode 100644
index a72c92722..000000000
--- a/sys/lib/python/dummy_thread.py
+++ /dev/null
@@ -1,152 +0,0 @@
-"""Drop-in replacement for the thread module.
-
-Meant to be used as a brain-dead substitute so that threaded code does
-not need to be rewritten for when the thread module is not present.
-
-Suggested usage is::
-
- try:
- import thread
- except ImportError:
- import dummy_thread as thread
-
-"""
-__author__ = "Brett Cannon"
-__email__ = "brett@python.org"
-
-# Exports only things specified by thread documentation
-# (skipping obsolete synonyms allocate(), start_new(), exit_thread())
-__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
- 'interrupt_main', 'LockType']
-
-import traceback as _traceback
-import warnings
-
-class error(Exception):
- """Dummy implementation of thread.error."""
-
- def __init__(self, *args):
- self.args = args
-
-def start_new_thread(function, args, kwargs={}):
- """Dummy implementation of thread.start_new_thread().
-
- Compatibility is maintained by making sure that ``args`` is a
- tuple and ``kwargs`` is a dictionary. If an exception is raised
- and it is SystemExit (which can be done by thread.exit()) it is
- caught and nothing is done; all other exceptions are printed out
- by using traceback.print_exc().
-
- If the executed function calls interrupt_main the KeyboardInterrupt will be
- raised when the function returns.
-
- """
- if type(args) != type(tuple()):
- raise TypeError("2nd arg must be a tuple")
- if type(kwargs) != type(dict()):
- raise TypeError("3rd arg must be a dict")
- global _main
- _main = False
- try:
- function(*args, **kwargs)
- except SystemExit:
- pass
- except:
- _traceback.print_exc()
- _main = True
- global _interrupt
- if _interrupt:
- _interrupt = False
- raise KeyboardInterrupt
-
-def exit():
- """Dummy implementation of thread.exit()."""
- raise SystemExit
-
-def get_ident():
- """Dummy implementation of thread.get_ident().
-
- Since this module should only be used when threadmodule is not
- available, it is safe to assume that the current process is the
- only thread. Thus a constant can be safely returned.
- """
- return -1
-
-def allocate_lock():
- """Dummy implementation of thread.allocate_lock()."""
- return LockType()
-
-def stack_size(size=None):
- """Dummy implementation of thread.stack_size()."""
- if size is not None:
- raise error("setting thread stack size not supported")
- return 0
-
-class LockType(object):
- """Class implementing dummy implementation of thread.LockType.
-
- Compatibility is maintained by maintaining self.locked_status
- which is a boolean that stores the state of the lock. Pickling of
- the lock, though, should not be done since if the thread module is
- then used with an unpickled ``lock()`` from here problems could
- occur from this class not having atomic methods.
-
- """
-
- def __init__(self):
- self.locked_status = False
-
- def acquire(self, waitflag=None):
- """Dummy implementation of acquire().
-
- For blocking calls, self.locked_status is automatically set to
- True and returned appropriately based on value of
- ``waitflag``. If it is non-blocking, then the value is
- actually checked and not set if it is already acquired. This
- is all done so that threading.Condition's assert statements
- aren't triggered and throw a little fit.
-
- """
- if waitflag is None:
- self.locked_status = True
- return None
- elif not waitflag:
- if not self.locked_status:
- self.locked_status = True
- return True
- else:
- return False
- else:
- self.locked_status = True
- return True
-
- __enter__ = acquire
-
- def __exit__(self, typ, val, tb):
- self.release()
-
- def release(self):
- """Release the dummy lock."""
- # XXX Perhaps shouldn't actually bother to test? Could lead
- # to problems for complex, threaded code.
- if not self.locked_status:
- raise error
- self.locked_status = False
- return True
-
- def locked(self):
- return self.locked_status
-
-# Used to signal that interrupt_main was called in a "thread"
-_interrupt = False
-# True when not executing in a "thread"
-_main = True
-
-def interrupt_main():
- """Set _interrupt flag to True to have start_new_thread raise
- KeyboardInterrupt upon exiting."""
- if _main:
- raise KeyboardInterrupt
- else:
- global _interrupt
- _interrupt = True
diff --git a/sys/lib/python/dummy_threading.py b/sys/lib/python/dummy_threading.py
deleted file mode 100644
index 48f7c4c74..000000000
--- a/sys/lib/python/dummy_threading.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
-
-The module ``_dummy_threading`` is added to ``sys.modules`` in order
-to not have ``threading`` considered imported. Had ``threading`` been
-directly imported it would have made all subsequent imports succeed
-regardless of whether ``thread`` was available which is not desired.
-
-:Author: Brett Cannon
-:Contact: brett@python.org
-
-XXX: Try to get rid of ``_dummy_threading``.
-
-"""
-from sys import modules as sys_modules
-
-import dummy_thread
-
-# Declaring now so as to not have to nest ``try``s to get proper clean-up.
-holding_thread = False
-holding_threading = False
-holding__threading_local = False
-
-try:
- # Could have checked if ``thread`` was not in sys.modules and gone
- # a different route, but decided to mirror technique used with
- # ``threading`` below.
- if 'thread' in sys_modules:
- held_thread = sys_modules['thread']
- holding_thread = True
- # Must have some module named ``thread`` that implements its API
- # in order to initially import ``threading``.
- sys_modules['thread'] = sys_modules['dummy_thread']
-
- if 'threading' in sys_modules:
- # If ``threading`` is already imported, might as well prevent
- # trying to import it more than needed by saving it if it is
- # already imported before deleting it.
- held_threading = sys_modules['threading']
- holding_threading = True
- del sys_modules['threading']
-
- if '_threading_local' in sys_modules:
- # If ``_threading_local`` is already imported, might as well prevent
- # trying to import it more than needed by saving it if it is
- # already imported before deleting it.
- held__threading_local = sys_modules['_threading_local']
- holding__threading_local = True
- del sys_modules['_threading_local']
-
- import threading
- # Need a copy of the code kept somewhere...
- sys_modules['_dummy_threading'] = sys_modules['threading']
- del sys_modules['threading']
- sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
- del sys_modules['_threading_local']
- from _dummy_threading import *
- from _dummy_threading import __all__
-
-finally:
- # Put back ``threading`` if we overwrote earlier
-
- if holding_threading:
- sys_modules['threading'] = held_threading
- del held_threading
- del holding_threading
-
- # Put back ``_threading_local`` if we overwrote earlier
-
- if holding__threading_local:
- sys_modules['_threading_local'] = held__threading_local
- del held__threading_local
- del holding__threading_local
-
- # Put back ``thread`` if we overwrote, else del the entry we made
- if holding_thread:
- sys_modules['thread'] = held_thread
- del held_thread
- else:
- del sys_modules['thread']
- del holding_thread
-
- del dummy_thread
- del sys_modules
diff --git a/sys/lib/python/email/__init__.py b/sys/lib/python/email/__init__.py
deleted file mode 100644
index 8d230fdeb..000000000
--- a/sys/lib/python/email/__init__.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""A package for parsing, handling, and generating email messages."""
-
-__version__ = '4.0.1'
-
-__all__ = [
- # Old names
- 'base64MIME',
- 'Charset',
- 'Encoders',
- 'Errors',
- 'Generator',
- 'Header',
- 'Iterators',
- 'Message',
- 'MIMEAudio',
- 'MIMEBase',
- 'MIMEImage',
- 'MIMEMessage',
- 'MIMEMultipart',
- 'MIMENonMultipart',
- 'MIMEText',
- 'Parser',
- 'quopriMIME',
- 'Utils',
- 'message_from_string',
- 'message_from_file',
- # new names
- 'base64mime',
- 'charset',
- 'encoders',
- 'errors',
- 'generator',
- 'header',
- 'iterators',
- 'message',
- 'mime',
- 'parser',
- 'quoprimime',
- 'utils',
- ]
-
-
-
-# Some convenience routines. Don't import Parser and Message as side-effects
-# of importing email since those cascadingly import most of the rest of the
-# email package.
-def message_from_string(s, *args, **kws):
- """Parse a string into a Message object model.
-
- Optional _class and strict are passed to the Parser constructor.
- """
- from email.parser import Parser
- return Parser(*args, **kws).parsestr(s)
-
-
-def message_from_file(fp, *args, **kws):
- """Read a file and parse its contents into a Message object model.
-
- Optional _class and strict are passed to the Parser constructor.
- """
- from email.parser import Parser
- return Parser(*args, **kws).parse(fp)
-
-
-
-# Lazy loading to provide name mapping from new-style names (PEP 8 compatible
-# email 4.0 module names), to old-style names (email 3.0 module names).
-import sys
-
-class LazyImporter(object):
- def __init__(self, module_name):
- self.__name__ = 'email.' + module_name
-
- def __getattr__(self, name):
- __import__(self.__name__)
- mod = sys.modules[self.__name__]
- self.__dict__.update(mod.__dict__)
- return getattr(mod, name)
-
-
-_LOWERNAMES = [
- # email.<old name> -> email.<new name is lowercased old name>
- 'Charset',
- 'Encoders',
- 'Errors',
- 'FeedParser',
- 'Generator',
- 'Header',
- 'Iterators',
- 'Message',
- 'Parser',
- 'Utils',
- 'base64MIME',
- 'quopriMIME',
- ]
-
-_MIMENAMES = [
- # email.MIME<old name> -> email.mime.<new name is lowercased old name>
- 'Audio',
- 'Base',
- 'Image',
- 'Message',
- 'Multipart',
- 'NonMultipart',
- 'Text',
- ]
-
-for _name in _LOWERNAMES:
- importer = LazyImporter(_name.lower())
- sys.modules['email.' + _name] = importer
- setattr(sys.modules['email'], _name, importer)
-
-
-import email.mime
-for _name in _MIMENAMES:
- importer = LazyImporter('mime.' + _name.lower())
- sys.modules['email.MIME' + _name] = importer
- setattr(sys.modules['email'], 'MIME' + _name, importer)
- setattr(sys.modules['email.mime'], _name, importer)
diff --git a/sys/lib/python/email/_parseaddr.py b/sys/lib/python/email/_parseaddr.py
deleted file mode 100644
index 791d8928e..000000000
--- a/sys/lib/python/email/_parseaddr.py
+++ /dev/null
@@ -1,480 +0,0 @@
-# Copyright (C) 2002-2007 Python Software Foundation
-# Contact: email-sig@python.org
-
-"""Email address parsing code.
-
-Lifted directly from rfc822.py. This should eventually be rewritten.
-"""
-
-__all__ = [
- 'mktime_tz',
- 'parsedate',
- 'parsedate_tz',
- 'quote',
- ]
-
-import time
-
-SPACE = ' '
-EMPTYSTRING = ''
-COMMASPACE = ', '
-
-# Parse a date field
-_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
- 'aug', 'sep', 'oct', 'nov', 'dec',
- 'january', 'february', 'march', 'april', 'may', 'june', 'july',
- 'august', 'september', 'october', 'november', 'december']
-
-_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
-
-# The timezone table does not include the military time zones defined
-# in RFC822, other than Z. According to RFC1123, the description in
-# RFC822 gets the signs wrong, so we can't rely on any such time
-# zones. RFC1123 recommends that numeric timezone indicators be used
-# instead of timezone names.
-
-_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
- 'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
- 'EST': -500, 'EDT': -400, # Eastern
- 'CST': -600, 'CDT': -500, # Central
- 'MST': -700, 'MDT': -600, # Mountain
- 'PST': -800, 'PDT': -700 # Pacific
- }
-
-
-def parsedate_tz(data):
- """Convert a date string to a time tuple.
-
- Accounts for military timezones.
- """
- data = data.split()
- # The FWS after the comma after the day-of-week is optional, so search and
- # adjust for this.
- if data[0].endswith(',') or data[0].lower() in _daynames:
- # There's a dayname here. Skip it
- del data[0]
- else:
- i = data[0].rfind(',')
- if i >= 0:
- data[0] = data[0][i+1:]
- if len(data) == 3: # RFC 850 date, deprecated
- stuff = data[0].split('-')
- if len(stuff) == 3:
- data = stuff + data[1:]
- if len(data) == 4:
- s = data[3]
- i = s.find('+')
- if i > 0:
- data[3:] = [s[:i], s[i+1:]]
- else:
- data.append('') # Dummy tz
- if len(data) < 5:
- return None
- data = data[:5]
- [dd, mm, yy, tm, tz] = data
- mm = mm.lower()
- if mm not in _monthnames:
- dd, mm = mm, dd.lower()
- if mm not in _monthnames:
- return None
- mm = _monthnames.index(mm) + 1
- if mm > 12:
- mm -= 12
- if dd[-1] == ',':
- dd = dd[:-1]
- i = yy.find(':')
- if i > 0:
- yy, tm = tm, yy
- if yy[-1] == ',':
- yy = yy[:-1]
- if not yy[0].isdigit():
- yy, tz = tz, yy
- if tm[-1] == ',':
- tm = tm[:-1]
- tm = tm.split(':')
- if len(tm) == 2:
- [thh, tmm] = tm
- tss = '0'
- elif len(tm) == 3:
- [thh, tmm, tss] = tm
- else:
- return None
- try:
- yy = int(yy)
- dd = int(dd)
- thh = int(thh)
- tmm = int(tmm)
- tss = int(tss)
- except ValueError:
- return None
- tzoffset = None
- tz = tz.upper()
- if _timezones.has_key(tz):
- tzoffset = _timezones[tz]
- else:
- try:
- tzoffset = int(tz)
- except ValueError:
- pass
- # Convert a timezone offset into seconds ; -0500 -> -18000
- if tzoffset:
- if tzoffset < 0:
- tzsign = -1
- tzoffset = -tzoffset
- else:
- tzsign = 1
- tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
- # Daylight Saving Time flag is set to -1, since DST is unknown.
- return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
-
-
-def parsedate(data):
- """Convert a time string to a time tuple."""
- t = parsedate_tz(data)
- if isinstance(t, tuple):
- return t[:9]
- else:
- return t
-
-
-def mktime_tz(data):
- """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
- if data[9] is None:
- # No zone info, so localtime is better assumption than GMT
- return time.mktime(data[:8] + (-1,))
- else:
- t = time.mktime(data[:8] + (0,))
- return t - data[9] - time.timezone
-
-
-def quote(str):
- """Add quotes around a string."""
- return str.replace('\\', '\\\\').replace('"', '\\"')
-
-
-class AddrlistClass:
- """Address parser class by Ben Escoto.
-
- To understand what this class does, it helps to have a copy of RFC 2822 in
- front of you.
-
- Note: this class interface is deprecated and may be removed in the future.
- Use rfc822.AddressList instead.
- """
-
- def __init__(self, field):
- """Initialize a new instance.
-
- `field' is an unparsed address header field, containing
- one or more addresses.
- """
- self.specials = '()<>@,:;.\"[]'
- self.pos = 0
- self.LWS = ' \t'
- self.CR = '\r\n'
- self.FWS = self.LWS + self.CR
- self.atomends = self.specials + self.LWS + self.CR
- # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
- # is obsolete syntax. RFC 2822 requires that we recognize obsolete
- # syntax, so allow dots in phrases.
- self.phraseends = self.atomends.replace('.', '')
- self.field = field
- self.commentlist = []
-
- def gotonext(self):
- """Parse up to the start of the next address."""
- while self.pos < len(self.field):
- if self.field[self.pos] in self.LWS + '\n\r':
- self.pos += 1
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- else:
- break
-
- def getaddrlist(self):
- """Parse all addresses.
-
- Returns a list containing all of the addresses.
- """
- result = []
- while self.pos < len(self.field):
- ad = self.getaddress()
- if ad:
- result += ad
- else:
- result.append(('', ''))
- return result
-
- def getaddress(self):
- """Parse the next address."""
- self.commentlist = []
- self.gotonext()
-
- oldpos = self.pos
- oldcl = self.commentlist
- plist = self.getphraselist()
-
- self.gotonext()
- returnlist = []
-
- if self.pos >= len(self.field):
- # Bad email address technically, no domain.
- if plist:
- returnlist = [(SPACE.join(self.commentlist), plist[0])]
-
- elif self.field[self.pos] in '.@':
- # email address is just an addrspec
- # this isn't very efficient since we start over
- self.pos = oldpos
- self.commentlist = oldcl
- addrspec = self.getaddrspec()
- returnlist = [(SPACE.join(self.commentlist), addrspec)]
-
- elif self.field[self.pos] == ':':
- # address is a group
- returnlist = []
-
- fieldlen = len(self.field)
- self.pos += 1
- while self.pos < len(self.field):
- self.gotonext()
- if self.pos < fieldlen and self.field[self.pos] == ';':
- self.pos += 1
- break
- returnlist = returnlist + self.getaddress()
-
- elif self.field[self.pos] == '<':
- # Address is a phrase then a route addr
- routeaddr = self.getrouteaddr()
-
- if self.commentlist:
- returnlist = [(SPACE.join(plist) + ' (' +
- ' '.join(self.commentlist) + ')', routeaddr)]
- else:
- returnlist = [(SPACE.join(plist), routeaddr)]
-
- else:
- if plist:
- returnlist = [(SPACE.join(self.commentlist), plist[0])]
- elif self.field[self.pos] in self.specials:
- self.pos += 1
-
- self.gotonext()
- if self.pos < len(self.field) and self.field[self.pos] == ',':
- self.pos += 1
- return returnlist
-
- def getrouteaddr(self):
- """Parse a route address (Return-path value).
-
- This method just skips all the route stuff and returns the addrspec.
- """
- if self.field[self.pos] != '<':
- return
-
- expectroute = False
- self.pos += 1
- self.gotonext()
- adlist = ''
- while self.pos < len(self.field):
- if expectroute:
- self.getdomain()
- expectroute = False
- elif self.field[self.pos] == '>':
- self.pos += 1
- break
- elif self.field[self.pos] == '@':
- self.pos += 1
- expectroute = True
- elif self.field[self.pos] == ':':
- self.pos += 1
- else:
- adlist = self.getaddrspec()
- self.pos += 1
- break
- self.gotonext()
-
- return adlist
-
- def getaddrspec(self):
- """Parse an RFC 2822 addr-spec."""
- aslist = []
-
- self.gotonext()
- while self.pos < len(self.field):
- if self.field[self.pos] == '.':
- aslist.append('.')
- self.pos += 1
- elif self.field[self.pos] == '"':
- aslist.append('"%s"' % self.getquote())
- elif self.field[self.pos] in self.atomends:
- break
- else:
- aslist.append(self.getatom())
- self.gotonext()
-
- if self.pos >= len(self.field) or self.field[self.pos] != '@':
- return EMPTYSTRING.join(aslist)
-
- aslist.append('@')
- self.pos += 1
- self.gotonext()
- return EMPTYSTRING.join(aslist) + self.getdomain()
-
- def getdomain(self):
- """Get the complete domain name from an address."""
- sdlist = []
- while self.pos < len(self.field):
- if self.field[self.pos] in self.LWS:
- self.pos += 1
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- elif self.field[self.pos] == '[':
- sdlist.append(self.getdomainliteral())
- elif self.field[self.pos] == '.':
- self.pos += 1
- sdlist.append('.')
- elif self.field[self.pos] in self.atomends:
- break
- else:
- sdlist.append(self.getatom())
- return EMPTYSTRING.join(sdlist)
-
- def getdelimited(self, beginchar, endchars, allowcomments=True):
- """Parse a header fragment delimited by special characters.
-
- `beginchar' is the start character for the fragment.
- If self is not looking at an instance of `beginchar' then
- getdelimited returns the empty string.
-
- `endchars' is a sequence of allowable end-delimiting characters.
- Parsing stops when one of these is encountered.
-
- If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
- within the parsed fragment.
- """
- if self.field[self.pos] != beginchar:
- return ''
-
- slist = ['']
- quote = False
- self.pos += 1
- while self.pos < len(self.field):
- if quote:
- slist.append(self.field[self.pos])
- quote = False
- elif self.field[self.pos] in endchars:
- self.pos += 1
- break
- elif allowcomments and self.field[self.pos] == '(':
- slist.append(self.getcomment())
- continue # have already advanced pos from getcomment
- elif self.field[self.pos] == '\\':
- quote = True
- else:
- slist.append(self.field[self.pos])
- self.pos += 1
-
- return EMPTYSTRING.join(slist)
-
- def getquote(self):
- """Get a quote-delimited fragment from self's field."""
- return self.getdelimited('"', '"\r', False)
-
- def getcomment(self):
- """Get a parenthesis-delimited fragment from self's field."""
- return self.getdelimited('(', ')\r', True)
-
- def getdomainliteral(self):
- """Parse an RFC 2822 domain-literal."""
- return '[%s]' % self.getdelimited('[', ']\r', False)
-
- def getatom(self, atomends=None):
- """Parse an RFC 2822 atom.
-
- Optional atomends specifies a different set of end token delimiters
- (the default is to use self.atomends). This is used e.g. in
- getphraselist() since phrase endings must not include the `.' (which
- is legal in phrases)."""
- atomlist = ['']
- if atomends is None:
- atomends = self.atomends
-
- while self.pos < len(self.field):
- if self.field[self.pos] in atomends:
- break
- else:
- atomlist.append(self.field[self.pos])
- self.pos += 1
-
- return EMPTYSTRING.join(atomlist)
-
- def getphraselist(self):
- """Parse a sequence of RFC 2822 phrases.
-
- A phrase is a sequence of words, which are in turn either RFC 2822
- atoms or quoted-strings. Phrases are canonicalized by squeezing all
- runs of continuous whitespace into one space.
- """
- plist = []
-
- while self.pos < len(self.field):
- if self.field[self.pos] in self.FWS:
- self.pos += 1
- elif self.field[self.pos] == '"':
- plist.append(self.getquote())
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- elif self.field[self.pos] in self.phraseends:
- break
- else:
- plist.append(self.getatom(self.phraseends))
-
- return plist
-
-class AddressList(AddrlistClass):
- """An AddressList encapsulates a list of parsed RFC 2822 addresses."""
- def __init__(self, field):
- AddrlistClass.__init__(self, field)
- if field:
- self.addresslist = self.getaddrlist()
- else:
- self.addresslist = []
-
- def __len__(self):
- return len(self.addresslist)
-
- def __add__(self, other):
- # Set union
- newaddr = AddressList(None)
- newaddr.addresslist = self.addresslist[:]
- for x in other.addresslist:
- if not x in self.addresslist:
- newaddr.addresslist.append(x)
- return newaddr
-
- def __iadd__(self, other):
- # Set union, in-place
- for x in other.addresslist:
- if not x in self.addresslist:
- self.addresslist.append(x)
- return self
-
- def __sub__(self, other):
- # Set difference
- newaddr = AddressList(None)
- for x in self.addresslist:
- if not x in other.addresslist:
- newaddr.addresslist.append(x)
- return newaddr
-
- def __isub__(self, other):
- # Set difference, in-place
- for x in other.addresslist:
- if x in self.addresslist:
- self.addresslist.remove(x)
- return self
-
- def __getitem__(self, index):
- # Make indexing, slices, and 'in' work
- return self.addresslist[index]
diff --git a/sys/lib/python/email/base64mime.py b/sys/lib/python/email/base64mime.py
deleted file mode 100644
index 0129d9d4e..000000000
--- a/sys/lib/python/email/base64mime.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Ben Gertzfield
-# Contact: email-sig@python.org
-
-"""Base64 content transfer encoding per RFCs 2045-2047.
-
-This module handles the content transfer encoding method defined in RFC 2045
-to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
-characters encoding known as Base64.
-
-It is used in the MIME standards for email to attach images, audio, and text
-using some 8-bit character sets to messages.
-
-This module provides an interface to encode and decode both headers and bodies
-with Base64 encoding.
-
-RFC 2045 defines a method for including character set information in an
-`encoded-word' in a header. This method is commonly used for 8-bit real names
-in To:, From:, Cc:, etc. fields, as well as Subject: lines.
-
-This module does not do the line wrapping or end-of-line character conversion
-necessary for proper internationalized headers; it only does dumb encoding and
-decoding. To deal with the various line wrapping issues, use the email.Header
-module.
-"""
-
-__all__ = [
- 'base64_len',
- 'body_decode',
- 'body_encode',
- 'decode',
- 'decodestring',
- 'encode',
- 'encodestring',
- 'header_encode',
- ]
-
-import re
-
-from binascii import b2a_base64, a2b_base64
-from email.utils import fix_eols
-
-CRLF = '\r\n'
-NL = '\n'
-EMPTYSTRING = ''
-
-# See also Charset.py
-MISC_LEN = 7
-
-
-
-# Helpers
-def base64_len(s):
- """Return the length of s when it is encoded with base64."""
- groups_of_3, leftover = divmod(len(s), 3)
- # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
- # Thanks, Tim!
- n = groups_of_3 * 4
- if leftover:
- n += 4
- return n
-
-
-
-def header_encode(header, charset='iso-8859-1', keep_eols=False,
- maxlinelen=76, eol=NL):
- """Encode a single header line with Base64 encoding in a given charset.
-
- Defined in RFC 2045, this Base64 encoding is identical to normal Base64
- encoding, except that each line must be intelligently wrapped (respecting
- the Base64 encoding), and subsequent lines must start with a space.
-
- charset names the character set to use to encode the header. It defaults
- to iso-8859-1.
-
- End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
- to the canonical email line separator \\r\\n unless the keep_eols
- parameter is True (the default is False).
-
- Each line of the header will be terminated in the value of eol, which
- defaults to "\\n". Set this to "\\r\\n" if you are using the result of
- this function directly in email.
-
- The resulting string will be in the form:
-
- "=?charset?b?WW/5ciBtYXp66XLrIHf8eiBhIGhhbXBzdGHuciBBIFlv+XIgbWF6euly?=\\n
- =?charset?b?6yB3/HogYSBoYW1wc3Rh7nIgQkMgWW/5ciBtYXp66XLrIHf8eiBhIGhh?="
-
- with each line wrapped at, at most, maxlinelen characters (defaults to 76
- characters).
- """
- # Return empty headers unchanged
- if not header:
- return header
-
- if not keep_eols:
- header = fix_eols(header)
-
- # Base64 encode each line, in encoded chunks no greater than maxlinelen in
- # length, after the RFC chrome is added in.
- base64ed = []
- max_encoded = maxlinelen - len(charset) - MISC_LEN
- max_unencoded = max_encoded * 3 // 4
-
- for i in range(0, len(header), max_unencoded):
- base64ed.append(b2a_base64(header[i:i+max_unencoded]))
-
- # Now add the RFC chrome to each encoded chunk
- lines = []
- for line in base64ed:
- # Ignore the last character of each line if it is a newline
- if line.endswith(NL):
- line = line[:-1]
- # Add the chrome
- lines.append('=?%s?b?%s?=' % (charset, line))
- # Glue the lines together and return it. BAW: should we be able to
- # specify the leading whitespace in the joiner?
- joiner = eol + ' '
- return joiner.join(lines)
-
-
-
-def encode(s, binary=True, maxlinelen=76, eol=NL):
- """Encode a string with base64.
-
- Each line will be wrapped at, at most, maxlinelen characters (defaults to
- 76 characters).
-
- If binary is False, end-of-line characters will be converted to the
- canonical email end-of-line sequence \\r\\n. Otherwise they will be left
- verbatim (this is the default).
-
- Each line of encoded text will end with eol, which defaults to "\\n". Set
- this to "\r\n" if you will be using the result of this function directly
- in an email.
- """
- if not s:
- return s
-
- if not binary:
- s = fix_eols(s)
-
- encvec = []
- max_unencoded = maxlinelen * 3 // 4
- for i in range(0, len(s), max_unencoded):
- # BAW: should encode() inherit b2a_base64()'s dubious behavior in
- # adding a newline to the encoded string?
- enc = b2a_base64(s[i:i + max_unencoded])
- if enc.endswith(NL) and eol <> NL:
- enc = enc[:-1] + eol
- encvec.append(enc)
- return EMPTYSTRING.join(encvec)
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_encode = encode
-encodestring = encode
-
-
-
-def decode(s, convert_eols=None):
- """Decode a raw base64 string.
-
- If convert_eols is set to a string value, all canonical email linefeeds,
- e.g. "\\r\\n", in the decoded text will be converted to the value of
- convert_eols. os.linesep is a good choice for convert_eols if you are
- decoding a text attachment.
-
- This function does not parse a full MIME header value encoded with
- base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
- level email.Header class for that functionality.
- """
- if not s:
- return s
-
- dec = a2b_base64(s)
- if convert_eols:
- return dec.replace(CRLF, convert_eols)
- return dec
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_decode = decode
-decodestring = decode
diff --git a/sys/lib/python/email/charset.py b/sys/lib/python/email/charset.py
deleted file mode 100644
index 8f218b209..000000000
--- a/sys/lib/python/email/charset.py
+++ /dev/null
@@ -1,388 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Ben Gertzfield, Barry Warsaw
-# Contact: email-sig@python.org
-
-__all__ = [
- 'Charset',
- 'add_alias',
- 'add_charset',
- 'add_codec',
- ]
-
-import email.base64mime
-import email.quoprimime
-
-from email import errors
-from email.encoders import encode_7or8bit
-
-
-
-# Flags for types of header encodings
-QP = 1 # Quoted-Printable
-BASE64 = 2 # Base64
-SHORTEST = 3 # the shorter of QP and base64, but only for headers
-
-# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
-MISC_LEN = 7
-
-DEFAULT_CHARSET = 'us-ascii'
-
-
-
-# Defaults
-CHARSETS = {
- # input header enc body enc output conv
- 'iso-8859-1': (QP, QP, None),
- 'iso-8859-2': (QP, QP, None),
- 'iso-8859-3': (QP, QP, None),
- 'iso-8859-4': (QP, QP, None),
- # iso-8859-5 is Cyrillic, and not especially used
- # iso-8859-6 is Arabic, also not particularly used
- # iso-8859-7 is Greek, QP will not make it readable
- # iso-8859-8 is Hebrew, QP will not make it readable
- 'iso-8859-9': (QP, QP, None),
- 'iso-8859-10': (QP, QP, None),
- # iso-8859-11 is Thai, QP will not make it readable
- 'iso-8859-13': (QP, QP, None),
- 'iso-8859-14': (QP, QP, None),
- 'iso-8859-15': (QP, QP, None),
- 'windows-1252':(QP, QP, None),
- 'viscii': (QP, QP, None),
- 'us-ascii': (None, None, None),
- 'big5': (BASE64, BASE64, None),
- 'gb2312': (BASE64, BASE64, None),
- 'euc-jp': (BASE64, None, 'iso-2022-jp'),
- 'shift_jis': (BASE64, None, 'iso-2022-jp'),
- 'iso-2022-jp': (BASE64, None, None),
- 'koi8-r': (BASE64, BASE64, None),
- 'utf-8': (SHORTEST, BASE64, 'utf-8'),
- # We're making this one up to represent raw unencoded 8-bit
- '8bit': (None, BASE64, 'utf-8'),
- }
-
-# Aliases for other commonly-used names for character sets. Map
-# them to the real ones used in email.
-ALIASES = {
- 'latin_1': 'iso-8859-1',
- 'latin-1': 'iso-8859-1',
- 'latin_2': 'iso-8859-2',
- 'latin-2': 'iso-8859-2',
- 'latin_3': 'iso-8859-3',
- 'latin-3': 'iso-8859-3',
- 'latin_4': 'iso-8859-4',
- 'latin-4': 'iso-8859-4',
- 'latin_5': 'iso-8859-9',
- 'latin-5': 'iso-8859-9',
- 'latin_6': 'iso-8859-10',
- 'latin-6': 'iso-8859-10',
- 'latin_7': 'iso-8859-13',
- 'latin-7': 'iso-8859-13',
- 'latin_8': 'iso-8859-14',
- 'latin-8': 'iso-8859-14',
- 'latin_9': 'iso-8859-15',
- 'latin-9': 'iso-8859-15',
- 'cp949': 'ks_c_5601-1987',
- 'euc_jp': 'euc-jp',
- 'euc_kr': 'euc-kr',
- 'ascii': 'us-ascii',
- }
-
-
-# Map charsets to their Unicode codec strings.
-CODEC_MAP = {
- 'gb2312': 'eucgb2312_cn',
- 'big5': 'big5_tw',
- # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
- # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
- # Let that stuff pass through without conversion to/from Unicode.
- 'us-ascii': None,
- }
-
-
-
-# Convenience functions for extending the above mappings
-def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
- """Add character set properties to the global registry.
-
- charset is the input character set, and must be the canonical name of a
- character set.
-
- Optional header_enc and body_enc is either Charset.QP for
- quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
- the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
- is only valid for header_enc. It describes how message headers and
- message bodies in the input charset are to be encoded. Default is no
- encoding.
-
- Optional output_charset is the character set that the output should be
- in. Conversions will proceed from input charset, to Unicode, to the
- output charset when the method Charset.convert() is called. The default
- is to output in the same character set as the input.
-
- Both input_charset and output_charset must have Unicode codec entries in
- the module's charset-to-codec mapping; use add_codec(charset, codecname)
- to add codecs the module does not know about. See the codecs module's
- documentation for more information.
- """
- if body_enc == SHORTEST:
- raise ValueError('SHORTEST not allowed for body_enc')
- CHARSETS[charset] = (header_enc, body_enc, output_charset)
-
-
-def add_alias(alias, canonical):
- """Add a character set alias.
-
- alias is the alias name, e.g. latin-1
- canonical is the character set's canonical name, e.g. iso-8859-1
- """
- ALIASES[alias] = canonical
-
-
-def add_codec(charset, codecname):
- """Add a codec that map characters in the given charset to/from Unicode.
-
- charset is the canonical name of a character set. codecname is the name
- of a Python codec, as appropriate for the second argument to the unicode()
- built-in, or to the encode() method of a Unicode string.
- """
- CODEC_MAP[charset] = codecname
-
-
-
-class Charset:
- """Map character sets to their email properties.
-
- This class provides information about the requirements imposed on email
- for a specific character set. It also provides convenience routines for
- converting between character sets, given the availability of the
- applicable codecs. Given a character set, it will do its best to provide
- information on how to use that character set in an email in an
- RFC-compliant way.
-
- Certain character sets must be encoded with quoted-printable or base64
- when used in email headers or bodies. Certain character sets must be
- converted outright, and are not allowed in email. Instances of this
- module expose the following information about a character set:
-
- input_charset: The initial character set specified. Common aliases
- are converted to their `official' email names (e.g. latin_1
- is converted to iso-8859-1). Defaults to 7-bit us-ascii.
-
- header_encoding: If the character set must be encoded before it can be
- used in an email header, this attribute will be set to
- Charset.QP (for quoted-printable), Charset.BASE64 (for
- base64 encoding), or Charset.SHORTEST for the shortest of
- QP or BASE64 encoding. Otherwise, it will be None.
-
- body_encoding: Same as header_encoding, but describes the encoding for the
- mail message's body, which indeed may be different than the
- header encoding. Charset.SHORTEST is not allowed for
- body_encoding.
-
- output_charset: Some character sets must be converted before the can be
- used in email headers or bodies. If the input_charset is
- one of them, this attribute will contain the name of the
- charset output will be converted to. Otherwise, it will
- be None.
-
- input_codec: The name of the Python codec used to convert the
- input_charset to Unicode. If no conversion codec is
- necessary, this attribute will be None.
-
- output_codec: The name of the Python codec used to convert Unicode
- to the output_charset. If no conversion codec is necessary,
- this attribute will have the same value as the input_codec.
- """
- def __init__(self, input_charset=DEFAULT_CHARSET):
- # RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
- # unicode because its .lower() is locale insensitive. If the argument
- # is already a unicode, we leave it at that, but ensure that the
- # charset is ASCII, as the standard (RFC XXX) requires.
- try:
- if isinstance(input_charset, unicode):
- input_charset.encode('ascii')
- else:
- input_charset = unicode(input_charset, 'ascii')
- except UnicodeError:
- raise errors.CharsetError(input_charset)
- input_charset = input_charset.lower()
- # Set the input charset after filtering through the aliases
- self.input_charset = ALIASES.get(input_charset, input_charset)
- # We can try to guess which encoding and conversion to use by the
- # charset_map dictionary. Try that first, but let the user override
- # it.
- henc, benc, conv = CHARSETS.get(self.input_charset,
- (SHORTEST, BASE64, None))
- if not conv:
- conv = self.input_charset
- # Set the attributes, allowing the arguments to override the default.
- self.header_encoding = henc
- self.body_encoding = benc
- self.output_charset = ALIASES.get(conv, conv)
- # Now set the codecs. If one isn't defined for input_charset,
- # guess and try a Unicode codec with the same name as input_codec.
- self.input_codec = CODEC_MAP.get(self.input_charset,
- self.input_charset)
- self.output_codec = CODEC_MAP.get(self.output_charset,
- self.output_charset)
-
- def __str__(self):
- return self.input_charset.lower()
-
- __repr__ = __str__
-
- def __eq__(self, other):
- return str(self) == str(other).lower()
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def get_body_encoding(self):
- """Return the content-transfer-encoding used for body encoding.
-
- This is either the string `quoted-printable' or `base64' depending on
- the encoding used, or it is a function in which case you should call
- the function with a single argument, the Message object being
- encoded. The function should then set the Content-Transfer-Encoding
- header itself to whatever is appropriate.
-
- Returns "quoted-printable" if self.body_encoding is QP.
- Returns "base64" if self.body_encoding is BASE64.
- Returns "7bit" otherwise.
- """
- assert self.body_encoding <> SHORTEST
- if self.body_encoding == QP:
- return 'quoted-printable'
- elif self.body_encoding == BASE64:
- return 'base64'
- else:
- return encode_7or8bit
-
- def convert(self, s):
- """Convert a string from the input_codec to the output_codec."""
- if self.input_codec <> self.output_codec:
- return unicode(s, self.input_codec).encode(self.output_codec)
- else:
- return s
-
- def to_splittable(self, s):
- """Convert a possibly multibyte string to a safely splittable format.
-
- Uses the input_codec to try and convert the string to Unicode, so it
- can be safely split on character boundaries (even for multibyte
- characters).
-
- Returns the string as-is if it isn't known how to convert it to
- Unicode with the input_charset.
-
- Characters that could not be converted to Unicode will be replaced
- with the Unicode replacement character U+FFFD.
- """
- if isinstance(s, unicode) or self.input_codec is None:
- return s
- try:
- return unicode(s, self.input_codec, 'replace')
- except LookupError:
- # Input codec not installed on system, so return the original
- # string unchanged.
- return s
-
- def from_splittable(self, ustr, to_output=True):
- """Convert a splittable string back into an encoded string.
-
- Uses the proper codec to try and convert the string from Unicode back
- into an encoded format. Return the string as-is if it is not Unicode,
- or if it could not be converted from Unicode.
-
- Characters that could not be converted from Unicode will be replaced
- with an appropriate character (usually '?').
-
- If to_output is True (the default), uses output_codec to convert to an
- encoded format. If to_output is False, uses input_codec.
- """
- if to_output:
- codec = self.output_codec
- else:
- codec = self.input_codec
- if not isinstance(ustr, unicode) or codec is None:
- return ustr
- try:
- return ustr.encode(codec, 'replace')
- except LookupError:
- # Output codec not installed
- return ustr
-
- def get_output_charset(self):
- """Return the output character set.
-
- This is self.output_charset if that is not None, otherwise it is
- self.input_charset.
- """
- return self.output_charset or self.input_charset
-
- def encoded_header_len(self, s):
- """Return the length of the encoded header string."""
- cset = self.get_output_charset()
- # The len(s) of a 7bit encoding is len(s)
- if self.header_encoding == BASE64:
- return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
- elif self.header_encoding == QP:
- return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
- elif self.header_encoding == SHORTEST:
- lenb64 = email.base64mime.base64_len(s)
- lenqp = email.quoprimime.header_quopri_len(s)
- return min(lenb64, lenqp) + len(cset) + MISC_LEN
- else:
- return len(s)
-
- def header_encode(self, s, convert=False):
- """Header-encode a string, optionally converting it to output_charset.
-
- If convert is True, the string will be converted from the input
- charset to the output charset automatically. This is not useful for
- multibyte character sets, which have line length issues (multibyte
- characters must be split on a character, not a byte boundary); use the
- high-level Header class to deal with these issues. convert defaults
- to False.
-
- The type of encoding (base64 or quoted-printable) will be based on
- self.header_encoding.
- """
- cset = self.get_output_charset()
- if convert:
- s = self.convert(s)
- # 7bit/8bit encodings return the string unchanged (modulo conversions)
- if self.header_encoding == BASE64:
- return email.base64mime.header_encode(s, cset)
- elif self.header_encoding == QP:
- return email.quoprimime.header_encode(s, cset, maxlinelen=None)
- elif self.header_encoding == SHORTEST:
- lenb64 = email.base64mime.base64_len(s)
- lenqp = email.quoprimime.header_quopri_len(s)
- if lenb64 < lenqp:
- return email.base64mime.header_encode(s, cset)
- else:
- return email.quoprimime.header_encode(s, cset, maxlinelen=None)
- else:
- return s
-
- def body_encode(self, s, convert=True):
- """Body-encode a string and convert it to output_charset.
-
- If convert is True (the default), the string will be converted from
- the input charset to output charset automatically. Unlike
- header_encode(), there are no issues with byte boundaries and
- multibyte charsets in email bodies, so this is usually pretty safe.
-
- The type of encoding (base64 or quoted-printable) will be based on
- self.body_encoding.
- """
- if convert:
- s = self.convert(s)
- # 7bit/8bit encodings return the string unchanged (module conversions)
- if self.body_encoding is BASE64:
- return email.base64mime.body_encode(s)
- elif self.body_encoding is QP:
- return email.quoprimime.body_encode(s)
- else:
- return s
diff --git a/sys/lib/python/email/encoders.py b/sys/lib/python/email/encoders.py
deleted file mode 100644
index 06016cdea..000000000
--- a/sys/lib/python/email/encoders.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Encodings and related functions."""
-
-__all__ = [
- 'encode_7or8bit',
- 'encode_base64',
- 'encode_noop',
- 'encode_quopri',
- ]
-
-import base64
-
-from quopri import encodestring as _encodestring
-
-
-
-def _qencode(s):
- enc = _encodestring(s, quotetabs=True)
- # Must encode spaces, which quopri.encodestring() doesn't do
- return enc.replace(' ', '=20')
-
-
-def _bencode(s):
- # We can't quite use base64.encodestring() since it tacks on a "courtesy
- # newline". Blech!
- if not s:
- return s
- hasnewline = (s[-1] == '\n')
- value = base64.encodestring(s)
- if not hasnewline and value[-1] == '\n':
- return value[:-1]
- return value
-
-
-
-def encode_base64(msg):
- """Encode the message's payload in Base64.
-
- Also, add an appropriate Content-Transfer-Encoding header.
- """
- orig = msg.get_payload()
- encdata = _bencode(orig)
- msg.set_payload(encdata)
- msg['Content-Transfer-Encoding'] = 'base64'
-
-
-
-def encode_quopri(msg):
- """Encode the message's payload in quoted-printable.
-
- Also, add an appropriate Content-Transfer-Encoding header.
- """
- orig = msg.get_payload()
- encdata = _qencode(orig)
- msg.set_payload(encdata)
- msg['Content-Transfer-Encoding'] = 'quoted-printable'
-
-
-
-def encode_7or8bit(msg):
- """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
- orig = msg.get_payload()
- if orig is None:
- # There's no payload. For backwards compatibility we use 7bit
- msg['Content-Transfer-Encoding'] = '7bit'
- return
- # We play a trick to make this go fast. If encoding to ASCII succeeds, we
- # know the data must be 7bit, otherwise treat it as 8bit.
- try:
- orig.encode('ascii')
- except UnicodeError:
- # iso-2022-* is non-ASCII but still 7-bit
- charset = msg.get_charset()
- output_cset = charset and charset.output_charset
- if output_cset and output_cset.lower().startswith('iso-2202-'):
- msg['Content-Transfer-Encoding'] = '7bit'
- else:
- msg['Content-Transfer-Encoding'] = '8bit'
- else:
- msg['Content-Transfer-Encoding'] = '7bit'
-
-
-
-def encode_noop(msg):
- """Do nothing."""
diff --git a/sys/lib/python/email/errors.py b/sys/lib/python/email/errors.py
deleted file mode 100644
index d52a62460..000000000
--- a/sys/lib/python/email/errors.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""email package exception classes."""
-
-
-
-class MessageError(Exception):
- """Base class for errors in the email package."""
-
-
-class MessageParseError(MessageError):
- """Base class for message parsing errors."""
-
-
-class HeaderParseError(MessageParseError):
- """Error while parsing headers."""
-
-
-class BoundaryError(MessageParseError):
- """Couldn't find terminating boundary."""
-
-
-class MultipartConversionError(MessageError, TypeError):
- """Conversion to a multipart is prohibited."""
-
-
-class CharsetError(MessageError):
- """An illegal charset was given."""
-
-
-
-# These are parsing defects which the parser was able to work around.
-class MessageDefect:
- """Base class for a message defect."""
-
- def __init__(self, line=None):
- self.line = line
-
-class NoBoundaryInMultipartDefect(MessageDefect):
- """A message claimed to be a multipart but had no boundary parameter."""
-
-class StartBoundaryNotFoundDefect(MessageDefect):
- """The claimed start boundary was never found."""
-
-class FirstHeaderLineIsContinuationDefect(MessageDefect):
- """A message had a continuation line as its first header line."""
-
-class MisplacedEnvelopeHeaderDefect(MessageDefect):
- """A 'Unix-from' header was found in the middle of a header block."""
-
-class MalformedHeaderDefect(MessageDefect):
- """Found a header that was missing a colon, or was otherwise malformed."""
-
-class MultipartInvariantViolationDefect(MessageDefect):
- """A message claimed to be a multipart but no subparts were found."""
diff --git a/sys/lib/python/email/feedparser.py b/sys/lib/python/email/feedparser.py
deleted file mode 100644
index afb02b32b..000000000
--- a/sys/lib/python/email/feedparser.py
+++ /dev/null
@@ -1,480 +0,0 @@
-# Copyright (C) 2004-2006 Python Software Foundation
-# Authors: Baxter, Wouters and Warsaw
-# Contact: email-sig@python.org
-
-"""FeedParser - An email feed parser.
-
-The feed parser implements an interface for incrementally parsing an email
-message, line by line. This has advantages for certain applications, such as
-those reading email messages off a socket.
-
-FeedParser.feed() is the primary interface for pushing new data into the
-parser. It returns when there's nothing more it can do with the available
-data. When you have no more data to push into the parser, call .close().
-This completes the parsing and returns the root message object.
-
-The other advantage of this parser is that it will never throw a parsing
-exception. Instead, when it finds something unexpected, it adds a 'defect' to
-the current message. Defects are just instances that live on the message
-object's .defects attribute.
-"""
-
-__all__ = ['FeedParser']
-
-import re
-
-from email import errors
-from email import message
-
-NLCRE = re.compile('\r\n|\r|\n')
-NLCRE_bol = re.compile('(\r\n|\r|\n)')
-NLCRE_eol = re.compile('(\r\n|\r|\n)$')
-NLCRE_crack = re.compile('(\r\n|\r|\n)')
-# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
-# except controls, SP, and ":".
-headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
-EMPTYSTRING = ''
-NL = '\n'
-
-NeedMoreData = object()
-
-
-
-class BufferedSubFile(object):
- """A file-ish object that can have new data loaded into it.
-
- You can also push and pop line-matching predicates onto a stack. When the
- current predicate matches the current line, a false EOF response
- (i.e. empty string) is returned instead. This lets the parser adhere to a
- simple abstraction -- it parses until EOF closes the current message.
- """
- def __init__(self):
- # The last partial line pushed into this object.
- self._partial = ''
- # The list of full, pushed lines, in reverse order
- self._lines = []
- # The stack of false-EOF checking predicates.
- self._eofstack = []
- # A flag indicating whether the file has been closed or not.
- self._closed = False
-
- def push_eof_matcher(self, pred):
- self._eofstack.append(pred)
-
- def pop_eof_matcher(self):
- return self._eofstack.pop()
-
- def close(self):
- # Don't forget any trailing partial line.
- self._lines.append(self._partial)
- self._partial = ''
- self._closed = True
-
- def readline(self):
- if not self._lines:
- if self._closed:
- return ''
- return NeedMoreData
- # Pop the line off the stack and see if it matches the current
- # false-EOF predicate.
- line = self._lines.pop()
- # RFC 2046, section 5.1.2 requires us to recognize outer level
- # boundaries at any level of inner nesting. Do this, but be sure it's
- # in the order of most to least nested.
- for ateof in self._eofstack[::-1]:
- if ateof(line):
- # We're at the false EOF. But push the last line back first.
- self._lines.append(line)
- return ''
- return line
-
- def unreadline(self, line):
- # Let the consumer push a line back into the buffer.
- assert line is not NeedMoreData
- self._lines.append(line)
-
- def push(self, data):
- """Push some new data into this object."""
- # Handle any previous leftovers
- data, self._partial = self._partial + data, ''
- # Crack into lines, but preserve the newlines on the end of each
- parts = NLCRE_crack.split(data)
- # The *ahem* interesting behaviour of re.split when supplied grouping
- # parentheses is that the last element of the resulting list is the
- # data after the final RE. In the case of a NL/CR terminated string,
- # this is the empty string.
- self._partial = parts.pop()
- # parts is a list of strings, alternating between the line contents
- # and the eol character(s). Gather up a list of lines after
- # re-attaching the newlines.
- lines = []
- for i in range(len(parts) // 2):
- lines.append(parts[i*2] + parts[i*2+1])
- self.pushlines(lines)
-
- def pushlines(self, lines):
- # Reverse and insert at the front of the lines.
- self._lines[:0] = lines[::-1]
-
- def is_closed(self):
- return self._closed
-
- def __iter__(self):
- return self
-
- def next(self):
- line = self.readline()
- if line == '':
- raise StopIteration
- return line
-
-
-
-class FeedParser:
- """A feed-style parser of email."""
-
- def __init__(self, _factory=message.Message):
- """_factory is called with no arguments to create a new message obj"""
- self._factory = _factory
- self._input = BufferedSubFile()
- self._msgstack = []
- self._parse = self._parsegen().next
- self._cur = None
- self._last = None
- self._headersonly = False
-
- # Non-public interface for supporting Parser's headersonly flag
- def _set_headersonly(self):
- self._headersonly = True
-
- def feed(self, data):
- """Push more data into the parser."""
- self._input.push(data)
- self._call_parse()
-
- def _call_parse(self):
- try:
- self._parse()
- except StopIteration:
- pass
-
- def close(self):
- """Parse all remaining data and return the root message object."""
- self._input.close()
- self._call_parse()
- root = self._pop_message()
- assert not self._msgstack
- # Look for final set of defects
- if root.get_content_maintype() == 'multipart' \
- and not root.is_multipart():
- root.defects.append(errors.MultipartInvariantViolationDefect())
- return root
-
- def _new_message(self):
- msg = self._factory()
- if self._cur and self._cur.get_content_type() == 'multipart/digest':
- msg.set_default_type('message/rfc822')
- if self._msgstack:
- self._msgstack[-1].attach(msg)
- self._msgstack.append(msg)
- self._cur = msg
- self._last = msg
-
- def _pop_message(self):
- retval = self._msgstack.pop()
- if self._msgstack:
- self._cur = self._msgstack[-1]
- else:
- self._cur = None
- return retval
-
- def _parsegen(self):
- # Create a new message and start by parsing headers.
- self._new_message()
- headers = []
- # Collect the headers, searching for a line that doesn't match the RFC
- # 2822 header or continuation pattern (including an empty line).
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- if not headerRE.match(line):
- # If we saw the RFC defined header/body separator
- # (i.e. newline), just throw it away. Otherwise the line is
- # part of the body so push it back.
- if not NLCRE.match(line):
- self._input.unreadline(line)
- break
- headers.append(line)
- # Done with the headers, so parse them and figure out what we're
- # supposed to see in the body of the message.
- self._parse_headers(headers)
- # Headers-only parsing is a backwards compatibility hack, which was
- # necessary in the older parser, which could throw errors. All
- # remaining lines in the input are thrown into the message body.
- if self._headersonly:
- lines = []
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- if line == '':
- break
- lines.append(line)
- self._cur.set_payload(EMPTYSTRING.join(lines))
- return
- if self._cur.get_content_type() == 'message/delivery-status':
- # message/delivery-status contains blocks of headers separated by
- # a blank line. We'll represent each header block as a separate
- # nested message object, but the processing is a bit different
- # than standard message/* types because there is no body for the
- # nested messages. A blank line separates the subparts.
- while True:
- self._input.push_eof_matcher(NLCRE.match)
- for retval in self._parsegen():
- if retval is NeedMoreData:
- yield NeedMoreData
- continue
- break
- msg = self._pop_message()
- # We need to pop the EOF matcher in order to tell if we're at
- # the end of the current file, not the end of the last block
- # of message headers.
- self._input.pop_eof_matcher()
- # The input stream must be sitting at the newline or at the
- # EOF. We want to see if we're at the end of this subpart, so
- # first consume the blank line, then test the next line to see
- # if we're at this subpart's EOF.
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- break
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- break
- if line == '':
- break
- # Not at EOF so this is a line we're going to need.
- self._input.unreadline(line)
- return
- if self._cur.get_content_maintype() == 'message':
- # The message claims to be a message/* type, then what follows is
- # another RFC 2822 message.
- for retval in self._parsegen():
- if retval is NeedMoreData:
- yield NeedMoreData
- continue
- break
- self._pop_message()
- return
- if self._cur.get_content_maintype() == 'multipart':
- boundary = self._cur.get_boundary()
- if boundary is None:
- # The message /claims/ to be a multipart but it has not
- # defined a boundary. That's a problem which we'll handle by
- # reading everything until the EOF and marking the message as
- # defective.
- self._cur.defects.append(errors.NoBoundaryInMultipartDefect())
- lines = []
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- lines.append(line)
- self._cur.set_payload(EMPTYSTRING.join(lines))
- return
- # Create a line match predicate which matches the inter-part
- # boundary as well as the end-of-multipart boundary. Don't push
- # this onto the input stream until we've scanned past the
- # preamble.
- separator = '--' + boundary
- boundaryre = re.compile(
- '(?P<sep>' + re.escape(separator) +
- r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
- capturing_preamble = True
- preamble = []
- linesep = False
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- if line == '':
- break
- mo = boundaryre.match(line)
- if mo:
- # If we're looking at the end boundary, we're done with
- # this multipart. If there was a newline at the end of
- # the closing boundary, then we need to initialize the
- # epilogue with the empty string (see below).
- if mo.group('end'):
- linesep = mo.group('linesep')
- break
- # We saw an inter-part boundary. Were we in the preamble?
- if capturing_preamble:
- if preamble:
- # According to RFC 2046, the last newline belongs
- # to the boundary.
- lastline = preamble[-1]
- eolmo = NLCRE_eol.search(lastline)
- if eolmo:
- preamble[-1] = lastline[:-len(eolmo.group(0))]
- self._cur.preamble = EMPTYSTRING.join(preamble)
- capturing_preamble = False
- self._input.unreadline(line)
- continue
- # We saw a boundary separating two parts. Consume any
- # multiple boundary lines that may be following. Our
- # interpretation of RFC 2046 BNF grammar does not produce
- # body parts within such double boundaries.
- while True:
- line = self._input.readline()
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- mo = boundaryre.match(line)
- if not mo:
- self._input.unreadline(line)
- break
- # Recurse to parse this subpart; the input stream points
- # at the subpart's first line.
- self._input.push_eof_matcher(boundaryre.match)
- for retval in self._parsegen():
- if retval is NeedMoreData:
- yield NeedMoreData
- continue
- break
- # Because of RFC 2046, the newline preceding the boundary
- # separator actually belongs to the boundary, not the
- # previous subpart's payload (or epilogue if the previous
- # part is a multipart).
- if self._last.get_content_maintype() == 'multipart':
- epilogue = self._last.epilogue
- if epilogue == '':
- self._last.epilogue = None
- elif epilogue is not None:
- mo = NLCRE_eol.search(epilogue)
- if mo:
- end = len(mo.group(0))
- self._last.epilogue = epilogue[:-end]
- else:
- payload = self._last.get_payload()
- if isinstance(payload, basestring):
- mo = NLCRE_eol.search(payload)
- if mo:
- payload = payload[:-len(mo.group(0))]
- self._last.set_payload(payload)
- self._input.pop_eof_matcher()
- self._pop_message()
- # Set the multipart up for newline cleansing, which will
- # happen if we're in a nested multipart.
- self._last = self._cur
- else:
- # I think we must be in the preamble
- assert capturing_preamble
- preamble.append(line)
- # We've seen either the EOF or the end boundary. If we're still
- # capturing the preamble, we never saw the start boundary. Note
- # that as a defect and store the captured text as the payload.
- # Everything from here to the EOF is epilogue.
- if capturing_preamble:
- self._cur.defects.append(errors.StartBoundaryNotFoundDefect())
- self._cur.set_payload(EMPTYSTRING.join(preamble))
- epilogue = []
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- self._cur.epilogue = EMPTYSTRING.join(epilogue)
- return
- # If the end boundary ended in a newline, we'll need to make sure
- # the epilogue isn't None
- if linesep:
- epilogue = ['']
- else:
- epilogue = []
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- epilogue.append(line)
- # Any CRLF at the front of the epilogue is not technically part of
- # the epilogue. Also, watch out for an empty string epilogue,
- # which means a single newline.
- if epilogue:
- firstline = epilogue[0]
- bolmo = NLCRE_bol.match(firstline)
- if bolmo:
- epilogue[0] = firstline[len(bolmo.group(0)):]
- self._cur.epilogue = EMPTYSTRING.join(epilogue)
- return
- # Otherwise, it's some non-multipart type, so the entire rest of the
- # file contents becomes the payload.
- lines = []
- for line in self._input:
- if line is NeedMoreData:
- yield NeedMoreData
- continue
- lines.append(line)
- self._cur.set_payload(EMPTYSTRING.join(lines))
-
- def _parse_headers(self, lines):
- # Passed a list of lines that make up the headers for the current msg
- lastheader = ''
- lastvalue = []
- for lineno, line in enumerate(lines):
- # Check for continuation
- if line[0] in ' \t':
- if not lastheader:
- # The first line of the headers was a continuation. This
- # is illegal, so let's note the defect, store the illegal
- # line, and ignore it for purposes of headers.
- defect = errors.FirstHeaderLineIsContinuationDefect(line)
- self._cur.defects.append(defect)
- continue
- lastvalue.append(line)
- continue
- if lastheader:
- # XXX reconsider the joining of folded lines
- lhdr = EMPTYSTRING.join(lastvalue)[:-1].rstrip('\r\n')
- self._cur[lastheader] = lhdr
- lastheader, lastvalue = '', []
- # Check for envelope header, i.e. unix-from
- if line.startswith('From '):
- if lineno == 0:
- # Strip off the trailing newline
- mo = NLCRE_eol.search(line)
- if mo:
- line = line[:-len(mo.group(0))]
- self._cur.set_unixfrom(line)
- continue
- elif lineno == len(lines) - 1:
- # Something looking like a unix-from at the end - it's
- # probably the first line of the body, so push back the
- # line and stop.
- self._input.unreadline(line)
- return
- else:
- # Weirdly placed unix-from line. Note this as a defect
- # and ignore it.
- defect = errors.MisplacedEnvelopeHeaderDefect(line)
- self._cur.defects.append(defect)
- continue
- # Split the line on the colon separating field name from value.
- i = line.find(':')
- if i < 0:
- defect = errors.MalformedHeaderDefect(line)
- self._cur.defects.append(defect)
- continue
- lastheader = line[:i]
- lastvalue = [line[i+1:].lstrip()]
- # Done with all the lines, so handle the last header.
- if lastheader:
- # XXX reconsider the joining of folded lines
- self._cur[lastheader] = EMPTYSTRING.join(lastvalue).rstrip('\r\n')
diff --git a/sys/lib/python/email/generator.py b/sys/lib/python/email/generator.py
deleted file mode 100644
index 6e7a51530..000000000
--- a/sys/lib/python/email/generator.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Classes to generate plain text from a message object tree."""
-
-__all__ = ['Generator', 'DecodedGenerator']
-
-import re
-import sys
-import time
-import random
-import warnings
-
-from cStringIO import StringIO
-from email.header import Header
-
-UNDERSCORE = '_'
-NL = '\n'
-
-fcre = re.compile(r'^From ', re.MULTILINE)
-
-def _is8bitstring(s):
- if isinstance(s, str):
- try:
- unicode(s, 'us-ascii')
- except UnicodeError:
- return True
- return False
-
-
-
-class Generator:
- """Generates output from a Message object tree.
-
- This basic generator writes the message to the given file object as plain
- text.
- """
- #
- # Public interface
- #
-
- def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
- """Create the generator for message flattening.
-
- outfp is the output file-like object for writing the message to. It
- must have a write() method.
-
- Optional mangle_from_ is a flag that, when True (the default), escapes
- From_ lines in the body of the message by putting a `>' in front of
- them.
-
- Optional maxheaderlen specifies the longest length for a non-continued
- header. When a header line is longer (in characters, with tabs
- expanded to 8 spaces) than maxheaderlen, the header will split as
- defined in the Header class. Set maxheaderlen to zero to disable
- header wrapping. The default is 78, as recommended (but not required)
- by RFC 2822.
- """
- self._fp = outfp
- self._mangle_from_ = mangle_from_
- self._maxheaderlen = maxheaderlen
-
- def write(self, s):
- # Just delegate to the file object
- self._fp.write(s)
-
- def flatten(self, msg, unixfrom=False):
- """Print the message object tree rooted at msg to the output file
- specified when the Generator instance was created.
-
- unixfrom is a flag that forces the printing of a Unix From_ delimiter
- before the first object in the message tree. If the original message
- has no From_ delimiter, a `standard' one is crafted. By default, this
- is False to inhibit the printing of any From_ delimiter.
-
- Note that for subobjects, no From_ line is printed.
- """
- if unixfrom:
- ufrom = msg.get_unixfrom()
- if not ufrom:
- ufrom = 'From nobody ' + time.ctime(time.time())
- print >> self._fp, ufrom
- self._write(msg)
-
- def clone(self, fp):
- """Clone this generator with the exact same options."""
- return self.__class__(fp, self._mangle_from_, self._maxheaderlen)
-
- #
- # Protected interface - undocumented ;/
- #
-
- def _write(self, msg):
- # We can't write the headers yet because of the following scenario:
- # say a multipart message includes the boundary string somewhere in
- # its body. We'd have to calculate the new boundary /before/ we write
- # the headers so that we can write the correct Content-Type:
- # parameter.
- #
- # The way we do this, so as to make the _handle_*() methods simpler,
- # is to cache any subpart writes into a StringIO. The we write the
- # headers and the StringIO contents. That way, subpart handlers can
- # Do The Right Thing, and can still modify the Content-Type: header if
- # necessary.
- oldfp = self._fp
- try:
- self._fp = sfp = StringIO()
- self._dispatch(msg)
- finally:
- self._fp = oldfp
- # Write the headers. First we see if the message object wants to
- # handle that itself. If not, we'll do it generically.
- meth = getattr(msg, '_write_headers', None)
- if meth is None:
- self._write_headers(msg)
- else:
- meth(self)
- self._fp.write(sfp.getvalue())
-
- def _dispatch(self, msg):
- # Get the Content-Type: for the message, then try to dispatch to
- # self._handle_<maintype>_<subtype>(). If there's no handler for the
- # full MIME type, then dispatch to self._handle_<maintype>(). If
- # that's missing too, then dispatch to self._writeBody().
- main = msg.get_content_maintype()
- sub = msg.get_content_subtype()
- specific = UNDERSCORE.join((main, sub)).replace('-', '_')
- meth = getattr(self, '_handle_' + specific, None)
- if meth is None:
- generic = main.replace('-', '_')
- meth = getattr(self, '_handle_' + generic, None)
- if meth is None:
- meth = self._writeBody
- meth(msg)
-
- #
- # Default handlers
- #
-
- def _write_headers(self, msg):
- for h, v in msg.items():
- print >> self._fp, '%s:' % h,
- if self._maxheaderlen == 0:
- # Explicit no-wrapping
- print >> self._fp, v
- elif isinstance(v, Header):
- # Header instances know what to do
- print >> self._fp, v.encode()
- elif _is8bitstring(v):
- # If we have raw 8bit data in a byte string, we have no idea
- # what the encoding is. There is no safe way to split this
- # string. If it's ascii-subset, then we could do a normal
- # ascii split, but if it's multibyte then we could break the
- # string. There's no way to know so the least harm seems to
- # be to not split the string and risk it being too long.
- print >> self._fp, v
- else:
- # Header's got lots of smarts, so use it.
- print >> self._fp, Header(
- v, maxlinelen=self._maxheaderlen,
- header_name=h, continuation_ws='\t').encode()
- # A blank line always separates headers from body
- print >> self._fp
-
- #
- # Handlers for writing types and subtypes
- #
-
- def _handle_text(self, msg):
- payload = msg.get_payload()
- if payload is None:
- return
- if not isinstance(payload, basestring):
- raise TypeError('string payload expected: %s' % type(payload))
- if self._mangle_from_:
- payload = fcre.sub('>From ', payload)
- self._fp.write(payload)
-
- # Default body handler
- _writeBody = _handle_text
-
- def _handle_multipart(self, msg):
- # The trick here is to write out each part separately, merge them all
- # together, and then make sure that the boundary we've chosen isn't
- # present in the payload.
- msgtexts = []
- subparts = msg.get_payload()
- if subparts is None:
- subparts = []
- elif isinstance(subparts, basestring):
- # e.g. a non-strict parse of a message with no starting boundary.
- self._fp.write(subparts)
- return
- elif not isinstance(subparts, list):
- # Scalar payload
- subparts = [subparts]
- for part in subparts:
- s = StringIO()
- g = self.clone(s)
- g.flatten(part, unixfrom=False)
- msgtexts.append(s.getvalue())
- # Now make sure the boundary we've selected doesn't appear in any of
- # the message texts.
- alltext = NL.join(msgtexts)
- # BAW: What about boundaries that are wrapped in double-quotes?
- boundary = msg.get_boundary(failobj=_make_boundary(alltext))
- # If we had to calculate a new boundary because the body text
- # contained that string, set the new boundary. We don't do it
- # unconditionally because, while set_boundary() preserves order, it
- # doesn't preserve newlines/continuations in headers. This is no big
- # deal in practice, but turns out to be inconvenient for the unittest
- # suite.
- if msg.get_boundary() <> boundary:
- msg.set_boundary(boundary)
- # If there's a preamble, write it out, with a trailing CRLF
- if msg.preamble is not None:
- print >> self._fp, msg.preamble
- # dash-boundary transport-padding CRLF
- print >> self._fp, '--' + boundary
- # body-part
- if msgtexts:
- self._fp.write(msgtexts.pop(0))
- # *encapsulation
- # --> delimiter transport-padding
- # --> CRLF body-part
- for body_part in msgtexts:
- # delimiter transport-padding CRLF
- print >> self._fp, '\n--' + boundary
- # body-part
- self._fp.write(body_part)
- # close-delimiter transport-padding
- self._fp.write('\n--' + boundary + '--')
- if msg.epilogue is not None:
- print >> self._fp
- self._fp.write(msg.epilogue)
-
- def _handle_message_delivery_status(self, msg):
- # We can't just write the headers directly to self's file object
- # because this will leave an extra newline between the last header
- # block and the boundary. Sigh.
- blocks = []
- for part in msg.get_payload():
- s = StringIO()
- g = self.clone(s)
- g.flatten(part, unixfrom=False)
- text = s.getvalue()
- lines = text.split('\n')
- # Strip off the unnecessary trailing empty line
- if lines and lines[-1] == '':
- blocks.append(NL.join(lines[:-1]))
- else:
- blocks.append(text)
- # Now join all the blocks with an empty line. This has the lovely
- # effect of separating each block with an empty line, but not adding
- # an extra one after the last one.
- self._fp.write(NL.join(blocks))
-
- def _handle_message(self, msg):
- s = StringIO()
- g = self.clone(s)
- # The payload of a message/rfc822 part should be a multipart sequence
- # of length 1. The zeroth element of the list should be the Message
- # object for the subpart. Extract that object, stringify it, and
- # write it out.
- g.flatten(msg.get_payload(0), unixfrom=False)
- self._fp.write(s.getvalue())
-
-
-
-_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
-
-class DecodedGenerator(Generator):
- """Generator a text representation of a message.
-
- Like the Generator base class, except that non-text parts are substituted
- with a format string representing the part.
- """
- def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
- """Like Generator.__init__() except that an additional optional
- argument is allowed.
-
- Walks through all subparts of a message. If the subpart is of main
- type `text', then it prints the decoded payload of the subpart.
-
- Otherwise, fmt is a format string that is used instead of the message
- payload. fmt is expanded with the following keywords (in
- %(keyword)s format):
-
- type : Full MIME type of the non-text part
- maintype : Main MIME type of the non-text part
- subtype : Sub-MIME type of the non-text part
- filename : Filename of the non-text part
- description: Description associated with the non-text part
- encoding : Content transfer encoding of the non-text part
-
- The default value for fmt is None, meaning
-
- [Non-text (%(type)s) part of message omitted, filename %(filename)s]
- """
- Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
- if fmt is None:
- self._fmt = _FMT
- else:
- self._fmt = fmt
-
- def _dispatch(self, msg):
- for part in msg.walk():
- maintype = part.get_content_maintype()
- if maintype == 'text':
- print >> self, part.get_payload(decode=True)
- elif maintype == 'multipart':
- # Just skip this
- pass
- else:
- print >> self, self._fmt % {
- 'type' : part.get_content_type(),
- 'maintype' : part.get_content_maintype(),
- 'subtype' : part.get_content_subtype(),
- 'filename' : part.get_filename('[no filename]'),
- 'description': part.get('Content-Description',
- '[no description]'),
- 'encoding' : part.get('Content-Transfer-Encoding',
- '[no encoding]'),
- }
-
-
-
-# Helper
-_width = len(repr(sys.maxint-1))
-_fmt = '%%0%dd' % _width
-
-def _make_boundary(text=None):
- # Craft a random boundary. If text is given, ensure that the chosen
- # boundary doesn't appear in the text.
- token = random.randrange(sys.maxint)
- boundary = ('=' * 15) + (_fmt % token) + '=='
- if text is None:
- return boundary
- b = boundary
- counter = 0
- while True:
- cre = re.compile('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
- if not cre.search(text):
- break
- b = boundary + '.' + str(counter)
- counter += 1
- return b
diff --git a/sys/lib/python/email/header.py b/sys/lib/python/email/header.py
deleted file mode 100644
index e139ccf64..000000000
--- a/sys/lib/python/email/header.py
+++ /dev/null
@@ -1,503 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Ben Gertzfield, Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Header encoding and decoding functionality."""
-
-__all__ = [
- 'Header',
- 'decode_header',
- 'make_header',
- ]
-
-import re
-import binascii
-
-import email.quoprimime
-import email.base64mime
-
-from email.errors import HeaderParseError
-from email.charset import Charset
-
-NL = '\n'
-SPACE = ' '
-USPACE = u' '
-SPACE8 = ' ' * 8
-UEMPTYSTRING = u''
-
-MAXLINELEN = 76
-
-USASCII = Charset('us-ascii')
-UTF8 = Charset('utf-8')
-
-# Match encoded-word strings in the form =?charset?q?Hello_World?=
-ecre = re.compile(r'''
- =\? # literal =?
- (?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
- \? # literal ?
- (?P<encoding>[qb]) # either a "q" or a "b", case insensitive
- \? # literal ?
- (?P<encoded>.*?) # non-greedy up to the next ?= is the encoded string
- \?= # literal ?=
- (?=[ \t]|$) # whitespace or the end of the string
- ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)
-
-# Field name regexp, including trailing colon, but not separating whitespace,
-# according to RFC 2822. Character range is from tilde to exclamation mark.
-# For use with .match()
-fcre = re.compile(r'[\041-\176]+:$')
-
-
-
-# Helpers
-_max_append = email.quoprimime._max_append
-
-
-
-def decode_header(header):
- """Decode a message header value without converting charset.
-
- Returns a list of (decoded_string, charset) pairs containing each of the
- decoded parts of the header. Charset is None for non-encoded parts of the
- header, otherwise a lower-case string containing the name of the character
- set specified in the encoded string.
-
- An email.Errors.HeaderParseError may be raised when certain decoding error
- occurs (e.g. a base64 decoding exception).
- """
- # If no encoding, just return the header
- header = str(header)
- if not ecre.search(header):
- return [(header, None)]
- decoded = []
- dec = ''
- for line in header.splitlines():
- # This line might not have an encoding in it
- if not ecre.search(line):
- decoded.append((line, None))
- continue
- parts = ecre.split(line)
- while parts:
- unenc = parts.pop(0).strip()
- if unenc:
- # Should we continue a long line?
- if decoded and decoded[-1][1] is None:
- decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
- else:
- decoded.append((unenc, None))
- if parts:
- charset, encoding = [s.lower() for s in parts[0:2]]
- encoded = parts[2]
- dec = None
- if encoding == 'q':
- dec = email.quoprimime.header_decode(encoded)
- elif encoding == 'b':
- try:
- dec = email.base64mime.decode(encoded)
- except binascii.Error:
- # Turn this into a higher level exception. BAW: Right
- # now we throw the lower level exception away but
- # when/if we get exception chaining, we'll preserve it.
- raise HeaderParseError
- if dec is None:
- dec = encoded
-
- if decoded and decoded[-1][1] == charset:
- decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
- else:
- decoded.append((dec, charset))
- del parts[0:3]
- return decoded
-
-
-
-def make_header(decoded_seq, maxlinelen=None, header_name=None,
- continuation_ws=' '):
- """Create a Header from a sequence of pairs as returned by decode_header()
-
- decode_header() takes a header value string and returns a sequence of
- pairs of the format (decoded_string, charset) where charset is the string
- name of the character set.
-
- This function takes one of those sequence of pairs and returns a Header
- instance. Optional maxlinelen, header_name, and continuation_ws are as in
- the Header constructor.
- """
- h = Header(maxlinelen=maxlinelen, header_name=header_name,
- continuation_ws=continuation_ws)
- for s, charset in decoded_seq:
- # None means us-ascii but we can simply pass it on to h.append()
- if charset is not None and not isinstance(charset, Charset):
- charset = Charset(charset)
- h.append(s, charset)
- return h
-
-
-
-class Header:
- def __init__(self, s=None, charset=None,
- maxlinelen=None, header_name=None,
- continuation_ws=' ', errors='strict'):
- """Create a MIME-compliant header that can contain many character sets.
-
- Optional s is the initial header value. If None, the initial header
- value is not set. You can later append to the header with .append()
- method calls. s may be a byte string or a Unicode string, but see the
- .append() documentation for semantics.
-
- Optional charset serves two purposes: it has the same meaning as the
- charset argument to the .append() method. It also sets the default
- character set for all subsequent .append() calls that omit the charset
- argument. If charset is not provided in the constructor, the us-ascii
- charset is used both as s's initial charset and as the default for
- subsequent .append() calls.
-
- The maximum line length can be specified explicit via maxlinelen. For
- splitting the first line to a shorter value (to account for the field
- header which isn't included in s, e.g. `Subject') pass in the name of
- the field in header_name. The default maxlinelen is 76.
-
- continuation_ws must be RFC 2822 compliant folding whitespace (usually
- either a space or a hard tab) which will be prepended to continuation
- lines.
-
- errors is passed through to the .append() call.
- """
- if charset is None:
- charset = USASCII
- if not isinstance(charset, Charset):
- charset = Charset(charset)
- self._charset = charset
- self._continuation_ws = continuation_ws
- cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
- # BAW: I believe `chunks' and `maxlinelen' should be non-public.
- self._chunks = []
- if s is not None:
- self.append(s, charset, errors)
- if maxlinelen is None:
- maxlinelen = MAXLINELEN
- if header_name is None:
- # We don't know anything about the field header so the first line
- # is the same length as subsequent lines.
- self._firstlinelen = maxlinelen
- else:
- # The first line should be shorter to take into account the field
- # header. Also subtract off 2 extra for the colon and space.
- self._firstlinelen = maxlinelen - len(header_name) - 2
- # Second and subsequent lines should subtract off the length in
- # columns of the continuation whitespace prefix.
- self._maxlinelen = maxlinelen - cws_expanded_len
-
- def __str__(self):
- """A synonym for self.encode()."""
- return self.encode()
-
- def __unicode__(self):
- """Helper for the built-in unicode function."""
- uchunks = []
- lastcs = None
- for s, charset in self._chunks:
- # We must preserve spaces between encoded and non-encoded word
- # boundaries, which means for us we need to add a space when we go
- # from a charset to None/us-ascii, or from None/us-ascii to a
- # charset. Only do this for the second and subsequent chunks.
- nextcs = charset
- if uchunks:
- if lastcs not in (None, 'us-ascii'):
- if nextcs in (None, 'us-ascii'):
- uchunks.append(USPACE)
- nextcs = None
- elif nextcs not in (None, 'us-ascii'):
- uchunks.append(USPACE)
- lastcs = nextcs
- uchunks.append(unicode(s, str(charset)))
- return UEMPTYSTRING.join(uchunks)
-
- # Rich comparison operators for equality only. BAW: does it make sense to
- # have or explicitly disable <, <=, >, >= operators?
- def __eq__(self, other):
- # other may be a Header or a string. Both are fine so coerce
- # ourselves to a string, swap the args and do another comparison.
- return other == self.encode()
-
- def __ne__(self, other):
- return not self == other
-
- def append(self, s, charset=None, errors='strict'):
- """Append a string to the MIME header.
-
- Optional charset, if given, should be a Charset instance or the name
- of a character set (which will be converted to a Charset instance). A
- value of None (the default) means that the charset given in the
- constructor is used.
-
- s may be a byte string or a Unicode string. If it is a byte string
- (i.e. isinstance(s, str) is true), then charset is the encoding of
- that byte string, and a UnicodeError will be raised if the string
- cannot be decoded with that charset. If s is a Unicode string, then
- charset is a hint specifying the character set of the characters in
- the string. In this case, when producing an RFC 2822 compliant header
- using RFC 2047 rules, the Unicode string will be encoded using the
- following charsets in order: us-ascii, the charset hint, utf-8. The
- first character set not to provoke a UnicodeError is used.
-
- Optional `errors' is passed as the third argument to any unicode() or
- ustr.encode() call.
- """
- if charset is None:
- charset = self._charset
- elif not isinstance(charset, Charset):
- charset = Charset(charset)
- # If the charset is our faux 8bit charset, leave the string unchanged
- if charset <> '8bit':
- # We need to test that the string can be converted to unicode and
- # back to a byte string, given the input and output codecs of the
- # charset.
- if isinstance(s, str):
- # Possibly raise UnicodeError if the byte string can't be
- # converted to a unicode with the input codec of the charset.
- incodec = charset.input_codec or 'us-ascii'
- ustr = unicode(s, incodec, errors)
- # Now make sure that the unicode could be converted back to a
- # byte string with the output codec, which may be different
- # than the iput coded. Still, use the original byte string.
- outcodec = charset.output_codec or 'us-ascii'
- ustr.encode(outcodec, errors)
- elif isinstance(s, unicode):
- # Now we have to be sure the unicode string can be converted
- # to a byte string with a reasonable output codec. We want to
- # use the byte string in the chunk.
- for charset in USASCII, charset, UTF8:
- try:
- outcodec = charset.output_codec or 'us-ascii'
- s = s.encode(outcodec, errors)
- break
- except UnicodeError:
- pass
- else:
- assert False, 'utf-8 conversion failed'
- self._chunks.append((s, charset))
-
- def _split(self, s, charset, maxlinelen, splitchars):
- # Split up a header safely for use with encode_chunks.
- splittable = charset.to_splittable(s)
- encoded = charset.from_splittable(splittable, True)
- elen = charset.encoded_header_len(encoded)
- # If the line's encoded length first, just return it
- if elen <= maxlinelen:
- return [(encoded, charset)]
- # If we have undetermined raw 8bit characters sitting in a byte
- # string, we really don't know what the right thing to do is. We
- # can't really split it because it might be multibyte data which we
- # could break if we split it between pairs. The least harm seems to
- # be to not split the header at all, but that means they could go out
- # longer than maxlinelen.
- if charset == '8bit':
- return [(s, charset)]
- # BAW: I'm not sure what the right test here is. What we're trying to
- # do is be faithful to RFC 2822's recommendation that ($2.2.3):
- #
- # "Note: Though structured field bodies are defined in such a way that
- # folding can take place between many of the lexical tokens (and even
- # within some of the lexical tokens), folding SHOULD be limited to
- # placing the CRLF at higher-level syntactic breaks."
- #
- # For now, I can only imagine doing this when the charset is us-ascii,
- # although it's possible that other charsets may also benefit from the
- # higher-level syntactic breaks.
- elif charset == 'us-ascii':
- return self._split_ascii(s, charset, maxlinelen, splitchars)
- # BAW: should we use encoded?
- elif elen == len(s):
- # We can split on _maxlinelen boundaries because we know that the
- # encoding won't change the size of the string
- splitpnt = maxlinelen
- first = charset.from_splittable(splittable[:splitpnt], False)
- last = charset.from_splittable(splittable[splitpnt:], False)
- else:
- # Binary search for split point
- first, last = _binsplit(splittable, charset, maxlinelen)
- # first is of the proper length so just wrap it in the appropriate
- # chrome. last must be recursively split.
- fsplittable = charset.to_splittable(first)
- fencoded = charset.from_splittable(fsplittable, True)
- chunk = [(fencoded, charset)]
- return chunk + self._split(last, charset, self._maxlinelen, splitchars)
-
- def _split_ascii(self, s, charset, firstlen, splitchars):
- chunks = _split_ascii(s, firstlen, self._maxlinelen,
- self._continuation_ws, splitchars)
- return zip(chunks, [charset]*len(chunks))
-
- def _encode_chunks(self, newchunks, maxlinelen):
- # MIME-encode a header with many different charsets and/or encodings.
- #
- # Given a list of pairs (string, charset), return a MIME-encoded
- # string suitable for use in a header field. Each pair may have
- # different charsets and/or encodings, and the resulting header will
- # accurately reflect each setting.
- #
- # Each encoding can be email.Utils.QP (quoted-printable, for
- # ASCII-like character sets like iso-8859-1), email.Utils.BASE64
- # (Base64, for non-ASCII like character sets like KOI8-R and
- # iso-2022-jp), or None (no encoding).
- #
- # Each pair will be represented on a separate line; the resulting
- # string will be in the format:
- #
- # =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
- # =?charset2?b?SvxyZ2VuIEL2aW5n?="
- chunks = []
- for header, charset in newchunks:
- if not header:
- continue
- if charset is None or charset.header_encoding is None:
- s = header
- else:
- s = charset.header_encode(header)
- # Don't add more folding whitespace than necessary
- if chunks and chunks[-1].endswith(' '):
- extra = ''
- else:
- extra = ' '
- _max_append(chunks, s, maxlinelen, extra)
- joiner = NL + self._continuation_ws
- return joiner.join(chunks)
-
- def encode(self, splitchars=';, '):
- """Encode a message header into an RFC-compliant format.
-
- There are many issues involved in converting a given string for use in
- an email header. Only certain character sets are readable in most
- email clients, and as header strings can only contain a subset of
- 7-bit ASCII, care must be taken to properly convert and encode (with
- Base64 or quoted-printable) header strings. In addition, there is a
- 75-character length limit on any given encoded header field, so
- line-wrapping must be performed, even with double-byte character sets.
-
- This method will do its best to convert the string to the correct
- character set used in email, and encode and line wrap it safely with
- the appropriate scheme for that character set.
-
- If the given charset is not known or an error occurs during
- conversion, this function will return the header untouched.
-
- Optional splitchars is a string containing characters to split long
- ASCII lines on, in rough support of RFC 2822's `highest level
- syntactic breaks'. This doesn't affect RFC 2047 encoded lines.
- """
- newchunks = []
- maxlinelen = self._firstlinelen
- lastlen = 0
- for s, charset in self._chunks:
- # The first bit of the next chunk should be just long enough to
- # fill the next line. Don't forget the space separating the
- # encoded words.
- targetlen = maxlinelen - lastlen - 1
- if targetlen < charset.encoded_header_len(''):
- # Stick it on the next line
- targetlen = maxlinelen
- newchunks += self._split(s, charset, targetlen, splitchars)
- lastchunk, lastcharset = newchunks[-1]
- lastlen = lastcharset.encoded_header_len(lastchunk)
- return self._encode_chunks(newchunks, maxlinelen)
-
-
-
-def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars):
- lines = []
- maxlen = firstlen
- for line in s.splitlines():
- # Ignore any leading whitespace (i.e. continuation whitespace) already
- # on the line, since we'll be adding our own.
- line = line.lstrip()
- if len(line) < maxlen:
- lines.append(line)
- maxlen = restlen
- continue
- # Attempt to split the line at the highest-level syntactic break
- # possible. Note that we don't have a lot of smarts about field
- # syntax; we just try to break on semi-colons, then commas, then
- # whitespace.
- for ch in splitchars:
- if ch in line:
- break
- else:
- # There's nothing useful to split the line on, not even spaces, so
- # just append this line unchanged
- lines.append(line)
- maxlen = restlen
- continue
- # Now split the line on the character plus trailing whitespace
- cre = re.compile(r'%s\s*' % ch)
- if ch in ';,':
- eol = ch
- else:
- eol = ''
- joiner = eol + ' '
- joinlen = len(joiner)
- wslen = len(continuation_ws.replace('\t', SPACE8))
- this = []
- linelen = 0
- for part in cre.split(line):
- curlen = linelen + max(0, len(this)-1) * joinlen
- partlen = len(part)
- onfirstline = not lines
- # We don't want to split after the field name, if we're on the
- # first line and the field name is present in the header string.
- if ch == ' ' and onfirstline and \
- len(this) == 1 and fcre.match(this[0]):
- this.append(part)
- linelen += partlen
- elif curlen + partlen > maxlen:
- if this:
- lines.append(joiner.join(this) + eol)
- # If this part is longer than maxlen and we aren't already
- # splitting on whitespace, try to recursively split this line
- # on whitespace.
- if partlen > maxlen and ch <> ' ':
- subl = _split_ascii(part, maxlen, restlen,
- continuation_ws, ' ')
- lines.extend(subl[:-1])
- this = [subl[-1]]
- else:
- this = [part]
- linelen = wslen + len(this[-1])
- maxlen = restlen
- else:
- this.append(part)
- linelen += partlen
- # Put any left over parts on a line by themselves
- if this:
- lines.append(joiner.join(this))
- return lines
-
-
-
-def _binsplit(splittable, charset, maxlinelen):
- i = 0
- j = len(splittable)
- while i < j:
- # Invariants:
- # 1. splittable[:k] fits for all k <= i (note that we *assume*,
- # at the start, that splittable[:0] fits).
- # 2. splittable[:k] does not fit for any k > j (at the start,
- # this means we shouldn't look at any k > len(splittable)).
- # 3. We don't know about splittable[:k] for k in i+1..j.
- # 4. We want to set i to the largest k that fits, with i <= k <= j.
- #
- m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j
- chunk = charset.from_splittable(splittable[:m], True)
- chunklen = charset.encoded_header_len(chunk)
- if chunklen <= maxlinelen:
- # m is acceptable, so is a new lower bound.
- i = m
- else:
- # m is not acceptable, so final i must be < m.
- j = m - 1
- # i == j. Invariant #1 implies that splittable[:i] fits, and
- # invariant #2 implies that splittable[:i+1] does not fit, so i
- # is what we're looking for.
- first = charset.from_splittable(splittable[:i], False)
- last = charset.from_splittable(splittable[i:], False)
- return first, last
diff --git a/sys/lib/python/email/iterators.py b/sys/lib/python/email/iterators.py
deleted file mode 100644
index e99f2280d..000000000
--- a/sys/lib/python/email/iterators.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Various types of useful iterators and generators."""
-
-__all__ = [
- 'body_line_iterator',
- 'typed_subpart_iterator',
- 'walk',
- # Do not include _structure() since it's part of the debugging API.
- ]
-
-import sys
-from cStringIO import StringIO
-
-
-
-# This function will become a method of the Message class
-def walk(self):
- """Walk over the message tree, yielding each subpart.
-
- The walk is performed in depth-first order. This method is a
- generator.
- """
- yield self
- if self.is_multipart():
- for subpart in self.get_payload():
- for subsubpart in subpart.walk():
- yield subsubpart
-
-
-
-# These two functions are imported into the Iterators.py interface module.
-def body_line_iterator(msg, decode=False):
- """Iterate over the parts, returning string payloads line-by-line.
-
- Optional decode (default False) is passed through to .get_payload().
- """
- for subpart in msg.walk():
- payload = subpart.get_payload(decode=decode)
- if isinstance(payload, basestring):
- for line in StringIO(payload):
- yield line
-
-
-def typed_subpart_iterator(msg, maintype='text', subtype=None):
- """Iterate over the subparts with a given MIME type.
-
- Use `maintype' as the main MIME type to match against; this defaults to
- "text". Optional `subtype' is the MIME subtype to match against; if
- omitted, only the main type is matched.
- """
- for subpart in msg.walk():
- if subpart.get_content_maintype() == maintype:
- if subtype is None or subpart.get_content_subtype() == subtype:
- yield subpart
-
-
-
-def _structure(msg, fp=None, level=0, include_default=False):
- """A handy debugging aid"""
- if fp is None:
- fp = sys.stdout
- tab = ' ' * (level * 4)
- print >> fp, tab + msg.get_content_type(),
- if include_default:
- print >> fp, '[%s]' % msg.get_default_type()
- else:
- print >> fp
- if msg.is_multipart():
- for subpart in msg.get_payload():
- _structure(subpart, fp, level+1, include_default)
diff --git a/sys/lib/python/email/message.py b/sys/lib/python/email/message.py
deleted file mode 100644
index 88ae1833e..000000000
--- a/sys/lib/python/email/message.py
+++ /dev/null
@@ -1,786 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Basic message object for the email package object model."""
-
-__all__ = ['Message']
-
-import re
-import uu
-import binascii
-import warnings
-from cStringIO import StringIO
-
-# Intrapackage imports
-import email.charset
-from email import utils
-from email import errors
-
-SEMISPACE = '; '
-
-# Regular expression used to split header parameters. BAW: this may be too
-# simple. It isn't strictly RFC 2045 (section 5.1) compliant, but it catches
-# most headers found in the wild. We may eventually need a full fledged
-# parser eventually.
-paramre = re.compile(r'\s*;\s*')
-# Regular expression that matches `special' characters in parameters, the
-# existance of which force quoting of the parameter value.
-tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
-
-
-
-# Helper functions
-def _formatparam(param, value=None, quote=True):
- """Convenience function to format and return a key=value pair.
-
- This will quote the value if needed or if quote is true.
- """
- if value is not None and len(value) > 0:
- # A tuple is used for RFC 2231 encoded parameter values where items
- # are (charset, language, value). charset is a string, not a Charset
- # instance.
- if isinstance(value, tuple):
- # Encode as per RFC 2231
- param += '*'
- value = utils.encode_rfc2231(value[2], value[0], value[1])
- # BAW: Please check this. I think that if quote is set it should
- # force quoting even if not necessary.
- if quote or tspecials.search(value):
- return '%s="%s"' % (param, utils.quote(value))
- else:
- return '%s=%s' % (param, value)
- else:
- return param
-
-def _parseparam(s):
- plist = []
- while s[:1] == ';':
- s = s[1:]
- end = s.find(';')
- while end > 0 and s.count('"', 0, end) % 2:
- end = s.find(';', end + 1)
- if end < 0:
- end = len(s)
- f = s[:end]
- if '=' in f:
- i = f.index('=')
- f = f[:i].strip().lower() + '=' + f[i+1:].strip()
- plist.append(f.strip())
- s = s[end:]
- return plist
-
-
-def _unquotevalue(value):
- # This is different than utils.collapse_rfc2231_value() because it doesn't
- # try to convert the value to a unicode. Message.get_param() and
- # Message.get_params() are both currently defined to return the tuple in
- # the face of RFC 2231 parameters.
- if isinstance(value, tuple):
- return value[0], value[1], utils.unquote(value[2])
- else:
- return utils.unquote(value)
-
-
-
-class Message:
- """Basic message object.
-
- A message object is defined as something that has a bunch of RFC 2822
- headers and a payload. It may optionally have an envelope header
- (a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
- multipart or a message/rfc822), then the payload is a list of Message
- objects, otherwise it is a string.
-
- Message objects implement part of the `mapping' interface, which assumes
- there is exactly one occurrance of the header per message. Some headers
- do in fact appear multiple times (e.g. Received) and for those headers,
- you must use the explicit API to set or get all the headers. Not all of
- the mapping methods are implemented.
- """
- def __init__(self):
- self._headers = []
- self._unixfrom = None
- self._payload = None
- self._charset = None
- # Defaults for multipart messages
- self.preamble = self.epilogue = None
- self.defects = []
- # Default content type
- self._default_type = 'text/plain'
-
- def __str__(self):
- """Return the entire formatted message as a string.
- This includes the headers, body, and envelope header.
- """
- return self.as_string(unixfrom=True)
-
- def as_string(self, unixfrom=False):
- """Return the entire formatted message as a string.
- Optional `unixfrom' when True, means include the Unix From_ envelope
- header.
-
- This is a convenience method and may not generate the message exactly
- as you intend because by default it mangles lines that begin with
- "From ". For more flexibility, use the flatten() method of a
- Generator instance.
- """
- from email.Generator import Generator
- fp = StringIO()
- g = Generator(fp)
- g.flatten(self, unixfrom=unixfrom)
- return fp.getvalue()
-
- def is_multipart(self):
- """Return True if the message consists of multiple parts."""
- return isinstance(self._payload, list)
-
- #
- # Unix From_ line
- #
- def set_unixfrom(self, unixfrom):
- self._unixfrom = unixfrom
-
- def get_unixfrom(self):
- return self._unixfrom
-
- #
- # Payload manipulation.
- #
- def attach(self, payload):
- """Add the given payload to the current payload.
-
- The current payload will always be a list of objects after this method
- is called. If you want to set the payload to a scalar object, use
- set_payload() instead.
- """
- if self._payload is None:
- self._payload = [payload]
- else:
- self._payload.append(payload)
-
- def get_payload(self, i=None, decode=False):
- """Return a reference to the payload.
-
- The payload will either be a list object or a string. If you mutate
- the list object, you modify the message's payload in place. Optional
- i returns that index into the payload.
-
- Optional decode is a flag indicating whether the payload should be
- decoded or not, according to the Content-Transfer-Encoding header
- (default is False).
-
- When True and the message is not a multipart, the payload will be
- decoded if this header's value is `quoted-printable' or `base64'. If
- some other encoding is used, or the header is missing, or if the
- payload has bogus data (i.e. bogus base64 or uuencoded data), the
- payload is returned as-is.
-
- If the message is a multipart and the decode flag is True, then None
- is returned.
- """
- if i is None:
- payload = self._payload
- elif not isinstance(self._payload, list):
- raise TypeError('Expected list, got %s' % type(self._payload))
- else:
- payload = self._payload[i]
- if decode:
- if self.is_multipart():
- return None
- cte = self.get('content-transfer-encoding', '').lower()
- if cte == 'quoted-printable':
- return utils._qdecode(payload)
- elif cte == 'base64':
- try:
- return utils._bdecode(payload)
- except binascii.Error:
- # Incorrect padding
- return payload
- elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
- sfp = StringIO()
- try:
- uu.decode(StringIO(payload+'\n'), sfp, quiet=True)
- payload = sfp.getvalue()
- except uu.Error:
- # Some decoding problem
- return payload
- # Everything else, including encodings with 8bit or 7bit are returned
- # unchanged.
- return payload
-
- def set_payload(self, payload, charset=None):
- """Set the payload to the given value.
-
- Optional charset sets the message's default character set. See
- set_charset() for details.
- """
- self._payload = payload
- if charset is not None:
- self.set_charset(charset)
-
- def set_charset(self, charset):
- """Set the charset of the payload to a given character set.
-
- charset can be a Charset instance, a string naming a character set, or
- None. If it is a string it will be converted to a Charset instance.
- If charset is None, the charset parameter will be removed from the
- Content-Type field. Anything else will generate a TypeError.
-
- The message will be assumed to be of type text/* encoded with
- charset.input_charset. It will be converted to charset.output_charset
- and encoded properly, if needed, when generating the plain text
- representation of the message. MIME headers (MIME-Version,
- Content-Type, Content-Transfer-Encoding) will be added as needed.
-
- """
- if charset is None:
- self.del_param('charset')
- self._charset = None
- return
- if isinstance(charset, basestring):
- charset = email.charset.Charset(charset)
- if not isinstance(charset, email.charset.Charset):
- raise TypeError(charset)
- # BAW: should we accept strings that can serve as arguments to the
- # Charset constructor?
- self._charset = charset
- if not self.has_key('MIME-Version'):
- self.add_header('MIME-Version', '1.0')
- if not self.has_key('Content-Type'):
- self.add_header('Content-Type', 'text/plain',
- charset=charset.get_output_charset())
- else:
- self.set_param('charset', charset.get_output_charset())
- if str(charset) <> charset.get_output_charset():
- self._payload = charset.body_encode(self._payload)
- if not self.has_key('Content-Transfer-Encoding'):
- cte = charset.get_body_encoding()
- try:
- cte(self)
- except TypeError:
- self._payload = charset.body_encode(self._payload)
- self.add_header('Content-Transfer-Encoding', cte)
-
- def get_charset(self):
- """Return the Charset instance associated with the message's payload.
- """
- return self._charset
-
- #
- # MAPPING INTERFACE (partial)
- #
- def __len__(self):
- """Return the total number of headers, including duplicates."""
- return len(self._headers)
-
- def __getitem__(self, name):
- """Get a header value.
-
- Return None if the header is missing instead of raising an exception.
-
- Note that if the header appeared multiple times, exactly which
- occurrance gets returned is undefined. Use get_all() to get all
- the values matching a header field name.
- """
- return self.get(name)
-
- def __setitem__(self, name, val):
- """Set the value of a header.
-
- Note: this does not overwrite an existing header with the same field
- name. Use __delitem__() first to delete any existing headers.
- """
- self._headers.append((name, val))
-
- def __delitem__(self, name):
- """Delete all occurrences of a header, if present.
-
- Does not raise an exception if the header is missing.
- """
- name = name.lower()
- newheaders = []
- for k, v in self._headers:
- if k.lower() <> name:
- newheaders.append((k, v))
- self._headers = newheaders
-
- def __contains__(self, name):
- return name.lower() in [k.lower() for k, v in self._headers]
-
- def has_key(self, name):
- """Return true if the message contains the header."""
- missing = object()
- return self.get(name, missing) is not missing
-
- def keys(self):
- """Return a list of all the message's header field names.
-
- These will be sorted in the order they appeared in the original
- message, or were added to the message, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return [k for k, v in self._headers]
-
- def values(self):
- """Return a list of all the message's header values.
-
- These will be sorted in the order they appeared in the original
- message, or were added to the message, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return [v for k, v in self._headers]
-
- def items(self):
- """Get all the message's header fields and values.
-
- These will be sorted in the order they appeared in the original
- message, or were added to the message, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return self._headers[:]
-
- def get(self, name, failobj=None):
- """Get a header value.
-
- Like __getitem__() but return failobj instead of None when the field
- is missing.
- """
- name = name.lower()
- for k, v in self._headers:
- if k.lower() == name:
- return v
- return failobj
-
- #
- # Additional useful stuff
- #
-
- def get_all(self, name, failobj=None):
- """Return a list of all the values for the named field.
-
- These will be sorted in the order they appeared in the original
- message, and may contain duplicates. Any fields deleted and
- re-inserted are always appended to the header list.
-
- If no such fields exist, failobj is returned (defaults to None).
- """
- values = []
- name = name.lower()
- for k, v in self._headers:
- if k.lower() == name:
- values.append(v)
- if not values:
- return failobj
- return values
-
- def add_header(self, _name, _value, **_params):
- """Extended header setting.
-
- name is the header field to add. keyword arguments can be used to set
- additional parameters for the header field, with underscores converted
- to dashes. Normally the parameter will be added as key="value" unless
- value is None, in which case only the key will be added.
-
- Example:
-
- msg.add_header('content-disposition', 'attachment', filename='bud.gif')
- """
- parts = []
- for k, v in _params.items():
- if v is None:
- parts.append(k.replace('_', '-'))
- else:
- parts.append(_formatparam(k.replace('_', '-'), v))
- if _value is not None:
- parts.insert(0, _value)
- self._headers.append((_name, SEMISPACE.join(parts)))
-
- def replace_header(self, _name, _value):
- """Replace a header.
-
- Replace the first matching header found in the message, retaining
- header order and case. If no matching header was found, a KeyError is
- raised.
- """
- _name = _name.lower()
- for i, (k, v) in zip(range(len(self._headers)), self._headers):
- if k.lower() == _name:
- self._headers[i] = (k, _value)
- break
- else:
- raise KeyError(_name)
-
- #
- # Use these three methods instead of the three above.
- #
-
- def get_content_type(self):
- """Return the message's content type.
-
- The returned string is coerced to lower case of the form
- `maintype/subtype'. If there was no Content-Type header in the
- message, the default type as given by get_default_type() will be
- returned. Since according to RFC 2045, messages always have a default
- type this will always return a value.
-
- RFC 2045 defines a message's default type to be text/plain unless it
- appears inside a multipart/digest container, in which case it would be
- message/rfc822.
- """
- missing = object()
- value = self.get('content-type', missing)
- if value is missing:
- # This should have no parameters
- return self.get_default_type()
- ctype = paramre.split(value)[0].lower().strip()
- # RFC 2045, section 5.2 says if its invalid, use text/plain
- if ctype.count('/') <> 1:
- return 'text/plain'
- return ctype
-
- def get_content_maintype(self):
- """Return the message's main content type.
-
- This is the `maintype' part of the string returned by
- get_content_type().
- """
- ctype = self.get_content_type()
- return ctype.split('/')[0]
-
- def get_content_subtype(self):
- """Returns the message's sub-content type.
-
- This is the `subtype' part of the string returned by
- get_content_type().
- """
- ctype = self.get_content_type()
- return ctype.split('/')[1]
-
- def get_default_type(self):
- """Return the `default' content type.
-
- Most messages have a default content type of text/plain, except for
- messages that are subparts of multipart/digest containers. Such
- subparts have a default content type of message/rfc822.
- """
- return self._default_type
-
- def set_default_type(self, ctype):
- """Set the `default' content type.
-
- ctype should be either "text/plain" or "message/rfc822", although this
- is not enforced. The default content type is not stored in the
- Content-Type header.
- """
- self._default_type = ctype
-
- def _get_params_preserve(self, failobj, header):
- # Like get_params() but preserves the quoting of values. BAW:
- # should this be part of the public interface?
- missing = object()
- value = self.get(header, missing)
- if value is missing:
- return failobj
- params = []
- for p in _parseparam(';' + value):
- try:
- name, val = p.split('=', 1)
- name = name.strip()
- val = val.strip()
- except ValueError:
- # Must have been a bare attribute
- name = p.strip()
- val = ''
- params.append((name, val))
- params = utils.decode_params(params)
- return params
-
- def get_params(self, failobj=None, header='content-type', unquote=True):
- """Return the message's Content-Type parameters, as a list.
-
- The elements of the returned list are 2-tuples of key/value pairs, as
- split on the `=' sign. The left hand side of the `=' is the key,
- while the right hand side is the value. If there is no `=' sign in
- the parameter the value is the empty string. The value is as
- described in the get_param() method.
-
- Optional failobj is the object to return if there is no Content-Type
- header. Optional header is the header to search instead of
- Content-Type. If unquote is True, the value is unquoted.
- """
- missing = object()
- params = self._get_params_preserve(missing, header)
- if params is missing:
- return failobj
- if unquote:
- return [(k, _unquotevalue(v)) for k, v in params]
- else:
- return params
-
- def get_param(self, param, failobj=None, header='content-type',
- unquote=True):
- """Return the parameter value if found in the Content-Type header.
-
- Optional failobj is the object to return if there is no Content-Type
- header, or the Content-Type header has no such parameter. Optional
- header is the header to search instead of Content-Type.
-
- Parameter keys are always compared case insensitively. The return
- value can either be a string, or a 3-tuple if the parameter was RFC
- 2231 encoded. When it's a 3-tuple, the elements of the value are of
- the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
- LANGUAGE can be None, in which case you should consider VALUE to be
- encoded in the us-ascii charset. You can usually ignore LANGUAGE.
-
- Your application should be prepared to deal with 3-tuple return
- values, and can convert the parameter to a Unicode string like so:
-
- param = msg.get_param('foo')
- if isinstance(param, tuple):
- param = unicode(param[2], param[0] or 'us-ascii')
-
- In any case, the parameter value (either the returned string, or the
- VALUE item in the 3-tuple) is always unquoted, unless unquote is set
- to False.
- """
- if not self.has_key(header):
- return failobj
- for k, v in self._get_params_preserve(failobj, header):
- if k.lower() == param.lower():
- if unquote:
- return _unquotevalue(v)
- else:
- return v
- return failobj
-
- def set_param(self, param, value, header='Content-Type', requote=True,
- charset=None, language=''):
- """Set a parameter in the Content-Type header.
-
- If the parameter already exists in the header, its value will be
- replaced with the new value.
-
- If header is Content-Type and has not yet been defined for this
- message, it will be set to "text/plain" and the new parameter and
- value will be appended as per RFC 2045.
-
- An alternate header can specified in the header argument, and all
- parameters will be quoted as necessary unless requote is False.
-
- If charset is specified, the parameter will be encoded according to RFC
- 2231. Optional language specifies the RFC 2231 language, defaulting
- to the empty string. Both charset and language should be strings.
- """
- if not isinstance(value, tuple) and charset:
- value = (charset, language, value)
-
- if not self.has_key(header) and header.lower() == 'content-type':
- ctype = 'text/plain'
- else:
- ctype = self.get(header)
- if not self.get_param(param, header=header):
- if not ctype:
- ctype = _formatparam(param, value, requote)
- else:
- ctype = SEMISPACE.join(
- [ctype, _formatparam(param, value, requote)])
- else:
- ctype = ''
- for old_param, old_value in self.get_params(header=header,
- unquote=requote):
- append_param = ''
- if old_param.lower() == param.lower():
- append_param = _formatparam(param, value, requote)
- else:
- append_param = _formatparam(old_param, old_value, requote)
- if not ctype:
- ctype = append_param
- else:
- ctype = SEMISPACE.join([ctype, append_param])
- if ctype <> self.get(header):
- del self[header]
- self[header] = ctype
-
- def del_param(self, param, header='content-type', requote=True):
- """Remove the given parameter completely from the Content-Type header.
-
- The header will be re-written in place without the parameter or its
- value. All values will be quoted as necessary unless requote is
- False. Optional header specifies an alternative to the Content-Type
- header.
- """
- if not self.has_key(header):
- return
- new_ctype = ''
- for p, v in self.get_params(header=header, unquote=requote):
- if p.lower() <> param.lower():
- if not new_ctype:
- new_ctype = _formatparam(p, v, requote)
- else:
- new_ctype = SEMISPACE.join([new_ctype,
- _formatparam(p, v, requote)])
- if new_ctype <> self.get(header):
- del self[header]
- self[header] = new_ctype
-
- def set_type(self, type, header='Content-Type', requote=True):
- """Set the main type and subtype for the Content-Type header.
-
- type must be a string in the form "maintype/subtype", otherwise a
- ValueError is raised.
-
- This method replaces the Content-Type header, keeping all the
- parameters in place. If requote is False, this leaves the existing
- header's quoting as is. Otherwise, the parameters will be quoted (the
- default).
-
- An alternative header can be specified in the header argument. When
- the Content-Type header is set, we'll always also add a MIME-Version
- header.
- """
- # BAW: should we be strict?
- if not type.count('/') == 1:
- raise ValueError
- # Set the Content-Type, you get a MIME-Version
- if header.lower() == 'content-type':
- del self['mime-version']
- self['MIME-Version'] = '1.0'
- if not self.has_key(header):
- self[header] = type
- return
- params = self.get_params(header=header, unquote=requote)
- del self[header]
- self[header] = type
- # Skip the first param; it's the old type.
- for p, v in params[1:]:
- self.set_param(p, v, header, requote)
-
- def get_filename(self, failobj=None):
- """Return the filename associated with the payload if present.
-
- The filename is extracted from the Content-Disposition header's
- `filename' parameter, and it is unquoted. If that header is missing
- the `filename' parameter, this method falls back to looking for the
- `name' parameter.
- """
- missing = object()
- filename = self.get_param('filename', missing, 'content-disposition')
- if filename is missing:
- filename = self.get_param('name', missing, 'content-disposition')
- if filename is missing:
- return failobj
- return utils.collapse_rfc2231_value(filename).strip()
-
- def get_boundary(self, failobj=None):
- """Return the boundary associated with the payload if present.
-
- The boundary is extracted from the Content-Type header's `boundary'
- parameter, and it is unquoted.
- """
- missing = object()
- boundary = self.get_param('boundary', missing)
- if boundary is missing:
- return failobj
- # RFC 2046 says that boundaries may begin but not end in w/s
- return utils.collapse_rfc2231_value(boundary).rstrip()
-
- def set_boundary(self, boundary):
- """Set the boundary parameter in Content-Type to 'boundary'.
-
- This is subtly different than deleting the Content-Type header and
- adding a new one with a new boundary parameter via add_header(). The
- main difference is that using the set_boundary() method preserves the
- order of the Content-Type header in the original message.
-
- HeaderParseError is raised if the message has no Content-Type header.
- """
- missing = object()
- params = self._get_params_preserve(missing, 'content-type')
- if params is missing:
- # There was no Content-Type header, and we don't know what type
- # to set it to, so raise an exception.
- raise errors.HeaderParseError('No Content-Type header found')
- newparams = []
- foundp = False
- for pk, pv in params:
- if pk.lower() == 'boundary':
- newparams.append(('boundary', '"%s"' % boundary))
- foundp = True
- else:
- newparams.append((pk, pv))
- if not foundp:
- # The original Content-Type header had no boundary attribute.
- # Tack one on the end. BAW: should we raise an exception
- # instead???
- newparams.append(('boundary', '"%s"' % boundary))
- # Replace the existing Content-Type header with the new value
- newheaders = []
- for h, v in self._headers:
- if h.lower() == 'content-type':
- parts = []
- for k, v in newparams:
- if v == '':
- parts.append(k)
- else:
- parts.append('%s=%s' % (k, v))
- newheaders.append((h, SEMISPACE.join(parts)))
-
- else:
- newheaders.append((h, v))
- self._headers = newheaders
-
- def get_content_charset(self, failobj=None):
- """Return the charset parameter of the Content-Type header.
-
- The returned string is always coerced to lower case. If there is no
- Content-Type header, or if that header has no charset parameter,
- failobj is returned.
- """
- missing = object()
- charset = self.get_param('charset', missing)
- if charset is missing:
- return failobj
- if isinstance(charset, tuple):
- # RFC 2231 encoded, so decode it, and it better end up as ascii.
- pcharset = charset[0] or 'us-ascii'
- try:
- # LookupError will be raised if the charset isn't known to
- # Python. UnicodeError will be raised if the encoded text
- # contains a character not in the charset.
- charset = unicode(charset[2], pcharset).encode('us-ascii')
- except (LookupError, UnicodeError):
- charset = charset[2]
- # charset character must be in us-ascii range
- try:
- if isinstance(charset, str):
- charset = unicode(charset, 'us-ascii')
- charset = charset.encode('us-ascii')
- except UnicodeError:
- return failobj
- # RFC 2046, $4.1.2 says charsets are not case sensitive
- return charset.lower()
-
- def get_charsets(self, failobj=None):
- """Return a list containing the charset(s) used in this message.
-
- The returned list of items describes the Content-Type headers'
- charset parameter for this message and all the subparts in its
- payload.
-
- Each item will either be a string (the value of the charset parameter
- in the Content-Type header of that part) or the value of the
- 'failobj' parameter (defaults to None), if the part does not have a
- main MIME type of "text", or the charset is not defined.
-
- The list will contain one string for each part of the message, plus
- one for the container message (i.e. self), so that a non-multipart
- message will still return a list of length 1.
- """
- return [part.get_content_charset(failobj) for part in self.walk()]
-
- # I.e. def walk(self): ...
- from email.Iterators import walk
diff --git a/sys/lib/python/email/mime/__init__.py b/sys/lib/python/email/mime/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/sys/lib/python/email/mime/__init__.py
+++ /dev/null
diff --git a/sys/lib/python/email/mime/application.py b/sys/lib/python/email/mime/application.py
deleted file mode 100644
index 6f8bb8a82..000000000
--- a/sys/lib/python/email/mime/application.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Keith Dart
-# Contact: email-sig@python.org
-
-"""Class representing application/* type MIME documents."""
-
-__all__ = ["MIMEApplication"]
-
-from email import encoders
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-class MIMEApplication(MIMENonMultipart):
- """Class for generating application/* MIME documents."""
-
- def __init__(self, _data, _subtype='octet-stream',
- _encoder=encoders.encode_base64, **_params):
- """Create an application/* type MIME document.
-
- _data is a string containing the raw applicatoin data.
-
- _subtype is the MIME content type subtype, defaulting to
- 'octet-stream'.
-
- _encoder is a function which will perform the actual encoding for
- transport of the application data, defaulting to base64 encoding.
-
- Any additional keyword arguments are passed to the base class
- constructor, which turns them into parameters on the Content-Type
- header.
- """
- if _subtype is None:
- raise TypeError('Invalid application MIME subtype')
- MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
- self.set_payload(_data)
- _encoder(self)
diff --git a/sys/lib/python/email/mime/audio.py b/sys/lib/python/email/mime/audio.py
deleted file mode 100644
index c7290c4b1..000000000
--- a/sys/lib/python/email/mime/audio.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Anthony Baxter
-# Contact: email-sig@python.org
-
-"""Class representing audio/* type MIME documents."""
-
-__all__ = ['MIMEAudio']
-
-import sndhdr
-
-from cStringIO import StringIO
-from email import encoders
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-
-_sndhdr_MIMEmap = {'au' : 'basic',
- 'wav' :'x-wav',
- 'aiff':'x-aiff',
- 'aifc':'x-aiff',
- }
-
-# There are others in sndhdr that don't have MIME types. :(
-# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
-def _whatsnd(data):
- """Try to identify a sound file type.
-
- sndhdr.what() has a pretty cruddy interface, unfortunately. This is why
- we re-do it here. It would be easier to reverse engineer the Unix 'file'
- command and use the standard 'magic' file, as shipped with a modern Unix.
- """
- hdr = data[:512]
- fakefile = StringIO(hdr)
- for testfn in sndhdr.tests:
- res = testfn(hdr, fakefile)
- if res is not None:
- return _sndhdr_MIMEmap.get(res[0])
- return None
-
-
-
-class MIMEAudio(MIMENonMultipart):
- """Class for generating audio/* MIME documents."""
-
- def __init__(self, _audiodata, _subtype=None,
- _encoder=encoders.encode_base64, **_params):
- """Create an audio/* type MIME document.
-
- _audiodata is a string containing the raw audio data. If this data
- can be decoded by the standard Python `sndhdr' module, then the
- subtype will be automatically included in the Content-Type header.
- Otherwise, you can specify the specific audio subtype via the
- _subtype parameter. If _subtype is not given, and no subtype can be
- guessed, a TypeError is raised.
-
- _encoder is a function which will perform the actual encoding for
- transport of the image data. It takes one argument, which is this
- Image instance. It should use get_payload() and set_payload() to
- change the payload to the encoded form. It should also add any
- Content-Transfer-Encoding or other headers to the message as
- necessary. The default encoding is Base64.
-
- Any additional keyword arguments are passed to the base class
- constructor, which turns them into parameters on the Content-Type
- header.
- """
- if _subtype is None:
- _subtype = _whatsnd(_audiodata)
- if _subtype is None:
- raise TypeError('Could not find audio MIME subtype')
- MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
- self.set_payload(_audiodata)
- _encoder(self)
diff --git a/sys/lib/python/email/mime/base.py b/sys/lib/python/email/mime/base.py
deleted file mode 100644
index ac919258b..000000000
--- a/sys/lib/python/email/mime/base.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Base class for MIME specializations."""
-
-__all__ = ['MIMEBase']
-
-from email import message
-
-
-
-class MIMEBase(message.Message):
- """Base class for MIME specializations."""
-
- def __init__(self, _maintype, _subtype, **_params):
- """This constructor adds a Content-Type: and a MIME-Version: header.
-
- The Content-Type: header is taken from the _maintype and _subtype
- arguments. Additional parameters for this header are taken from the
- keyword arguments.
- """
- message.Message.__init__(self)
- ctype = '%s/%s' % (_maintype, _subtype)
- self.add_header('Content-Type', ctype, **_params)
- self['MIME-Version'] = '1.0'
diff --git a/sys/lib/python/email/mime/image.py b/sys/lib/python/email/mime/image.py
deleted file mode 100644
index 556382323..000000000
--- a/sys/lib/python/email/mime/image.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Class representing image/* type MIME documents."""
-
-__all__ = ['MIMEImage']
-
-import imghdr
-
-from email import encoders
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-
-class MIMEImage(MIMENonMultipart):
- """Class for generating image/* type MIME documents."""
-
- def __init__(self, _imagedata, _subtype=None,
- _encoder=encoders.encode_base64, **_params):
- """Create an image/* type MIME document.
-
- _imagedata is a string containing the raw image data. If this data
- can be decoded by the standard Python `imghdr' module, then the
- subtype will be automatically included in the Content-Type header.
- Otherwise, you can specify the specific image subtype via the _subtype
- parameter.
-
- _encoder is a function which will perform the actual encoding for
- transport of the image data. It takes one argument, which is this
- Image instance. It should use get_payload() and set_payload() to
- change the payload to the encoded form. It should also add any
- Content-Transfer-Encoding or other headers to the message as
- necessary. The default encoding is Base64.
-
- Any additional keyword arguments are passed to the base class
- constructor, which turns them into parameters on the Content-Type
- header.
- """
- if _subtype is None:
- _subtype = imghdr.what(None, _imagedata)
- if _subtype is None:
- raise TypeError('Could not guess image MIME subtype')
- MIMENonMultipart.__init__(self, 'image', _subtype, **_params)
- self.set_payload(_imagedata)
- _encoder(self)
diff --git a/sys/lib/python/email/mime/message.py b/sys/lib/python/email/mime/message.py
deleted file mode 100644
index 275dbfd08..000000000
--- a/sys/lib/python/email/mime/message.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Class representing message/* MIME documents."""
-
-__all__ = ['MIMEMessage']
-
-from email import message
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-
-class MIMEMessage(MIMENonMultipart):
- """Class representing message/* MIME documents."""
-
- def __init__(self, _msg, _subtype='rfc822'):
- """Create a message/* type MIME document.
-
- _msg is a message object and must be an instance of Message, or a
- derived class of Message, otherwise a TypeError is raised.
-
- Optional _subtype defines the subtype of the contained message. The
- default is "rfc822" (this is defined by the MIME standard, even though
- the term "rfc822" is technically outdated by RFC 2822).
- """
- MIMENonMultipart.__init__(self, 'message', _subtype)
- if not isinstance(_msg, message.Message):
- raise TypeError('Argument is not an instance of Message')
- # It's convenient to use this base class method. We need to do it
- # this way or we'll get an exception
- message.Message.attach(self, _msg)
- # And be sure our default type is set correctly
- self.set_default_type('message/rfc822')
diff --git a/sys/lib/python/email/mime/multipart.py b/sys/lib/python/email/mime/multipart.py
deleted file mode 100644
index 5c8c9dbc4..000000000
--- a/sys/lib/python/email/mime/multipart.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Base class for MIME multipart/* type messages."""
-
-__all__ = ['MIMEMultipart']
-
-from email.mime.base import MIMEBase
-
-
-
-class MIMEMultipart(MIMEBase):
- """Base class for MIME multipart/* type messages."""
-
- def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
- **_params):
- """Creates a multipart/* type message.
-
- By default, creates a multipart/mixed message, with proper
- Content-Type and MIME-Version headers.
-
- _subtype is the subtype of the multipart content type, defaulting to
- `mixed'.
-
- boundary is the multipart boundary string. By default it is
- calculated as needed.
-
- _subparts is a sequence of initial subparts for the payload. It
- must be an iterable object, such as a list. You can always
- attach new subparts to the message by using the attach() method.
-
- Additional parameters for the Content-Type header are taken from the
- keyword arguments (or passed into the _params argument).
- """
- MIMEBase.__init__(self, 'multipart', _subtype, **_params)
- if _subparts:
- for p in _subparts:
- self.attach(p)
- if boundary:
- self.set_boundary(boundary)
diff --git a/sys/lib/python/email/mime/nonmultipart.py b/sys/lib/python/email/mime/nonmultipart.py
deleted file mode 100644
index dd280b51d..000000000
--- a/sys/lib/python/email/mime/nonmultipart.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2002-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Base class for MIME type messages that are not multipart."""
-
-__all__ = ['MIMENonMultipart']
-
-from email import errors
-from email.mime.base import MIMEBase
-
-
-
-class MIMENonMultipart(MIMEBase):
- """Base class for MIME multipart/* type messages."""
-
- __pychecker__ = 'unusednames=payload'
-
- def attach(self, payload):
- # The public API prohibits attaching multiple subparts to MIMEBase
- # derived subtypes since none of them are, by definition, of content
- # type multipart/*
- raise errors.MultipartConversionError(
- 'Cannot attach additional subparts to non-multipart/*')
-
- del __pychecker__
diff --git a/sys/lib/python/email/mime/text.py b/sys/lib/python/email/mime/text.py
deleted file mode 100644
index 5747db5d6..000000000
--- a/sys/lib/python/email/mime/text.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Class representing text/* type MIME documents."""
-
-__all__ = ['MIMEText']
-
-from email.encoders import encode_7or8bit
-from email.mime.nonmultipart import MIMENonMultipart
-
-
-
-class MIMEText(MIMENonMultipart):
- """Class for generating text/* type MIME documents."""
-
- def __init__(self, _text, _subtype='plain', _charset='us-ascii'):
- """Create a text/* type MIME document.
-
- _text is the string for this message object.
-
- _subtype is the MIME sub content type, defaulting to "plain".
-
- _charset is the character set parameter added to the Content-Type
- header. This defaults to "us-ascii". Note that as a side-effect, the
- Content-Transfer-Encoding header will also be set.
- """
- MIMENonMultipart.__init__(self, 'text', _subtype,
- **{'charset': _charset})
- self.set_payload(_text, _charset)
diff --git a/sys/lib/python/email/parser.py b/sys/lib/python/email/parser.py
deleted file mode 100644
index 2fcaf2545..000000000
--- a/sys/lib/python/email/parser.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
-# Contact: email-sig@python.org
-
-"""A parser of RFC 2822 and MIME email messages."""
-
-__all__ = ['Parser', 'HeaderParser']
-
-import warnings
-from cStringIO import StringIO
-
-from email.feedparser import FeedParser
-from email.message import Message
-
-
-
-class Parser:
- def __init__(self, *args, **kws):
- """Parser of RFC 2822 and MIME email messages.
-
- Creates an in-memory object tree representing the email message, which
- can then be manipulated and turned over to a Generator to return the
- textual representation of the message.
-
- The string must be formatted as a block of RFC 2822 headers and header
- continuation lines, optionally preceeded by a `Unix-from' header. The
- header block is terminated either by the end of the string or by a
- blank line.
-
- _class is the class to instantiate for new message objects when they
- must be created. This class must have a constructor that can take
- zero arguments. Default is Message.Message.
- """
- if len(args) >= 1:
- if '_class' in kws:
- raise TypeError("Multiple values for keyword arg '_class'")
- kws['_class'] = args[0]
- if len(args) == 2:
- if 'strict' in kws:
- raise TypeError("Multiple values for keyword arg 'strict'")
- kws['strict'] = args[1]
- if len(args) > 2:
- raise TypeError('Too many arguments')
- if '_class' in kws:
- self._class = kws['_class']
- del kws['_class']
- else:
- self._class = Message
- if 'strict' in kws:
- warnings.warn("'strict' argument is deprecated (and ignored)",
- DeprecationWarning, 2)
- del kws['strict']
- if kws:
- raise TypeError('Unexpected keyword arguments')
-
- def parse(self, fp, headersonly=False):
- """Create a message structure from the data in a file.
-
- Reads all the data from the file and returns the root of the message
- structure. Optional headersonly is a flag specifying whether to stop
- parsing after reading the headers or not. The default is False,
- meaning it parses the entire contents of the file.
- """
- feedparser = FeedParser(self._class)
- if headersonly:
- feedparser._set_headersonly()
- while True:
- data = fp.read(8192)
- if not data:
- break
- feedparser.feed(data)
- return feedparser.close()
-
- def parsestr(self, text, headersonly=False):
- """Create a message structure from a string.
-
- Returns the root of the message structure. Optional headersonly is a
- flag specifying whether to stop parsing after reading the headers or
- not. The default is False, meaning it parses the entire contents of
- the file.
- """
- return self.parse(StringIO(text), headersonly=headersonly)
-
-
-
-class HeaderParser(Parser):
- def parse(self, fp, headersonly=True):
- return Parser.parse(self, fp, True)
-
- def parsestr(self, text, headersonly=True):
- return Parser.parsestr(self, text, True)
diff --git a/sys/lib/python/email/quoprimime.py b/sys/lib/python/email/quoprimime.py
deleted file mode 100644
index a5658dd3f..000000000
--- a/sys/lib/python/email/quoprimime.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Ben Gertzfield
-# Contact: email-sig@python.org
-
-"""Quoted-printable content transfer encoding per RFCs 2045-2047.
-
-This module handles the content transfer encoding method defined in RFC 2045
-to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
-safely encode text that is in a character set similar to the 7-bit US ASCII
-character set, but that includes some 8-bit characters that are normally not
-allowed in email bodies or headers.
-
-Quoted-printable is very space-inefficient for encoding binary files; use the
-email.base64MIME module for that instead.
-
-This module provides an interface to encode and decode both headers and bodies
-with quoted-printable encoding.
-
-RFC 2045 defines a method for including character set information in an
-`encoded-word' in a header. This method is commonly used for 8-bit real names
-in To:/From:/Cc: etc. fields, as well as Subject: lines.
-
-This module does not do the line wrapping or end-of-line character
-conversion necessary for proper internationalized headers; it only
-does dumb encoding and decoding. To deal with the various line
-wrapping issues, use the email.Header module.
-"""
-
-__all__ = [
- 'body_decode',
- 'body_encode',
- 'body_quopri_check',
- 'body_quopri_len',
- 'decode',
- 'decodestring',
- 'encode',
- 'encodestring',
- 'header_decode',
- 'header_encode',
- 'header_quopri_check',
- 'header_quopri_len',
- 'quote',
- 'unquote',
- ]
-
-import re
-
-from string import hexdigits
-from email.utils import fix_eols
-
-CRLF = '\r\n'
-NL = '\n'
-
-# See also Charset.py
-MISC_LEN = 7
-
-hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
-bqre = re.compile(r'[^ !-<>-~\t]')
-
-
-
-# Helpers
-def header_quopri_check(c):
- """Return True if the character should be escaped with header quopri."""
- return bool(hqre.match(c))
-
-
-def body_quopri_check(c):
- """Return True if the character should be escaped with body quopri."""
- return bool(bqre.match(c))
-
-
-def header_quopri_len(s):
- """Return the length of str when it is encoded with header quopri."""
- count = 0
- for c in s:
- if hqre.match(c):
- count += 3
- else:
- count += 1
- return count
-
-
-def body_quopri_len(str):
- """Return the length of str when it is encoded with body quopri."""
- count = 0
- for c in str:
- if bqre.match(c):
- count += 3
- else:
- count += 1
- return count
-
-
-def _max_append(L, s, maxlen, extra=''):
- if not L:
- L.append(s.lstrip())
- elif len(L[-1]) + len(s) <= maxlen:
- L[-1] += extra + s
- else:
- L.append(s.lstrip())
-
-
-def unquote(s):
- """Turn a string in the form =AB to the ASCII character with value 0xab"""
- return chr(int(s[1:3], 16))
-
-
-def quote(c):
- return "=%02X" % ord(c)
-
-
-
-def header_encode(header, charset="iso-8859-1", keep_eols=False,
- maxlinelen=76, eol=NL):
- """Encode a single header line with quoted-printable (like) encoding.
-
- Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
- used specifically for email header fields to allow charsets with mostly 7
- bit characters (and some 8 bit) to remain more or less readable in non-RFC
- 2045 aware mail clients.
-
- charset names the character set to use to encode the header. It defaults
- to iso-8859-1.
-
- The resulting string will be in the form:
-
- "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
- =?charset?q?Silly_=C8nglish_Kn=EEghts?="
-
- with each line wrapped safely at, at most, maxlinelen characters (defaults
- to 76 characters). If maxlinelen is None, the entire string is encoded in
- one chunk with no splitting.
-
- End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
- to the canonical email line separator \\r\\n unless the keep_eols
- parameter is True (the default is False).
-
- Each line of the header will be terminated in the value of eol, which
- defaults to "\\n". Set this to "\\r\\n" if you are using the result of
- this function directly in email.
- """
- # Return empty headers unchanged
- if not header:
- return header
-
- if not keep_eols:
- header = fix_eols(header)
-
- # Quopri encode each line, in encoded chunks no greater than maxlinelen in
- # length, after the RFC chrome is added in.
- quoted = []
- if maxlinelen is None:
- # An obnoxiously large number that's good enough
- max_encoded = 100000
- else:
- max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
-
- for c in header:
- # Space may be represented as _ instead of =20 for readability
- if c == ' ':
- _max_append(quoted, '_', max_encoded)
- # These characters can be included verbatim
- elif not hqre.match(c):
- _max_append(quoted, c, max_encoded)
- # Otherwise, replace with hex value like =E2
- else:
- _max_append(quoted, "=%02X" % ord(c), max_encoded)
-
- # Now add the RFC chrome to each encoded chunk and glue the chunks
- # together. BAW: should we be able to specify the leading whitespace in
- # the joiner?
- joiner = eol + ' '
- return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
-
-
-
-def encode(body, binary=False, maxlinelen=76, eol=NL):
- """Encode with quoted-printable, wrapping at maxlinelen characters.
-
- If binary is False (the default), end-of-line characters will be converted
- to the canonical email end-of-line sequence \\r\\n. Otherwise they will
- be left verbatim.
-
- Each line of encoded text will end with eol, which defaults to "\\n". Set
- this to "\\r\\n" if you will be using the result of this function directly
- in an email.
-
- Each line will be wrapped at, at most, maxlinelen characters (defaults to
- 76 characters). Long lines will have the `soft linefeed' quoted-printable
- character "=" appended to them, so the decoded text will be identical to
- the original text.
- """
- if not body:
- return body
-
- if not binary:
- body = fix_eols(body)
-
- # BAW: We're accumulating the body text by string concatenation. That
- # can't be very efficient, but I don't have time now to rewrite it. It
- # just feels like this algorithm could be more efficient.
- encoded_body = ''
- lineno = -1
- # Preserve line endings here so we can check later to see an eol needs to
- # be added to the output later.
- lines = body.splitlines(1)
- for line in lines:
- # But strip off line-endings for processing this line.
- if line.endswith(CRLF):
- line = line[:-2]
- elif line[-1] in CRLF:
- line = line[:-1]
-
- lineno += 1
- encoded_line = ''
- prev = None
- linelen = len(line)
- # Now we need to examine every character to see if it needs to be
- # quopri encoded. BAW: again, string concatenation is inefficient.
- for j in range(linelen):
- c = line[j]
- prev = c
- if bqre.match(c):
- c = quote(c)
- elif j+1 == linelen:
- # Check for whitespace at end of line; special case
- if c not in ' \t':
- encoded_line += c
- prev = c
- continue
- # Check to see to see if the line has reached its maximum length
- if len(encoded_line) + len(c) >= maxlinelen:
- encoded_body += encoded_line + '=' + eol
- encoded_line = ''
- encoded_line += c
- # Now at end of line..
- if prev and prev in ' \t':
- # Special case for whitespace at end of file
- if lineno + 1 == len(lines):
- prev = quote(prev)
- if len(encoded_line) + len(prev) > maxlinelen:
- encoded_body += encoded_line + '=' + eol + prev
- else:
- encoded_body += encoded_line + prev
- # Just normal whitespace at end of line
- else:
- encoded_body += encoded_line + prev + '=' + eol
- encoded_line = ''
- # Now look at the line we just finished and it has a line ending, we
- # need to add eol to the end of the line.
- if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
- encoded_body += encoded_line + eol
- else:
- encoded_body += encoded_line
- encoded_line = ''
- return encoded_body
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_encode = encode
-encodestring = encode
-
-
-
-# BAW: I'm not sure if the intent was for the signature of this function to be
-# the same as base64MIME.decode() or not...
-def decode(encoded, eol=NL):
- """Decode a quoted-printable string.
-
- Lines are separated with eol, which defaults to \\n.
- """
- if not encoded:
- return encoded
- # BAW: see comment in encode() above. Again, we're building up the
- # decoded string with string concatenation, which could be done much more
- # efficiently.
- decoded = ''
-
- for line in encoded.splitlines():
- line = line.rstrip()
- if not line:
- decoded += eol
- continue
-
- i = 0
- n = len(line)
- while i < n:
- c = line[i]
- if c <> '=':
- decoded += c
- i += 1
- # Otherwise, c == "=". Are we at the end of the line? If so, add
- # a soft line break.
- elif i+1 == n:
- i += 1
- continue
- # Decode if in form =AB
- elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
- decoded += unquote(line[i:i+3])
- i += 3
- # Otherwise, not in form =AB, pass literally
- else:
- decoded += c
- i += 1
-
- if i == n:
- decoded += eol
- # Special case if original string did not end with eol
- if not encoded.endswith(eol) and decoded.endswith(eol):
- decoded = decoded[:-1]
- return decoded
-
-
-# For convenience and backwards compatibility w/ standard base64 module
-body_decode = decode
-decodestring = decode
-
-
-
-def _unquote_match(match):
- """Turn a match in the form =AB to the ASCII character with value 0xab"""
- s = match.group(0)
- return unquote(s)
-
-
-# Header decoding is done a bit differently
-def header_decode(s):
- """Decode a string encoded with RFC 2045 MIME header `Q' encoding.
-
- This function does not parse a full MIME header value encoded with
- quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
- the high level email.Header class for that functionality.
- """
- s = s.replace('_', ' ')
- return re.sub(r'=\w{2}', _unquote_match, s)
diff --git a/sys/lib/python/email/utils.py b/sys/lib/python/email/utils.py
deleted file mode 100644
index ee952d392..000000000
--- a/sys/lib/python/email/utils.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright (C) 2001-2006 Python Software Foundation
-# Author: Barry Warsaw
-# Contact: email-sig@python.org
-
-"""Miscellaneous utilities."""
-
-__all__ = [
- 'collapse_rfc2231_value',
- 'decode_params',
- 'decode_rfc2231',
- 'encode_rfc2231',
- 'formataddr',
- 'formatdate',
- 'getaddresses',
- 'make_msgid',
- 'parseaddr',
- 'parsedate',
- 'parsedate_tz',
- 'unquote',
- ]
-
-import os
-import re
-import time
-import base64
-import random
-import socket
-import urllib
-import warnings
-from cStringIO import StringIO
-
-from email._parseaddr import quote
-from email._parseaddr import AddressList as _AddressList
-from email._parseaddr import mktime_tz
-
-# We need wormarounds for bugs in these methods in older Pythons (see below)
-from email._parseaddr import parsedate as _parsedate
-from email._parseaddr import parsedate_tz as _parsedate_tz
-
-from quopri import decodestring as _qdecode
-
-# Intrapackage imports
-from email.encoders import _bencode, _qencode
-
-COMMASPACE = ', '
-EMPTYSTRING = ''
-UEMPTYSTRING = u''
-CRLF = '\r\n'
-TICK = "'"
-
-specialsre = re.compile(r'[][\\()<>@,:;".]')
-escapesre = re.compile(r'[][\\()"]')
-
-
-
-# Helpers
-
-def _identity(s):
- return s
-
-
-def _bdecode(s):
- # We can't quite use base64.encodestring() since it tacks on a "courtesy
- # newline". Blech!
- if not s:
- return s
- value = base64.decodestring(s)
- if not s.endswith('\n') and value.endswith('\n'):
- return value[:-1]
- return value
-
-
-
-def fix_eols(s):
- """Replace all line-ending characters with \r\n."""
- # Fix newlines with no preceding carriage return
- s = re.sub(r'(?<!\r)\n', CRLF, s)
- # Fix carriage returns with no following newline
- s = re.sub(r'\r(?!\n)', CRLF, s)
- return s
-
-
-
-def formataddr(pair):
- """The inverse of parseaddr(), this takes a 2-tuple of the form
- (realname, email_address) and returns the string value suitable
- for an RFC 2822 From, To or Cc header.
-
- If the first element of pair is false, then the second element is
- returned unmodified.
- """
- name, address = pair
- if name:
- quotes = ''
- if specialsre.search(name):
- quotes = '"'
- name = escapesre.sub(r'\\\g<0>', name)
- return '%s%s%s <%s>' % (quotes, name, quotes, address)
- return address
-
-
-
-def getaddresses(fieldvalues):
- """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
- all = COMMASPACE.join(fieldvalues)
- a = _AddressList(all)
- return a.addresslist
-
-
-
-ecre = re.compile(r'''
- =\? # literal =?
- (?P<charset>[^?]*?) # non-greedy up to the next ? is the charset
- \? # literal ?
- (?P<encoding>[qb]) # either a "q" or a "b", case insensitive
- \? # literal ?
- (?P<atom>.*?) # non-greedy up to the next ?= is the atom
- \?= # literal ?=
- ''', re.VERBOSE | re.IGNORECASE)
-
-
-
-def formatdate(timeval=None, localtime=False, usegmt=False):
- """Returns a date string as specified by RFC 2822, e.g.:
-
- Fri, 09 Nov 2001 01:08:47 -0000
-
- Optional timeval if given is a floating point time value as accepted by
- gmtime() and localtime(), otherwise the current time is used.
-
- Optional localtime is a flag that when True, interprets timeval, and
- returns a date relative to the local timezone instead of UTC, properly
- taking daylight savings time into account.
-
- Optional argument usegmt means that the timezone is written out as
- an ascii string, not numeric one (so "GMT" instead of "+0000"). This
- is needed for HTTP, and is only used when localtime==False.
- """
- # Note: we cannot use strftime() because that honors the locale and RFC
- # 2822 requires that day and month names be the English abbreviations.
- if timeval is None:
- timeval = time.time()
- if localtime:
- now = time.localtime(timeval)
- # Calculate timezone offset, based on whether the local zone has
- # daylight savings time, and whether DST is in effect.
- if time.daylight and now[-1]:
- offset = time.altzone
- else:
- offset = time.timezone
- hours, minutes = divmod(abs(offset), 3600)
- # Remember offset is in seconds west of UTC, but the timezone is in
- # minutes east of UTC, so the signs differ.
- if offset > 0:
- sign = '-'
- else:
- sign = '+'
- zone = '%s%02d%02d' % (sign, hours, minutes // 60)
- else:
- now = time.gmtime(timeval)
- # Timezone offset is always -0000
- if usegmt:
- zone = 'GMT'
- else:
- zone = '-0000'
- return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
- ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][now[6]],
- now[2],
- ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][now[1] - 1],
- now[0], now[3], now[4], now[5],
- zone)
-
-
-
-def make_msgid(idstring=None):
- """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
-
- <20020201195627.33539.96671@nightshade.la.mastaler.com>
-
- Optional idstring if given is a string used to strengthen the
- uniqueness of the message id.
- """
- timeval = time.time()
- utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
- pid = os.getpid()
- randint = random.randrange(100000)
- if idstring is None:
- idstring = ''
- else:
- idstring = '.' + idstring
- idhost = socket.getfqdn()
- msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
- return msgid
-
-
-
-# These functions are in the standalone mimelib version only because they've
-# subsequently been fixed in the latest Python versions. We use this to worm
-# around broken older Pythons.
-def parsedate(data):
- if not data:
- return None
- return _parsedate(data)
-
-
-def parsedate_tz(data):
- if not data:
- return None
- return _parsedate_tz(data)
-
-
-def parseaddr(addr):
- addrs = _AddressList(addr).addresslist
- if not addrs:
- return '', ''
- return addrs[0]
-
-
-# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
-def unquote(str):
- """Remove quotes from a string."""
- if len(str) > 1:
- if str.startswith('"') and str.endswith('"'):
- return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
- if str.startswith('<') and str.endswith('>'):
- return str[1:-1]
- return str
-
-
-
-# RFC2231-related functions - parameter encoding and decoding
-def decode_rfc2231(s):
- """Decode string according to RFC 2231"""
- parts = s.split(TICK, 2)
- if len(parts) <= 2:
- return None, None, s
- return parts
-
-
-def encode_rfc2231(s, charset=None, language=None):
- """Encode string according to RFC 2231.
-
- If neither charset nor language is given, then s is returned as-is. If
- charset is given but not language, the string is encoded using the empty
- string for language.
- """
- import urllib
- s = urllib.quote(s, safe='')
- if charset is None and language is None:
- return s
- if language is None:
- language = ''
- return "%s'%s'%s" % (charset, language, s)
-
-
-rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$')
-
-def decode_params(params):
- """Decode parameters list according to RFC 2231.
-
- params is a sequence of 2-tuples containing (param name, string value).
- """
- # Copy params so we don't mess with the original
- params = params[:]
- new_params = []
- # Map parameter's name to a list of continuations. The values are a
- # 3-tuple of the continuation number, the string value, and a flag
- # specifying whether a particular segment is %-encoded.
- rfc2231_params = {}
- name, value = params.pop(0)
- new_params.append((name, value))
- while params:
- name, value = params.pop(0)
- if name.endswith('*'):
- encoded = True
- else:
- encoded = False
- value = unquote(value)
- mo = rfc2231_continuation.match(name)
- if mo:
- name, num = mo.group('name', 'num')
- if num is not None:
- num = int(num)
- rfc2231_params.setdefault(name, []).append((num, value, encoded))
- else:
- new_params.append((name, '"%s"' % quote(value)))
- if rfc2231_params:
- for name, continuations in rfc2231_params.items():
- value = []
- extended = False
- # Sort by number
- continuations.sort()
- # And now append all values in numerical order, converting
- # %-encodings for the encoded segments. If any of the
- # continuation names ends in a *, then the entire string, after
- # decoding segments and concatenating, must have the charset and
- # language specifiers at the beginning of the string.
- for num, s, encoded in continuations:
- if encoded:
- s = urllib.unquote(s)
- extended = True
- value.append(s)
- value = quote(EMPTYSTRING.join(value))
- if extended:
- charset, language, value = decode_rfc2231(value)
- new_params.append((name, (charset, language, '"%s"' % value)))
- else:
- new_params.append((name, '"%s"' % value))
- return new_params
-
-def collapse_rfc2231_value(value, errors='replace',
- fallback_charset='us-ascii'):
- if isinstance(value, tuple):
- rawval = unquote(value[2])
- charset = value[0] or 'us-ascii'
- try:
- return unicode(rawval, charset, errors)
- except LookupError:
- # XXX charset is unknown to Python.
- return unicode(rawval, fallback_charset, errors)
- else:
- return unquote(value)
diff --git a/sys/lib/python/encodings/__init__.py b/sys/lib/python/encodings/__init__.py
deleted file mode 100644
index 98ae2fae7..000000000
--- a/sys/lib/python/encodings/__init__.py
+++ /dev/null
@@ -1,154 +0,0 @@
-""" Standard "encodings" Package
-
- Standard Python encoding modules are stored in this package
- directory.
-
- Codec modules must have names corresponding to normalized encoding
- names as defined in the normalize_encoding() function below, e.g.
- 'utf-8' must be implemented by the module 'utf_8.py'.
-
- Each codec module must export the following interface:
-
- * getregentry() -> codecs.CodecInfo object
- The getregentry() API must a CodecInfo object with encoder, decoder,
- incrementalencoder, incrementaldecoder, streamwriter and streamreader
- atttributes which adhere to the Python Codec Interface Standard.
-
- In addition, a module may optionally also define the following
- APIs which are then used by the package's codec search function:
-
- * getaliases() -> sequence of encoding name strings to use as aliases
-
- Alias names returned by getaliases() must be normalized encoding
- names as defined by normalize_encoding().
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""#"
-
-import codecs, types
-from encodings import aliases
-
-_cache = {}
-_unknown = '--unknown--'
-_import_tail = ['*']
-_norm_encoding_map = (' . '
- '0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ '
- ' abcdefghijklmnopqrstuvwxyz '
- ' '
- ' '
- ' ')
-_aliases = aliases.aliases
-
-class CodecRegistryError(LookupError, SystemError):
- pass
-
-def normalize_encoding(encoding):
-
- """ Normalize an encoding name.
-
- Normalization works as follows: all non-alphanumeric
- characters except the dot used for Python package names are
- collapsed and replaced with a single underscore, e.g. ' -;#'
- becomes '_'. Leading and trailing underscores are removed.
-
- Note that encoding names should be ASCII only; if they do use
- non-ASCII characters, these must be Latin-1 compatible.
-
- """
- # Make sure we have an 8-bit string, because .translate() works
- # differently for Unicode strings.
- if type(encoding) is types.UnicodeType:
- # Note that .encode('latin-1') does *not* use the codec
- # registry, so this call doesn't recurse. (See unicodeobject.c
- # PyUnicode_AsEncodedString() for details)
- encoding = encoding.encode('latin-1')
- return '_'.join(encoding.translate(_norm_encoding_map).split())
-
-def search_function(encoding):
-
- # Cache lookup
- entry = _cache.get(encoding, _unknown)
- if entry is not _unknown:
- return entry
-
- # Import the module:
- #
- # First try to find an alias for the normalized encoding
- # name and lookup the module using the aliased name, then try to
- # lookup the module using the standard import scheme, i.e. first
- # try in the encodings package, then at top-level.
- #
- norm_encoding = normalize_encoding(encoding)
- aliased_encoding = _aliases.get(norm_encoding) or \
- _aliases.get(norm_encoding.replace('.', '_'))
- if aliased_encoding is not None:
- modnames = [aliased_encoding,
- norm_encoding]
- else:
- modnames = [norm_encoding]
- for modname in modnames:
- if not modname or '.' in modname:
- continue
- try:
- mod = __import__('encodings.' + modname,
- globals(), locals(), _import_tail)
- except ImportError:
- pass
- else:
- break
- else:
- mod = None
-
- try:
- getregentry = mod.getregentry
- except AttributeError:
- # Not a codec module
- mod = None
-
- if mod is None:
- # Cache misses
- _cache[encoding] = None
- return None
-
- # Now ask the module for the registry entry
- entry = getregentry()
- if not isinstance(entry, codecs.CodecInfo):
- if not 4 <= len(entry) <= 7:
- raise CodecRegistryError,\
- 'module "%s" (%s) failed to register' % \
- (mod.__name__, mod.__file__)
- if not callable(entry[0]) or \
- not callable(entry[1]) or \
- (entry[2] is not None and not callable(entry[2])) or \
- (entry[3] is not None and not callable(entry[3])) or \
- (len(entry) > 4 and entry[4] is not None and not callable(entry[4])) or \
- (len(entry) > 5 and entry[5] is not None and not callable(entry[5])):
- raise CodecRegistryError,\
- 'incompatible codecs in module "%s" (%s)' % \
- (mod.__name__, mod.__file__)
- if len(entry)<7 or entry[6] is None:
- entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
- entry = codecs.CodecInfo(*entry)
-
- # Cache the codec registry entry
- _cache[encoding] = entry
-
- # Register its aliases (without overwriting previously registered
- # aliases)
- try:
- codecaliases = mod.getaliases()
- except AttributeError:
- pass
- else:
- for alias in codecaliases:
- if not _aliases.has_key(alias):
- _aliases[alias] = modname
-
- # Return the registry entry
- return entry
-
-# Register the search_function in the Python codec registry
-codecs.register(search_function)
diff --git a/sys/lib/python/encodings/aliases.py b/sys/lib/python/encodings/aliases.py
deleted file mode 100644
index 681f9e356..000000000
--- a/sys/lib/python/encodings/aliases.py
+++ /dev/null
@@ -1,508 +0,0 @@
-""" Encoding Aliases Support
-
- This module is used by the encodings package search function to
- map encodings names to module names.
-
- Note that the search function normalizes the encoding names before
- doing the lookup, so the mapping will have to map normalized
- encoding names to module names.
-
- Contents:
-
- The following aliases dictionary contains mappings of all IANA
- character set names for which the Python core library provides
- codecs. In addition to these, a few Python specific codec
- aliases have also been added.
-
-"""
-aliases = {
-
- # Please keep this list sorted alphabetically by value !
-
- # ascii codec
- '646' : 'ascii',
- 'ansi_x3.4_1968' : 'ascii',
- 'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
- 'ansi_x3.4_1986' : 'ascii',
- 'cp367' : 'ascii',
- 'csascii' : 'ascii',
- 'ibm367' : 'ascii',
- 'iso646_us' : 'ascii',
- 'iso_646.irv_1991' : 'ascii',
- 'iso_ir_6' : 'ascii',
- 'us' : 'ascii',
- 'us_ascii' : 'ascii',
-
- # base64_codec codec
- 'base64' : 'base64_codec',
- 'base_64' : 'base64_codec',
-
- # big5 codec
- 'big5_tw' : 'big5',
- 'csbig5' : 'big5',
-
- # big5hkscs codec
- 'big5_hkscs' : 'big5hkscs',
- 'hkscs' : 'big5hkscs',
-
- # bz2_codec codec
- 'bz2' : 'bz2_codec',
-
- # cp037 codec
- '037' : 'cp037',
- 'csibm037' : 'cp037',
- 'ebcdic_cp_ca' : 'cp037',
- 'ebcdic_cp_nl' : 'cp037',
- 'ebcdic_cp_us' : 'cp037',
- 'ebcdic_cp_wt' : 'cp037',
- 'ibm037' : 'cp037',
- 'ibm039' : 'cp037',
-
- # cp1026 codec
- '1026' : 'cp1026',
- 'csibm1026' : 'cp1026',
- 'ibm1026' : 'cp1026',
-
- # cp1140 codec
- '1140' : 'cp1140',
- 'ibm1140' : 'cp1140',
-
- # cp1250 codec
- '1250' : 'cp1250',
- 'windows_1250' : 'cp1250',
-
- # cp1251 codec
- '1251' : 'cp1251',
- 'windows_1251' : 'cp1251',
-
- # cp1252 codec
- '1252' : 'cp1252',
- 'windows_1252' : 'cp1252',
-
- # cp1253 codec
- '1253' : 'cp1253',
- 'windows_1253' : 'cp1253',
-
- # cp1254 codec
- '1254' : 'cp1254',
- 'windows_1254' : 'cp1254',
-
- # cp1255 codec
- '1255' : 'cp1255',
- 'windows_1255' : 'cp1255',
-
- # cp1256 codec
- '1256' : 'cp1256',
- 'windows_1256' : 'cp1256',
-
- # cp1257 codec
- '1257' : 'cp1257',
- 'windows_1257' : 'cp1257',
-
- # cp1258 codec
- '1258' : 'cp1258',
- 'windows_1258' : 'cp1258',
-
- # cp424 codec
- '424' : 'cp424',
- 'csibm424' : 'cp424',
- 'ebcdic_cp_he' : 'cp424',
- 'ibm424' : 'cp424',
-
- # cp437 codec
- '437' : 'cp437',
- 'cspc8codepage437' : 'cp437',
- 'ibm437' : 'cp437',
-
- # cp500 codec
- '500' : 'cp500',
- 'csibm500' : 'cp500',
- 'ebcdic_cp_be' : 'cp500',
- 'ebcdic_cp_ch' : 'cp500',
- 'ibm500' : 'cp500',
-
- # cp775 codec
- '775' : 'cp775',
- 'cspc775baltic' : 'cp775',
- 'ibm775' : 'cp775',
-
- # cp850 codec
- '850' : 'cp850',
- 'cspc850multilingual' : 'cp850',
- 'ibm850' : 'cp850',
-
- # cp852 codec
- '852' : 'cp852',
- 'cspcp852' : 'cp852',
- 'ibm852' : 'cp852',
-
- # cp855 codec
- '855' : 'cp855',
- 'csibm855' : 'cp855',
- 'ibm855' : 'cp855',
-
- # cp857 codec
- '857' : 'cp857',
- 'csibm857' : 'cp857',
- 'ibm857' : 'cp857',
-
- # cp860 codec
- '860' : 'cp860',
- 'csibm860' : 'cp860',
- 'ibm860' : 'cp860',
-
- # cp861 codec
- '861' : 'cp861',
- 'cp_is' : 'cp861',
- 'csibm861' : 'cp861',
- 'ibm861' : 'cp861',
-
- # cp862 codec
- '862' : 'cp862',
- 'cspc862latinhebrew' : 'cp862',
- 'ibm862' : 'cp862',
-
- # cp863 codec
- '863' : 'cp863',
- 'csibm863' : 'cp863',
- 'ibm863' : 'cp863',
-
- # cp864 codec
- '864' : 'cp864',
- 'csibm864' : 'cp864',
- 'ibm864' : 'cp864',
-
- # cp865 codec
- '865' : 'cp865',
- 'csibm865' : 'cp865',
- 'ibm865' : 'cp865',
-
- # cp866 codec
- '866' : 'cp866',
- 'csibm866' : 'cp866',
- 'ibm866' : 'cp866',
-
- # cp869 codec
- '869' : 'cp869',
- 'cp_gr' : 'cp869',
- 'csibm869' : 'cp869',
- 'ibm869' : 'cp869',
-
- # cp932 codec
- '932' : 'cp932',
- 'ms932' : 'cp932',
- 'mskanji' : 'cp932',
- 'ms_kanji' : 'cp932',
-
- # cp949 codec
- '949' : 'cp949',
- 'ms949' : 'cp949',
- 'uhc' : 'cp949',
-
- # cp950 codec
- '950' : 'cp950',
- 'ms950' : 'cp950',
-
- # euc_jis_2004 codec
- 'jisx0213' : 'euc_jis_2004',
- 'eucjis2004' : 'euc_jis_2004',
- 'euc_jis2004' : 'euc_jis_2004',
-
- # euc_jisx0213 codec
- 'eucjisx0213' : 'euc_jisx0213',
-
- # euc_jp codec
- 'eucjp' : 'euc_jp',
- 'ujis' : 'euc_jp',
- 'u_jis' : 'euc_jp',
-
- # euc_kr codec
- 'euckr' : 'euc_kr',
- 'korean' : 'euc_kr',
- 'ksc5601' : 'euc_kr',
- 'ks_c_5601' : 'euc_kr',
- 'ks_c_5601_1987' : 'euc_kr',
- 'ksx1001' : 'euc_kr',
- 'ks_x_1001' : 'euc_kr',
-
- # gb18030 codec
- 'gb18030_2000' : 'gb18030',
-
- # gb2312 codec
- 'chinese' : 'gb2312',
- 'csiso58gb231280' : 'gb2312',
- 'euc_cn' : 'gb2312',
- 'euccn' : 'gb2312',
- 'eucgb2312_cn' : 'gb2312',
- 'gb2312_1980' : 'gb2312',
- 'gb2312_80' : 'gb2312',
- 'iso_ir_58' : 'gb2312',
-
- # gbk codec
- '936' : 'gbk',
- 'cp936' : 'gbk',
- 'ms936' : 'gbk',
-
- # hex_codec codec
- 'hex' : 'hex_codec',
-
- # hp_roman8 codec
- 'roman8' : 'hp_roman8',
- 'r8' : 'hp_roman8',
- 'csHPRoman8' : 'hp_roman8',
-
- # hz codec
- 'hzgb' : 'hz',
- 'hz_gb' : 'hz',
- 'hz_gb_2312' : 'hz',
-
- # iso2022_jp codec
- 'csiso2022jp' : 'iso2022_jp',
- 'iso2022jp' : 'iso2022_jp',
- 'iso_2022_jp' : 'iso2022_jp',
-
- # iso2022_jp_1 codec
- 'iso2022jp_1' : 'iso2022_jp_1',
- 'iso_2022_jp_1' : 'iso2022_jp_1',
-
- # iso2022_jp_2 codec
- 'iso2022jp_2' : 'iso2022_jp_2',
- 'iso_2022_jp_2' : 'iso2022_jp_2',
-
- # iso2022_jp_2004 codec
- 'iso_2022_jp_2004' : 'iso2022_jp_2004',
- 'iso2022jp_2004' : 'iso2022_jp_2004',
-
- # iso2022_jp_3 codec
- 'iso2022jp_3' : 'iso2022_jp_3',
- 'iso_2022_jp_3' : 'iso2022_jp_3',
-
- # iso2022_jp_ext codec
- 'iso2022jp_ext' : 'iso2022_jp_ext',
- 'iso_2022_jp_ext' : 'iso2022_jp_ext',
-
- # iso2022_kr codec
- 'csiso2022kr' : 'iso2022_kr',
- 'iso2022kr' : 'iso2022_kr',
- 'iso_2022_kr' : 'iso2022_kr',
-
- # iso8859_10 codec
- 'csisolatin6' : 'iso8859_10',
- 'iso_8859_10' : 'iso8859_10',
- 'iso_8859_10_1992' : 'iso8859_10',
- 'iso_ir_157' : 'iso8859_10',
- 'l6' : 'iso8859_10',
- 'latin6' : 'iso8859_10',
-
- # iso8859_11 codec
- 'thai' : 'iso8859_11',
- 'iso_8859_11' : 'iso8859_11',
- 'iso_8859_11_2001' : 'iso8859_11',
-
- # iso8859_13 codec
- 'iso_8859_13' : 'iso8859_13',
-
- # iso8859_14 codec
- 'iso_8859_14' : 'iso8859_14',
- 'iso_8859_14_1998' : 'iso8859_14',
- 'iso_celtic' : 'iso8859_14',
- 'iso_ir_199' : 'iso8859_14',
- 'l8' : 'iso8859_14',
- 'latin8' : 'iso8859_14',
-
- # iso8859_15 codec
- 'iso_8859_15' : 'iso8859_15',
-
- # iso8859_16 codec
- 'iso_8859_16' : 'iso8859_16',
- 'iso_8859_16_2001' : 'iso8859_16',
- 'iso_ir_226' : 'iso8859_16',
- 'l10' : 'iso8859_16',
- 'latin10' : 'iso8859_16',
-
- # iso8859_2 codec
- 'csisolatin2' : 'iso8859_2',
- 'iso_8859_2' : 'iso8859_2',
- 'iso_8859_2_1987' : 'iso8859_2',
- 'iso_ir_101' : 'iso8859_2',
- 'l2' : 'iso8859_2',
- 'latin2' : 'iso8859_2',
-
- # iso8859_3 codec
- 'csisolatin3' : 'iso8859_3',
- 'iso_8859_3' : 'iso8859_3',
- 'iso_8859_3_1988' : 'iso8859_3',
- 'iso_ir_109' : 'iso8859_3',
- 'l3' : 'iso8859_3',
- 'latin3' : 'iso8859_3',
-
- # iso8859_4 codec
- 'csisolatin4' : 'iso8859_4',
- 'iso_8859_4' : 'iso8859_4',
- 'iso_8859_4_1988' : 'iso8859_4',
- 'iso_ir_110' : 'iso8859_4',
- 'l4' : 'iso8859_4',
- 'latin4' : 'iso8859_4',
-
- # iso8859_5 codec
- 'csisolatincyrillic' : 'iso8859_5',
- 'cyrillic' : 'iso8859_5',
- 'iso_8859_5' : 'iso8859_5',
- 'iso_8859_5_1988' : 'iso8859_5',
- 'iso_ir_144' : 'iso8859_5',
-
- # iso8859_6 codec
- 'arabic' : 'iso8859_6',
- 'asmo_708' : 'iso8859_6',
- 'csisolatinarabic' : 'iso8859_6',
- 'ecma_114' : 'iso8859_6',
- 'iso_8859_6' : 'iso8859_6',
- 'iso_8859_6_1987' : 'iso8859_6',
- 'iso_ir_127' : 'iso8859_6',
-
- # iso8859_7 codec
- 'csisolatingreek' : 'iso8859_7',
- 'ecma_118' : 'iso8859_7',
- 'elot_928' : 'iso8859_7',
- 'greek' : 'iso8859_7',
- 'greek8' : 'iso8859_7',
- 'iso_8859_7' : 'iso8859_7',
- 'iso_8859_7_1987' : 'iso8859_7',
- 'iso_ir_126' : 'iso8859_7',
-
- # iso8859_8 codec
- 'csisolatinhebrew' : 'iso8859_8',
- 'hebrew' : 'iso8859_8',
- 'iso_8859_8' : 'iso8859_8',
- 'iso_8859_8_1988' : 'iso8859_8',
- 'iso_ir_138' : 'iso8859_8',
-
- # iso8859_9 codec
- 'csisolatin5' : 'iso8859_9',
- 'iso_8859_9' : 'iso8859_9',
- 'iso_8859_9_1989' : 'iso8859_9',
- 'iso_ir_148' : 'iso8859_9',
- 'l5' : 'iso8859_9',
- 'latin5' : 'iso8859_9',
-
- # johab codec
- 'cp1361' : 'johab',
- 'ms1361' : 'johab',
-
- # koi8_r codec
- 'cskoi8r' : 'koi8_r',
-
- # latin_1 codec
- #
- # Note that the latin_1 codec is implemented internally in C and a
- # lot faster than the charmap codec iso8859_1 which uses the same
- # encoding. This is why we discourage the use of the iso8859_1
- # codec and alias it to latin_1 instead.
- #
- '8859' : 'latin_1',
- 'cp819' : 'latin_1',
- 'csisolatin1' : 'latin_1',
- 'ibm819' : 'latin_1',
- 'iso8859' : 'latin_1',
- 'iso8859_1' : 'latin_1',
- 'iso_8859_1' : 'latin_1',
- 'iso_8859_1_1987' : 'latin_1',
- 'iso_ir_100' : 'latin_1',
- 'l1' : 'latin_1',
- 'latin' : 'latin_1',
- 'latin1' : 'latin_1',
-
- # mac_cyrillic codec
- 'maccyrillic' : 'mac_cyrillic',
-
- # mac_greek codec
- 'macgreek' : 'mac_greek',
-
- # mac_iceland codec
- 'maciceland' : 'mac_iceland',
-
- # mac_latin2 codec
- 'maccentraleurope' : 'mac_latin2',
- 'maclatin2' : 'mac_latin2',
-
- # mac_roman codec
- 'macroman' : 'mac_roman',
-
- # mac_turkish codec
- 'macturkish' : 'mac_turkish',
-
- # mbcs codec
- 'dbcs' : 'mbcs',
-
- # ptcp154 codec
- 'csptcp154' : 'ptcp154',
- 'pt154' : 'ptcp154',
- 'cp154' : 'ptcp154',
- 'cyrillic-asian' : 'ptcp154',
-
- # quopri_codec codec
- 'quopri' : 'quopri_codec',
- 'quoted_printable' : 'quopri_codec',
- 'quotedprintable' : 'quopri_codec',
-
- # rot_13 codec
- 'rot13' : 'rot_13',
-
- # shift_jis codec
- 'csshiftjis' : 'shift_jis',
- 'shiftjis' : 'shift_jis',
- 'sjis' : 'shift_jis',
- 's_jis' : 'shift_jis',
-
- # shift_jis_2004 codec
- 'shiftjis2004' : 'shift_jis_2004',
- 'sjis_2004' : 'shift_jis_2004',
- 's_jis_2004' : 'shift_jis_2004',
-
- # shift_jisx0213 codec
- 'shiftjisx0213' : 'shift_jisx0213',
- 'sjisx0213' : 'shift_jisx0213',
- 's_jisx0213' : 'shift_jisx0213',
-
- # tactis codec
- 'tis260' : 'tactis',
-
- # tis_620 codec
- 'tis620' : 'tis_620',
- 'tis_620_0' : 'tis_620',
- 'tis_620_2529_0' : 'tis_620',
- 'tis_620_2529_1' : 'tis_620',
- 'iso_ir_166' : 'tis_620',
-
- # utf_16 codec
- 'u16' : 'utf_16',
- 'utf16' : 'utf_16',
-
- # utf_16_be codec
- 'unicodebigunmarked' : 'utf_16_be',
- 'utf_16be' : 'utf_16_be',
-
- # utf_16_le codec
- 'unicodelittleunmarked' : 'utf_16_le',
- 'utf_16le' : 'utf_16_le',
-
- # utf_7 codec
- 'u7' : 'utf_7',
- 'utf7' : 'utf_7',
- 'unicode_1_1_utf_7' : 'utf_7',
-
- # utf_8 codec
- 'u8' : 'utf_8',
- 'utf' : 'utf_8',
- 'utf8' : 'utf_8',
- 'utf8_ucs2' : 'utf_8',
- 'utf8_ucs4' : 'utf_8',
-
- # uu_codec codec
- 'uu' : 'uu_codec',
-
- # zlib_codec codec
- 'zip' : 'zlib_codec',
- 'zlib' : 'zlib_codec',
-
-}
diff --git a/sys/lib/python/encodings/ascii.py b/sys/lib/python/encodings/ascii.py
deleted file mode 100644
index 2033cde97..000000000
--- a/sys/lib/python/encodings/ascii.py
+++ /dev/null
@@ -1,50 +0,0 @@
-""" Python 'ascii' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- # Note: Binding these as C functions will result in the class not
- # converting them to methods. This is intended.
- encode = codecs.ascii_encode
- decode = codecs.ascii_decode
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.ascii_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.ascii_decode(input, self.errors)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-class StreamConverter(StreamWriter,StreamReader):
-
- encode = codecs.ascii_decode
- decode = codecs.ascii_encode
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='ascii',
- encode=Codec.encode,
- decode=Codec.decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/base64_codec.py b/sys/lib/python/encodings/base64_codec.py
deleted file mode 100644
index f84e7808e..000000000
--- a/sys/lib/python/encodings/base64_codec.py
+++ /dev/null
@@ -1,79 +0,0 @@
-""" Python 'base64_codec' Codec - base64 content transfer encoding
-
- Unlike most of the other codecs which target Unicode, this codec
- will return Python string objects for both encode and decode.
-
- Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-"""
-import codecs, base64
-
-### Codec APIs
-
-def base64_encode(input,errors='strict'):
-
- """ Encodes the object input and returns a tuple (output
- object, length consumed).
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- output = base64.encodestring(input)
- return (output, len(input))
-
-def base64_decode(input,errors='strict'):
-
- """ Decodes the object input and returns a tuple (output
- object, length consumed).
-
- input must be an object which provides the bf_getreadbuf
- buffer slot. Python strings, buffer objects and memory
- mapped files are examples of objects providing this slot.
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- output = base64.decodestring(input)
- return (output, len(input))
-
-class Codec(codecs.Codec):
-
- def encode(self, input,errors='strict'):
- return base64_encode(input,errors)
- def decode(self, input,errors='strict'):
- return base64_decode(input,errors)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- assert self.errors == 'strict'
- return base64.encodestring(input)
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- assert self.errors == 'strict'
- return base64.decodestring(input)
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='base64',
- encode=base64_encode,
- decode=base64_decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/big5.py b/sys/lib/python/encodings/big5.py
deleted file mode 100644
index 7adeb0e16..000000000
--- a/sys/lib/python/encodings/big5.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# big5.py: Python Unicode Codec for BIG5
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_tw, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_tw.getcodec('big5')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='big5',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/big5hkscs.py b/sys/lib/python/encodings/big5hkscs.py
deleted file mode 100644
index 350df37ba..000000000
--- a/sys/lib/python/encodings/big5hkscs.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# big5hkscs.py: Python Unicode Codec for BIG5HKSCS
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_hk, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_hk.getcodec('big5hkscs')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='big5hkscs',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/bz2_codec.py b/sys/lib/python/encodings/bz2_codec.py
deleted file mode 100644
index 054b36b40..000000000
--- a/sys/lib/python/encodings/bz2_codec.py
+++ /dev/null
@@ -1,102 +0,0 @@
-""" Python 'bz2_codec' Codec - bz2 compression encoding
-
- Unlike most of the other codecs which target Unicode, this codec
- will return Python string objects for both encode and decode.
-
- Adapted by Raymond Hettinger from zlib_codec.py which was written
- by Marc-Andre Lemburg (mal@lemburg.com).
-
-"""
-import codecs
-import bz2 # this codec needs the optional bz2 module !
-
-### Codec APIs
-
-def bz2_encode(input,errors='strict'):
-
- """ Encodes the object input and returns a tuple (output
- object, length consumed).
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- output = bz2.compress(input)
- return (output, len(input))
-
-def bz2_decode(input,errors='strict'):
-
- """ Decodes the object input and returns a tuple (output
- object, length consumed).
-
- input must be an object which provides the bf_getreadbuf
- buffer slot. Python strings, buffer objects and memory
- mapped files are examples of objects providing this slot.
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- output = bz2.decompress(input)
- return (output, len(input))
-
-class Codec(codecs.Codec):
-
- def encode(self, input, errors='strict'):
- return bz2_encode(input, errors)
- def decode(self, input, errors='strict'):
- return bz2_decode(input, errors)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def __init__(self, errors='strict'):
- assert errors == 'strict'
- self.errors = errors
- self.compressobj = bz2.BZ2Compressor()
-
- def encode(self, input, final=False):
- if final:
- c = self.compressobj.compress(input)
- return c + self.compressobj.flush()
- else:
- return self.compressobj.compress(input)
-
- def reset(self):
- self.compressobj = bz2.BZ2Compressor()
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def __init__(self, errors='strict'):
- assert errors == 'strict'
- self.errors = errors
- self.decompressobj = bz2.BZ2Decompressor()
-
- def decode(self, input, final=False):
- try:
- return self.decompressobj.decompress(input)
- except EOFError:
- return ''
-
- def reset(self):
- self.decompressobj = bz2.BZ2Decompressor()
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name="bz2",
- encode=bz2_encode,
- decode=bz2_decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/charmap.py b/sys/lib/python/encodings/charmap.py
deleted file mode 100644
index 81189b161..000000000
--- a/sys/lib/python/encodings/charmap.py
+++ /dev/null
@@ -1,69 +0,0 @@
-""" Generic Python Character Mapping Codec.
-
- Use this codec directly rather than through the automatic
- conversion mechanisms supplied by unicode() and .encode().
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- # Note: Binding these as C functions will result in the class not
- # converting them to methods. This is intended.
- encode = codecs.charmap_encode
- decode = codecs.charmap_decode
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def __init__(self, errors='strict', mapping=None):
- codecs.IncrementalEncoder.__init__(self, errors)
- self.mapping = mapping
-
- def encode(self, input, final=False):
- return codecs.charmap_encode(input, self.errors, self.mapping)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def __init__(self, errors='strict', mapping=None):
- codecs.IncrementalDecoder.__init__(self, errors)
- self.mapping = mapping
-
- def decode(self, input, final=False):
- return codecs.charmap_decode(input, self.errors, self.mapping)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
-
- def __init__(self,stream,errors='strict',mapping=None):
- codecs.StreamWriter.__init__(self,stream,errors)
- self.mapping = mapping
-
- def encode(self,input,errors='strict'):
- return Codec.encode(input,errors,self.mapping)
-
-class StreamReader(Codec,codecs.StreamReader):
-
- def __init__(self,stream,errors='strict',mapping=None):
- codecs.StreamReader.__init__(self,stream,errors)
- self.mapping = mapping
-
- def decode(self,input,errors='strict'):
- return Codec.decode(input,errors,self.mapping)
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='charmap',
- encode=Codec.encode,
- decode=Codec.decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/cp037.py b/sys/lib/python/encodings/cp037.py
deleted file mode 100644
index c802b899a..000000000
--- a/sys/lib/python/encodings/cp037.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp037',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x9c' # 0x04 -> CONTROL
- u'\t' # 0x05 -> HORIZONTAL TABULATION
- u'\x86' # 0x06 -> CONTROL
- u'\x7f' # 0x07 -> DELETE
- u'\x97' # 0x08 -> CONTROL
- u'\x8d' # 0x09 -> CONTROL
- u'\x8e' # 0x0A -> CONTROL
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x9d' # 0x14 -> CONTROL
- u'\x85' # 0x15 -> CONTROL
- u'\x08' # 0x16 -> BACKSPACE
- u'\x87' # 0x17 -> CONTROL
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x92' # 0x1A -> CONTROL
- u'\x8f' # 0x1B -> CONTROL
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u'\x80' # 0x20 -> CONTROL
- u'\x81' # 0x21 -> CONTROL
- u'\x82' # 0x22 -> CONTROL
- u'\x83' # 0x23 -> CONTROL
- u'\x84' # 0x24 -> CONTROL
- u'\n' # 0x25 -> LINE FEED
- u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
- u'\x1b' # 0x27 -> ESCAPE
- u'\x88' # 0x28 -> CONTROL
- u'\x89' # 0x29 -> CONTROL
- u'\x8a' # 0x2A -> CONTROL
- u'\x8b' # 0x2B -> CONTROL
- u'\x8c' # 0x2C -> CONTROL
- u'\x05' # 0x2D -> ENQUIRY
- u'\x06' # 0x2E -> ACKNOWLEDGE
- u'\x07' # 0x2F -> BELL
- u'\x90' # 0x30 -> CONTROL
- u'\x91' # 0x31 -> CONTROL
- u'\x16' # 0x32 -> SYNCHRONOUS IDLE
- u'\x93' # 0x33 -> CONTROL
- u'\x94' # 0x34 -> CONTROL
- u'\x95' # 0x35 -> CONTROL
- u'\x96' # 0x36 -> CONTROL
- u'\x04' # 0x37 -> END OF TRANSMISSION
- u'\x98' # 0x38 -> CONTROL
- u'\x99' # 0x39 -> CONTROL
- u'\x9a' # 0x3A -> CONTROL
- u'\x9b' # 0x3B -> CONTROL
- u'\x14' # 0x3C -> DEVICE CONTROL FOUR
- u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
- u'\x9e' # 0x3E -> CONTROL
- u'\x1a' # 0x3F -> SUBSTITUTE
- u' ' # 0x40 -> SPACE
- u'\xa0' # 0x41 -> NO-BREAK SPACE
- u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
- u'\xa2' # 0x4A -> CENT SIGN
- u'.' # 0x4B -> FULL STOP
- u'<' # 0x4C -> LESS-THAN SIGN
- u'(' # 0x4D -> LEFT PARENTHESIS
- u'+' # 0x4E -> PLUS SIGN
- u'|' # 0x4F -> VERTICAL LINE
- u'&' # 0x50 -> AMPERSAND
- u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
- u'!' # 0x5A -> EXCLAMATION MARK
- u'$' # 0x5B -> DOLLAR SIGN
- u'*' # 0x5C -> ASTERISK
- u')' # 0x5D -> RIGHT PARENTHESIS
- u';' # 0x5E -> SEMICOLON
- u'\xac' # 0x5F -> NOT SIGN
- u'-' # 0x60 -> HYPHEN-MINUS
- u'/' # 0x61 -> SOLIDUS
- u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xa6' # 0x6A -> BROKEN BAR
- u',' # 0x6B -> COMMA
- u'%' # 0x6C -> PERCENT SIGN
- u'_' # 0x6D -> LOW LINE
- u'>' # 0x6E -> GREATER-THAN SIGN
- u'?' # 0x6F -> QUESTION MARK
- u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
- u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
- u'`' # 0x79 -> GRAVE ACCENT
- u':' # 0x7A -> COLON
- u'#' # 0x7B -> NUMBER SIGN
- u'@' # 0x7C -> COMMERCIAL AT
- u"'" # 0x7D -> APOSTROPHE
- u'=' # 0x7E -> EQUALS SIGN
- u'"' # 0x7F -> QUOTATION MARK
- u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
- u'a' # 0x81 -> LATIN SMALL LETTER A
- u'b' # 0x82 -> LATIN SMALL LETTER B
- u'c' # 0x83 -> LATIN SMALL LETTER C
- u'd' # 0x84 -> LATIN SMALL LETTER D
- u'e' # 0x85 -> LATIN SMALL LETTER E
- u'f' # 0x86 -> LATIN SMALL LETTER F
- u'g' # 0x87 -> LATIN SMALL LETTER G
- u'h' # 0x88 -> LATIN SMALL LETTER H
- u'i' # 0x89 -> LATIN SMALL LETTER I
- u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
- u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
- u'\xb1' # 0x8F -> PLUS-MINUS SIGN
- u'\xb0' # 0x90 -> DEGREE SIGN
- u'j' # 0x91 -> LATIN SMALL LETTER J
- u'k' # 0x92 -> LATIN SMALL LETTER K
- u'l' # 0x93 -> LATIN SMALL LETTER L
- u'm' # 0x94 -> LATIN SMALL LETTER M
- u'n' # 0x95 -> LATIN SMALL LETTER N
- u'o' # 0x96 -> LATIN SMALL LETTER O
- u'p' # 0x97 -> LATIN SMALL LETTER P
- u'q' # 0x98 -> LATIN SMALL LETTER Q
- u'r' # 0x99 -> LATIN SMALL LETTER R
- u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
- u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
- u'\xb8' # 0x9D -> CEDILLA
- u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
- u'\xa4' # 0x9F -> CURRENCY SIGN
- u'\xb5' # 0xA0 -> MICRO SIGN
- u'~' # 0xA1 -> TILDE
- u's' # 0xA2 -> LATIN SMALL LETTER S
- u't' # 0xA3 -> LATIN SMALL LETTER T
- u'u' # 0xA4 -> LATIN SMALL LETTER U
- u'v' # 0xA5 -> LATIN SMALL LETTER V
- u'w' # 0xA6 -> LATIN SMALL LETTER W
- u'x' # 0xA7 -> LATIN SMALL LETTER X
- u'y' # 0xA8 -> LATIN SMALL LETTER Y
- u'z' # 0xA9 -> LATIN SMALL LETTER Z
- u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
- u'\xbf' # 0xAB -> INVERTED QUESTION MARK
- u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
- u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
- u'\xae' # 0xAF -> REGISTERED SIGN
- u'^' # 0xB0 -> CIRCUMFLEX ACCENT
- u'\xa3' # 0xB1 -> POUND SIGN
- u'\xa5' # 0xB2 -> YEN SIGN
- u'\xb7' # 0xB3 -> MIDDLE DOT
- u'\xa9' # 0xB4 -> COPYRIGHT SIGN
- u'\xa7' # 0xB5 -> SECTION SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
- u'[' # 0xBA -> LEFT SQUARE BRACKET
- u']' # 0xBB -> RIGHT SQUARE BRACKET
- u'\xaf' # 0xBC -> MACRON
- u'\xa8' # 0xBD -> DIAERESIS
- u'\xb4' # 0xBE -> ACUTE ACCENT
- u'\xd7' # 0xBF -> MULTIPLICATION SIGN
- u'{' # 0xC0 -> LEFT CURLY BRACKET
- u'A' # 0xC1 -> LATIN CAPITAL LETTER A
- u'B' # 0xC2 -> LATIN CAPITAL LETTER B
- u'C' # 0xC3 -> LATIN CAPITAL LETTER C
- u'D' # 0xC4 -> LATIN CAPITAL LETTER D
- u'E' # 0xC5 -> LATIN CAPITAL LETTER E
- u'F' # 0xC6 -> LATIN CAPITAL LETTER F
- u'G' # 0xC7 -> LATIN CAPITAL LETTER G
- u'H' # 0xC8 -> LATIN CAPITAL LETTER H
- u'I' # 0xC9 -> LATIN CAPITAL LETTER I
- u'\xad' # 0xCA -> SOFT HYPHEN
- u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
- u'}' # 0xD0 -> RIGHT CURLY BRACKET
- u'J' # 0xD1 -> LATIN CAPITAL LETTER J
- u'K' # 0xD2 -> LATIN CAPITAL LETTER K
- u'L' # 0xD3 -> LATIN CAPITAL LETTER L
- u'M' # 0xD4 -> LATIN CAPITAL LETTER M
- u'N' # 0xD5 -> LATIN CAPITAL LETTER N
- u'O' # 0xD6 -> LATIN CAPITAL LETTER O
- u'P' # 0xD7 -> LATIN CAPITAL LETTER P
- u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
- u'R' # 0xD9 -> LATIN CAPITAL LETTER R
- u'\xb9' # 0xDA -> SUPERSCRIPT ONE
- u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
- u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\\' # 0xE0 -> REVERSE SOLIDUS
- u'\xf7' # 0xE1 -> DIVISION SIGN
- u'S' # 0xE2 -> LATIN CAPITAL LETTER S
- u'T' # 0xE3 -> LATIN CAPITAL LETTER T
- u'U' # 0xE4 -> LATIN CAPITAL LETTER U
- u'V' # 0xE5 -> LATIN CAPITAL LETTER V
- u'W' # 0xE6 -> LATIN CAPITAL LETTER W
- u'X' # 0xE7 -> LATIN CAPITAL LETTER X
- u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
- u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
- u'\xb2' # 0xEA -> SUPERSCRIPT TWO
- u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
- u'0' # 0xF0 -> DIGIT ZERO
- u'1' # 0xF1 -> DIGIT ONE
- u'2' # 0xF2 -> DIGIT TWO
- u'3' # 0xF3 -> DIGIT THREE
- u'4' # 0xF4 -> DIGIT FOUR
- u'5' # 0xF5 -> DIGIT FIVE
- u'6' # 0xF6 -> DIGIT SIX
- u'7' # 0xF7 -> DIGIT SEVEN
- u'8' # 0xF8 -> DIGIT EIGHT
- u'9' # 0xF9 -> DIGIT NINE
- u'\xb3' # 0xFA -> SUPERSCRIPT THREE
- u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\x9f' # 0xFF -> CONTROL
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1006.py b/sys/lib/python/encodings/cp1006.py
deleted file mode 100644
index e21e804eb..000000000
--- a/sys/lib/python/encodings/cp1006.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1006',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO
- u'\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE
- u'\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO
- u'\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE
- u'\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR
- u'\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE
- u'\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX
- u'\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN
- u'\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT
- u'\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE
- u'\u060c' # 0xAB -> ARABIC COMMA
- u'\u061b' # 0xAC -> ARABIC SEMICOLON
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\u061f' # 0xAE -> ARABIC QUESTION MARK
- u'\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
- u'\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM
- u'\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM
- u'\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM
- u'\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM
- u'\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM
- u'\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM
- u'\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM
- u'\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
- u'\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM
- u'\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM
- u'\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM
- u'\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM
- u'\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM
- u'\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM
- u'\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM
- u'\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM
- u'\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM
- u'\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM
- u'\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM
- u'\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM
- u'\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM
- u'\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM
- u'\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM
- u'\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORMN
- u'\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM
- u'\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM
- u'\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM
- u'\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM
- u'\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM
- u'\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM
- u'\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM
- u'\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM
- u'\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM
- u'\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM
- u'\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM
- u'\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM
- u'\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM
- u'\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM
- u'\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM
- u'\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM
- u'\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM
- u'\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM
- u'\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM
- u'\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM
- u'\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM
- u'\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM
- u'\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM
- u'\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM
- u'\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM
- u'\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM
- u'\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM
- u'\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM
- u'\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM
- u'\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM
- u'\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM
- u'\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM
- u'\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM
- u'\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM
- u'\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM
- u'\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM
- u'\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM
- u'\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM
- u'\ufee7' # 0xEE -> ARABIC LETTER NOON INITIAL FORM
- u'\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
- u'\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM
- u'\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM
- u'\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM
- u'\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM
- u'\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
- u'\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM
- u'\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
- u'\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
- u'\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
- u'\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM
- u'\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM
- u'\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM
- u'\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
- u'\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM
- u'\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM
- u'\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1026.py b/sys/lib/python/encodings/cp1026.py
deleted file mode 100644
index 45bbe626f..000000000
--- a/sys/lib/python/encodings/cp1026.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1026',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x9c' # 0x04 -> CONTROL
- u'\t' # 0x05 -> HORIZONTAL TABULATION
- u'\x86' # 0x06 -> CONTROL
- u'\x7f' # 0x07 -> DELETE
- u'\x97' # 0x08 -> CONTROL
- u'\x8d' # 0x09 -> CONTROL
- u'\x8e' # 0x0A -> CONTROL
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x9d' # 0x14 -> CONTROL
- u'\x85' # 0x15 -> CONTROL
- u'\x08' # 0x16 -> BACKSPACE
- u'\x87' # 0x17 -> CONTROL
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x92' # 0x1A -> CONTROL
- u'\x8f' # 0x1B -> CONTROL
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u'\x80' # 0x20 -> CONTROL
- u'\x81' # 0x21 -> CONTROL
- u'\x82' # 0x22 -> CONTROL
- u'\x83' # 0x23 -> CONTROL
- u'\x84' # 0x24 -> CONTROL
- u'\n' # 0x25 -> LINE FEED
- u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
- u'\x1b' # 0x27 -> ESCAPE
- u'\x88' # 0x28 -> CONTROL
- u'\x89' # 0x29 -> CONTROL
- u'\x8a' # 0x2A -> CONTROL
- u'\x8b' # 0x2B -> CONTROL
- u'\x8c' # 0x2C -> CONTROL
- u'\x05' # 0x2D -> ENQUIRY
- u'\x06' # 0x2E -> ACKNOWLEDGE
- u'\x07' # 0x2F -> BELL
- u'\x90' # 0x30 -> CONTROL
- u'\x91' # 0x31 -> CONTROL
- u'\x16' # 0x32 -> SYNCHRONOUS IDLE
- u'\x93' # 0x33 -> CONTROL
- u'\x94' # 0x34 -> CONTROL
- u'\x95' # 0x35 -> CONTROL
- u'\x96' # 0x36 -> CONTROL
- u'\x04' # 0x37 -> END OF TRANSMISSION
- u'\x98' # 0x38 -> CONTROL
- u'\x99' # 0x39 -> CONTROL
- u'\x9a' # 0x3A -> CONTROL
- u'\x9b' # 0x3B -> CONTROL
- u'\x14' # 0x3C -> DEVICE CONTROL FOUR
- u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
- u'\x9e' # 0x3E -> CONTROL
- u'\x1a' # 0x3F -> SUBSTITUTE
- u' ' # 0x40 -> SPACE
- u'\xa0' # 0x41 -> NO-BREAK SPACE
- u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'{' # 0x48 -> LEFT CURLY BRACKET
- u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
- u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'.' # 0x4B -> FULL STOP
- u'<' # 0x4C -> LESS-THAN SIGN
- u'(' # 0x4D -> LEFT PARENTHESIS
- u'+' # 0x4E -> PLUS SIGN
- u'!' # 0x4F -> EXCLAMATION MARK
- u'&' # 0x50 -> AMPERSAND
- u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
- u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
- u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
- u'*' # 0x5C -> ASTERISK
- u')' # 0x5D -> RIGHT PARENTHESIS
- u';' # 0x5E -> SEMICOLON
- u'^' # 0x5F -> CIRCUMFLEX ACCENT
- u'-' # 0x60 -> HYPHEN-MINUS
- u'/' # 0x61 -> SOLIDUS
- u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'[' # 0x68 -> LEFT SQUARE BRACKET
- u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
- u',' # 0x6B -> COMMA
- u'%' # 0x6C -> PERCENT SIGN
- u'_' # 0x6D -> LOW LINE
- u'>' # 0x6E -> GREATER-THAN SIGN
- u'?' # 0x6F -> QUESTION MARK
- u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
- u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
- u':' # 0x7A -> COLON
- u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
- u"'" # 0x7D -> APOSTROPHE
- u'=' # 0x7E -> EQUALS SIGN
- u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
- u'a' # 0x81 -> LATIN SMALL LETTER A
- u'b' # 0x82 -> LATIN SMALL LETTER B
- u'c' # 0x83 -> LATIN SMALL LETTER C
- u'd' # 0x84 -> LATIN SMALL LETTER D
- u'e' # 0x85 -> LATIN SMALL LETTER E
- u'f' # 0x86 -> LATIN SMALL LETTER F
- u'g' # 0x87 -> LATIN SMALL LETTER G
- u'h' # 0x88 -> LATIN SMALL LETTER H
- u'i' # 0x89 -> LATIN SMALL LETTER I
- u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'}' # 0x8C -> RIGHT CURLY BRACKET
- u'`' # 0x8D -> GRAVE ACCENT
- u'\xa6' # 0x8E -> BROKEN BAR
- u'\xb1' # 0x8F -> PLUS-MINUS SIGN
- u'\xb0' # 0x90 -> DEGREE SIGN
- u'j' # 0x91 -> LATIN SMALL LETTER J
- u'k' # 0x92 -> LATIN SMALL LETTER K
- u'l' # 0x93 -> LATIN SMALL LETTER L
- u'm' # 0x94 -> LATIN SMALL LETTER M
- u'n' # 0x95 -> LATIN SMALL LETTER N
- u'o' # 0x96 -> LATIN SMALL LETTER O
- u'p' # 0x97 -> LATIN SMALL LETTER P
- u'q' # 0x98 -> LATIN SMALL LETTER Q
- u'r' # 0x99 -> LATIN SMALL LETTER R
- u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
- u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
- u'\xb8' # 0x9D -> CEDILLA
- u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
- u'\xa4' # 0x9F -> CURRENCY SIGN
- u'\xb5' # 0xA0 -> MICRO SIGN
- u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
- u's' # 0xA2 -> LATIN SMALL LETTER S
- u't' # 0xA3 -> LATIN SMALL LETTER T
- u'u' # 0xA4 -> LATIN SMALL LETTER U
- u'v' # 0xA5 -> LATIN SMALL LETTER V
- u'w' # 0xA6 -> LATIN SMALL LETTER W
- u'x' # 0xA7 -> LATIN SMALL LETTER X
- u'y' # 0xA8 -> LATIN SMALL LETTER Y
- u'z' # 0xA9 -> LATIN SMALL LETTER Z
- u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
- u'\xbf' # 0xAB -> INVERTED QUESTION MARK
- u']' # 0xAC -> RIGHT SQUARE BRACKET
- u'$' # 0xAD -> DOLLAR SIGN
- u'@' # 0xAE -> COMMERCIAL AT
- u'\xae' # 0xAF -> REGISTERED SIGN
- u'\xa2' # 0xB0 -> CENT SIGN
- u'\xa3' # 0xB1 -> POUND SIGN
- u'\xa5' # 0xB2 -> YEN SIGN
- u'\xb7' # 0xB3 -> MIDDLE DOT
- u'\xa9' # 0xB4 -> COPYRIGHT SIGN
- u'\xa7' # 0xB5 -> SECTION SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
- u'\xac' # 0xBA -> NOT SIGN
- u'|' # 0xBB -> VERTICAL LINE
- u'\xaf' # 0xBC -> MACRON
- u'\xa8' # 0xBD -> DIAERESIS
- u'\xb4' # 0xBE -> ACUTE ACCENT
- u'\xd7' # 0xBF -> MULTIPLICATION SIGN
- u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
- u'A' # 0xC1 -> LATIN CAPITAL LETTER A
- u'B' # 0xC2 -> LATIN CAPITAL LETTER B
- u'C' # 0xC3 -> LATIN CAPITAL LETTER C
- u'D' # 0xC4 -> LATIN CAPITAL LETTER D
- u'E' # 0xC5 -> LATIN CAPITAL LETTER E
- u'F' # 0xC6 -> LATIN CAPITAL LETTER F
- u'G' # 0xC7 -> LATIN CAPITAL LETTER G
- u'H' # 0xC8 -> LATIN CAPITAL LETTER H
- u'I' # 0xC9 -> LATIN CAPITAL LETTER I
- u'\xad' # 0xCA -> SOFT HYPHEN
- u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'~' # 0xCC -> TILDE
- u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
- u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
- u'J' # 0xD1 -> LATIN CAPITAL LETTER J
- u'K' # 0xD2 -> LATIN CAPITAL LETTER K
- u'L' # 0xD3 -> LATIN CAPITAL LETTER L
- u'M' # 0xD4 -> LATIN CAPITAL LETTER M
- u'N' # 0xD5 -> LATIN CAPITAL LETTER N
- u'O' # 0xD6 -> LATIN CAPITAL LETTER O
- u'P' # 0xD7 -> LATIN CAPITAL LETTER P
- u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
- u'R' # 0xD9 -> LATIN CAPITAL LETTER R
- u'\xb9' # 0xDA -> SUPERSCRIPT ONE
- u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\\' # 0xDC -> REVERSE SOLIDUS
- u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
- u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xf7' # 0xE1 -> DIVISION SIGN
- u'S' # 0xE2 -> LATIN CAPITAL LETTER S
- u'T' # 0xE3 -> LATIN CAPITAL LETTER T
- u'U' # 0xE4 -> LATIN CAPITAL LETTER U
- u'V' # 0xE5 -> LATIN CAPITAL LETTER V
- u'W' # 0xE6 -> LATIN CAPITAL LETTER W
- u'X' # 0xE7 -> LATIN CAPITAL LETTER X
- u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
- u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
- u'\xb2' # 0xEA -> SUPERSCRIPT TWO
- u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'#' # 0xEC -> NUMBER SIGN
- u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
- u'0' # 0xF0 -> DIGIT ZERO
- u'1' # 0xF1 -> DIGIT ONE
- u'2' # 0xF2 -> DIGIT TWO
- u'3' # 0xF3 -> DIGIT THREE
- u'4' # 0xF4 -> DIGIT FOUR
- u'5' # 0xF5 -> DIGIT FIVE
- u'6' # 0xF6 -> DIGIT SIX
- u'7' # 0xF7 -> DIGIT SEVEN
- u'8' # 0xF8 -> DIGIT EIGHT
- u'9' # 0xF9 -> DIGIT NINE
- u'\xb3' # 0xFA -> SUPERSCRIPT THREE
- u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'"' # 0xFC -> QUOTATION MARK
- u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\x9f' # 0xFF -> CONTROL
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1140.py b/sys/lib/python/encodings/cp1140.py
deleted file mode 100644
index 7e507fd85..000000000
--- a/sys/lib/python/encodings/cp1140.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1140',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x9c' # 0x04 -> CONTROL
- u'\t' # 0x05 -> HORIZONTAL TABULATION
- u'\x86' # 0x06 -> CONTROL
- u'\x7f' # 0x07 -> DELETE
- u'\x97' # 0x08 -> CONTROL
- u'\x8d' # 0x09 -> CONTROL
- u'\x8e' # 0x0A -> CONTROL
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x9d' # 0x14 -> CONTROL
- u'\x85' # 0x15 -> CONTROL
- u'\x08' # 0x16 -> BACKSPACE
- u'\x87' # 0x17 -> CONTROL
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x92' # 0x1A -> CONTROL
- u'\x8f' # 0x1B -> CONTROL
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u'\x80' # 0x20 -> CONTROL
- u'\x81' # 0x21 -> CONTROL
- u'\x82' # 0x22 -> CONTROL
- u'\x83' # 0x23 -> CONTROL
- u'\x84' # 0x24 -> CONTROL
- u'\n' # 0x25 -> LINE FEED
- u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
- u'\x1b' # 0x27 -> ESCAPE
- u'\x88' # 0x28 -> CONTROL
- u'\x89' # 0x29 -> CONTROL
- u'\x8a' # 0x2A -> CONTROL
- u'\x8b' # 0x2B -> CONTROL
- u'\x8c' # 0x2C -> CONTROL
- u'\x05' # 0x2D -> ENQUIRY
- u'\x06' # 0x2E -> ACKNOWLEDGE
- u'\x07' # 0x2F -> BELL
- u'\x90' # 0x30 -> CONTROL
- u'\x91' # 0x31 -> CONTROL
- u'\x16' # 0x32 -> SYNCHRONOUS IDLE
- u'\x93' # 0x33 -> CONTROL
- u'\x94' # 0x34 -> CONTROL
- u'\x95' # 0x35 -> CONTROL
- u'\x96' # 0x36 -> CONTROL
- u'\x04' # 0x37 -> END OF TRANSMISSION
- u'\x98' # 0x38 -> CONTROL
- u'\x99' # 0x39 -> CONTROL
- u'\x9a' # 0x3A -> CONTROL
- u'\x9b' # 0x3B -> CONTROL
- u'\x14' # 0x3C -> DEVICE CONTROL FOUR
- u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
- u'\x9e' # 0x3E -> CONTROL
- u'\x1a' # 0x3F -> SUBSTITUTE
- u' ' # 0x40 -> SPACE
- u'\xa0' # 0x41 -> NO-BREAK SPACE
- u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
- u'\xa2' # 0x4A -> CENT SIGN
- u'.' # 0x4B -> FULL STOP
- u'<' # 0x4C -> LESS-THAN SIGN
- u'(' # 0x4D -> LEFT PARENTHESIS
- u'+' # 0x4E -> PLUS SIGN
- u'|' # 0x4F -> VERTICAL LINE
- u'&' # 0x50 -> AMPERSAND
- u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
- u'!' # 0x5A -> EXCLAMATION MARK
- u'$' # 0x5B -> DOLLAR SIGN
- u'*' # 0x5C -> ASTERISK
- u')' # 0x5D -> RIGHT PARENTHESIS
- u';' # 0x5E -> SEMICOLON
- u'\xac' # 0x5F -> NOT SIGN
- u'-' # 0x60 -> HYPHEN-MINUS
- u'/' # 0x61 -> SOLIDUS
- u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xa6' # 0x6A -> BROKEN BAR
- u',' # 0x6B -> COMMA
- u'%' # 0x6C -> PERCENT SIGN
- u'_' # 0x6D -> LOW LINE
- u'>' # 0x6E -> GREATER-THAN SIGN
- u'?' # 0x6F -> QUESTION MARK
- u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
- u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
- u'`' # 0x79 -> GRAVE ACCENT
- u':' # 0x7A -> COLON
- u'#' # 0x7B -> NUMBER SIGN
- u'@' # 0x7C -> COMMERCIAL AT
- u"'" # 0x7D -> APOSTROPHE
- u'=' # 0x7E -> EQUALS SIGN
- u'"' # 0x7F -> QUOTATION MARK
- u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
- u'a' # 0x81 -> LATIN SMALL LETTER A
- u'b' # 0x82 -> LATIN SMALL LETTER B
- u'c' # 0x83 -> LATIN SMALL LETTER C
- u'd' # 0x84 -> LATIN SMALL LETTER D
- u'e' # 0x85 -> LATIN SMALL LETTER E
- u'f' # 0x86 -> LATIN SMALL LETTER F
- u'g' # 0x87 -> LATIN SMALL LETTER G
- u'h' # 0x88 -> LATIN SMALL LETTER H
- u'i' # 0x89 -> LATIN SMALL LETTER I
- u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
- u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
- u'\xb1' # 0x8F -> PLUS-MINUS SIGN
- u'\xb0' # 0x90 -> DEGREE SIGN
- u'j' # 0x91 -> LATIN SMALL LETTER J
- u'k' # 0x92 -> LATIN SMALL LETTER K
- u'l' # 0x93 -> LATIN SMALL LETTER L
- u'm' # 0x94 -> LATIN SMALL LETTER M
- u'n' # 0x95 -> LATIN SMALL LETTER N
- u'o' # 0x96 -> LATIN SMALL LETTER O
- u'p' # 0x97 -> LATIN SMALL LETTER P
- u'q' # 0x98 -> LATIN SMALL LETTER Q
- u'r' # 0x99 -> LATIN SMALL LETTER R
- u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
- u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
- u'\xb8' # 0x9D -> CEDILLA
- u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
- u'\u20ac' # 0x9F -> EURO SIGN
- u'\xb5' # 0xA0 -> MICRO SIGN
- u'~' # 0xA1 -> TILDE
- u's' # 0xA2 -> LATIN SMALL LETTER S
- u't' # 0xA3 -> LATIN SMALL LETTER T
- u'u' # 0xA4 -> LATIN SMALL LETTER U
- u'v' # 0xA5 -> LATIN SMALL LETTER V
- u'w' # 0xA6 -> LATIN SMALL LETTER W
- u'x' # 0xA7 -> LATIN SMALL LETTER X
- u'y' # 0xA8 -> LATIN SMALL LETTER Y
- u'z' # 0xA9 -> LATIN SMALL LETTER Z
- u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
- u'\xbf' # 0xAB -> INVERTED QUESTION MARK
- u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
- u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
- u'\xae' # 0xAF -> REGISTERED SIGN
- u'^' # 0xB0 -> CIRCUMFLEX ACCENT
- u'\xa3' # 0xB1 -> POUND SIGN
- u'\xa5' # 0xB2 -> YEN SIGN
- u'\xb7' # 0xB3 -> MIDDLE DOT
- u'\xa9' # 0xB4 -> COPYRIGHT SIGN
- u'\xa7' # 0xB5 -> SECTION SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
- u'[' # 0xBA -> LEFT SQUARE BRACKET
- u']' # 0xBB -> RIGHT SQUARE BRACKET
- u'\xaf' # 0xBC -> MACRON
- u'\xa8' # 0xBD -> DIAERESIS
- u'\xb4' # 0xBE -> ACUTE ACCENT
- u'\xd7' # 0xBF -> MULTIPLICATION SIGN
- u'{' # 0xC0 -> LEFT CURLY BRACKET
- u'A' # 0xC1 -> LATIN CAPITAL LETTER A
- u'B' # 0xC2 -> LATIN CAPITAL LETTER B
- u'C' # 0xC3 -> LATIN CAPITAL LETTER C
- u'D' # 0xC4 -> LATIN CAPITAL LETTER D
- u'E' # 0xC5 -> LATIN CAPITAL LETTER E
- u'F' # 0xC6 -> LATIN CAPITAL LETTER F
- u'G' # 0xC7 -> LATIN CAPITAL LETTER G
- u'H' # 0xC8 -> LATIN CAPITAL LETTER H
- u'I' # 0xC9 -> LATIN CAPITAL LETTER I
- u'\xad' # 0xCA -> SOFT HYPHEN
- u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
- u'}' # 0xD0 -> RIGHT CURLY BRACKET
- u'J' # 0xD1 -> LATIN CAPITAL LETTER J
- u'K' # 0xD2 -> LATIN CAPITAL LETTER K
- u'L' # 0xD3 -> LATIN CAPITAL LETTER L
- u'M' # 0xD4 -> LATIN CAPITAL LETTER M
- u'N' # 0xD5 -> LATIN CAPITAL LETTER N
- u'O' # 0xD6 -> LATIN CAPITAL LETTER O
- u'P' # 0xD7 -> LATIN CAPITAL LETTER P
- u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
- u'R' # 0xD9 -> LATIN CAPITAL LETTER R
- u'\xb9' # 0xDA -> SUPERSCRIPT ONE
- u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
- u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\\' # 0xE0 -> REVERSE SOLIDUS
- u'\xf7' # 0xE1 -> DIVISION SIGN
- u'S' # 0xE2 -> LATIN CAPITAL LETTER S
- u'T' # 0xE3 -> LATIN CAPITAL LETTER T
- u'U' # 0xE4 -> LATIN CAPITAL LETTER U
- u'V' # 0xE5 -> LATIN CAPITAL LETTER V
- u'W' # 0xE6 -> LATIN CAPITAL LETTER W
- u'X' # 0xE7 -> LATIN CAPITAL LETTER X
- u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
- u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
- u'\xb2' # 0xEA -> SUPERSCRIPT TWO
- u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
- u'0' # 0xF0 -> DIGIT ZERO
- u'1' # 0xF1 -> DIGIT ONE
- u'2' # 0xF2 -> DIGIT TWO
- u'3' # 0xF3 -> DIGIT THREE
- u'4' # 0xF4 -> DIGIT FOUR
- u'5' # 0xF5 -> DIGIT FIVE
- u'6' # 0xF6 -> DIGIT SIX
- u'7' # 0xF7 -> DIGIT SEVEN
- u'8' # 0xF8 -> DIGIT EIGHT
- u'9' # 0xF9 -> DIGIT NINE
- u'\xb3' # 0xFA -> SUPERSCRIPT THREE
- u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\x9f' # 0xFF -> CONTROL
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1250.py b/sys/lib/python/encodings/cp1250.py
deleted file mode 100644
index d620b8933..000000000
--- a/sys/lib/python/encodings/cp1250.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1250',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\ufffe' # 0x81 -> UNDEFINED
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\ufffe' # 0x83 -> UNDEFINED
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\ufffe' # 0x88 -> UNDEFINED
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE
- u'\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON
- u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
- u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
- u'\ufffe' # 0x90 -> UNDEFINED
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\ufffe' # 0x98 -> UNDEFINED
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE
- u'\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON
- u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
- u'\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u02c7' # 0xA1 -> CARON
- u'\u02d8' # 0xA2 -> BREVE
- u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u02db' # 0xB2 -> OGONEK
- u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON
- u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
- u'\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON
- u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
- u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
- u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
- u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
- u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
- u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
- u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
- u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
- u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
- u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
- u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
- u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
- u'\u02d9' # 0xFF -> DOT ABOVE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1251.py b/sys/lib/python/encodings/cp1251.py
deleted file mode 100644
index 216771fa4..000000000
--- a/sys/lib/python/encodings/cp1251.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1251',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
- u'\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\u20ac' # 0x88 -> EURO SIGN
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
- u'\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
- u'\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
- u'\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
- u'\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\ufffe' # 0x98 -> UNDEFINED
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
- u'\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
- u'\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
- u'\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
- u'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
- u'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
- u'\u2116' # 0xB9 -> NUMERO SIGN
- u'\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
- u'\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
- u'\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
- u'\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
- u'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
- u'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
- u'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
- u'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
- u'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
- u'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
- u'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
- u'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
- u'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
- u'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
- u'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
- u'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
- u'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
- u'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
- u'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
- u'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
- u'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
- u'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
- u'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
- u'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
- u'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
- u'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
- u'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
- u'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
- u'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
- u'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
- u'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
- u'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
- u'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
- u'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
- u'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
- u'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
- u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
- u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
- u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
- u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
- u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
- u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
- u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
- u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
- u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
- u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
- u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
- u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
- u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
- u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
- u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
- u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
- u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
- u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
- u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
- u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
- u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
- u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
- u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
- u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
- u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
- u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
- u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
- u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
- u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
- u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
- u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
- u'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1252.py b/sys/lib/python/encodings/cp1252.py
deleted file mode 100644
index e60a328db..000000000
--- a/sys/lib/python/encodings/cp1252.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1252 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1252',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\ufffe' # 0x81 -> UNDEFINED
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
- u'\ufffe' # 0x8D -> UNDEFINED
- u'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
- u'\ufffe' # 0x8F -> UNDEFINED
- u'\ufffe' # 0x90 -> UNDEFINED
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\u02dc' # 0x98 -> SMALL TILDE
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
- u'\ufffe' # 0x9D -> UNDEFINED
- u'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
- u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\xbf' # 0xBF -> INVERTED QUESTION MARK
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
- u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
- u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
- u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1253.py b/sys/lib/python/encodings/cp1253.py
deleted file mode 100644
index 49f6cccbd..000000000
--- a/sys/lib/python/encodings/cp1253.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1253 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1253.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1253',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\ufffe' # 0x81 -> UNDEFINED
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\ufffe' # 0x88 -> UNDEFINED
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\ufffe' # 0x8A -> UNDEFINED
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\ufffe' # 0x8C -> UNDEFINED
- u'\ufffe' # 0x8D -> UNDEFINED
- u'\ufffe' # 0x8E -> UNDEFINED
- u'\ufffe' # 0x8F -> UNDEFINED
- u'\ufffe' # 0x90 -> UNDEFINED
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\ufffe' # 0x98 -> UNDEFINED
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\ufffe' # 0x9A -> UNDEFINED
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\ufffe' # 0x9C -> UNDEFINED
- u'\ufffe' # 0x9D -> UNDEFINED
- u'\ufffe' # 0x9E -> UNDEFINED
- u'\ufffe' # 0x9F -> UNDEFINED
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0385' # 0xA1 -> GREEK DIALYTIKA TONOS
- u'\u0386' # 0xA2 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\ufffe' # 0xAA -> UNDEFINED
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\u2015' # 0xAF -> HORIZONTAL BAR
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\u0384' # 0xB4 -> GREEK TONOS
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
- u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
- u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
- u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
- u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
- u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
- u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
- u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
- u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
- u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
- u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
- u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
- u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
- u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
- u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
- u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
- u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
- u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
- u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
- u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
- u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
- u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
- u'\ufffe' # 0xD2 -> UNDEFINED
- u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
- u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
- u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
- u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
- u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
- u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
- u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
- u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
- u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
- u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
- u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
- u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
- u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
- u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
- u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
- u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
- u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
- u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
- u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
- u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
- u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
- u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
- u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
- u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
- u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
- u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
- u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
- u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
- u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
- u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
- u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
- u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
- u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
- u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
- u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
- u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
- u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
- u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
- u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
- u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
- u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
- u'\ufffe' # 0xFF -> UNDEFINED
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1254.py b/sys/lib/python/encodings/cp1254.py
deleted file mode 100644
index 65530ab54..000000000
--- a/sys/lib/python/encodings/cp1254.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1254',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\ufffe' # 0x81 -> UNDEFINED
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
- u'\ufffe' # 0x8D -> UNDEFINED
- u'\ufffe' # 0x8E -> UNDEFINED
- u'\ufffe' # 0x8F -> UNDEFINED
- u'\ufffe' # 0x90 -> UNDEFINED
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\u02dc' # 0x98 -> SMALL TILDE
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
- u'\ufffe' # 0x9D -> UNDEFINED
- u'\ufffe' # 0x9E -> UNDEFINED
- u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\xbf' # 0xBF -> INVERTED QUESTION MARK
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
- u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
- u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
- u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
- u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
- u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1255.py b/sys/lib/python/encodings/cp1255.py
deleted file mode 100644
index fd1456fab..000000000
--- a/sys/lib/python/encodings/cp1255.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1255',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\ufffe' # 0x81 -> UNDEFINED
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\ufffe' # 0x8A -> UNDEFINED
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\ufffe' # 0x8C -> UNDEFINED
- u'\ufffe' # 0x8D -> UNDEFINED
- u'\ufffe' # 0x8E -> UNDEFINED
- u'\ufffe' # 0x8F -> UNDEFINED
- u'\ufffe' # 0x90 -> UNDEFINED
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\u02dc' # 0x98 -> SMALL TILDE
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\ufffe' # 0x9A -> UNDEFINED
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\ufffe' # 0x9C -> UNDEFINED
- u'\ufffe' # 0x9D -> UNDEFINED
- u'\ufffe' # 0x9E -> UNDEFINED
- u'\ufffe' # 0x9F -> UNDEFINED
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\u20aa' # 0xA4 -> NEW SHEQEL SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\xd7' # 0xAA -> MULTIPLICATION SIGN
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\xf7' # 0xBA -> DIVISION SIGN
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\xbf' # 0xBF -> INVERTED QUESTION MARK
- u'\u05b0' # 0xC0 -> HEBREW POINT SHEVA
- u'\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
- u'\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
- u'\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
- u'\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
- u'\u05b5' # 0xC5 -> HEBREW POINT TSERE
- u'\u05b6' # 0xC6 -> HEBREW POINT SEGOL
- u'\u05b7' # 0xC7 -> HEBREW POINT PATAH
- u'\u05b8' # 0xC8 -> HEBREW POINT QAMATS
- u'\u05b9' # 0xC9 -> HEBREW POINT HOLAM
- u'\ufffe' # 0xCA -> UNDEFINED
- u'\u05bb' # 0xCB -> HEBREW POINT QUBUTS
- u'\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
- u'\u05bd' # 0xCD -> HEBREW POINT METEG
- u'\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
- u'\u05bf' # 0xCF -> HEBREW POINT RAFE
- u'\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
- u'\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
- u'\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
- u'\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
- u'\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
- u'\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
- u'\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
- u'\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
- u'\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
- u'\ufffe' # 0xD9 -> UNDEFINED
- u'\ufffe' # 0xDA -> UNDEFINED
- u'\ufffe' # 0xDB -> UNDEFINED
- u'\ufffe' # 0xDC -> UNDEFINED
- u'\ufffe' # 0xDD -> UNDEFINED
- u'\ufffe' # 0xDE -> UNDEFINED
- u'\ufffe' # 0xDF -> UNDEFINED
- u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
- u'\u05d1' # 0xE1 -> HEBREW LETTER BET
- u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
- u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
- u'\u05d4' # 0xE4 -> HEBREW LETTER HE
- u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
- u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
- u'\u05d7' # 0xE7 -> HEBREW LETTER HET
- u'\u05d8' # 0xE8 -> HEBREW LETTER TET
- u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
- u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
- u'\u05db' # 0xEB -> HEBREW LETTER KAF
- u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
- u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
- u'\u05de' # 0xEE -> HEBREW LETTER MEM
- u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
- u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
- u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
- u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
- u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
- u'\u05e4' # 0xF4 -> HEBREW LETTER PE
- u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
- u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
- u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
- u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
- u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
- u'\u05ea' # 0xFA -> HEBREW LETTER TAV
- u'\ufffe' # 0xFB -> UNDEFINED
- u'\ufffe' # 0xFC -> UNDEFINED
- u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
- u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
- u'\ufffe' # 0xFF -> UNDEFINED
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1256.py b/sys/lib/python/encodings/cp1256.py
deleted file mode 100644
index 302b5fa06..000000000
--- a/sys/lib/python/encodings/cp1256.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1256 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1256.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1256',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\u067e' # 0x81 -> ARABIC LETTER PEH
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\u0679' # 0x8A -> ARABIC LETTER TTEH
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
- u'\u0686' # 0x8D -> ARABIC LETTER TCHEH
- u'\u0698' # 0x8E -> ARABIC LETTER JEH
- u'\u0688' # 0x8F -> ARABIC LETTER DDAL
- u'\u06af' # 0x90 -> ARABIC LETTER GAF
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\u06a9' # 0x98 -> ARABIC LETTER KEHEH
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\u0691' # 0x9A -> ARABIC LETTER RREH
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
- u'\u200c' # 0x9D -> ZERO WIDTH NON-JOINER
- u'\u200d' # 0x9E -> ZERO WIDTH JOINER
- u'\u06ba' # 0x9F -> ARABIC LETTER NOON GHUNNA
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u060c' # 0xA1 -> ARABIC COMMA
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u06be' # 0xAA -> ARABIC LETTER HEH DOACHASHMEE
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\u061b' # 0xBA -> ARABIC SEMICOLON
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\u061f' # 0xBF -> ARABIC QUESTION MARK
- u'\u06c1' # 0xC0 -> ARABIC LETTER HEH GOAL
- u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
- u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
- u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
- u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
- u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
- u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
- u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
- u'\u0628' # 0xC8 -> ARABIC LETTER BEH
- u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
- u'\u062a' # 0xCA -> ARABIC LETTER TEH
- u'\u062b' # 0xCB -> ARABIC LETTER THEH
- u'\u062c' # 0xCC -> ARABIC LETTER JEEM
- u'\u062d' # 0xCD -> ARABIC LETTER HAH
- u'\u062e' # 0xCE -> ARABIC LETTER KHAH
- u'\u062f' # 0xCF -> ARABIC LETTER DAL
- u'\u0630' # 0xD0 -> ARABIC LETTER THAL
- u'\u0631' # 0xD1 -> ARABIC LETTER REH
- u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
- u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
- u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
- u'\u0635' # 0xD5 -> ARABIC LETTER SAD
- u'\u0636' # 0xD6 -> ARABIC LETTER DAD
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\u0637' # 0xD8 -> ARABIC LETTER TAH
- u'\u0638' # 0xD9 -> ARABIC LETTER ZAH
- u'\u0639' # 0xDA -> ARABIC LETTER AIN
- u'\u063a' # 0xDB -> ARABIC LETTER GHAIN
- u'\u0640' # 0xDC -> ARABIC TATWEEL
- u'\u0641' # 0xDD -> ARABIC LETTER FEH
- u'\u0642' # 0xDE -> ARABIC LETTER QAF
- u'\u0643' # 0xDF -> ARABIC LETTER KAF
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\u0644' # 0xE1 -> ARABIC LETTER LAM
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\u0645' # 0xE3 -> ARABIC LETTER MEEM
- u'\u0646' # 0xE4 -> ARABIC LETTER NOON
- u'\u0647' # 0xE5 -> ARABIC LETTER HEH
- u'\u0648' # 0xE6 -> ARABIC LETTER WAW
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\u0649' # 0xEC -> ARABIC LETTER ALEF MAKSURA
- u'\u064a' # 0xED -> ARABIC LETTER YEH
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\u064b' # 0xF0 -> ARABIC FATHATAN
- u'\u064c' # 0xF1 -> ARABIC DAMMATAN
- u'\u064d' # 0xF2 -> ARABIC KASRATAN
- u'\u064e' # 0xF3 -> ARABIC FATHA
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\u064f' # 0xF5 -> ARABIC DAMMA
- u'\u0650' # 0xF6 -> ARABIC KASRA
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\u0651' # 0xF8 -> ARABIC SHADDA
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\u0652' # 0xFA -> ARABIC SUKUN
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
- u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
- u'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1257.py b/sys/lib/python/encodings/cp1257.py
deleted file mode 100644
index 53a6b29d5..000000000
--- a/sys/lib/python/encodings/cp1257.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1257 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1257.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1257',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\ufffe' # 0x81 -> UNDEFINED
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\ufffe' # 0x83 -> UNDEFINED
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\ufffe' # 0x88 -> UNDEFINED
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\ufffe' # 0x8A -> UNDEFINED
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\ufffe' # 0x8C -> UNDEFINED
- u'\xa8' # 0x8D -> DIAERESIS
- u'\u02c7' # 0x8E -> CARON
- u'\xb8' # 0x8F -> CEDILLA
- u'\ufffe' # 0x90 -> UNDEFINED
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\ufffe' # 0x98 -> UNDEFINED
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\ufffe' # 0x9A -> UNDEFINED
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\ufffe' # 0x9C -> UNDEFINED
- u'\xaf' # 0x9D -> MACRON
- u'\u02db' # 0x9E -> OGONEK
- u'\ufffe' # 0x9F -> UNDEFINED
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\ufffe' # 0xA1 -> UNDEFINED
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\ufffe' # 0xA5 -> UNDEFINED
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\xe6' # 0xBF -> LATIN SMALL LETTER AE
- u'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
- u'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
- u'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
- u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
- u'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
- u'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
- u'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
- u'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
- u'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
- u'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
- u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
- u'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
- u'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
- u'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
- u'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
- u'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
- u'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
- u'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
- u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
- u'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
- u'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
- u'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
- u'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
- u'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
- u'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
- u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
- u'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
- u'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
- u'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
- u'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
- u'\u02d9' # 0xFF -> DOT ABOVE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp1258.py b/sys/lib/python/encodings/cp1258.py
deleted file mode 100644
index 4b25d8e7e..000000000
--- a/sys/lib/python/encodings/cp1258.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp1258',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\ufffe' # 0x81 -> UNDEFINED
- u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
- u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
- u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\u2020' # 0x86 -> DAGGER
- u'\u2021' # 0x87 -> DOUBLE DAGGER
- u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u2030' # 0x89 -> PER MILLE SIGN
- u'\ufffe' # 0x8A -> UNDEFINED
- u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
- u'\ufffe' # 0x8D -> UNDEFINED
- u'\ufffe' # 0x8E -> UNDEFINED
- u'\ufffe' # 0x8F -> UNDEFINED
- u'\ufffe' # 0x90 -> UNDEFINED
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\u02dc' # 0x98 -> SMALL TILDE
- u'\u2122' # 0x99 -> TRADE MARK SIGN
- u'\ufffe' # 0x9A -> UNDEFINED
- u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
- u'\ufffe' # 0x9D -> UNDEFINED
- u'\ufffe' # 0x9E -> UNDEFINED
- u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\xbf' # 0xBF -> INVERTED QUESTION MARK
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
- u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
- u'\u0303' # 0xDE -> COMBINING TILDE
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
- u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
- u'\u0323' # 0xF2 -> COMBINING DOT BELOW
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
- u'\u20ab' # 0xFE -> DONG SIGN
- u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp424.py b/sys/lib/python/encodings/cp424.py
deleted file mode 100644
index d3ade2277..000000000
--- a/sys/lib/python/encodings/cp424.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp424',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x9c' # 0x04 -> SELECT
- u'\t' # 0x05 -> HORIZONTAL TABULATION
- u'\x86' # 0x06 -> REQUIRED NEW LINE
- u'\x7f' # 0x07 -> DELETE
- u'\x97' # 0x08 -> GRAPHIC ESCAPE
- u'\x8d' # 0x09 -> SUPERSCRIPT
- u'\x8e' # 0x0A -> REPEAT
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION
- u'\x85' # 0x15 -> NEW LINE
- u'\x08' # 0x16 -> BACKSPACE
- u'\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x92' # 0x1A -> UNIT BACK SPACE
- u'\x8f' # 0x1B -> CUSTOMER USE ONE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u'\x80' # 0x20 -> DIGIT SELECT
- u'\x81' # 0x21 -> START OF SIGNIFICANCE
- u'\x82' # 0x22 -> FIELD SEPARATOR
- u'\x83' # 0x23 -> WORD UNDERSCORE
- u'\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION
- u'\n' # 0x25 -> LINE FEED
- u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
- u'\x1b' # 0x27 -> ESCAPE
- u'\x88' # 0x28 -> SET ATTRIBUTE
- u'\x89' # 0x29 -> START FIELD EXTENDED
- u'\x8a' # 0x2A -> SET MODE OR SWITCH
- u'\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX
- u'\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE
- u'\x05' # 0x2D -> ENQUIRY
- u'\x06' # 0x2E -> ACKNOWLEDGE
- u'\x07' # 0x2F -> BELL
- u'\x90' # 0x30 -> <reserved>
- u'\x91' # 0x31 -> <reserved>
- u'\x16' # 0x32 -> SYNCHRONOUS IDLE
- u'\x93' # 0x33 -> INDEX RETURN
- u'\x94' # 0x34 -> PRESENTATION POSITION
- u'\x95' # 0x35 -> TRANSPARENT
- u'\x96' # 0x36 -> NUMERIC BACKSPACE
- u'\x04' # 0x37 -> END OF TRANSMISSION
- u'\x98' # 0x38 -> SUBSCRIPT
- u'\x99' # 0x39 -> INDENT TABULATION
- u'\x9a' # 0x3A -> REVERSE FORM FEED
- u'\x9b' # 0x3B -> CUSTOMER USE THREE
- u'\x14' # 0x3C -> DEVICE CONTROL FOUR
- u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
- u'\x9e' # 0x3E -> <reserved>
- u'\x1a' # 0x3F -> SUBSTITUTE
- u' ' # 0x40 -> SPACE
- u'\u05d0' # 0x41 -> HEBREW LETTER ALEF
- u'\u05d1' # 0x42 -> HEBREW LETTER BET
- u'\u05d2' # 0x43 -> HEBREW LETTER GIMEL
- u'\u05d3' # 0x44 -> HEBREW LETTER DALET
- u'\u05d4' # 0x45 -> HEBREW LETTER HE
- u'\u05d5' # 0x46 -> HEBREW LETTER VAV
- u'\u05d6' # 0x47 -> HEBREW LETTER ZAYIN
- u'\u05d7' # 0x48 -> HEBREW LETTER HET
- u'\u05d8' # 0x49 -> HEBREW LETTER TET
- u'\xa2' # 0x4A -> CENT SIGN
- u'.' # 0x4B -> FULL STOP
- u'<' # 0x4C -> LESS-THAN SIGN
- u'(' # 0x4D -> LEFT PARENTHESIS
- u'+' # 0x4E -> PLUS SIGN
- u'|' # 0x4F -> VERTICAL LINE
- u'&' # 0x50 -> AMPERSAND
- u'\u05d9' # 0x51 -> HEBREW LETTER YOD
- u'\u05da' # 0x52 -> HEBREW LETTER FINAL KAF
- u'\u05db' # 0x53 -> HEBREW LETTER KAF
- u'\u05dc' # 0x54 -> HEBREW LETTER LAMED
- u'\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM
- u'\u05de' # 0x56 -> HEBREW LETTER MEM
- u'\u05df' # 0x57 -> HEBREW LETTER FINAL NUN
- u'\u05e0' # 0x58 -> HEBREW LETTER NUN
- u'\u05e1' # 0x59 -> HEBREW LETTER SAMEKH
- u'!' # 0x5A -> EXCLAMATION MARK
- u'$' # 0x5B -> DOLLAR SIGN
- u'*' # 0x5C -> ASTERISK
- u')' # 0x5D -> RIGHT PARENTHESIS
- u';' # 0x5E -> SEMICOLON
- u'\xac' # 0x5F -> NOT SIGN
- u'-' # 0x60 -> HYPHEN-MINUS
- u'/' # 0x61 -> SOLIDUS
- u'\u05e2' # 0x62 -> HEBREW LETTER AYIN
- u'\u05e3' # 0x63 -> HEBREW LETTER FINAL PE
- u'\u05e4' # 0x64 -> HEBREW LETTER PE
- u'\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI
- u'\u05e6' # 0x66 -> HEBREW LETTER TSADI
- u'\u05e7' # 0x67 -> HEBREW LETTER QOF
- u'\u05e8' # 0x68 -> HEBREW LETTER RESH
- u'\u05e9' # 0x69 -> HEBREW LETTER SHIN
- u'\xa6' # 0x6A -> BROKEN BAR
- u',' # 0x6B -> COMMA
- u'%' # 0x6C -> PERCENT SIGN
- u'_' # 0x6D -> LOW LINE
- u'>' # 0x6E -> GREATER-THAN SIGN
- u'?' # 0x6F -> QUESTION MARK
- u'\ufffe' # 0x70 -> UNDEFINED
- u'\u05ea' # 0x71 -> HEBREW LETTER TAV
- u'\ufffe' # 0x72 -> UNDEFINED
- u'\ufffe' # 0x73 -> UNDEFINED
- u'\xa0' # 0x74 -> NO-BREAK SPACE
- u'\ufffe' # 0x75 -> UNDEFINED
- u'\ufffe' # 0x76 -> UNDEFINED
- u'\ufffe' # 0x77 -> UNDEFINED
- u'\u2017' # 0x78 -> DOUBLE LOW LINE
- u'`' # 0x79 -> GRAVE ACCENT
- u':' # 0x7A -> COLON
- u'#' # 0x7B -> NUMBER SIGN
- u'@' # 0x7C -> COMMERCIAL AT
- u"'" # 0x7D -> APOSTROPHE
- u'=' # 0x7E -> EQUALS SIGN
- u'"' # 0x7F -> QUOTATION MARK
- u'\ufffe' # 0x80 -> UNDEFINED
- u'a' # 0x81 -> LATIN SMALL LETTER A
- u'b' # 0x82 -> LATIN SMALL LETTER B
- u'c' # 0x83 -> LATIN SMALL LETTER C
- u'd' # 0x84 -> LATIN SMALL LETTER D
- u'e' # 0x85 -> LATIN SMALL LETTER E
- u'f' # 0x86 -> LATIN SMALL LETTER F
- u'g' # 0x87 -> LATIN SMALL LETTER G
- u'h' # 0x88 -> LATIN SMALL LETTER H
- u'i' # 0x89 -> LATIN SMALL LETTER I
- u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\ufffe' # 0x8C -> UNDEFINED
- u'\ufffe' # 0x8D -> UNDEFINED
- u'\ufffe' # 0x8E -> UNDEFINED
- u'\xb1' # 0x8F -> PLUS-MINUS SIGN
- u'\xb0' # 0x90 -> DEGREE SIGN
- u'j' # 0x91 -> LATIN SMALL LETTER J
- u'k' # 0x92 -> LATIN SMALL LETTER K
- u'l' # 0x93 -> LATIN SMALL LETTER L
- u'm' # 0x94 -> LATIN SMALL LETTER M
- u'n' # 0x95 -> LATIN SMALL LETTER N
- u'o' # 0x96 -> LATIN SMALL LETTER O
- u'p' # 0x97 -> LATIN SMALL LETTER P
- u'q' # 0x98 -> LATIN SMALL LETTER Q
- u'r' # 0x99 -> LATIN SMALL LETTER R
- u'\ufffe' # 0x9A -> UNDEFINED
- u'\ufffe' # 0x9B -> UNDEFINED
- u'\ufffe' # 0x9C -> UNDEFINED
- u'\xb8' # 0x9D -> CEDILLA
- u'\ufffe' # 0x9E -> UNDEFINED
- u'\xa4' # 0x9F -> CURRENCY SIGN
- u'\xb5' # 0xA0 -> MICRO SIGN
- u'~' # 0xA1 -> TILDE
- u's' # 0xA2 -> LATIN SMALL LETTER S
- u't' # 0xA3 -> LATIN SMALL LETTER T
- u'u' # 0xA4 -> LATIN SMALL LETTER U
- u'v' # 0xA5 -> LATIN SMALL LETTER V
- u'w' # 0xA6 -> LATIN SMALL LETTER W
- u'x' # 0xA7 -> LATIN SMALL LETTER X
- u'y' # 0xA8 -> LATIN SMALL LETTER Y
- u'z' # 0xA9 -> LATIN SMALL LETTER Z
- u'\ufffe' # 0xAA -> UNDEFINED
- u'\ufffe' # 0xAB -> UNDEFINED
- u'\ufffe' # 0xAC -> UNDEFINED
- u'\ufffe' # 0xAD -> UNDEFINED
- u'\ufffe' # 0xAE -> UNDEFINED
- u'\xae' # 0xAF -> REGISTERED SIGN
- u'^' # 0xB0 -> CIRCUMFLEX ACCENT
- u'\xa3' # 0xB1 -> POUND SIGN
- u'\xa5' # 0xB2 -> YEN SIGN
- u'\xb7' # 0xB3 -> MIDDLE DOT
- u'\xa9' # 0xB4 -> COPYRIGHT SIGN
- u'\xa7' # 0xB5 -> SECTION SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
- u'[' # 0xBA -> LEFT SQUARE BRACKET
- u']' # 0xBB -> RIGHT SQUARE BRACKET
- u'\xaf' # 0xBC -> MACRON
- u'\xa8' # 0xBD -> DIAERESIS
- u'\xb4' # 0xBE -> ACUTE ACCENT
- u'\xd7' # 0xBF -> MULTIPLICATION SIGN
- u'{' # 0xC0 -> LEFT CURLY BRACKET
- u'A' # 0xC1 -> LATIN CAPITAL LETTER A
- u'B' # 0xC2 -> LATIN CAPITAL LETTER B
- u'C' # 0xC3 -> LATIN CAPITAL LETTER C
- u'D' # 0xC4 -> LATIN CAPITAL LETTER D
- u'E' # 0xC5 -> LATIN CAPITAL LETTER E
- u'F' # 0xC6 -> LATIN CAPITAL LETTER F
- u'G' # 0xC7 -> LATIN CAPITAL LETTER G
- u'H' # 0xC8 -> LATIN CAPITAL LETTER H
- u'I' # 0xC9 -> LATIN CAPITAL LETTER I
- u'\xad' # 0xCA -> SOFT HYPHEN
- u'\ufffe' # 0xCB -> UNDEFINED
- u'\ufffe' # 0xCC -> UNDEFINED
- u'\ufffe' # 0xCD -> UNDEFINED
- u'\ufffe' # 0xCE -> UNDEFINED
- u'\ufffe' # 0xCF -> UNDEFINED
- u'}' # 0xD0 -> RIGHT CURLY BRACKET
- u'J' # 0xD1 -> LATIN CAPITAL LETTER J
- u'K' # 0xD2 -> LATIN CAPITAL LETTER K
- u'L' # 0xD3 -> LATIN CAPITAL LETTER L
- u'M' # 0xD4 -> LATIN CAPITAL LETTER M
- u'N' # 0xD5 -> LATIN CAPITAL LETTER N
- u'O' # 0xD6 -> LATIN CAPITAL LETTER O
- u'P' # 0xD7 -> LATIN CAPITAL LETTER P
- u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
- u'R' # 0xD9 -> LATIN CAPITAL LETTER R
- u'\xb9' # 0xDA -> SUPERSCRIPT ONE
- u'\ufffe' # 0xDB -> UNDEFINED
- u'\ufffe' # 0xDC -> UNDEFINED
- u'\ufffe' # 0xDD -> UNDEFINED
- u'\ufffe' # 0xDE -> UNDEFINED
- u'\ufffe' # 0xDF -> UNDEFINED
- u'\\' # 0xE0 -> REVERSE SOLIDUS
- u'\xf7' # 0xE1 -> DIVISION SIGN
- u'S' # 0xE2 -> LATIN CAPITAL LETTER S
- u'T' # 0xE3 -> LATIN CAPITAL LETTER T
- u'U' # 0xE4 -> LATIN CAPITAL LETTER U
- u'V' # 0xE5 -> LATIN CAPITAL LETTER V
- u'W' # 0xE6 -> LATIN CAPITAL LETTER W
- u'X' # 0xE7 -> LATIN CAPITAL LETTER X
- u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
- u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
- u'\xb2' # 0xEA -> SUPERSCRIPT TWO
- u'\ufffe' # 0xEB -> UNDEFINED
- u'\ufffe' # 0xEC -> UNDEFINED
- u'\ufffe' # 0xED -> UNDEFINED
- u'\ufffe' # 0xEE -> UNDEFINED
- u'\ufffe' # 0xEF -> UNDEFINED
- u'0' # 0xF0 -> DIGIT ZERO
- u'1' # 0xF1 -> DIGIT ONE
- u'2' # 0xF2 -> DIGIT TWO
- u'3' # 0xF3 -> DIGIT THREE
- u'4' # 0xF4 -> DIGIT FOUR
- u'5' # 0xF5 -> DIGIT FIVE
- u'6' # 0xF6 -> DIGIT SIX
- u'7' # 0xF7 -> DIGIT SEVEN
- u'8' # 0xF8 -> DIGIT EIGHT
- u'9' # 0xF9 -> DIGIT NINE
- u'\xb3' # 0xFA -> SUPERSCRIPT THREE
- u'\ufffe' # 0xFB -> UNDEFINED
- u'\ufffe' # 0xFC -> UNDEFINED
- u'\ufffe' # 0xFD -> UNDEFINED
- u'\ufffe' # 0xFE -> UNDEFINED
- u'\x9f' # 0xFF -> EIGHT ONES
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp437.py b/sys/lib/python/encodings/cp437.py
deleted file mode 100644
index 52cd88294..000000000
--- a/sys/lib/python/encodings/cp437.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp437',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
- 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
- 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
- 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
- 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
- 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x00a2, # CENT SIGN
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00a5, # YEN SIGN
- 0x009e: 0x20a7, # PESETA SIGN
- 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
- 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
- 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
- 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
- 0x00a8: 0x00bf, # INVERTED QUESTION MARK
- 0x00a9: 0x2310, # REVERSED NOT SIGN
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
- 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
- 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
- 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
- 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
- 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
- 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
- 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
- 0x00ec: 0x221e, # INFINITY
- 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
- 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
- 0x00ef: 0x2229, # INTERSECTION
- 0x00f0: 0x2261, # IDENTICAL TO
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
- 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
- 0x00f4: 0x2320, # TOP HALF INTEGRAL
- 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x2248, # ALMOST EQUAL TO
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x221a, # SQUARE ROOT
- 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
- u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
- u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
- u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
- u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xa2' # 0x009b -> CENT SIGN
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xa5' # 0x009d -> YEN SIGN
- u'\u20a7' # 0x009e -> PESETA SIGN
- u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
- u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
- u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
- u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
- u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
- u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
- u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
- u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
- u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
- u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
- u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
- u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
- u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
- u'\u221e' # 0x00ec -> INFINITY
- u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
- u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
- u'\u2229' # 0x00ef -> INTERSECTION
- u'\u2261' # 0x00f0 -> IDENTICAL TO
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
- u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
- u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
- u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\u221a' # 0x00fb -> SQUARE ROOT
- u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
- 0x00a2: 0x009b, # CENT SIGN
- 0x00a3: 0x009c, # POUND SIGN
- 0x00a5: 0x009d, # YEN SIGN
- 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00bf: 0x00a8, # INVERTED QUESTION MARK
- 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
- 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
- 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
- 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
- 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
- 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
- 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
- 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
- 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
- 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
- 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
- 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
- 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
- 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
- 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
- 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
- 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
- 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
- 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
- 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
- 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
- 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x20a7: 0x009e, # PESETA SIGN
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x221a: 0x00fb, # SQUARE ROOT
- 0x221e: 0x00ec, # INFINITY
- 0x2229: 0x00ef, # INTERSECTION
- 0x2248: 0x00f7, # ALMOST EQUAL TO
- 0x2261: 0x00f0, # IDENTICAL TO
- 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
- 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
- 0x2310: 0x00a9, # REVERSED NOT SIGN
- 0x2320: 0x00f4, # TOP HALF INTEGRAL
- 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp500.py b/sys/lib/python/encodings/cp500.py
deleted file mode 100644
index 60766c039..000000000
--- a/sys/lib/python/encodings/cp500.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp500',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x9c' # 0x04 -> CONTROL
- u'\t' # 0x05 -> HORIZONTAL TABULATION
- u'\x86' # 0x06 -> CONTROL
- u'\x7f' # 0x07 -> DELETE
- u'\x97' # 0x08 -> CONTROL
- u'\x8d' # 0x09 -> CONTROL
- u'\x8e' # 0x0A -> CONTROL
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x9d' # 0x14 -> CONTROL
- u'\x85' # 0x15 -> CONTROL
- u'\x08' # 0x16 -> BACKSPACE
- u'\x87' # 0x17 -> CONTROL
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x92' # 0x1A -> CONTROL
- u'\x8f' # 0x1B -> CONTROL
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u'\x80' # 0x20 -> CONTROL
- u'\x81' # 0x21 -> CONTROL
- u'\x82' # 0x22 -> CONTROL
- u'\x83' # 0x23 -> CONTROL
- u'\x84' # 0x24 -> CONTROL
- u'\n' # 0x25 -> LINE FEED
- u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
- u'\x1b' # 0x27 -> ESCAPE
- u'\x88' # 0x28 -> CONTROL
- u'\x89' # 0x29 -> CONTROL
- u'\x8a' # 0x2A -> CONTROL
- u'\x8b' # 0x2B -> CONTROL
- u'\x8c' # 0x2C -> CONTROL
- u'\x05' # 0x2D -> ENQUIRY
- u'\x06' # 0x2E -> ACKNOWLEDGE
- u'\x07' # 0x2F -> BELL
- u'\x90' # 0x30 -> CONTROL
- u'\x91' # 0x31 -> CONTROL
- u'\x16' # 0x32 -> SYNCHRONOUS IDLE
- u'\x93' # 0x33 -> CONTROL
- u'\x94' # 0x34 -> CONTROL
- u'\x95' # 0x35 -> CONTROL
- u'\x96' # 0x36 -> CONTROL
- u'\x04' # 0x37 -> END OF TRANSMISSION
- u'\x98' # 0x38 -> CONTROL
- u'\x99' # 0x39 -> CONTROL
- u'\x9a' # 0x3A -> CONTROL
- u'\x9b' # 0x3B -> CONTROL
- u'\x14' # 0x3C -> DEVICE CONTROL FOUR
- u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
- u'\x9e' # 0x3E -> CONTROL
- u'\x1a' # 0x3F -> SUBSTITUTE
- u' ' # 0x40 -> SPACE
- u'\xa0' # 0x41 -> NO-BREAK SPACE
- u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
- u'[' # 0x4A -> LEFT SQUARE BRACKET
- u'.' # 0x4B -> FULL STOP
- u'<' # 0x4C -> LESS-THAN SIGN
- u'(' # 0x4D -> LEFT PARENTHESIS
- u'+' # 0x4E -> PLUS SIGN
- u'!' # 0x4F -> EXCLAMATION MARK
- u'&' # 0x50 -> AMPERSAND
- u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
- u']' # 0x5A -> RIGHT SQUARE BRACKET
- u'$' # 0x5B -> DOLLAR SIGN
- u'*' # 0x5C -> ASTERISK
- u')' # 0x5D -> RIGHT PARENTHESIS
- u';' # 0x5E -> SEMICOLON
- u'^' # 0x5F -> CIRCUMFLEX ACCENT
- u'-' # 0x60 -> HYPHEN-MINUS
- u'/' # 0x61 -> SOLIDUS
- u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xa6' # 0x6A -> BROKEN BAR
- u',' # 0x6B -> COMMA
- u'%' # 0x6C -> PERCENT SIGN
- u'_' # 0x6D -> LOW LINE
- u'>' # 0x6E -> GREATER-THAN SIGN
- u'?' # 0x6F -> QUESTION MARK
- u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
- u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
- u'`' # 0x79 -> GRAVE ACCENT
- u':' # 0x7A -> COLON
- u'#' # 0x7B -> NUMBER SIGN
- u'@' # 0x7C -> COMMERCIAL AT
- u"'" # 0x7D -> APOSTROPHE
- u'=' # 0x7E -> EQUALS SIGN
- u'"' # 0x7F -> QUOTATION MARK
- u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
- u'a' # 0x81 -> LATIN SMALL LETTER A
- u'b' # 0x82 -> LATIN SMALL LETTER B
- u'c' # 0x83 -> LATIN SMALL LETTER C
- u'd' # 0x84 -> LATIN SMALL LETTER D
- u'e' # 0x85 -> LATIN SMALL LETTER E
- u'f' # 0x86 -> LATIN SMALL LETTER F
- u'g' # 0x87 -> LATIN SMALL LETTER G
- u'h' # 0x88 -> LATIN SMALL LETTER H
- u'i' # 0x89 -> LATIN SMALL LETTER I
- u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
- u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
- u'\xb1' # 0x8F -> PLUS-MINUS SIGN
- u'\xb0' # 0x90 -> DEGREE SIGN
- u'j' # 0x91 -> LATIN SMALL LETTER J
- u'k' # 0x92 -> LATIN SMALL LETTER K
- u'l' # 0x93 -> LATIN SMALL LETTER L
- u'm' # 0x94 -> LATIN SMALL LETTER M
- u'n' # 0x95 -> LATIN SMALL LETTER N
- u'o' # 0x96 -> LATIN SMALL LETTER O
- u'p' # 0x97 -> LATIN SMALL LETTER P
- u'q' # 0x98 -> LATIN SMALL LETTER Q
- u'r' # 0x99 -> LATIN SMALL LETTER R
- u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
- u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
- u'\xb8' # 0x9D -> CEDILLA
- u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
- u'\xa4' # 0x9F -> CURRENCY SIGN
- u'\xb5' # 0xA0 -> MICRO SIGN
- u'~' # 0xA1 -> TILDE
- u's' # 0xA2 -> LATIN SMALL LETTER S
- u't' # 0xA3 -> LATIN SMALL LETTER T
- u'u' # 0xA4 -> LATIN SMALL LETTER U
- u'v' # 0xA5 -> LATIN SMALL LETTER V
- u'w' # 0xA6 -> LATIN SMALL LETTER W
- u'x' # 0xA7 -> LATIN SMALL LETTER X
- u'y' # 0xA8 -> LATIN SMALL LETTER Y
- u'z' # 0xA9 -> LATIN SMALL LETTER Z
- u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
- u'\xbf' # 0xAB -> INVERTED QUESTION MARK
- u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
- u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
- u'\xae' # 0xAF -> REGISTERED SIGN
- u'\xa2' # 0xB0 -> CENT SIGN
- u'\xa3' # 0xB1 -> POUND SIGN
- u'\xa5' # 0xB2 -> YEN SIGN
- u'\xb7' # 0xB3 -> MIDDLE DOT
- u'\xa9' # 0xB4 -> COPYRIGHT SIGN
- u'\xa7' # 0xB5 -> SECTION SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
- u'\xac' # 0xBA -> NOT SIGN
- u'|' # 0xBB -> VERTICAL LINE
- u'\xaf' # 0xBC -> MACRON
- u'\xa8' # 0xBD -> DIAERESIS
- u'\xb4' # 0xBE -> ACUTE ACCENT
- u'\xd7' # 0xBF -> MULTIPLICATION SIGN
- u'{' # 0xC0 -> LEFT CURLY BRACKET
- u'A' # 0xC1 -> LATIN CAPITAL LETTER A
- u'B' # 0xC2 -> LATIN CAPITAL LETTER B
- u'C' # 0xC3 -> LATIN CAPITAL LETTER C
- u'D' # 0xC4 -> LATIN CAPITAL LETTER D
- u'E' # 0xC5 -> LATIN CAPITAL LETTER E
- u'F' # 0xC6 -> LATIN CAPITAL LETTER F
- u'G' # 0xC7 -> LATIN CAPITAL LETTER G
- u'H' # 0xC8 -> LATIN CAPITAL LETTER H
- u'I' # 0xC9 -> LATIN CAPITAL LETTER I
- u'\xad' # 0xCA -> SOFT HYPHEN
- u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
- u'}' # 0xD0 -> RIGHT CURLY BRACKET
- u'J' # 0xD1 -> LATIN CAPITAL LETTER J
- u'K' # 0xD2 -> LATIN CAPITAL LETTER K
- u'L' # 0xD3 -> LATIN CAPITAL LETTER L
- u'M' # 0xD4 -> LATIN CAPITAL LETTER M
- u'N' # 0xD5 -> LATIN CAPITAL LETTER N
- u'O' # 0xD6 -> LATIN CAPITAL LETTER O
- u'P' # 0xD7 -> LATIN CAPITAL LETTER P
- u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
- u'R' # 0xD9 -> LATIN CAPITAL LETTER R
- u'\xb9' # 0xDA -> SUPERSCRIPT ONE
- u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
- u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\\' # 0xE0 -> REVERSE SOLIDUS
- u'\xf7' # 0xE1 -> DIVISION SIGN
- u'S' # 0xE2 -> LATIN CAPITAL LETTER S
- u'T' # 0xE3 -> LATIN CAPITAL LETTER T
- u'U' # 0xE4 -> LATIN CAPITAL LETTER U
- u'V' # 0xE5 -> LATIN CAPITAL LETTER V
- u'W' # 0xE6 -> LATIN CAPITAL LETTER W
- u'X' # 0xE7 -> LATIN CAPITAL LETTER X
- u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
- u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
- u'\xb2' # 0xEA -> SUPERSCRIPT TWO
- u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
- u'0' # 0xF0 -> DIGIT ZERO
- u'1' # 0xF1 -> DIGIT ONE
- u'2' # 0xF2 -> DIGIT TWO
- u'3' # 0xF3 -> DIGIT THREE
- u'4' # 0xF4 -> DIGIT FOUR
- u'5' # 0xF5 -> DIGIT FIVE
- u'6' # 0xF6 -> DIGIT SIX
- u'7' # 0xF7 -> DIGIT SEVEN
- u'8' # 0xF8 -> DIGIT EIGHT
- u'9' # 0xF9 -> DIGIT NINE
- u'\xb3' # 0xFA -> SUPERSCRIPT THREE
- u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\x9f' # 0xFF -> CONTROL
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp737.py b/sys/lib/python/encodings/cp737.py
deleted file mode 100644
index d6544482d..000000000
--- a/sys/lib/python/encodings/cp737.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp737',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
- 0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
- 0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
- 0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
- 0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
- 0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
- 0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
- 0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
- 0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
- 0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
- 0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
- 0x008b: 0x039c, # GREEK CAPITAL LETTER MU
- 0x008c: 0x039d, # GREEK CAPITAL LETTER NU
- 0x008d: 0x039e, # GREEK CAPITAL LETTER XI
- 0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
- 0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
- 0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
- 0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
- 0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
- 0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
- 0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
- 0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
- 0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
- 0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
- 0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
- 0x0099: 0x03b2, # GREEK SMALL LETTER BETA
- 0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
- 0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
- 0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
- 0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
- 0x009e: 0x03b7, # GREEK SMALL LETTER ETA
- 0x009f: 0x03b8, # GREEK SMALL LETTER THETA
- 0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
- 0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
- 0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
- 0x00a3: 0x03bc, # GREEK SMALL LETTER MU
- 0x00a4: 0x03bd, # GREEK SMALL LETTER NU
- 0x00a5: 0x03be, # GREEK SMALL LETTER XI
- 0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
- 0x00a7: 0x03c0, # GREEK SMALL LETTER PI
- 0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
- 0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
- 0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
- 0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
- 0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
- 0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
- 0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
- 0x00af: 0x03c8, # GREEK SMALL LETTER PSI
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
- 0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
- 0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
- 0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
- 0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
- 0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
- 0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
- 0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
- 0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- 0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
- 0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
- 0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
- 0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
- 0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
- 0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
- 0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
- 0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
- 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
- 0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- 0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x2248, # ALMOST EQUAL TO
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x221a, # SQUARE ROOT
- 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
- u'\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
- u'\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
- u'\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
- u'\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
- u'\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
- u'\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
- u'\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
- u'\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
- u'\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
- u'\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
- u'\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
- u'\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
- u'\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
- u'\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
- u'\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
- u'\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
- u'\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
- u'\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
- u'\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
- u'\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
- u'\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
- u'\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
- u'\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
- u'\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
- u'\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
- u'\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
- u'\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
- u'\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
- u'\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
- u'\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
- u'\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
- u'\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
- u'\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
- u'\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
- u'\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
- u'\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
- u'\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
- u'\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
- u'\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
- u'\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
- u'\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
- u'\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
- u'\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
- u'\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
- u'\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
- u'\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
- u'\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
- u'\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
- u'\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
- u'\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
- u'\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
- u'\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
- u'\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
- u'\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
- u'\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- u'\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
- u'\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
- u'\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
- u'\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
- u'\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
- u'\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
- u'\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
- u'\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
- u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
- u'\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- u'\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\u221a' # 0x00fb -> SQUARE ROOT
- u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS
- 0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS
- 0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS
- 0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS
- 0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS
- 0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS
- 0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS
- 0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA
- 0x0392: 0x0081, # GREEK CAPITAL LETTER BETA
- 0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA
- 0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA
- 0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON
- 0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA
- 0x0397: 0x0086, # GREEK CAPITAL LETTER ETA
- 0x0398: 0x0087, # GREEK CAPITAL LETTER THETA
- 0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA
- 0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA
- 0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA
- 0x039c: 0x008b, # GREEK CAPITAL LETTER MU
- 0x039d: 0x008c, # GREEK CAPITAL LETTER NU
- 0x039e: 0x008d, # GREEK CAPITAL LETTER XI
- 0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON
- 0x03a0: 0x008f, # GREEK CAPITAL LETTER PI
- 0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO
- 0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA
- 0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU
- 0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON
- 0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI
- 0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI
- 0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI
- 0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA
- 0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- 0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- 0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS
- 0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS
- 0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS
- 0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS
- 0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA
- 0x03b2: 0x0099, # GREEK SMALL LETTER BETA
- 0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA
- 0x03b4: 0x009b, # GREEK SMALL LETTER DELTA
- 0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON
- 0x03b6: 0x009d, # GREEK SMALL LETTER ZETA
- 0x03b7: 0x009e, # GREEK SMALL LETTER ETA
- 0x03b8: 0x009f, # GREEK SMALL LETTER THETA
- 0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA
- 0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA
- 0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA
- 0x03bc: 0x00a3, # GREEK SMALL LETTER MU
- 0x03bd: 0x00a4, # GREEK SMALL LETTER NU
- 0x03be: 0x00a5, # GREEK SMALL LETTER XI
- 0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON
- 0x03c0: 0x00a7, # GREEK SMALL LETTER PI
- 0x03c1: 0x00a8, # GREEK SMALL LETTER RHO
- 0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA
- 0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA
- 0x03c4: 0x00ab, # GREEK SMALL LETTER TAU
- 0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON
- 0x03c6: 0x00ad, # GREEK SMALL LETTER PHI
- 0x03c7: 0x00ae, # GREEK SMALL LETTER CHI
- 0x03c8: 0x00af, # GREEK SMALL LETTER PSI
- 0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA
- 0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
- 0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- 0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS
- 0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS
- 0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS
- 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x221a: 0x00fb, # SQUARE ROOT
- 0x2248: 0x00f7, # ALMOST EQUAL TO
- 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
- 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp775.py b/sys/lib/python/encodings/cp775.py
deleted file mode 100644
index 6a456a582..000000000
--- a/sys/lib/python/encodings/cp775.py
+++ /dev/null
@@ -1,697 +0,0 @@
-""" Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp775',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
- 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
- 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
- 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
- 0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
- 0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
- 0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
- 0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
- 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
- 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
- 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
- 0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
- 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
- 0x0096: 0x00a2, # CENT SIGN
- 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
- 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
- 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
- 0x009e: 0x00d7, # MULTIPLICATION SIGN
- 0x009f: 0x00a4, # CURRENCY SIGN
- 0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
- 0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
- 0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
- 0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
- 0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
- 0x00a7: 0x00a6, # BROKEN BAR
- 0x00a8: 0x00a9, # COPYRIGHT SIGN
- 0x00a9: 0x00ae, # REGISTERED SIGN
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
- 0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
- 0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
- 0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
- 0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
- 0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
- 0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
- 0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
- 0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
- 0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
- 0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
- 0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
- 0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
- 0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
- 0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
- 0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
- 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
- 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
- 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
- 0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
- 0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
- 0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
- 0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
- 0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
- 0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
- 0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
- 0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
- 0x00f0: 0x00ad, # SOFT HYPHEN
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
- 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
- 0x00f4: 0x00b6, # PILCROW SIGN
- 0x00f5: 0x00a7, # SECTION SIGN
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x00b9, # SUPERSCRIPT ONE
- 0x00fc: 0x00b3, # SUPERSCRIPT THREE
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
- u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
- u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
- u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
- u'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
- u'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
- u'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
- u'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
- u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
- u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
- u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
- u'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
- u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
- u'\xa2' # 0x0096 -> CENT SIGN
- u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
- u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
- u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd7' # 0x009e -> MULTIPLICATION SIGN
- u'\xa4' # 0x009f -> CURRENCY SIGN
- u'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
- u'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
- u'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
- u'\xa6' # 0x00a7 -> BROKEN BAR
- u'\xa9' # 0x00a8 -> COPYRIGHT SIGN
- u'\xae' # 0x00a9 -> REGISTERED SIGN
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
- u'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
- u'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
- u'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
- u'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
- u'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
- u'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
- u'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
- u'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
- u'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
- u'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
- u'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
- u'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
- u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
- u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
- u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
- u'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
- u'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
- u'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
- u'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
- u'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
- u'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
- u'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
- u'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
- u'\xad' # 0x00f0 -> SOFT HYPHEN
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
- u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
- u'\xb6' # 0x00f4 -> PILCROW SIGN
- u'\xa7' # 0x00f5 -> SECTION SIGN
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
- u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a2: 0x0096, # CENT SIGN
- 0x00a3: 0x009c, # POUND SIGN
- 0x00a4: 0x009f, # CURRENCY SIGN
- 0x00a6: 0x00a7, # BROKEN BAR
- 0x00a7: 0x00f5, # SECTION SIGN
- 0x00a9: 0x00a8, # COPYRIGHT SIGN
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00ad: 0x00f0, # SOFT HYPHEN
- 0x00ae: 0x00a9, # REGISTERED SIGN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b3: 0x00fc, # SUPERSCRIPT THREE
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b6: 0x00f4, # PILCROW SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00b9: 0x00fb, # SUPERSCRIPT ONE
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
- 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00d7: 0x009e, # MULTIPLICATION SIGN
- 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
- 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
- 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
- 0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
- 0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
- 0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
- 0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
- 0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
- 0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
- 0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
- 0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
- 0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
- 0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
- 0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
- 0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
- 0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
- 0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
- 0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
- 0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
- 0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
- 0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
- 0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
- 0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
- 0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
- 0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
- 0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
- 0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
- 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
- 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
- 0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
- 0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
- 0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
- 0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
- 0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
- 0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
- 0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
- 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
- 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
- 0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
- 0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
- 0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
- 0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
- 0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
- 0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
- 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
- 0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
- 0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
- 0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
- 0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
- 0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
- 0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
- 0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
- 0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
- 0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp850.py b/sys/lib/python/encodings/cp850.py
deleted file mode 100644
index 0c8478c8b..000000000
--- a/sys/lib/python/encodings/cp850.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp850',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
- 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
- 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
- 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
- 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
- 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
- 0x009e: 0x00d7, # MULTIPLICATION SIGN
- 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
- 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
- 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
- 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
- 0x00a8: 0x00bf, # INVERTED QUESTION MARK
- 0x00a9: 0x00ae, # REGISTERED SIGN
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x00b8: 0x00a9, # COPYRIGHT SIGN
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x00a2, # CENT SIGN
- 0x00be: 0x00a5, # YEN SIGN
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
- 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x00a4, # CURRENCY SIGN
- 0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
- 0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
- 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
- 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x00a6, # BROKEN BAR
- 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
- 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
- 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
- 0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
- 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- 0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
- 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
- 0x00ee: 0x00af, # MACRON
- 0x00ef: 0x00b4, # ACUTE ACCENT
- 0x00f0: 0x00ad, # SOFT HYPHEN
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x2017, # DOUBLE LOW LINE
- 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
- 0x00f4: 0x00b6, # PILCROW SIGN
- 0x00f5: 0x00a7, # SECTION SIGN
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x00b8, # CEDILLA
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x00a8, # DIAERESIS
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x00b9, # SUPERSCRIPT ONE
- 0x00fc: 0x00b3, # SUPERSCRIPT THREE
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
- u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
- u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
- u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
- u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd7' # 0x009e -> MULTIPLICATION SIGN
- u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
- u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
- u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
- u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
- u'\xae' # 0x00a9 -> REGISTERED SIGN
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\xa2' # 0x00bd -> CENT SIGN
- u'\xa5' # 0x00be -> YEN SIGN
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
- u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\xa4' # 0x00cf -> CURRENCY SIGN
- u'\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
- u'\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
- u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\u0131' # 0x00d5 -> LATIN SMALL LETTER DOTLESS I
- u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\xa6' # 0x00dd -> BROKEN BAR
- u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
- u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
- u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
- u'\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
- u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xaf' # 0x00ee -> MACRON
- u'\xb4' # 0x00ef -> ACUTE ACCENT
- u'\xad' # 0x00f0 -> SOFT HYPHEN
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u2017' # 0x00f2 -> DOUBLE LOW LINE
- u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
- u'\xb6' # 0x00f4 -> PILCROW SIGN
- u'\xa7' # 0x00f5 -> SECTION SIGN
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\xb8' # 0x00f7 -> CEDILLA
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\xa8' # 0x00f9 -> DIAERESIS
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
- u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
- 0x00a2: 0x00bd, # CENT SIGN
- 0x00a3: 0x009c, # POUND SIGN
- 0x00a4: 0x00cf, # CURRENCY SIGN
- 0x00a5: 0x00be, # YEN SIGN
- 0x00a6: 0x00dd, # BROKEN BAR
- 0x00a7: 0x00f5, # SECTION SIGN
- 0x00a8: 0x00f9, # DIAERESIS
- 0x00a9: 0x00b8, # COPYRIGHT SIGN
- 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00ad: 0x00f0, # SOFT HYPHEN
- 0x00ae: 0x00a9, # REGISTERED SIGN
- 0x00af: 0x00ee, # MACRON
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b3: 0x00fc, # SUPERSCRIPT THREE
- 0x00b4: 0x00ef, # ACUTE ACCENT
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b6: 0x00f4, # PILCROW SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00b8: 0x00f7, # CEDILLA
- 0x00b9: 0x00fb, # SUPERSCRIPT ONE
- 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
- 0x00bf: 0x00a8, # INVERTED QUESTION MARK
- 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
- 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
- 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
- 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
- 0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
- 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
- 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00d7: 0x009e, # MULTIPLICATION SIGN
- 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
- 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
- 0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
- 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
- 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
- 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
- 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
- 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
- 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
- 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
- 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
- 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
- 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
- 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
- 0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
- 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x0131: 0x00d5, # LATIN SMALL LETTER DOTLESS I
- 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
- 0x2017: 0x00f2, # DOUBLE LOW LINE
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp852.py b/sys/lib/python/encodings/cp852.py
deleted file mode 100644
index 069d5473b..000000000
--- a/sys/lib/python/encodings/cp852.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp852',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
- 0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
- 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
- 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- 0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
- 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
- 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
- 0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
- 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
- 0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
- 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
- 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
- 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
- 0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
- 0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
- 0x009e: 0x00d7, # MULTIPLICATION SIGN
- 0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
- 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
- 0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
- 0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
- 0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
- 0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
- 0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
- 0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
- 0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
- 0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
- 0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
- 0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x00a4, # CURRENCY SIGN
- 0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
- 0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
- 0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
- 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
- 0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
- 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
- 0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
- 0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
- 0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
- 0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
- 0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
- 0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
- 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
- 0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
- 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
- 0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
- 0x00ef: 0x00b4, # ACUTE ACCENT
- 0x00f0: 0x00ad, # SOFT HYPHEN
- 0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
- 0x00f2: 0x02db, # OGONEK
- 0x00f3: 0x02c7, # CARON
- 0x00f4: 0x02d8, # BREVE
- 0x00f5: 0x00a7, # SECTION SIGN
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x00b8, # CEDILLA
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x00a8, # DIAERESIS
- 0x00fa: 0x02d9, # DOT ABOVE
- 0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
- 0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
- 0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
- u'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
- u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
- u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- u'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
- u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
- u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
- u'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
- u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
- u'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
- u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
- u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
- u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
- u'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
- u'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
- u'\xd7' # 0x009e -> MULTIPLICATION SIGN
- u'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
- u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
- u'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
- u'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
- u'\xac' # 0x00aa -> NOT SIGN
- u'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
- u'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
- u'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
- u'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
- u'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\xa4' # 0x00cf -> CURRENCY SIGN
- u'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
- u'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
- u'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
- u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
- u'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
- u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
- u'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
- u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
- u'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
- u'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
- u'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
- u'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
- u'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
- u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
- u'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
- u'\xb4' # 0x00ef -> ACUTE ACCENT
- u'\xad' # 0x00f0 -> SOFT HYPHEN
- u'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
- u'\u02db' # 0x00f2 -> OGONEK
- u'\u02c7' # 0x00f3 -> CARON
- u'\u02d8' # 0x00f4 -> BREVE
- u'\xa7' # 0x00f5 -> SECTION SIGN
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\xb8' # 0x00f7 -> CEDILLA
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\xa8' # 0x00f9 -> DIAERESIS
- u'\u02d9' # 0x00fa -> DOT ABOVE
- u'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
- u'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
- u'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a4: 0x00cf, # CURRENCY SIGN
- 0x00a7: 0x00f5, # SECTION SIGN
- 0x00a8: 0x00f9, # DIAERESIS
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00ad: 0x00f0, # SOFT HYPHEN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b4: 0x00ef, # ACUTE ACCENT
- 0x00b8: 0x00f7, # CEDILLA
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00d7: 0x009e, # MULTIPLICATION SIGN
- 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
- 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
- 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
- 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
- 0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
- 0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
- 0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
- 0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
- 0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
- 0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
- 0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
- 0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
- 0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
- 0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
- 0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
- 0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
- 0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
- 0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
- 0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
- 0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
- 0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
- 0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
- 0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
- 0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
- 0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
- 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
- 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
- 0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
- 0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
- 0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
- 0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- 0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
- 0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
- 0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
- 0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
- 0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
- 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
- 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
- 0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
- 0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
- 0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
- 0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
- 0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
- 0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
- 0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
- 0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
- 0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
- 0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
- 0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- 0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
- 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
- 0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
- 0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
- 0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
- 0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
- 0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
- 0x02c7: 0x00f3, # CARON
- 0x02d8: 0x00f4, # BREVE
- 0x02d9: 0x00fa, # DOT ABOVE
- 0x02db: 0x00f2, # OGONEK
- 0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp855.py b/sys/lib/python/encodings/cp855.py
deleted file mode 100644
index 241ef9d1e..000000000
--- a/sys/lib/python/encodings/cp855.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp855',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
- 0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
- 0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
- 0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
- 0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
- 0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
- 0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
- 0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
- 0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
- 0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
- 0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
- 0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
- 0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
- 0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
- 0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
- 0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
- 0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
- 0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
- 0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
- 0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
- 0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
- 0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
- 0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
- 0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
- 0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
- 0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
- 0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
- 0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
- 0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
- 0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
- 0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
- 0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
- 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
- 0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
- 0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
- 0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
- 0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
- 0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
- 0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
- 0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
- 0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
- 0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
- 0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
- 0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
- 0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
- 0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
- 0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
- 0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
- 0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
- 0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
- 0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x00a4, # CURRENCY SIGN
- 0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
- 0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
- 0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
- 0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
- 0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
- 0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
- 0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
- 0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
- 0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
- 0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
- 0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
- 0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
- 0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
- 0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
- 0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
- 0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
- 0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
- 0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
- 0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
- 0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
- 0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
- 0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
- 0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
- 0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
- 0x00ef: 0x2116, # NUMERO SIGN
- 0x00f0: 0x00ad, # SOFT HYPHEN
- 0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
- 0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
- 0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
- 0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
- 0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
- 0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
- 0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
- 0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
- 0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
- 0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
- 0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
- 0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
- 0x00fd: 0x00a7, # SECTION SIGN
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
- u'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
- u'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
- u'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
- u'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
- u'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
- u'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
- u'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
- u'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
- u'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
- u'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
- u'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
- u'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
- u'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
- u'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
- u'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
- u'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
- u'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
- u'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
- u'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
- u'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
- u'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
- u'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
- u'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
- u'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
- u'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
- u'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
- u'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
- u'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
- u'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
- u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
- u'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
- u'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
- u'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
- u'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
- u'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
- u'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
- u'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
- u'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
- u'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
- u'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
- u'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
- u'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
- u'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
- u'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
- u'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
- u'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
- u'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
- u'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\xa4' # 0x00cf -> CURRENCY SIGN
- u'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
- u'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
- u'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
- u'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
- u'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
- u'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
- u'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
- u'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
- u'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
- u'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
- u'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
- u'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
- u'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
- u'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
- u'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
- u'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
- u'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
- u'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
- u'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
- u'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
- u'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
- u'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
- u'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
- u'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
- u'\u2116' # 0x00ef -> NUMERO SIGN
- u'\xad' # 0x00f0 -> SOFT HYPHEN
- u'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
- u'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
- u'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
- u'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
- u'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
- u'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
- u'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
- u'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
- u'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
- u'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
- u'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
- u'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
- u'\xa7' # 0x00fd -> SECTION SIGN
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a4: 0x00cf, # CURRENCY SIGN
- 0x00a7: 0x00fd, # SECTION SIGN
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ad: 0x00f0, # SOFT HYPHEN
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
- 0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
- 0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
- 0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
- 0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
- 0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
- 0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
- 0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
- 0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
- 0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
- 0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
- 0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
- 0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
- 0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
- 0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
- 0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
- 0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
- 0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
- 0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
- 0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
- 0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
- 0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
- 0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
- 0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
- 0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
- 0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
- 0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
- 0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
- 0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
- 0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
- 0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
- 0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
- 0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
- 0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
- 0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
- 0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
- 0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
- 0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
- 0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
- 0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
- 0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
- 0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
- 0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
- 0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
- 0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
- 0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
- 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
- 0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
- 0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
- 0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
- 0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
- 0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
- 0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
- 0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
- 0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
- 0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
- 0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
- 0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
- 0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
- 0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
- 0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
- 0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
- 0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
- 0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
- 0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
- 0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
- 0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
- 0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
- 0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
- 0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
- 0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
- 0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
- 0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
- 0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
- 0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
- 0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
- 0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
- 0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
- 0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
- 0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
- 0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
- 0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
- 0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
- 0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
- 0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
- 0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
- 0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
- 0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
- 0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
- 0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
- 0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
- 0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
- 0x2116: 0x00ef, # NUMERO SIGN
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp856.py b/sys/lib/python/encodings/cp856.py
deleted file mode 100644
index 203c2c4ca..000000000
--- a/sys/lib/python/encodings/cp856.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp856 generated from 'MAPPINGS/VENDORS/MISC/CP856.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp856',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u05d0' # 0x80 -> HEBREW LETTER ALEF
- u'\u05d1' # 0x81 -> HEBREW LETTER BET
- u'\u05d2' # 0x82 -> HEBREW LETTER GIMEL
- u'\u05d3' # 0x83 -> HEBREW LETTER DALET
- u'\u05d4' # 0x84 -> HEBREW LETTER HE
- u'\u05d5' # 0x85 -> HEBREW LETTER VAV
- u'\u05d6' # 0x86 -> HEBREW LETTER ZAYIN
- u'\u05d7' # 0x87 -> HEBREW LETTER HET
- u'\u05d8' # 0x88 -> HEBREW LETTER TET
- u'\u05d9' # 0x89 -> HEBREW LETTER YOD
- u'\u05da' # 0x8A -> HEBREW LETTER FINAL KAF
- u'\u05db' # 0x8B -> HEBREW LETTER KAF
- u'\u05dc' # 0x8C -> HEBREW LETTER LAMED
- u'\u05dd' # 0x8D -> HEBREW LETTER FINAL MEM
- u'\u05de' # 0x8E -> HEBREW LETTER MEM
- u'\u05df' # 0x8F -> HEBREW LETTER FINAL NUN
- u'\u05e0' # 0x90 -> HEBREW LETTER NUN
- u'\u05e1' # 0x91 -> HEBREW LETTER SAMEKH
- u'\u05e2' # 0x92 -> HEBREW LETTER AYIN
- u'\u05e3' # 0x93 -> HEBREW LETTER FINAL PE
- u'\u05e4' # 0x94 -> HEBREW LETTER PE
- u'\u05e5' # 0x95 -> HEBREW LETTER FINAL TSADI
- u'\u05e6' # 0x96 -> HEBREW LETTER TSADI
- u'\u05e7' # 0x97 -> HEBREW LETTER QOF
- u'\u05e8' # 0x98 -> HEBREW LETTER RESH
- u'\u05e9' # 0x99 -> HEBREW LETTER SHIN
- u'\u05ea' # 0x9A -> HEBREW LETTER TAV
- u'\ufffe' # 0x9B -> UNDEFINED
- u'\xa3' # 0x9C -> POUND SIGN
- u'\ufffe' # 0x9D -> UNDEFINED
- u'\xd7' # 0x9E -> MULTIPLICATION SIGN
- u'\ufffe' # 0x9F -> UNDEFINED
- u'\ufffe' # 0xA0 -> UNDEFINED
- u'\ufffe' # 0xA1 -> UNDEFINED
- u'\ufffe' # 0xA2 -> UNDEFINED
- u'\ufffe' # 0xA3 -> UNDEFINED
- u'\ufffe' # 0xA4 -> UNDEFINED
- u'\ufffe' # 0xA5 -> UNDEFINED
- u'\ufffe' # 0xA6 -> UNDEFINED
- u'\ufffe' # 0xA7 -> UNDEFINED
- u'\ufffe' # 0xA8 -> UNDEFINED
- u'\xae' # 0xA9 -> REGISTERED SIGN
- u'\xac' # 0xAA -> NOT SIGN
- u'\xbd' # 0xAB -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0xAC -> VULGAR FRACTION ONE QUARTER
- u'\ufffe' # 0xAD -> UNDEFINED
- u'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0xB0 -> LIGHT SHADE
- u'\u2592' # 0xB1 -> MEDIUM SHADE
- u'\u2593' # 0xB2 -> DARK SHADE
- u'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\ufffe' # 0xB5 -> UNDEFINED
- u'\ufffe' # 0xB6 -> UNDEFINED
- u'\ufffe' # 0xB7 -> UNDEFINED
- u'\xa9' # 0xB8 -> COPYRIGHT SIGN
- u'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\xa2' # 0xBD -> CENT SIGN
- u'\xa5' # 0xBE -> YEN SIGN
- u'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\ufffe' # 0xC6 -> UNDEFINED
- u'\ufffe' # 0xC7 -> UNDEFINED
- u'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\xa4' # 0xCF -> CURRENCY SIGN
- u'\ufffe' # 0xD0 -> UNDEFINED
- u'\ufffe' # 0xD1 -> UNDEFINED
- u'\ufffe' # 0xD2 -> UNDEFINED
- u'\ufffe' # 0xD3 -> UNDEFINEDS
- u'\ufffe' # 0xD4 -> UNDEFINED
- u'\ufffe' # 0xD5 -> UNDEFINED
- u'\ufffe' # 0xD6 -> UNDEFINEDE
- u'\ufffe' # 0xD7 -> UNDEFINED
- u'\ufffe' # 0xD8 -> UNDEFINED
- u'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0xDB -> FULL BLOCK
- u'\u2584' # 0xDC -> LOWER HALF BLOCK
- u'\xa6' # 0xDD -> BROKEN BAR
- u'\ufffe' # 0xDE -> UNDEFINED
- u'\u2580' # 0xDF -> UPPER HALF BLOCK
- u'\ufffe' # 0xE0 -> UNDEFINED
- u'\ufffe' # 0xE1 -> UNDEFINED
- u'\ufffe' # 0xE2 -> UNDEFINED
- u'\ufffe' # 0xE3 -> UNDEFINED
- u'\ufffe' # 0xE4 -> UNDEFINED
- u'\ufffe' # 0xE5 -> UNDEFINED
- u'\xb5' # 0xE6 -> MICRO SIGN
- u'\ufffe' # 0xE7 -> UNDEFINED
- u'\ufffe' # 0xE8 -> UNDEFINED
- u'\ufffe' # 0xE9 -> UNDEFINED
- u'\ufffe' # 0xEA -> UNDEFINED
- u'\ufffe' # 0xEB -> UNDEFINED
- u'\ufffe' # 0xEC -> UNDEFINED
- u'\ufffe' # 0xED -> UNDEFINED
- u'\xaf' # 0xEE -> MACRON
- u'\xb4' # 0xEF -> ACUTE ACCENT
- u'\xad' # 0xF0 -> SOFT HYPHEN
- u'\xb1' # 0xF1 -> PLUS-MINUS SIGN
- u'\u2017' # 0xF2 -> DOUBLE LOW LINE
- u'\xbe' # 0xF3 -> VULGAR FRACTION THREE QUARTERS
- u'\xb6' # 0xF4 -> PILCROW SIGN
- u'\xa7' # 0xF5 -> SECTION SIGN
- u'\xf7' # 0xF6 -> DIVISION SIGN
- u'\xb8' # 0xF7 -> CEDILLA
- u'\xb0' # 0xF8 -> DEGREE SIGN
- u'\xa8' # 0xF9 -> DIAERESIS
- u'\xb7' # 0xFA -> MIDDLE DOT
- u'\xb9' # 0xFB -> SUPERSCRIPT ONE
- u'\xb3' # 0xFC -> SUPERSCRIPT THREE
- u'\xb2' # 0xFD -> SUPERSCRIPT TWO
- u'\u25a0' # 0xFE -> BLACK SQUARE
- u'\xa0' # 0xFF -> NO-BREAK SPACE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp857.py b/sys/lib/python/encodings/cp857.py
deleted file mode 100644
index c24191b04..000000000
--- a/sys/lib/python/encodings/cp857.py
+++ /dev/null
@@ -1,694 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp857',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I
- 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
- 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
- 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
- 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
- 0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
- 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
- 0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
- 0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
- 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
- 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
- 0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE
- 0x00a8: 0x00bf, # INVERTED QUESTION MARK
- 0x00a9: 0x00ae, # REGISTERED SIGN
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x00b8: 0x00a9, # COPYRIGHT SIGN
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x00a2, # CENT SIGN
- 0x00be: 0x00a5, # YEN SIGN
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
- 0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x00a4, # CURRENCY SIGN
- 0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR
- 0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR
- 0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x00d5: None, # UNDEFINED
- 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x00a6, # BROKEN BAR
- 0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
- 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
- 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: None, # UNDEFINED
- 0x00e8: 0x00d7, # MULTIPLICATION SIGN
- 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- 0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x00ee: 0x00af, # MACRON
- 0x00ef: 0x00b4, # ACUTE ACCENT
- 0x00f0: 0x00ad, # SOFT HYPHEN
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: None, # UNDEFINED
- 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
- 0x00f4: 0x00b6, # PILCROW SIGN
- 0x00f5: 0x00a7, # SECTION SIGN
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x00b8, # CEDILLA
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x00a8, # DIAERESIS
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x00b9, # SUPERSCRIPT ONE
- 0x00fc: 0x00b3, # SUPERSCRIPT THREE
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
- u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\u0131' # 0x008d -> LATIN SMALL LETTER DOTLESS I
- u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
- u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
- u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
- u'\u0130' # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
- u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u015e' # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA
- u'\u015f' # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA
- u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
- u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\u011e' # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE
- u'\u011f' # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE
- u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
- u'\xae' # 0x00a9 -> REGISTERED SIGN
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\xa2' # 0x00bd -> CENT SIGN
- u'\xa5' # 0x00be -> YEN SIGN
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
- u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\xa4' # 0x00cf -> CURRENCY SIGN
- u'\xba' # 0x00d0 -> MASCULINE ORDINAL INDICATOR
- u'\xaa' # 0x00d1 -> FEMININE ORDINAL INDICATOR
- u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\ufffe' # 0x00d5 -> UNDEFINED
- u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\xa6' # 0x00dd -> BROKEN BAR
- u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
- u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
- u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\ufffe' # 0x00e7 -> UNDEFINED
- u'\xd7' # 0x00e8 -> MULTIPLICATION SIGN
- u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xec' # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE
- u'\xff' # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\xaf' # 0x00ee -> MACRON
- u'\xb4' # 0x00ef -> ACUTE ACCENT
- u'\xad' # 0x00f0 -> SOFT HYPHEN
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\ufffe' # 0x00f2 -> UNDEFINED
- u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
- u'\xb6' # 0x00f4 -> PILCROW SIGN
- u'\xa7' # 0x00f5 -> SECTION SIGN
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\xb8' # 0x00f7 -> CEDILLA
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\xa8' # 0x00f9 -> DIAERESIS
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
- u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
- 0x00a2: 0x00bd, # CENT SIGN
- 0x00a3: 0x009c, # POUND SIGN
- 0x00a4: 0x00cf, # CURRENCY SIGN
- 0x00a5: 0x00be, # YEN SIGN
- 0x00a6: 0x00dd, # BROKEN BAR
- 0x00a7: 0x00f5, # SECTION SIGN
- 0x00a8: 0x00f9, # DIAERESIS
- 0x00a9: 0x00b8, # COPYRIGHT SIGN
- 0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00ad: 0x00f0, # SOFT HYPHEN
- 0x00ae: 0x00a9, # REGISTERED SIGN
- 0x00af: 0x00ee, # MACRON
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b3: 0x00fc, # SUPERSCRIPT THREE
- 0x00b4: 0x00ef, # ACUTE ACCENT
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b6: 0x00f4, # PILCROW SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00b8: 0x00f7, # CEDILLA
- 0x00b9: 0x00fb, # SUPERSCRIPT ONE
- 0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
- 0x00bf: 0x00a8, # INVERTED QUESTION MARK
- 0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
- 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
- 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
- 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
- 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
- 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00d7: 0x00e8, # MULTIPLICATION SIGN
- 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
- 0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
- 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
- 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
- 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
- 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
- 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
- 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
- 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
- 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
- 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
- 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE
- 0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE
- 0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE
- 0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I
- 0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA
- 0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp860.py b/sys/lib/python/encodings/cp860.py
deleted file mode 100644
index 4acb0cf36..000000000
--- a/sys/lib/python/encodings/cp860.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP860.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp860',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x0084: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
- 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x0086: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x0089: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x008b: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x008c: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
- 0x008e: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
- 0x008f: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x0092: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x0094: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
- 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
- 0x0096: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
- 0x0098: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
- 0x0099: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x00a2, # CENT SIGN
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x009e: 0x20a7, # PESETA SIGN
- 0x009f: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
- 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
- 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
- 0x00a8: 0x00bf, # INVERTED QUESTION MARK
- 0x00a9: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
- 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
- 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
- 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
- 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
- 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
- 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
- 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
- 0x00ec: 0x221e, # INFINITY
- 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
- 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
- 0x00ef: 0x2229, # INTERSECTION
- 0x00f0: 0x2261, # IDENTICAL TO
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
- 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
- 0x00f4: 0x2320, # TOP HALF INTEGRAL
- 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x2248, # ALMOST EQUAL TO
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x221a, # SQUARE ROOT
- 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0x0084 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xc1' # 0x0086 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xca' # 0x0089 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
- u'\xcd' # 0x008b -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xd4' # 0x008c -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
- u'\xc3' # 0x008e -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc2' # 0x008f -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xc0' # 0x0091 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc8' # 0x0092 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0x0094 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xda' # 0x0096 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xcc' # 0x0098 -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xd5' # 0x0099 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xa2' # 0x009b -> CENT SIGN
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\u20a7' # 0x009e -> PESETA SIGN
- u'\xd3' # 0x009f -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
- u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
- u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
- u'\xd2' # 0x00a9 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
- u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
- u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
- u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
- u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
- u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
- u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
- u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
- u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
- u'\u221e' # 0x00ec -> INFINITY
- u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
- u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
- u'\u2229' # 0x00ef -> INTERSECTION
- u'\u2261' # 0x00f0 -> IDENTICAL TO
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
- u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
- u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
- u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\u221a' # 0x00fb -> SQUARE ROOT
- u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
- 0x00a2: 0x009b, # CENT SIGN
- 0x00a3: 0x009c, # POUND SIGN
- 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00bf: 0x00a8, # INVERTED QUESTION MARK
- 0x00c0: 0x0091, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x00c1: 0x0086, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00c2: 0x008f, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00c3: 0x008e, # LATIN CAPITAL LETTER A WITH TILDE
- 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c8: 0x0092, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00ca: 0x0089, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x00cc: 0x0098, # LATIN CAPITAL LETTER I WITH GRAVE
- 0x00cd: 0x008b, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00d2: 0x00a9, # LATIN CAPITAL LETTER O WITH GRAVE
- 0x00d3: 0x009f, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00d4: 0x008c, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00d5: 0x0099, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x00da: 0x0096, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
- 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
- 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
- 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e3: 0x0084, # LATIN SMALL LETTER A WITH TILDE
- 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
- 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
- 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
- 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f5: 0x0094, # LATIN SMALL LETTER O WITH TILDE
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
- 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
- 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
- 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
- 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
- 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
- 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
- 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
- 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
- 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
- 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
- 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
- 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x20a7: 0x009e, # PESETA SIGN
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x221a: 0x00fb, # SQUARE ROOT
- 0x221e: 0x00ec, # INFINITY
- 0x2229: 0x00ef, # INTERSECTION
- 0x2248: 0x00f7, # ALMOST EQUAL TO
- 0x2261: 0x00f0, # IDENTICAL TO
- 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
- 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
- 0x2320: 0x00f4, # TOP HALF INTEGRAL
- 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp861.py b/sys/lib/python/encodings/cp861.py
deleted file mode 100644
index 0939b5b1e..000000000
--- a/sys/lib/python/encodings/cp861.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp861',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
- 0x008c: 0x00f0, # LATIN SMALL LETTER ETH
- 0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
- 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
- 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
- 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x0095: 0x00fe, # LATIN SMALL LETTER THORN
- 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
- 0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
- 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
- 0x009e: 0x20a7, # PESETA SIGN
- 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
- 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00a8: 0x00bf, # INVERTED QUESTION MARK
- 0x00a9: 0x2310, # REVERSED NOT SIGN
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
- 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
- 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
- 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
- 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
- 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
- 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
- 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
- 0x00ec: 0x221e, # INFINITY
- 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
- 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
- 0x00ef: 0x2229, # INTERSECTION
- 0x00f0: 0x2261, # IDENTICAL TO
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
- 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
- 0x00f4: 0x2320, # TOP HALF INTEGRAL
- 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x2248, # ALMOST EQUAL TO
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x221a, # SQUARE ROOT
- 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
- u'\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH
- u'\xf0' # 0x008c -> LATIN SMALL LETTER ETH
- u'\xde' # 0x008d -> LATIN CAPITAL LETTER THORN
- u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
- u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
- u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xfe' # 0x0095 -> LATIN SMALL LETTER THORN
- u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u20a7' # 0x009e -> PESETA SIGN
- u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
- u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
- u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
- u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
- u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
- u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
- u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
- u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
- u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
- u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
- u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
- u'\u221e' # 0x00ec -> INFINITY
- u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
- u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
- u'\u2229' # 0x00ef -> INTERSECTION
- u'\u2261' # 0x00f0 -> IDENTICAL TO
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
- u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
- u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
- u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\u221a' # 0x00fb -> SQUARE ROOT
- u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
- 0x00a3: 0x009c, # POUND SIGN
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00bf: 0x00a8, # INVERTED QUESTION MARK
- 0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
- 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH
- 0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
- 0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE
- 0x00de: 0x008d, # LATIN CAPITAL LETTER THORN
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
- 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
- 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
- 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
- 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
- 0x00f0: 0x008c, # LATIN SMALL LETTER ETH
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE
- 0x00fe: 0x0095, # LATIN SMALL LETTER THORN
- 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
- 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
- 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
- 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
- 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
- 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
- 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
- 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
- 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
- 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
- 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
- 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
- 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
- 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x20a7: 0x009e, # PESETA SIGN
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x221a: 0x00fb, # SQUARE ROOT
- 0x221e: 0x00ec, # INFINITY
- 0x2229: 0x00ef, # INTERSECTION
- 0x2248: 0x00f7, # ALMOST EQUAL TO
- 0x2261: 0x00f0, # IDENTICAL TO
- 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
- 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
- 0x2310: 0x00a9, # REVERSED NOT SIGN
- 0x2320: 0x00f4, # TOP HALF INTEGRAL
- 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp862.py b/sys/lib/python/encodings/cp862.py
deleted file mode 100644
index ea0405ca1..000000000
--- a/sys/lib/python/encodings/cp862.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp862',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x05d0, # HEBREW LETTER ALEF
- 0x0081: 0x05d1, # HEBREW LETTER BET
- 0x0082: 0x05d2, # HEBREW LETTER GIMEL
- 0x0083: 0x05d3, # HEBREW LETTER DALET
- 0x0084: 0x05d4, # HEBREW LETTER HE
- 0x0085: 0x05d5, # HEBREW LETTER VAV
- 0x0086: 0x05d6, # HEBREW LETTER ZAYIN
- 0x0087: 0x05d7, # HEBREW LETTER HET
- 0x0088: 0x05d8, # HEBREW LETTER TET
- 0x0089: 0x05d9, # HEBREW LETTER YOD
- 0x008a: 0x05da, # HEBREW LETTER FINAL KAF
- 0x008b: 0x05db, # HEBREW LETTER KAF
- 0x008c: 0x05dc, # HEBREW LETTER LAMED
- 0x008d: 0x05dd, # HEBREW LETTER FINAL MEM
- 0x008e: 0x05de, # HEBREW LETTER MEM
- 0x008f: 0x05df, # HEBREW LETTER FINAL NUN
- 0x0090: 0x05e0, # HEBREW LETTER NUN
- 0x0091: 0x05e1, # HEBREW LETTER SAMEKH
- 0x0092: 0x05e2, # HEBREW LETTER AYIN
- 0x0093: 0x05e3, # HEBREW LETTER FINAL PE
- 0x0094: 0x05e4, # HEBREW LETTER PE
- 0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI
- 0x0096: 0x05e6, # HEBREW LETTER TSADI
- 0x0097: 0x05e7, # HEBREW LETTER QOF
- 0x0098: 0x05e8, # HEBREW LETTER RESH
- 0x0099: 0x05e9, # HEBREW LETTER SHIN
- 0x009a: 0x05ea, # HEBREW LETTER TAV
- 0x009b: 0x00a2, # CENT SIGN
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00a5, # YEN SIGN
- 0x009e: 0x20a7, # PESETA SIGN
- 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
- 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
- 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
- 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
- 0x00a8: 0x00bf, # INVERTED QUESTION MARK
- 0x00a9: 0x2310, # REVERSED NOT SIGN
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
- 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
- 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
- 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
- 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
- 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
- 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
- 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
- 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
- 0x00ec: 0x221e, # INFINITY
- 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
- 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
- 0x00ef: 0x2229, # INTERSECTION
- 0x00f0: 0x2261, # IDENTICAL TO
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
- 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
- 0x00f4: 0x2320, # TOP HALF INTEGRAL
- 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x2248, # ALMOST EQUAL TO
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x221a, # SQUARE ROOT
- 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\u05d0' # 0x0080 -> HEBREW LETTER ALEF
- u'\u05d1' # 0x0081 -> HEBREW LETTER BET
- u'\u05d2' # 0x0082 -> HEBREW LETTER GIMEL
- u'\u05d3' # 0x0083 -> HEBREW LETTER DALET
- u'\u05d4' # 0x0084 -> HEBREW LETTER HE
- u'\u05d5' # 0x0085 -> HEBREW LETTER VAV
- u'\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN
- u'\u05d7' # 0x0087 -> HEBREW LETTER HET
- u'\u05d8' # 0x0088 -> HEBREW LETTER TET
- u'\u05d9' # 0x0089 -> HEBREW LETTER YOD
- u'\u05da' # 0x008a -> HEBREW LETTER FINAL KAF
- u'\u05db' # 0x008b -> HEBREW LETTER KAF
- u'\u05dc' # 0x008c -> HEBREW LETTER LAMED
- u'\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM
- u'\u05de' # 0x008e -> HEBREW LETTER MEM
- u'\u05df' # 0x008f -> HEBREW LETTER FINAL NUN
- u'\u05e0' # 0x0090 -> HEBREW LETTER NUN
- u'\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH
- u'\u05e2' # 0x0092 -> HEBREW LETTER AYIN
- u'\u05e3' # 0x0093 -> HEBREW LETTER FINAL PE
- u'\u05e4' # 0x0094 -> HEBREW LETTER PE
- u'\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI
- u'\u05e6' # 0x0096 -> HEBREW LETTER TSADI
- u'\u05e7' # 0x0097 -> HEBREW LETTER QOF
- u'\u05e8' # 0x0098 -> HEBREW LETTER RESH
- u'\u05e9' # 0x0099 -> HEBREW LETTER SHIN
- u'\u05ea' # 0x009a -> HEBREW LETTER TAV
- u'\xa2' # 0x009b -> CENT SIGN
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xa5' # 0x009d -> YEN SIGN
- u'\u20a7' # 0x009e -> PESETA SIGN
- u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
- u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
- u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
- u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
- u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
- u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
- u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
- u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
- u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
- u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
- u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
- u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
- u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
- u'\u221e' # 0x00ec -> INFINITY
- u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
- u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
- u'\u2229' # 0x00ef -> INTERSECTION
- u'\u2261' # 0x00f0 -> IDENTICAL TO
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
- u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
- u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
- u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\u221a' # 0x00fb -> SQUARE ROOT
- u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
- 0x00a2: 0x009b, # CENT SIGN
- 0x00a3: 0x009c, # POUND SIGN
- 0x00a5: 0x009d, # YEN SIGN
- 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00bf: 0x00a8, # INVERTED QUESTION MARK
- 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
- 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
- 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
- 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
- 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
- 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
- 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
- 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
- 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
- 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
- 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
- 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
- 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
- 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
- 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
- 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
- 0x05d0: 0x0080, # HEBREW LETTER ALEF
- 0x05d1: 0x0081, # HEBREW LETTER BET
- 0x05d2: 0x0082, # HEBREW LETTER GIMEL
- 0x05d3: 0x0083, # HEBREW LETTER DALET
- 0x05d4: 0x0084, # HEBREW LETTER HE
- 0x05d5: 0x0085, # HEBREW LETTER VAV
- 0x05d6: 0x0086, # HEBREW LETTER ZAYIN
- 0x05d7: 0x0087, # HEBREW LETTER HET
- 0x05d8: 0x0088, # HEBREW LETTER TET
- 0x05d9: 0x0089, # HEBREW LETTER YOD
- 0x05da: 0x008a, # HEBREW LETTER FINAL KAF
- 0x05db: 0x008b, # HEBREW LETTER KAF
- 0x05dc: 0x008c, # HEBREW LETTER LAMED
- 0x05dd: 0x008d, # HEBREW LETTER FINAL MEM
- 0x05de: 0x008e, # HEBREW LETTER MEM
- 0x05df: 0x008f, # HEBREW LETTER FINAL NUN
- 0x05e0: 0x0090, # HEBREW LETTER NUN
- 0x05e1: 0x0091, # HEBREW LETTER SAMEKH
- 0x05e2: 0x0092, # HEBREW LETTER AYIN
- 0x05e3: 0x0093, # HEBREW LETTER FINAL PE
- 0x05e4: 0x0094, # HEBREW LETTER PE
- 0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI
- 0x05e6: 0x0096, # HEBREW LETTER TSADI
- 0x05e7: 0x0097, # HEBREW LETTER QOF
- 0x05e8: 0x0098, # HEBREW LETTER RESH
- 0x05e9: 0x0099, # HEBREW LETTER SHIN
- 0x05ea: 0x009a, # HEBREW LETTER TAV
- 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x20a7: 0x009e, # PESETA SIGN
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x221a: 0x00fb, # SQUARE ROOT
- 0x221e: 0x00ec, # INFINITY
- 0x2229: 0x00ef, # INTERSECTION
- 0x2248: 0x00f7, # ALMOST EQUAL TO
- 0x2261: 0x00f0, # IDENTICAL TO
- 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
- 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
- 0x2310: 0x00a9, # REVERSED NOT SIGN
- 0x2320: 0x00f4, # TOP HALF INTEGRAL
- 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp863.py b/sys/lib/python/encodings/cp863.py
deleted file mode 100644
index 62dfabf66..000000000
--- a/sys/lib/python/encodings/cp863.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp863',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x0086: 0x00b6, # PILCROW SIGN
- 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x008d: 0x2017, # DOUBLE LOW LINE
- 0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x008f: 0x00a7, # SECTION SIGN
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
- 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
- 0x0098: 0x00a4, # CURRENCY SIGN
- 0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x00a2, # CENT SIGN
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
- 0x00a0: 0x00a6, # BROKEN BAR
- 0x00a1: 0x00b4, # ACUTE ACCENT
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x00a8, # DIAERESIS
- 0x00a5: 0x00b8, # CEDILLA
- 0x00a6: 0x00b3, # SUPERSCRIPT THREE
- 0x00a7: 0x00af, # MACRON
- 0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00a9: 0x2310, # REVERSED NOT SIGN
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
- 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
- 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
- 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
- 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
- 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
- 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
- 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
- 0x00ec: 0x221e, # INFINITY
- 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
- 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
- 0x00ef: 0x2229, # INTERSECTION
- 0x00f0: 0x2261, # IDENTICAL TO
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
- 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
- 0x00f4: 0x2320, # TOP HALF INTEGRAL
- 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x2248, # ALMOST EQUAL TO
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x221a, # SQUARE ROOT
- 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xb6' # 0x0086 -> PILCROW SIGN
- u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
- u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\u2017' # 0x008d -> DOUBLE LOW LINE
- u'\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xa7' # 0x008f -> SECTION SIGN
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xa4' # 0x0098 -> CURRENCY SIGN
- u'\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xa2' # 0x009b -> CENT SIGN
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
- u'\xa6' # 0x00a0 -> BROKEN BAR
- u'\xb4' # 0x00a1 -> ACUTE ACCENT
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\xa8' # 0x00a4 -> DIAERESIS
- u'\xb8' # 0x00a5 -> CEDILLA
- u'\xb3' # 0x00a6 -> SUPERSCRIPT THREE
- u'\xaf' # 0x00a7 -> MACRON
- u'\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
- u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
- u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
- u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
- u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
- u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
- u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
- u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
- u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
- u'\u221e' # 0x00ec -> INFINITY
- u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
- u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
- u'\u2229' # 0x00ef -> INTERSECTION
- u'\u2261' # 0x00f0 -> IDENTICAL TO
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
- u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
- u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
- u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\u221a' # 0x00fb -> SQUARE ROOT
- u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a2: 0x009b, # CENT SIGN
- 0x00a3: 0x009c, # POUND SIGN
- 0x00a4: 0x0098, # CURRENCY SIGN
- 0x00a6: 0x00a0, # BROKEN BAR
- 0x00a7: 0x008f, # SECTION SIGN
- 0x00a8: 0x00a4, # DIAERESIS
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00af: 0x00a7, # MACRON
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b3: 0x00a6, # SUPERSCRIPT THREE
- 0x00b4: 0x00a1, # ACUTE ACCENT
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b6: 0x0086, # PILCROW SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00b8: 0x00a5, # CEDILLA
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS
- 0x00c0: 0x008e, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS
- 0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
- 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
- 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
- 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
- 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
- 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
- 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
- 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
- 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
- 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
- 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
- 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
- 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
- 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
- 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
- 0x2017: 0x008d, # DOUBLE LOW LINE
- 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x221a: 0x00fb, # SQUARE ROOT
- 0x221e: 0x00ec, # INFINITY
- 0x2229: 0x00ef, # INTERSECTION
- 0x2248: 0x00f7, # ALMOST EQUAL TO
- 0x2261: 0x00f0, # IDENTICAL TO
- 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
- 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
- 0x2310: 0x00a9, # REVERSED NOT SIGN
- 0x2320: 0x00f4, # TOP HALF INTEGRAL
- 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp864.py b/sys/lib/python/encodings/cp864.py
deleted file mode 100644
index 02a0e733a..000000000
--- a/sys/lib/python/encodings/cp864.py
+++ /dev/null
@@ -1,690 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP864.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp864',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0025: 0x066a, # ARABIC PERCENT SIGN
- 0x0080: 0x00b0, # DEGREE SIGN
- 0x0081: 0x00b7, # MIDDLE DOT
- 0x0082: 0x2219, # BULLET OPERATOR
- 0x0083: 0x221a, # SQUARE ROOT
- 0x0084: 0x2592, # MEDIUM SHADE
- 0x0085: 0x2500, # FORMS LIGHT HORIZONTAL
- 0x0086: 0x2502, # FORMS LIGHT VERTICAL
- 0x0087: 0x253c, # FORMS LIGHT VERTICAL AND HORIZONTAL
- 0x0088: 0x2524, # FORMS LIGHT VERTICAL AND LEFT
- 0x0089: 0x252c, # FORMS LIGHT DOWN AND HORIZONTAL
- 0x008a: 0x251c, # FORMS LIGHT VERTICAL AND RIGHT
- 0x008b: 0x2534, # FORMS LIGHT UP AND HORIZONTAL
- 0x008c: 0x2510, # FORMS LIGHT DOWN AND LEFT
- 0x008d: 0x250c, # FORMS LIGHT DOWN AND RIGHT
- 0x008e: 0x2514, # FORMS LIGHT UP AND RIGHT
- 0x008f: 0x2518, # FORMS LIGHT UP AND LEFT
- 0x0090: 0x03b2, # GREEK SMALL BETA
- 0x0091: 0x221e, # INFINITY
- 0x0092: 0x03c6, # GREEK SMALL PHI
- 0x0093: 0x00b1, # PLUS-OR-MINUS SIGN
- 0x0094: 0x00bd, # FRACTION 1/2
- 0x0095: 0x00bc, # FRACTION 1/4
- 0x0096: 0x2248, # ALMOST EQUAL TO
- 0x0097: 0x00ab, # LEFT POINTING GUILLEMET
- 0x0098: 0x00bb, # RIGHT POINTING GUILLEMET
- 0x0099: 0xfef7, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
- 0x009a: 0xfef8, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
- 0x009b: None, # UNDEFINED
- 0x009c: None, # UNDEFINED
- 0x009d: 0xfefb, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
- 0x009e: 0xfefc, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
- 0x009f: None, # UNDEFINED
- 0x00a1: 0x00ad, # SOFT HYPHEN
- 0x00a2: 0xfe82, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
- 0x00a5: 0xfe84, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
- 0x00a6: None, # UNDEFINED
- 0x00a7: None, # UNDEFINED
- 0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
- 0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM
- 0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM
- 0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM
- 0x00ac: 0x060c, # ARABIC COMMA
- 0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM
- 0x00ae: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM
- 0x00af: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM
- 0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO
- 0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE
- 0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO
- 0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE
- 0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR
- 0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE
- 0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX
- 0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN
- 0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT
- 0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE
- 0x00ba: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM
- 0x00bb: 0x061b, # ARABIC SEMICOLON
- 0x00bc: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM
- 0x00bd: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM
- 0x00be: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM
- 0x00bf: 0x061f, # ARABIC QUESTION MARK
- 0x00c0: 0x00a2, # CENT SIGN
- 0x00c1: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM
- 0x00c2: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
- 0x00c3: 0xfe83, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
- 0x00c4: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
- 0x00c5: 0xfeca, # ARABIC LETTER AIN FINAL FORM
- 0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
- 0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM
- 0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM
- 0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
- 0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM
- 0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM
- 0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM
- 0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM
- 0x00ce: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM
- 0x00cf: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM
- 0x00d0: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM
- 0x00d1: 0xfead, # ARABIC LETTER REH ISOLATED FORM
- 0x00d2: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM
- 0x00d3: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM
- 0x00d4: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM
- 0x00d5: 0xfebb, # ARABIC LETTER SAD INITIAL FORM
- 0x00d6: 0xfebf, # ARABIC LETTER DAD INITIAL FORM
- 0x00d7: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM
- 0x00d8: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM
- 0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM
- 0x00da: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM
- 0x00db: 0x00a6, # BROKEN VERTICAL BAR
- 0x00dc: 0x00ac, # NOT SIGN
- 0x00dd: 0x00f7, # DIVISION SIGN
- 0x00de: 0x00d7, # MULTIPLICATION SIGN
- 0x00df: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM
- 0x00e0: 0x0640, # ARABIC TATWEEL
- 0x00e1: 0xfed3, # ARABIC LETTER FEH INITIAL FORM
- 0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM
- 0x00e3: 0xfedb, # ARABIC LETTER KAF INITIAL FORM
- 0x00e4: 0xfedf, # ARABIC LETTER LAM INITIAL FORM
- 0x00e5: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM
- 0x00e6: 0xfee7, # ARABIC LETTER NOON INITIAL FORM
- 0x00e7: 0xfeeb, # ARABIC LETTER HEH INITIAL FORM
- 0x00e8: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM
- 0x00e9: 0xfeef, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
- 0x00ea: 0xfef3, # ARABIC LETTER YEH INITIAL FORM
- 0x00eb: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM
- 0x00ec: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM
- 0x00ed: 0xfece, # ARABIC LETTER GHAIN FINAL FORM
- 0x00ee: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM
- 0x00ef: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM
- 0x00f0: 0xfe7d, # ARABIC SHADDA MEDIAL FORM
- 0x00f1: 0x0651, # ARABIC SHADDAH
- 0x00f2: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM
- 0x00f3: 0xfee9, # ARABIC LETTER HEH ISOLATED FORM
- 0x00f4: 0xfeec, # ARABIC LETTER HEH MEDIAL FORM
- 0x00f5: 0xfef0, # ARABIC LETTER ALEF MAKSURA FINAL FORM
- 0x00f6: 0xfef2, # ARABIC LETTER YEH FINAL FORM
- 0x00f7: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM
- 0x00f8: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM
- 0x00f9: 0xfef5, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
- 0x00fa: 0xfef6, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
- 0x00fb: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM
- 0x00fc: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM
- 0x00fd: 0xfef1, # ARABIC LETTER YEH ISOLATED FORM
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: None, # UNDEFINED
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'\u066a' # 0x0025 -> ARABIC PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xb0' # 0x0080 -> DEGREE SIGN
- u'\xb7' # 0x0081 -> MIDDLE DOT
- u'\u2219' # 0x0082 -> BULLET OPERATOR
- u'\u221a' # 0x0083 -> SQUARE ROOT
- u'\u2592' # 0x0084 -> MEDIUM SHADE
- u'\u2500' # 0x0085 -> FORMS LIGHT HORIZONTAL
- u'\u2502' # 0x0086 -> FORMS LIGHT VERTICAL
- u'\u253c' # 0x0087 -> FORMS LIGHT VERTICAL AND HORIZONTAL
- u'\u2524' # 0x0088 -> FORMS LIGHT VERTICAL AND LEFT
- u'\u252c' # 0x0089 -> FORMS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x008a -> FORMS LIGHT VERTICAL AND RIGHT
- u'\u2534' # 0x008b -> FORMS LIGHT UP AND HORIZONTAL
- u'\u2510' # 0x008c -> FORMS LIGHT DOWN AND LEFT
- u'\u250c' # 0x008d -> FORMS LIGHT DOWN AND RIGHT
- u'\u2514' # 0x008e -> FORMS LIGHT UP AND RIGHT
- u'\u2518' # 0x008f -> FORMS LIGHT UP AND LEFT
- u'\u03b2' # 0x0090 -> GREEK SMALL BETA
- u'\u221e' # 0x0091 -> INFINITY
- u'\u03c6' # 0x0092 -> GREEK SMALL PHI
- u'\xb1' # 0x0093 -> PLUS-OR-MINUS SIGN
- u'\xbd' # 0x0094 -> FRACTION 1/2
- u'\xbc' # 0x0095 -> FRACTION 1/4
- u'\u2248' # 0x0096 -> ALMOST EQUAL TO
- u'\xab' # 0x0097 -> LEFT POINTING GUILLEMET
- u'\xbb' # 0x0098 -> RIGHT POINTING GUILLEMET
- u'\ufef7' # 0x0099 -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
- u'\ufef8' # 0x009a -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
- u'\ufffe' # 0x009b -> UNDEFINED
- u'\ufffe' # 0x009c -> UNDEFINED
- u'\ufefb' # 0x009d -> ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
- u'\ufefc' # 0x009e -> ARABIC LIGATURE LAM WITH ALEF FINAL FORM
- u'\ufffe' # 0x009f -> UNDEFINED
- u'\xa0' # 0x00a0 -> NON-BREAKING SPACE
- u'\xad' # 0x00a1 -> SOFT HYPHEN
- u'\ufe82' # 0x00a2 -> ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
- u'\xa3' # 0x00a3 -> POUND SIGN
- u'\xa4' # 0x00a4 -> CURRENCY SIGN
- u'\ufe84' # 0x00a5 -> ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
- u'\ufffe' # 0x00a6 -> UNDEFINED
- u'\ufffe' # 0x00a7 -> UNDEFINED
- u'\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM
- u'\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM
- u'\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM
- u'\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM
- u'\u060c' # 0x00ac -> ARABIC COMMA
- u'\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM
- u'\ufea1' # 0x00ae -> ARABIC LETTER HAH ISOLATED FORM
- u'\ufea5' # 0x00af -> ARABIC LETTER KHAH ISOLATED FORM
- u'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO
- u'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE
- u'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO
- u'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE
- u'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR
- u'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE
- u'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX
- u'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN
- u'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT
- u'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE
- u'\ufed1' # 0x00ba -> ARABIC LETTER FEH ISOLATED FORM
- u'\u061b' # 0x00bb -> ARABIC SEMICOLON
- u'\ufeb1' # 0x00bc -> ARABIC LETTER SEEN ISOLATED FORM
- u'\ufeb5' # 0x00bd -> ARABIC LETTER SHEEN ISOLATED FORM
- u'\ufeb9' # 0x00be -> ARABIC LETTER SAD ISOLATED FORM
- u'\u061f' # 0x00bf -> ARABIC QUESTION MARK
- u'\xa2' # 0x00c0 -> CENT SIGN
- u'\ufe80' # 0x00c1 -> ARABIC LETTER HAMZA ISOLATED FORM
- u'\ufe81' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
- u'\ufe83' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
- u'\ufe85' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
- u'\ufeca' # 0x00c5 -> ARABIC LETTER AIN FINAL FORM
- u'\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
- u'\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM
- u'\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM
- u'\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
- u'\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM
- u'\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM
- u'\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM
- u'\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM
- u'\ufea7' # 0x00ce -> ARABIC LETTER KHAH INITIAL FORM
- u'\ufea9' # 0x00cf -> ARABIC LETTER DAL ISOLATED FORM
- u'\ufeab' # 0x00d0 -> ARABIC LETTER THAL ISOLATED FORM
- u'\ufead' # 0x00d1 -> ARABIC LETTER REH ISOLATED FORM
- u'\ufeaf' # 0x00d2 -> ARABIC LETTER ZAIN ISOLATED FORM
- u'\ufeb3' # 0x00d3 -> ARABIC LETTER SEEN INITIAL FORM
- u'\ufeb7' # 0x00d4 -> ARABIC LETTER SHEEN INITIAL FORM
- u'\ufebb' # 0x00d5 -> ARABIC LETTER SAD INITIAL FORM
- u'\ufebf' # 0x00d6 -> ARABIC LETTER DAD INITIAL FORM
- u'\ufec1' # 0x00d7 -> ARABIC LETTER TAH ISOLATED FORM
- u'\ufec5' # 0x00d8 -> ARABIC LETTER ZAH ISOLATED FORM
- u'\ufecb' # 0x00d9 -> ARABIC LETTER AIN INITIAL FORM
- u'\ufecf' # 0x00da -> ARABIC LETTER GHAIN INITIAL FORM
- u'\xa6' # 0x00db -> BROKEN VERTICAL BAR
- u'\xac' # 0x00dc -> NOT SIGN
- u'\xf7' # 0x00dd -> DIVISION SIGN
- u'\xd7' # 0x00de -> MULTIPLICATION SIGN
- u'\ufec9' # 0x00df -> ARABIC LETTER AIN ISOLATED FORM
- u'\u0640' # 0x00e0 -> ARABIC TATWEEL
- u'\ufed3' # 0x00e1 -> ARABIC LETTER FEH INITIAL FORM
- u'\ufed7' # 0x00e2 -> ARABIC LETTER QAF INITIAL FORM
- u'\ufedb' # 0x00e3 -> ARABIC LETTER KAF INITIAL FORM
- u'\ufedf' # 0x00e4 -> ARABIC LETTER LAM INITIAL FORM
- u'\ufee3' # 0x00e5 -> ARABIC LETTER MEEM INITIAL FORM
- u'\ufee7' # 0x00e6 -> ARABIC LETTER NOON INITIAL FORM
- u'\ufeeb' # 0x00e7 -> ARABIC LETTER HEH INITIAL FORM
- u'\ufeed' # 0x00e8 -> ARABIC LETTER WAW ISOLATED FORM
- u'\ufeef' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA ISOLATED FORM
- u'\ufef3' # 0x00ea -> ARABIC LETTER YEH INITIAL FORM
- u'\ufebd' # 0x00eb -> ARABIC LETTER DAD ISOLATED FORM
- u'\ufecc' # 0x00ec -> ARABIC LETTER AIN MEDIAL FORM
- u'\ufece' # 0x00ed -> ARABIC LETTER GHAIN FINAL FORM
- u'\ufecd' # 0x00ee -> ARABIC LETTER GHAIN ISOLATED FORM
- u'\ufee1' # 0x00ef -> ARABIC LETTER MEEM ISOLATED FORM
- u'\ufe7d' # 0x00f0 -> ARABIC SHADDA MEDIAL FORM
- u'\u0651' # 0x00f1 -> ARABIC SHADDAH
- u'\ufee5' # 0x00f2 -> ARABIC LETTER NOON ISOLATED FORM
- u'\ufee9' # 0x00f3 -> ARABIC LETTER HEH ISOLATED FORM
- u'\ufeec' # 0x00f4 -> ARABIC LETTER HEH MEDIAL FORM
- u'\ufef0' # 0x00f5 -> ARABIC LETTER ALEF MAKSURA FINAL FORM
- u'\ufef2' # 0x00f6 -> ARABIC LETTER YEH FINAL FORM
- u'\ufed0' # 0x00f7 -> ARABIC LETTER GHAIN MEDIAL FORM
- u'\ufed5' # 0x00f8 -> ARABIC LETTER QAF ISOLATED FORM
- u'\ufef5' # 0x00f9 -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
- u'\ufef6' # 0x00fa -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
- u'\ufedd' # 0x00fb -> ARABIC LETTER LAM ISOLATED FORM
- u'\ufed9' # 0x00fc -> ARABIC LETTER KAF ISOLATED FORM
- u'\ufef1' # 0x00fd -> ARABIC LETTER YEH ISOLATED FORM
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\ufffe' # 0x00ff -> UNDEFINED
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00a0, # NON-BREAKING SPACE
- 0x00a2: 0x00c0, # CENT SIGN
- 0x00a3: 0x00a3, # POUND SIGN
- 0x00a4: 0x00a4, # CURRENCY SIGN
- 0x00a6: 0x00db, # BROKEN VERTICAL BAR
- 0x00ab: 0x0097, # LEFT POINTING GUILLEMET
- 0x00ac: 0x00dc, # NOT SIGN
- 0x00ad: 0x00a1, # SOFT HYPHEN
- 0x00b0: 0x0080, # DEGREE SIGN
- 0x00b1: 0x0093, # PLUS-OR-MINUS SIGN
- 0x00b7: 0x0081, # MIDDLE DOT
- 0x00bb: 0x0098, # RIGHT POINTING GUILLEMET
- 0x00bc: 0x0095, # FRACTION 1/4
- 0x00bd: 0x0094, # FRACTION 1/2
- 0x00d7: 0x00de, # MULTIPLICATION SIGN
- 0x00f7: 0x00dd, # DIVISION SIGN
- 0x03b2: 0x0090, # GREEK SMALL BETA
- 0x03c6: 0x0092, # GREEK SMALL PHI
- 0x060c: 0x00ac, # ARABIC COMMA
- 0x061b: 0x00bb, # ARABIC SEMICOLON
- 0x061f: 0x00bf, # ARABIC QUESTION MARK
- 0x0640: 0x00e0, # ARABIC TATWEEL
- 0x0651: 0x00f1, # ARABIC SHADDAH
- 0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO
- 0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE
- 0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO
- 0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE
- 0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR
- 0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE
- 0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX
- 0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN
- 0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT
- 0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE
- 0x066a: 0x0025, # ARABIC PERCENT SIGN
- 0x2219: 0x0082, # BULLET OPERATOR
- 0x221a: 0x0083, # SQUARE ROOT
- 0x221e: 0x0091, # INFINITY
- 0x2248: 0x0096, # ALMOST EQUAL TO
- 0x2500: 0x0085, # FORMS LIGHT HORIZONTAL
- 0x2502: 0x0086, # FORMS LIGHT VERTICAL
- 0x250c: 0x008d, # FORMS LIGHT DOWN AND RIGHT
- 0x2510: 0x008c, # FORMS LIGHT DOWN AND LEFT
- 0x2514: 0x008e, # FORMS LIGHT UP AND RIGHT
- 0x2518: 0x008f, # FORMS LIGHT UP AND LEFT
- 0x251c: 0x008a, # FORMS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x0088, # FORMS LIGHT VERTICAL AND LEFT
- 0x252c: 0x0089, # FORMS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x008b, # FORMS LIGHT UP AND HORIZONTAL
- 0x253c: 0x0087, # FORMS LIGHT VERTICAL AND HORIZONTAL
- 0x2592: 0x0084, # MEDIUM SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
- 0xfe7d: 0x00f0, # ARABIC SHADDA MEDIAL FORM
- 0xfe80: 0x00c1, # ARABIC LETTER HAMZA ISOLATED FORM
- 0xfe81: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
- 0xfe82: 0x00a2, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
- 0xfe83: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
- 0xfe84: 0x00a5, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
- 0xfe85: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
- 0xfe8b: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
- 0xfe8d: 0x00c7, # ARABIC LETTER ALEF ISOLATED FORM
- 0xfe8e: 0x00a8, # ARABIC LETTER ALEF FINAL FORM
- 0xfe8f: 0x00a9, # ARABIC LETTER BEH ISOLATED FORM
- 0xfe91: 0x00c8, # ARABIC LETTER BEH INITIAL FORM
- 0xfe93: 0x00c9, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
- 0xfe95: 0x00aa, # ARABIC LETTER TEH ISOLATED FORM
- 0xfe97: 0x00ca, # ARABIC LETTER TEH INITIAL FORM
- 0xfe99: 0x00ab, # ARABIC LETTER THEH ISOLATED FORM
- 0xfe9b: 0x00cb, # ARABIC LETTER THEH INITIAL FORM
- 0xfe9d: 0x00ad, # ARABIC LETTER JEEM ISOLATED FORM
- 0xfe9f: 0x00cc, # ARABIC LETTER JEEM INITIAL FORM
- 0xfea1: 0x00ae, # ARABIC LETTER HAH ISOLATED FORM
- 0xfea3: 0x00cd, # ARABIC LETTER HAH INITIAL FORM
- 0xfea5: 0x00af, # ARABIC LETTER KHAH ISOLATED FORM
- 0xfea7: 0x00ce, # ARABIC LETTER KHAH INITIAL FORM
- 0xfea9: 0x00cf, # ARABIC LETTER DAL ISOLATED FORM
- 0xfeab: 0x00d0, # ARABIC LETTER THAL ISOLATED FORM
- 0xfead: 0x00d1, # ARABIC LETTER REH ISOLATED FORM
- 0xfeaf: 0x00d2, # ARABIC LETTER ZAIN ISOLATED FORM
- 0xfeb1: 0x00bc, # ARABIC LETTER SEEN ISOLATED FORM
- 0xfeb3: 0x00d3, # ARABIC LETTER SEEN INITIAL FORM
- 0xfeb5: 0x00bd, # ARABIC LETTER SHEEN ISOLATED FORM
- 0xfeb7: 0x00d4, # ARABIC LETTER SHEEN INITIAL FORM
- 0xfeb9: 0x00be, # ARABIC LETTER SAD ISOLATED FORM
- 0xfebb: 0x00d5, # ARABIC LETTER SAD INITIAL FORM
- 0xfebd: 0x00eb, # ARABIC LETTER DAD ISOLATED FORM
- 0xfebf: 0x00d6, # ARABIC LETTER DAD INITIAL FORM
- 0xfec1: 0x00d7, # ARABIC LETTER TAH ISOLATED FORM
- 0xfec5: 0x00d8, # ARABIC LETTER ZAH ISOLATED FORM
- 0xfec9: 0x00df, # ARABIC LETTER AIN ISOLATED FORM
- 0xfeca: 0x00c5, # ARABIC LETTER AIN FINAL FORM
- 0xfecb: 0x00d9, # ARABIC LETTER AIN INITIAL FORM
- 0xfecc: 0x00ec, # ARABIC LETTER AIN MEDIAL FORM
- 0xfecd: 0x00ee, # ARABIC LETTER GHAIN ISOLATED FORM
- 0xfece: 0x00ed, # ARABIC LETTER GHAIN FINAL FORM
- 0xfecf: 0x00da, # ARABIC LETTER GHAIN INITIAL FORM
- 0xfed0: 0x00f7, # ARABIC LETTER GHAIN MEDIAL FORM
- 0xfed1: 0x00ba, # ARABIC LETTER FEH ISOLATED FORM
- 0xfed3: 0x00e1, # ARABIC LETTER FEH INITIAL FORM
- 0xfed5: 0x00f8, # ARABIC LETTER QAF ISOLATED FORM
- 0xfed7: 0x00e2, # ARABIC LETTER QAF INITIAL FORM
- 0xfed9: 0x00fc, # ARABIC LETTER KAF ISOLATED FORM
- 0xfedb: 0x00e3, # ARABIC LETTER KAF INITIAL FORM
- 0xfedd: 0x00fb, # ARABIC LETTER LAM ISOLATED FORM
- 0xfedf: 0x00e4, # ARABIC LETTER LAM INITIAL FORM
- 0xfee1: 0x00ef, # ARABIC LETTER MEEM ISOLATED FORM
- 0xfee3: 0x00e5, # ARABIC LETTER MEEM INITIAL FORM
- 0xfee5: 0x00f2, # ARABIC LETTER NOON ISOLATED FORM
- 0xfee7: 0x00e6, # ARABIC LETTER NOON INITIAL FORM
- 0xfee9: 0x00f3, # ARABIC LETTER HEH ISOLATED FORM
- 0xfeeb: 0x00e7, # ARABIC LETTER HEH INITIAL FORM
- 0xfeec: 0x00f4, # ARABIC LETTER HEH MEDIAL FORM
- 0xfeed: 0x00e8, # ARABIC LETTER WAW ISOLATED FORM
- 0xfeef: 0x00e9, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
- 0xfef0: 0x00f5, # ARABIC LETTER ALEF MAKSURA FINAL FORM
- 0xfef1: 0x00fd, # ARABIC LETTER YEH ISOLATED FORM
- 0xfef2: 0x00f6, # ARABIC LETTER YEH FINAL FORM
- 0xfef3: 0x00ea, # ARABIC LETTER YEH INITIAL FORM
- 0xfef5: 0x00f9, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
- 0xfef6: 0x00fa, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
- 0xfef7: 0x0099, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
- 0xfef8: 0x009a, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
- 0xfefb: 0x009d, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
- 0xfefc: 0x009e, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
-}
diff --git a/sys/lib/python/encodings/cp865.py b/sys/lib/python/encodings/cp865.py
deleted file mode 100644
index e9f45f1b5..000000000
--- a/sys/lib/python/encodings/cp865.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP865.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp865',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
- 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
- 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
- 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
- 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
- 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
- 0x009e: 0x20a7, # PESETA SIGN
- 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
- 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
- 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
- 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
- 0x00a8: 0x00bf, # INVERTED QUESTION MARK
- 0x00a9: 0x2310, # REVERSED NOT SIGN
- 0x00aa: 0x00ac, # NOT SIGN
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00a4, # CURRENCY SIGN
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
- 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
- 0x00e3: 0x03c0, # GREEK SMALL LETTER PI
- 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
- 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
- 0x00e6: 0x00b5, # MICRO SIGN
- 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
- 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
- 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
- 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
- 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
- 0x00ec: 0x221e, # INFINITY
- 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
- 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
- 0x00ef: 0x2229, # INTERSECTION
- 0x00f0: 0x2261, # IDENTICAL TO
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
- 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
- 0x00f4: 0x2320, # TOP HALF INTEGRAL
- 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
- 0x00f6: 0x00f7, # DIVISION SIGN
- 0x00f7: 0x2248, # ALMOST EQUAL TO
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x221a, # SQUARE ROOT
- 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x00fd: 0x00b2, # SUPERSCRIPT TWO
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
- u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
- u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
- u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
- u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
- u'\xa3' # 0x009c -> POUND SIGN
- u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u20a7' # 0x009e -> PESETA SIGN
- u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
- u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
- u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
- u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
- u'\u2310' # 0x00a9 -> REVERSED NOT SIGN
- u'\xac' # 0x00aa -> NOT SIGN
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
- u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xa4' # 0x00af -> CURRENCY SIGN
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
- u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
- u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
- u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
- u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
- u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
- u'\xb5' # 0x00e6 -> MICRO SIGN
- u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
- u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
- u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
- u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
- u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
- u'\u221e' # 0x00ec -> INFINITY
- u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
- u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
- u'\u2229' # 0x00ef -> INTERSECTION
- u'\u2261' # 0x00f0 -> IDENTICAL TO
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
- u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
- u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
- u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
- u'\xf7' # 0x00f6 -> DIVISION SIGN
- u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\u221a' # 0x00fb -> SQUARE ROOT
- u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
- u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
- 0x00a3: 0x009c, # POUND SIGN
- 0x00a4: 0x00af, # CURRENCY SIGN
- 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x00aa, # NOT SIGN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x00fd, # SUPERSCRIPT TWO
- 0x00b5: 0x00e6, # MICRO SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
- 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x00bf: 0x00a8, # INVERTED QUESTION MARK
- 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
- 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
- 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
- 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
- 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
- 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
- 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
- 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
- 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
- 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
- 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
- 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
- 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00f7: 0x00f6, # DIVISION SIGN
- 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
- 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
- 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
- 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
- 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
- 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
- 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
- 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
- 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
- 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
- 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
- 0x03c0: 0x00e3, # GREEK SMALL LETTER PI
- 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
- 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
- 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
- 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
- 0x20a7: 0x009e, # PESETA SIGN
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x221a: 0x00fb, # SQUARE ROOT
- 0x221e: 0x00ec, # INFINITY
- 0x2229: 0x00ef, # INTERSECTION
- 0x2248: 0x00f7, # ALMOST EQUAL TO
- 0x2261: 0x00f0, # IDENTICAL TO
- 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
- 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
- 0x2310: 0x00a9, # REVERSED NOT SIGN
- 0x2320: 0x00f4, # TOP HALF INTEGRAL
- 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp866.py b/sys/lib/python/encodings/cp866.py
deleted file mode 100644
index 29cd85a3f..000000000
--- a/sys/lib/python/encodings/cp866.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP866.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp866',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
- 0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
- 0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
- 0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
- 0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
- 0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
- 0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
- 0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
- 0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
- 0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
- 0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
- 0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
- 0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
- 0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
- 0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
- 0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
- 0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
- 0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
- 0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
- 0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
- 0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
- 0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
- 0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
- 0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
- 0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
- 0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
- 0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
- 0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
- 0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
- 0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
- 0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
- 0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
- 0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
- 0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
- 0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
- 0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
- 0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
- 0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
- 0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
- 0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
- 0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
- 0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
- 0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
- 0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
- 0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
- 0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
- 0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
- 0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x258c, # LEFT HALF BLOCK
- 0x00de: 0x2590, # RIGHT HALF BLOCK
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
- 0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
- 0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
- 0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
- 0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
- 0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
- 0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
- 0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
- 0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
- 0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
- 0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
- 0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
- 0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
- 0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
- 0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
- 0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
- 0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
- 0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
- 0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
- 0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
- 0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
- 0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
- 0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
- 0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x2219, # BULLET OPERATOR
- 0x00fa: 0x00b7, # MIDDLE DOT
- 0x00fb: 0x221a, # SQUARE ROOT
- 0x00fc: 0x2116, # NUMERO SIGN
- 0x00fd: 0x00a4, # CURRENCY SIGN
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\u0410' # 0x0080 -> CYRILLIC CAPITAL LETTER A
- u'\u0411' # 0x0081 -> CYRILLIC CAPITAL LETTER BE
- u'\u0412' # 0x0082 -> CYRILLIC CAPITAL LETTER VE
- u'\u0413' # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
- u'\u0414' # 0x0084 -> CYRILLIC CAPITAL LETTER DE
- u'\u0415' # 0x0085 -> CYRILLIC CAPITAL LETTER IE
- u'\u0416' # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
- u'\u0417' # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
- u'\u0418' # 0x0088 -> CYRILLIC CAPITAL LETTER I
- u'\u0419' # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
- u'\u041a' # 0x008a -> CYRILLIC CAPITAL LETTER KA
- u'\u041b' # 0x008b -> CYRILLIC CAPITAL LETTER EL
- u'\u041c' # 0x008c -> CYRILLIC CAPITAL LETTER EM
- u'\u041d' # 0x008d -> CYRILLIC CAPITAL LETTER EN
- u'\u041e' # 0x008e -> CYRILLIC CAPITAL LETTER O
- u'\u041f' # 0x008f -> CYRILLIC CAPITAL LETTER PE
- u'\u0420' # 0x0090 -> CYRILLIC CAPITAL LETTER ER
- u'\u0421' # 0x0091 -> CYRILLIC CAPITAL LETTER ES
- u'\u0422' # 0x0092 -> CYRILLIC CAPITAL LETTER TE
- u'\u0423' # 0x0093 -> CYRILLIC CAPITAL LETTER U
- u'\u0424' # 0x0094 -> CYRILLIC CAPITAL LETTER EF
- u'\u0425' # 0x0095 -> CYRILLIC CAPITAL LETTER HA
- u'\u0426' # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
- u'\u0427' # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
- u'\u0428' # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
- u'\u0429' # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
- u'\u042a' # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
- u'\u042b' # 0x009b -> CYRILLIC CAPITAL LETTER YERU
- u'\u042c' # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
- u'\u042d' # 0x009d -> CYRILLIC CAPITAL LETTER E
- u'\u042e' # 0x009e -> CYRILLIC CAPITAL LETTER YU
- u'\u042f' # 0x009f -> CYRILLIC CAPITAL LETTER YA
- u'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
- u'\u0431' # 0x00a1 -> CYRILLIC SMALL LETTER BE
- u'\u0432' # 0x00a2 -> CYRILLIC SMALL LETTER VE
- u'\u0433' # 0x00a3 -> CYRILLIC SMALL LETTER GHE
- u'\u0434' # 0x00a4 -> CYRILLIC SMALL LETTER DE
- u'\u0435' # 0x00a5 -> CYRILLIC SMALL LETTER IE
- u'\u0436' # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
- u'\u0437' # 0x00a7 -> CYRILLIC SMALL LETTER ZE
- u'\u0438' # 0x00a8 -> CYRILLIC SMALL LETTER I
- u'\u0439' # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
- u'\u043a' # 0x00aa -> CYRILLIC SMALL LETTER KA
- u'\u043b' # 0x00ab -> CYRILLIC SMALL LETTER EL
- u'\u043c' # 0x00ac -> CYRILLIC SMALL LETTER EM
- u'\u043d' # 0x00ad -> CYRILLIC SMALL LETTER EN
- u'\u043e' # 0x00ae -> CYRILLIC SMALL LETTER O
- u'\u043f' # 0x00af -> CYRILLIC SMALL LETTER PE
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u258c' # 0x00dd -> LEFT HALF BLOCK
- u'\u2590' # 0x00de -> RIGHT HALF BLOCK
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u0440' # 0x00e0 -> CYRILLIC SMALL LETTER ER
- u'\u0441' # 0x00e1 -> CYRILLIC SMALL LETTER ES
- u'\u0442' # 0x00e2 -> CYRILLIC SMALL LETTER TE
- u'\u0443' # 0x00e3 -> CYRILLIC SMALL LETTER U
- u'\u0444' # 0x00e4 -> CYRILLIC SMALL LETTER EF
- u'\u0445' # 0x00e5 -> CYRILLIC SMALL LETTER HA
- u'\u0446' # 0x00e6 -> CYRILLIC SMALL LETTER TSE
- u'\u0447' # 0x00e7 -> CYRILLIC SMALL LETTER CHE
- u'\u0448' # 0x00e8 -> CYRILLIC SMALL LETTER SHA
- u'\u0449' # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
- u'\u044a' # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
- u'\u044b' # 0x00eb -> CYRILLIC SMALL LETTER YERU
- u'\u044c' # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
- u'\u044d' # 0x00ed -> CYRILLIC SMALL LETTER E
- u'\u044e' # 0x00ee -> CYRILLIC SMALL LETTER YU
- u'\u044f' # 0x00ef -> CYRILLIC SMALL LETTER YA
- u'\u0401' # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
- u'\u0451' # 0x00f1 -> CYRILLIC SMALL LETTER IO
- u'\u0404' # 0x00f2 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
- u'\u0454' # 0x00f3 -> CYRILLIC SMALL LETTER UKRAINIAN IE
- u'\u0407' # 0x00f4 -> CYRILLIC CAPITAL LETTER YI
- u'\u0457' # 0x00f5 -> CYRILLIC SMALL LETTER YI
- u'\u040e' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHORT U
- u'\u045e' # 0x00f7 -> CYRILLIC SMALL LETTER SHORT U
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\u2219' # 0x00f9 -> BULLET OPERATOR
- u'\xb7' # 0x00fa -> MIDDLE DOT
- u'\u221a' # 0x00fb -> SQUARE ROOT
- u'\u2116' # 0x00fc -> NUMERO SIGN
- u'\xa4' # 0x00fd -> CURRENCY SIGN
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a4: 0x00fd, # CURRENCY SIGN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b7: 0x00fa, # MIDDLE DOT
- 0x0401: 0x00f0, # CYRILLIC CAPITAL LETTER IO
- 0x0404: 0x00f2, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
- 0x0407: 0x00f4, # CYRILLIC CAPITAL LETTER YI
- 0x040e: 0x00f6, # CYRILLIC CAPITAL LETTER SHORT U
- 0x0410: 0x0080, # CYRILLIC CAPITAL LETTER A
- 0x0411: 0x0081, # CYRILLIC CAPITAL LETTER BE
- 0x0412: 0x0082, # CYRILLIC CAPITAL LETTER VE
- 0x0413: 0x0083, # CYRILLIC CAPITAL LETTER GHE
- 0x0414: 0x0084, # CYRILLIC CAPITAL LETTER DE
- 0x0415: 0x0085, # CYRILLIC CAPITAL LETTER IE
- 0x0416: 0x0086, # CYRILLIC CAPITAL LETTER ZHE
- 0x0417: 0x0087, # CYRILLIC CAPITAL LETTER ZE
- 0x0418: 0x0088, # CYRILLIC CAPITAL LETTER I
- 0x0419: 0x0089, # CYRILLIC CAPITAL LETTER SHORT I
- 0x041a: 0x008a, # CYRILLIC CAPITAL LETTER KA
- 0x041b: 0x008b, # CYRILLIC CAPITAL LETTER EL
- 0x041c: 0x008c, # CYRILLIC CAPITAL LETTER EM
- 0x041d: 0x008d, # CYRILLIC CAPITAL LETTER EN
- 0x041e: 0x008e, # CYRILLIC CAPITAL LETTER O
- 0x041f: 0x008f, # CYRILLIC CAPITAL LETTER PE
- 0x0420: 0x0090, # CYRILLIC CAPITAL LETTER ER
- 0x0421: 0x0091, # CYRILLIC CAPITAL LETTER ES
- 0x0422: 0x0092, # CYRILLIC CAPITAL LETTER TE
- 0x0423: 0x0093, # CYRILLIC CAPITAL LETTER U
- 0x0424: 0x0094, # CYRILLIC CAPITAL LETTER EF
- 0x0425: 0x0095, # CYRILLIC CAPITAL LETTER HA
- 0x0426: 0x0096, # CYRILLIC CAPITAL LETTER TSE
- 0x0427: 0x0097, # CYRILLIC CAPITAL LETTER CHE
- 0x0428: 0x0098, # CYRILLIC CAPITAL LETTER SHA
- 0x0429: 0x0099, # CYRILLIC CAPITAL LETTER SHCHA
- 0x042a: 0x009a, # CYRILLIC CAPITAL LETTER HARD SIGN
- 0x042b: 0x009b, # CYRILLIC CAPITAL LETTER YERU
- 0x042c: 0x009c, # CYRILLIC CAPITAL LETTER SOFT SIGN
- 0x042d: 0x009d, # CYRILLIC CAPITAL LETTER E
- 0x042e: 0x009e, # CYRILLIC CAPITAL LETTER YU
- 0x042f: 0x009f, # CYRILLIC CAPITAL LETTER YA
- 0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
- 0x0431: 0x00a1, # CYRILLIC SMALL LETTER BE
- 0x0432: 0x00a2, # CYRILLIC SMALL LETTER VE
- 0x0433: 0x00a3, # CYRILLIC SMALL LETTER GHE
- 0x0434: 0x00a4, # CYRILLIC SMALL LETTER DE
- 0x0435: 0x00a5, # CYRILLIC SMALL LETTER IE
- 0x0436: 0x00a6, # CYRILLIC SMALL LETTER ZHE
- 0x0437: 0x00a7, # CYRILLIC SMALL LETTER ZE
- 0x0438: 0x00a8, # CYRILLIC SMALL LETTER I
- 0x0439: 0x00a9, # CYRILLIC SMALL LETTER SHORT I
- 0x043a: 0x00aa, # CYRILLIC SMALL LETTER KA
- 0x043b: 0x00ab, # CYRILLIC SMALL LETTER EL
- 0x043c: 0x00ac, # CYRILLIC SMALL LETTER EM
- 0x043d: 0x00ad, # CYRILLIC SMALL LETTER EN
- 0x043e: 0x00ae, # CYRILLIC SMALL LETTER O
- 0x043f: 0x00af, # CYRILLIC SMALL LETTER PE
- 0x0440: 0x00e0, # CYRILLIC SMALL LETTER ER
- 0x0441: 0x00e1, # CYRILLIC SMALL LETTER ES
- 0x0442: 0x00e2, # CYRILLIC SMALL LETTER TE
- 0x0443: 0x00e3, # CYRILLIC SMALL LETTER U
- 0x0444: 0x00e4, # CYRILLIC SMALL LETTER EF
- 0x0445: 0x00e5, # CYRILLIC SMALL LETTER HA
- 0x0446: 0x00e6, # CYRILLIC SMALL LETTER TSE
- 0x0447: 0x00e7, # CYRILLIC SMALL LETTER CHE
- 0x0448: 0x00e8, # CYRILLIC SMALL LETTER SHA
- 0x0449: 0x00e9, # CYRILLIC SMALL LETTER SHCHA
- 0x044a: 0x00ea, # CYRILLIC SMALL LETTER HARD SIGN
- 0x044b: 0x00eb, # CYRILLIC SMALL LETTER YERU
- 0x044c: 0x00ec, # CYRILLIC SMALL LETTER SOFT SIGN
- 0x044d: 0x00ed, # CYRILLIC SMALL LETTER E
- 0x044e: 0x00ee, # CYRILLIC SMALL LETTER YU
- 0x044f: 0x00ef, # CYRILLIC SMALL LETTER YA
- 0x0451: 0x00f1, # CYRILLIC SMALL LETTER IO
- 0x0454: 0x00f3, # CYRILLIC SMALL LETTER UKRAINIAN IE
- 0x0457: 0x00f5, # CYRILLIC SMALL LETTER YI
- 0x045e: 0x00f7, # CYRILLIC SMALL LETTER SHORT U
- 0x2116: 0x00fc, # NUMERO SIGN
- 0x2219: 0x00f9, # BULLET OPERATOR
- 0x221a: 0x00fb, # SQUARE ROOT
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x258c: 0x00dd, # LEFT HALF BLOCK
- 0x2590: 0x00de, # RIGHT HALF BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp869.py b/sys/lib/python/encodings/cp869.py
deleted file mode 100644
index b4dc99bf2..000000000
--- a/sys/lib/python/encodings/cp869.py
+++ /dev/null
@@ -1,689 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP869.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp869',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: None, # UNDEFINED
- 0x0081: None, # UNDEFINED
- 0x0082: None, # UNDEFINED
- 0x0083: None, # UNDEFINED
- 0x0084: None, # UNDEFINED
- 0x0085: None, # UNDEFINED
- 0x0086: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
- 0x0087: None, # UNDEFINED
- 0x0088: 0x00b7, # MIDDLE DOT
- 0x0089: 0x00ac, # NOT SIGN
- 0x008a: 0x00a6, # BROKEN BAR
- 0x008b: 0x2018, # LEFT SINGLE QUOTATION MARK
- 0x008c: 0x2019, # RIGHT SINGLE QUOTATION MARK
- 0x008d: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
- 0x008e: 0x2015, # HORIZONTAL BAR
- 0x008f: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
- 0x0090: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
- 0x0091: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- 0x0092: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
- 0x0093: None, # UNDEFINED
- 0x0094: None, # UNDEFINED
- 0x0095: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
- 0x0096: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- 0x0097: 0x00a9, # COPYRIGHT SIGN
- 0x0098: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
- 0x0099: 0x00b2, # SUPERSCRIPT TWO
- 0x009a: 0x00b3, # SUPERSCRIPT THREE
- 0x009b: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
- 0x009c: 0x00a3, # POUND SIGN
- 0x009d: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
- 0x009e: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
- 0x009f: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
- 0x00a0: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
- 0x00a1: 0x0390, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
- 0x00a2: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
- 0x00a3: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
- 0x00a4: 0x0391, # GREEK CAPITAL LETTER ALPHA
- 0x00a5: 0x0392, # GREEK CAPITAL LETTER BETA
- 0x00a6: 0x0393, # GREEK CAPITAL LETTER GAMMA
- 0x00a7: 0x0394, # GREEK CAPITAL LETTER DELTA
- 0x00a8: 0x0395, # GREEK CAPITAL LETTER EPSILON
- 0x00a9: 0x0396, # GREEK CAPITAL LETTER ZETA
- 0x00aa: 0x0397, # GREEK CAPITAL LETTER ETA
- 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00ac: 0x0398, # GREEK CAPITAL LETTER THETA
- 0x00ad: 0x0399, # GREEK CAPITAL LETTER IOTA
- 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00b0: 0x2591, # LIGHT SHADE
- 0x00b1: 0x2592, # MEDIUM SHADE
- 0x00b2: 0x2593, # DARK SHADE
- 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
- 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x00b5: 0x039a, # GREEK CAPITAL LETTER KAPPA
- 0x00b6: 0x039b, # GREEK CAPITAL LETTER LAMDA
- 0x00b7: 0x039c, # GREEK CAPITAL LETTER MU
- 0x00b8: 0x039d, # GREEK CAPITAL LETTER NU
- 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
- 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x00bd: 0x039e, # GREEK CAPITAL LETTER XI
- 0x00be: 0x039f, # GREEK CAPITAL LETTER OMICRON
- 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x00c6: 0x03a0, # GREEK CAPITAL LETTER PI
- 0x00c7: 0x03a1, # GREEK CAPITAL LETTER RHO
- 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x00cf: 0x03a3, # GREEK CAPITAL LETTER SIGMA
- 0x00d0: 0x03a4, # GREEK CAPITAL LETTER TAU
- 0x00d1: 0x03a5, # GREEK CAPITAL LETTER UPSILON
- 0x00d2: 0x03a6, # GREEK CAPITAL LETTER PHI
- 0x00d3: 0x03a7, # GREEK CAPITAL LETTER CHI
- 0x00d4: 0x03a8, # GREEK CAPITAL LETTER PSI
- 0x00d5: 0x03a9, # GREEK CAPITAL LETTER OMEGA
- 0x00d6: 0x03b1, # GREEK SMALL LETTER ALPHA
- 0x00d7: 0x03b2, # GREEK SMALL LETTER BETA
- 0x00d8: 0x03b3, # GREEK SMALL LETTER GAMMA
- 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x00db: 0x2588, # FULL BLOCK
- 0x00dc: 0x2584, # LOWER HALF BLOCK
- 0x00dd: 0x03b4, # GREEK SMALL LETTER DELTA
- 0x00de: 0x03b5, # GREEK SMALL LETTER EPSILON
- 0x00df: 0x2580, # UPPER HALF BLOCK
- 0x00e0: 0x03b6, # GREEK SMALL LETTER ZETA
- 0x00e1: 0x03b7, # GREEK SMALL LETTER ETA
- 0x00e2: 0x03b8, # GREEK SMALL LETTER THETA
- 0x00e3: 0x03b9, # GREEK SMALL LETTER IOTA
- 0x00e4: 0x03ba, # GREEK SMALL LETTER KAPPA
- 0x00e5: 0x03bb, # GREEK SMALL LETTER LAMDA
- 0x00e6: 0x03bc, # GREEK SMALL LETTER MU
- 0x00e7: 0x03bd, # GREEK SMALL LETTER NU
- 0x00e8: 0x03be, # GREEK SMALL LETTER XI
- 0x00e9: 0x03bf, # GREEK SMALL LETTER OMICRON
- 0x00ea: 0x03c0, # GREEK SMALL LETTER PI
- 0x00eb: 0x03c1, # GREEK SMALL LETTER RHO
- 0x00ec: 0x03c3, # GREEK SMALL LETTER SIGMA
- 0x00ed: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
- 0x00ee: 0x03c4, # GREEK SMALL LETTER TAU
- 0x00ef: 0x0384, # GREEK TONOS
- 0x00f0: 0x00ad, # SOFT HYPHEN
- 0x00f1: 0x00b1, # PLUS-MINUS SIGN
- 0x00f2: 0x03c5, # GREEK SMALL LETTER UPSILON
- 0x00f3: 0x03c6, # GREEK SMALL LETTER PHI
- 0x00f4: 0x03c7, # GREEK SMALL LETTER CHI
- 0x00f5: 0x00a7, # SECTION SIGN
- 0x00f6: 0x03c8, # GREEK SMALL LETTER PSI
- 0x00f7: 0x0385, # GREEK DIALYTIKA TONOS
- 0x00f8: 0x00b0, # DEGREE SIGN
- 0x00f9: 0x00a8, # DIAERESIS
- 0x00fa: 0x03c9, # GREEK SMALL LETTER OMEGA
- 0x00fb: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- 0x00fc: 0x03b0, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
- 0x00fd: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
- 0x00fe: 0x25a0, # BLACK SQUARE
- 0x00ff: 0x00a0, # NO-BREAK SPACE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> NULL
- u'\x01' # 0x0001 -> START OF HEADING
- u'\x02' # 0x0002 -> START OF TEXT
- u'\x03' # 0x0003 -> END OF TEXT
- u'\x04' # 0x0004 -> END OF TRANSMISSION
- u'\x05' # 0x0005 -> ENQUIRY
- u'\x06' # 0x0006 -> ACKNOWLEDGE
- u'\x07' # 0x0007 -> BELL
- u'\x08' # 0x0008 -> BACKSPACE
- u'\t' # 0x0009 -> HORIZONTAL TABULATION
- u'\n' # 0x000a -> LINE FEED
- u'\x0b' # 0x000b -> VERTICAL TABULATION
- u'\x0c' # 0x000c -> FORM FEED
- u'\r' # 0x000d -> CARRIAGE RETURN
- u'\x0e' # 0x000e -> SHIFT OUT
- u'\x0f' # 0x000f -> SHIFT IN
- u'\x10' # 0x0010 -> DATA LINK ESCAPE
- u'\x11' # 0x0011 -> DEVICE CONTROL ONE
- u'\x12' # 0x0012 -> DEVICE CONTROL TWO
- u'\x13' # 0x0013 -> DEVICE CONTROL THREE
- u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
- u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
- u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x0018 -> CANCEL
- u'\x19' # 0x0019 -> END OF MEDIUM
- u'\x1a' # 0x001a -> SUBSTITUTE
- u'\x1b' # 0x001b -> ESCAPE
- u'\x1c' # 0x001c -> FILE SEPARATOR
- u'\x1d' # 0x001d -> GROUP SEPARATOR
- u'\x1e' # 0x001e -> RECORD SEPARATOR
- u'\x1f' # 0x001f -> UNIT SEPARATOR
- u' ' # 0x0020 -> SPACE
- u'!' # 0x0021 -> EXCLAMATION MARK
- u'"' # 0x0022 -> QUOTATION MARK
- u'#' # 0x0023 -> NUMBER SIGN
- u'$' # 0x0024 -> DOLLAR SIGN
- u'%' # 0x0025 -> PERCENT SIGN
- u'&' # 0x0026 -> AMPERSAND
- u"'" # 0x0027 -> APOSTROPHE
- u'(' # 0x0028 -> LEFT PARENTHESIS
- u')' # 0x0029 -> RIGHT PARENTHESIS
- u'*' # 0x002a -> ASTERISK
- u'+' # 0x002b -> PLUS SIGN
- u',' # 0x002c -> COMMA
- u'-' # 0x002d -> HYPHEN-MINUS
- u'.' # 0x002e -> FULL STOP
- u'/' # 0x002f -> SOLIDUS
- u'0' # 0x0030 -> DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE
- u':' # 0x003a -> COLON
- u';' # 0x003b -> SEMICOLON
- u'<' # 0x003c -> LESS-THAN SIGN
- u'=' # 0x003d -> EQUALS SIGN
- u'>' # 0x003e -> GREATER-THAN SIGN
- u'?' # 0x003f -> QUESTION MARK
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET
- u'\\' # 0x005c -> REVERSE SOLIDUS
- u']' # 0x005d -> RIGHT SQUARE BRACKET
- u'^' # 0x005e -> CIRCUMFLEX ACCENT
- u'_' # 0x005f -> LOW LINE
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET
- u'|' # 0x007c -> VERTICAL LINE
- u'}' # 0x007d -> RIGHT CURLY BRACKET
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> DELETE
- u'\ufffe' # 0x0080 -> UNDEFINED
- u'\ufffe' # 0x0081 -> UNDEFINED
- u'\ufffe' # 0x0082 -> UNDEFINED
- u'\ufffe' # 0x0083 -> UNDEFINED
- u'\ufffe' # 0x0084 -> UNDEFINED
- u'\ufffe' # 0x0085 -> UNDEFINED
- u'\u0386' # 0x0086 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
- u'\ufffe' # 0x0087 -> UNDEFINED
- u'\xb7' # 0x0088 -> MIDDLE DOT
- u'\xac' # 0x0089 -> NOT SIGN
- u'\xa6' # 0x008a -> BROKEN BAR
- u'\u2018' # 0x008b -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x008c -> RIGHT SINGLE QUOTATION MARK
- u'\u0388' # 0x008d -> GREEK CAPITAL LETTER EPSILON WITH TONOS
- u'\u2015' # 0x008e -> HORIZONTAL BAR
- u'\u0389' # 0x008f -> GREEK CAPITAL LETTER ETA WITH TONOS
- u'\u038a' # 0x0090 -> GREEK CAPITAL LETTER IOTA WITH TONOS
- u'\u03aa' # 0x0091 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- u'\u038c' # 0x0092 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
- u'\ufffe' # 0x0093 -> UNDEFINED
- u'\ufffe' # 0x0094 -> UNDEFINED
- u'\u038e' # 0x0095 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
- u'\u03ab' # 0x0096 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- u'\xa9' # 0x0097 -> COPYRIGHT SIGN
- u'\u038f' # 0x0098 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
- u'\xb2' # 0x0099 -> SUPERSCRIPT TWO
- u'\xb3' # 0x009a -> SUPERSCRIPT THREE
- u'\u03ac' # 0x009b -> GREEK SMALL LETTER ALPHA WITH TONOS
- u'\xa3' # 0x009c -> POUND SIGN
- u'\u03ad' # 0x009d -> GREEK SMALL LETTER EPSILON WITH TONOS
- u'\u03ae' # 0x009e -> GREEK SMALL LETTER ETA WITH TONOS
- u'\u03af' # 0x009f -> GREEK SMALL LETTER IOTA WITH TONOS
- u'\u03ca' # 0x00a0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
- u'\u0390' # 0x00a1 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
- u'\u03cc' # 0x00a2 -> GREEK SMALL LETTER OMICRON WITH TONOS
- u'\u03cd' # 0x00a3 -> GREEK SMALL LETTER UPSILON WITH TONOS
- u'\u0391' # 0x00a4 -> GREEK CAPITAL LETTER ALPHA
- u'\u0392' # 0x00a5 -> GREEK CAPITAL LETTER BETA
- u'\u0393' # 0x00a6 -> GREEK CAPITAL LETTER GAMMA
- u'\u0394' # 0x00a7 -> GREEK CAPITAL LETTER DELTA
- u'\u0395' # 0x00a8 -> GREEK CAPITAL LETTER EPSILON
- u'\u0396' # 0x00a9 -> GREEK CAPITAL LETTER ZETA
- u'\u0397' # 0x00aa -> GREEK CAPITAL LETTER ETA
- u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
- u'\u0398' # 0x00ac -> GREEK CAPITAL LETTER THETA
- u'\u0399' # 0x00ad -> GREEK CAPITAL LETTER IOTA
- u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2591' # 0x00b0 -> LIGHT SHADE
- u'\u2592' # 0x00b1 -> MEDIUM SHADE
- u'\u2593' # 0x00b2 -> DARK SHADE
- u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u039a' # 0x00b5 -> GREEK CAPITAL LETTER KAPPA
- u'\u039b' # 0x00b6 -> GREEK CAPITAL LETTER LAMDA
- u'\u039c' # 0x00b7 -> GREEK CAPITAL LETTER MU
- u'\u039d' # 0x00b8 -> GREEK CAPITAL LETTER NU
- u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u039e' # 0x00bd -> GREEK CAPITAL LETTER XI
- u'\u039f' # 0x00be -> GREEK CAPITAL LETTER OMICRON
- u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u03a0' # 0x00c6 -> GREEK CAPITAL LETTER PI
- u'\u03a1' # 0x00c7 -> GREEK CAPITAL LETTER RHO
- u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\u03a3' # 0x00cf -> GREEK CAPITAL LETTER SIGMA
- u'\u03a4' # 0x00d0 -> GREEK CAPITAL LETTER TAU
- u'\u03a5' # 0x00d1 -> GREEK CAPITAL LETTER UPSILON
- u'\u03a6' # 0x00d2 -> GREEK CAPITAL LETTER PHI
- u'\u03a7' # 0x00d3 -> GREEK CAPITAL LETTER CHI
- u'\u03a8' # 0x00d4 -> GREEK CAPITAL LETTER PSI
- u'\u03a9' # 0x00d5 -> GREEK CAPITAL LETTER OMEGA
- u'\u03b1' # 0x00d6 -> GREEK SMALL LETTER ALPHA
- u'\u03b2' # 0x00d7 -> GREEK SMALL LETTER BETA
- u'\u03b3' # 0x00d8 -> GREEK SMALL LETTER GAMMA
- u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2588' # 0x00db -> FULL BLOCK
- u'\u2584' # 0x00dc -> LOWER HALF BLOCK
- u'\u03b4' # 0x00dd -> GREEK SMALL LETTER DELTA
- u'\u03b5' # 0x00de -> GREEK SMALL LETTER EPSILON
- u'\u2580' # 0x00df -> UPPER HALF BLOCK
- u'\u03b6' # 0x00e0 -> GREEK SMALL LETTER ZETA
- u'\u03b7' # 0x00e1 -> GREEK SMALL LETTER ETA
- u'\u03b8' # 0x00e2 -> GREEK SMALL LETTER THETA
- u'\u03b9' # 0x00e3 -> GREEK SMALL LETTER IOTA
- u'\u03ba' # 0x00e4 -> GREEK SMALL LETTER KAPPA
- u'\u03bb' # 0x00e5 -> GREEK SMALL LETTER LAMDA
- u'\u03bc' # 0x00e6 -> GREEK SMALL LETTER MU
- u'\u03bd' # 0x00e7 -> GREEK SMALL LETTER NU
- u'\u03be' # 0x00e8 -> GREEK SMALL LETTER XI
- u'\u03bf' # 0x00e9 -> GREEK SMALL LETTER OMICRON
- u'\u03c0' # 0x00ea -> GREEK SMALL LETTER PI
- u'\u03c1' # 0x00eb -> GREEK SMALL LETTER RHO
- u'\u03c3' # 0x00ec -> GREEK SMALL LETTER SIGMA
- u'\u03c2' # 0x00ed -> GREEK SMALL LETTER FINAL SIGMA
- u'\u03c4' # 0x00ee -> GREEK SMALL LETTER TAU
- u'\u0384' # 0x00ef -> GREEK TONOS
- u'\xad' # 0x00f0 -> SOFT HYPHEN
- u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
- u'\u03c5' # 0x00f2 -> GREEK SMALL LETTER UPSILON
- u'\u03c6' # 0x00f3 -> GREEK SMALL LETTER PHI
- u'\u03c7' # 0x00f4 -> GREEK SMALL LETTER CHI
- u'\xa7' # 0x00f5 -> SECTION SIGN
- u'\u03c8' # 0x00f6 -> GREEK SMALL LETTER PSI
- u'\u0385' # 0x00f7 -> GREEK DIALYTIKA TONOS
- u'\xb0' # 0x00f8 -> DEGREE SIGN
- u'\xa8' # 0x00f9 -> DIAERESIS
- u'\u03c9' # 0x00fa -> GREEK SMALL LETTER OMEGA
- u'\u03cb' # 0x00fb -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- u'\u03b0' # 0x00fc -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
- u'\u03ce' # 0x00fd -> GREEK SMALL LETTER OMEGA WITH TONOS
- u'\u25a0' # 0x00fe -> BLACK SQUARE
- u'\xa0' # 0x00ff -> NO-BREAK SPACE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # NULL
- 0x0001: 0x0001, # START OF HEADING
- 0x0002: 0x0002, # START OF TEXT
- 0x0003: 0x0003, # END OF TEXT
- 0x0004: 0x0004, # END OF TRANSMISSION
- 0x0005: 0x0005, # ENQUIRY
- 0x0006: 0x0006, # ACKNOWLEDGE
- 0x0007: 0x0007, # BELL
- 0x0008: 0x0008, # BACKSPACE
- 0x0009: 0x0009, # HORIZONTAL TABULATION
- 0x000a: 0x000a, # LINE FEED
- 0x000b: 0x000b, # VERTICAL TABULATION
- 0x000c: 0x000c, # FORM FEED
- 0x000d: 0x000d, # CARRIAGE RETURN
- 0x000e: 0x000e, # SHIFT OUT
- 0x000f: 0x000f, # SHIFT IN
- 0x0010: 0x0010, # DATA LINK ESCAPE
- 0x0011: 0x0011, # DEVICE CONTROL ONE
- 0x0012: 0x0012, # DEVICE CONTROL TWO
- 0x0013: 0x0013, # DEVICE CONTROL THREE
- 0x0014: 0x0014, # DEVICE CONTROL FOUR
- 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
- 0x0016: 0x0016, # SYNCHRONOUS IDLE
- 0x0017: 0x0017, # END OF TRANSMISSION BLOCK
- 0x0018: 0x0018, # CANCEL
- 0x0019: 0x0019, # END OF MEDIUM
- 0x001a: 0x001a, # SUBSTITUTE
- 0x001b: 0x001b, # ESCAPE
- 0x001c: 0x001c, # FILE SEPARATOR
- 0x001d: 0x001d, # GROUP SEPARATOR
- 0x001e: 0x001e, # RECORD SEPARATOR
- 0x001f: 0x001f, # UNIT SEPARATOR
- 0x0020: 0x0020, # SPACE
- 0x0021: 0x0021, # EXCLAMATION MARK
- 0x0022: 0x0022, # QUOTATION MARK
- 0x0023: 0x0023, # NUMBER SIGN
- 0x0024: 0x0024, # DOLLAR SIGN
- 0x0025: 0x0025, # PERCENT SIGN
- 0x0026: 0x0026, # AMPERSAND
- 0x0027: 0x0027, # APOSTROPHE
- 0x0028: 0x0028, # LEFT PARENTHESIS
- 0x0029: 0x0029, # RIGHT PARENTHESIS
- 0x002a: 0x002a, # ASTERISK
- 0x002b: 0x002b, # PLUS SIGN
- 0x002c: 0x002c, # COMMA
- 0x002d: 0x002d, # HYPHEN-MINUS
- 0x002e: 0x002e, # FULL STOP
- 0x002f: 0x002f, # SOLIDUS
- 0x0030: 0x0030, # DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE
- 0x003a: 0x003a, # COLON
- 0x003b: 0x003b, # SEMICOLON
- 0x003c: 0x003c, # LESS-THAN SIGN
- 0x003d: 0x003d, # EQUALS SIGN
- 0x003e: 0x003e, # GREATER-THAN SIGN
- 0x003f: 0x003f, # QUESTION MARK
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET
- 0x005c: 0x005c, # REVERSE SOLIDUS
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT
- 0x005f: 0x005f, # LOW LINE
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET
- 0x007c: 0x007c, # VERTICAL LINE
- 0x007d: 0x007d, # RIGHT CURLY BRACKET
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # DELETE
- 0x00a0: 0x00ff, # NO-BREAK SPACE
- 0x00a3: 0x009c, # POUND SIGN
- 0x00a6: 0x008a, # BROKEN BAR
- 0x00a7: 0x00f5, # SECTION SIGN
- 0x00a8: 0x00f9, # DIAERESIS
- 0x00a9: 0x0097, # COPYRIGHT SIGN
- 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00ac: 0x0089, # NOT SIGN
- 0x00ad: 0x00f0, # SOFT HYPHEN
- 0x00b0: 0x00f8, # DEGREE SIGN
- 0x00b1: 0x00f1, # PLUS-MINUS SIGN
- 0x00b2: 0x0099, # SUPERSCRIPT TWO
- 0x00b3: 0x009a, # SUPERSCRIPT THREE
- 0x00b7: 0x0088, # MIDDLE DOT
- 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
- 0x0384: 0x00ef, # GREEK TONOS
- 0x0385: 0x00f7, # GREEK DIALYTIKA TONOS
- 0x0386: 0x0086, # GREEK CAPITAL LETTER ALPHA WITH TONOS
- 0x0388: 0x008d, # GREEK CAPITAL LETTER EPSILON WITH TONOS
- 0x0389: 0x008f, # GREEK CAPITAL LETTER ETA WITH TONOS
- 0x038a: 0x0090, # GREEK CAPITAL LETTER IOTA WITH TONOS
- 0x038c: 0x0092, # GREEK CAPITAL LETTER OMICRON WITH TONOS
- 0x038e: 0x0095, # GREEK CAPITAL LETTER UPSILON WITH TONOS
- 0x038f: 0x0098, # GREEK CAPITAL LETTER OMEGA WITH TONOS
- 0x0390: 0x00a1, # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
- 0x0391: 0x00a4, # GREEK CAPITAL LETTER ALPHA
- 0x0392: 0x00a5, # GREEK CAPITAL LETTER BETA
- 0x0393: 0x00a6, # GREEK CAPITAL LETTER GAMMA
- 0x0394: 0x00a7, # GREEK CAPITAL LETTER DELTA
- 0x0395: 0x00a8, # GREEK CAPITAL LETTER EPSILON
- 0x0396: 0x00a9, # GREEK CAPITAL LETTER ZETA
- 0x0397: 0x00aa, # GREEK CAPITAL LETTER ETA
- 0x0398: 0x00ac, # GREEK CAPITAL LETTER THETA
- 0x0399: 0x00ad, # GREEK CAPITAL LETTER IOTA
- 0x039a: 0x00b5, # GREEK CAPITAL LETTER KAPPA
- 0x039b: 0x00b6, # GREEK CAPITAL LETTER LAMDA
- 0x039c: 0x00b7, # GREEK CAPITAL LETTER MU
- 0x039d: 0x00b8, # GREEK CAPITAL LETTER NU
- 0x039e: 0x00bd, # GREEK CAPITAL LETTER XI
- 0x039f: 0x00be, # GREEK CAPITAL LETTER OMICRON
- 0x03a0: 0x00c6, # GREEK CAPITAL LETTER PI
- 0x03a1: 0x00c7, # GREEK CAPITAL LETTER RHO
- 0x03a3: 0x00cf, # GREEK CAPITAL LETTER SIGMA
- 0x03a4: 0x00d0, # GREEK CAPITAL LETTER TAU
- 0x03a5: 0x00d1, # GREEK CAPITAL LETTER UPSILON
- 0x03a6: 0x00d2, # GREEK CAPITAL LETTER PHI
- 0x03a7: 0x00d3, # GREEK CAPITAL LETTER CHI
- 0x03a8: 0x00d4, # GREEK CAPITAL LETTER PSI
- 0x03a9: 0x00d5, # GREEK CAPITAL LETTER OMEGA
- 0x03aa: 0x0091, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- 0x03ab: 0x0096, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- 0x03ac: 0x009b, # GREEK SMALL LETTER ALPHA WITH TONOS
- 0x03ad: 0x009d, # GREEK SMALL LETTER EPSILON WITH TONOS
- 0x03ae: 0x009e, # GREEK SMALL LETTER ETA WITH TONOS
- 0x03af: 0x009f, # GREEK SMALL LETTER IOTA WITH TONOS
- 0x03b0: 0x00fc, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
- 0x03b1: 0x00d6, # GREEK SMALL LETTER ALPHA
- 0x03b2: 0x00d7, # GREEK SMALL LETTER BETA
- 0x03b3: 0x00d8, # GREEK SMALL LETTER GAMMA
- 0x03b4: 0x00dd, # GREEK SMALL LETTER DELTA
- 0x03b5: 0x00de, # GREEK SMALL LETTER EPSILON
- 0x03b6: 0x00e0, # GREEK SMALL LETTER ZETA
- 0x03b7: 0x00e1, # GREEK SMALL LETTER ETA
- 0x03b8: 0x00e2, # GREEK SMALL LETTER THETA
- 0x03b9: 0x00e3, # GREEK SMALL LETTER IOTA
- 0x03ba: 0x00e4, # GREEK SMALL LETTER KAPPA
- 0x03bb: 0x00e5, # GREEK SMALL LETTER LAMDA
- 0x03bc: 0x00e6, # GREEK SMALL LETTER MU
- 0x03bd: 0x00e7, # GREEK SMALL LETTER NU
- 0x03be: 0x00e8, # GREEK SMALL LETTER XI
- 0x03bf: 0x00e9, # GREEK SMALL LETTER OMICRON
- 0x03c0: 0x00ea, # GREEK SMALL LETTER PI
- 0x03c1: 0x00eb, # GREEK SMALL LETTER RHO
- 0x03c2: 0x00ed, # GREEK SMALL LETTER FINAL SIGMA
- 0x03c3: 0x00ec, # GREEK SMALL LETTER SIGMA
- 0x03c4: 0x00ee, # GREEK SMALL LETTER TAU
- 0x03c5: 0x00f2, # GREEK SMALL LETTER UPSILON
- 0x03c6: 0x00f3, # GREEK SMALL LETTER PHI
- 0x03c7: 0x00f4, # GREEK SMALL LETTER CHI
- 0x03c8: 0x00f6, # GREEK SMALL LETTER PSI
- 0x03c9: 0x00fa, # GREEK SMALL LETTER OMEGA
- 0x03ca: 0x00a0, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
- 0x03cb: 0x00fb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- 0x03cc: 0x00a2, # GREEK SMALL LETTER OMICRON WITH TONOS
- 0x03cd: 0x00a3, # GREEK SMALL LETTER UPSILON WITH TONOS
- 0x03ce: 0x00fd, # GREEK SMALL LETTER OMEGA WITH TONOS
- 0x2015: 0x008e, # HORIZONTAL BAR
- 0x2018: 0x008b, # LEFT SINGLE QUOTATION MARK
- 0x2019: 0x008c, # RIGHT SINGLE QUOTATION MARK
- 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
- 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
- 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
- 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
- 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
- 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
- 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
- 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
- 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
- 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
- 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
- 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
- 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
- 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
- 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- 0x2580: 0x00df, # UPPER HALF BLOCK
- 0x2584: 0x00dc, # LOWER HALF BLOCK
- 0x2588: 0x00db, # FULL BLOCK
- 0x2591: 0x00b0, # LIGHT SHADE
- 0x2592: 0x00b1, # MEDIUM SHADE
- 0x2593: 0x00b2, # DARK SHADE
- 0x25a0: 0x00fe, # BLACK SQUARE
-}
diff --git a/sys/lib/python/encodings/cp874.py b/sys/lib/python/encodings/cp874.py
deleted file mode 100644
index 6110f46e5..000000000
--- a/sys/lib/python/encodings/cp874.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp874',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u20ac' # 0x80 -> EURO SIGN
- u'\ufffe' # 0x81 -> UNDEFINED
- u'\ufffe' # 0x82 -> UNDEFINED
- u'\ufffe' # 0x83 -> UNDEFINED
- u'\ufffe' # 0x84 -> UNDEFINED
- u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
- u'\ufffe' # 0x86 -> UNDEFINED
- u'\ufffe' # 0x87 -> UNDEFINED
- u'\ufffe' # 0x88 -> UNDEFINED
- u'\ufffe' # 0x89 -> UNDEFINED
- u'\ufffe' # 0x8A -> UNDEFINED
- u'\ufffe' # 0x8B -> UNDEFINED
- u'\ufffe' # 0x8C -> UNDEFINED
- u'\ufffe' # 0x8D -> UNDEFINED
- u'\ufffe' # 0x8E -> UNDEFINED
- u'\ufffe' # 0x8F -> UNDEFINED
- u'\ufffe' # 0x90 -> UNDEFINED
- u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
- u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2022' # 0x95 -> BULLET
- u'\u2013' # 0x96 -> EN DASH
- u'\u2014' # 0x97 -> EM DASH
- u'\ufffe' # 0x98 -> UNDEFINED
- u'\ufffe' # 0x99 -> UNDEFINED
- u'\ufffe' # 0x9A -> UNDEFINED
- u'\ufffe' # 0x9B -> UNDEFINED
- u'\ufffe' # 0x9C -> UNDEFINED
- u'\ufffe' # 0x9D -> UNDEFINED
- u'\ufffe' # 0x9E -> UNDEFINED
- u'\ufffe' # 0x9F -> UNDEFINED
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
- u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
- u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
- u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
- u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
- u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
- u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
- u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
- u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
- u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
- u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
- u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
- u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
- u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
- u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
- u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
- u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
- u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
- u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
- u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
- u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
- u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
- u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
- u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
- u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
- u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
- u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
- u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
- u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
- u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
- u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
- u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
- u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
- u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
- u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
- u'\u0e24' # 0xC4 -> THAI CHARACTER RU
- u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
- u'\u0e26' # 0xC6 -> THAI CHARACTER LU
- u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
- u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
- u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
- u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
- u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
- u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
- u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
- u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
- u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
- u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
- u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
- u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
- u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
- u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
- u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
- u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
- u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
- u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
- u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
- u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
- u'\ufffe' # 0xDB -> UNDEFINED
- u'\ufffe' # 0xDC -> UNDEFINED
- u'\ufffe' # 0xDD -> UNDEFINED
- u'\ufffe' # 0xDE -> UNDEFINED
- u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
- u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
- u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
- u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
- u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
- u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
- u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
- u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
- u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
- u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
- u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
- u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
- u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
- u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
- u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
- u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
- u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
- u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
- u'\u0e51' # 0xF1 -> THAI DIGIT ONE
- u'\u0e52' # 0xF2 -> THAI DIGIT TWO
- u'\u0e53' # 0xF3 -> THAI DIGIT THREE
- u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
- u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
- u'\u0e56' # 0xF6 -> THAI DIGIT SIX
- u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
- u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
- u'\u0e59' # 0xF9 -> THAI DIGIT NINE
- u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
- u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
- u'\ufffe' # 0xFC -> UNDEFINED
- u'\ufffe' # 0xFD -> UNDEFINED
- u'\ufffe' # 0xFE -> UNDEFINED
- u'\ufffe' # 0xFF -> UNDEFINED
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp875.py b/sys/lib/python/encodings/cp875.py
deleted file mode 100644
index 72b160b02..000000000
--- a/sys/lib/python/encodings/cp875.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp875',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x9c' # 0x04 -> CONTROL
- u'\t' # 0x05 -> HORIZONTAL TABULATION
- u'\x86' # 0x06 -> CONTROL
- u'\x7f' # 0x07 -> DELETE
- u'\x97' # 0x08 -> CONTROL
- u'\x8d' # 0x09 -> CONTROL
- u'\x8e' # 0x0A -> CONTROL
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x9d' # 0x14 -> CONTROL
- u'\x85' # 0x15 -> CONTROL
- u'\x08' # 0x16 -> BACKSPACE
- u'\x87' # 0x17 -> CONTROL
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x92' # 0x1A -> CONTROL
- u'\x8f' # 0x1B -> CONTROL
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u'\x80' # 0x20 -> CONTROL
- u'\x81' # 0x21 -> CONTROL
- u'\x82' # 0x22 -> CONTROL
- u'\x83' # 0x23 -> CONTROL
- u'\x84' # 0x24 -> CONTROL
- u'\n' # 0x25 -> LINE FEED
- u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
- u'\x1b' # 0x27 -> ESCAPE
- u'\x88' # 0x28 -> CONTROL
- u'\x89' # 0x29 -> CONTROL
- u'\x8a' # 0x2A -> CONTROL
- u'\x8b' # 0x2B -> CONTROL
- u'\x8c' # 0x2C -> CONTROL
- u'\x05' # 0x2D -> ENQUIRY
- u'\x06' # 0x2E -> ACKNOWLEDGE
- u'\x07' # 0x2F -> BELL
- u'\x90' # 0x30 -> CONTROL
- u'\x91' # 0x31 -> CONTROL
- u'\x16' # 0x32 -> SYNCHRONOUS IDLE
- u'\x93' # 0x33 -> CONTROL
- u'\x94' # 0x34 -> CONTROL
- u'\x95' # 0x35 -> CONTROL
- u'\x96' # 0x36 -> CONTROL
- u'\x04' # 0x37 -> END OF TRANSMISSION
- u'\x98' # 0x38 -> CONTROL
- u'\x99' # 0x39 -> CONTROL
- u'\x9a' # 0x3A -> CONTROL
- u'\x9b' # 0x3B -> CONTROL
- u'\x14' # 0x3C -> DEVICE CONTROL FOUR
- u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
- u'\x9e' # 0x3E -> CONTROL
- u'\x1a' # 0x3F -> SUBSTITUTE
- u' ' # 0x40 -> SPACE
- u'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
- u'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
- u'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
- u'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
- u'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
- u'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
- u'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
- u'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
- u'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
- u'[' # 0x4A -> LEFT SQUARE BRACKET
- u'.' # 0x4B -> FULL STOP
- u'<' # 0x4C -> LESS-THAN SIGN
- u'(' # 0x4D -> LEFT PARENTHESIS
- u'+' # 0x4E -> PLUS SIGN
- u'!' # 0x4F -> EXCLAMATION MARK
- u'&' # 0x50 -> AMPERSAND
- u'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
- u'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
- u'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
- u'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
- u'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
- u'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
- u'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
- u'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
- u'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
- u']' # 0x5A -> RIGHT SQUARE BRACKET
- u'$' # 0x5B -> DOLLAR SIGN
- u'*' # 0x5C -> ASTERISK
- u')' # 0x5D -> RIGHT PARENTHESIS
- u';' # 0x5E -> SEMICOLON
- u'^' # 0x5F -> CIRCUMFLEX ACCENT
- u'-' # 0x60 -> HYPHEN-MINUS
- u'/' # 0x61 -> SOLIDUS
- u'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
- u'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
- u'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
- u'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
- u'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
- u'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
- u'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- u'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- u'|' # 0x6A -> VERTICAL LINE
- u',' # 0x6B -> COMMA
- u'%' # 0x6C -> PERCENT SIGN
- u'_' # 0x6D -> LOW LINE
- u'>' # 0x6E -> GREATER-THAN SIGN
- u'?' # 0x6F -> QUESTION MARK
- u'\xa8' # 0x70 -> DIAERESIS
- u'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
- u'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
- u'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
- u'\xa0' # 0x74 -> NO-BREAK SPACE
- u'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
- u'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
- u'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
- u'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
- u'`' # 0x79 -> GRAVE ACCENT
- u':' # 0x7A -> COLON
- u'#' # 0x7B -> NUMBER SIGN
- u'@' # 0x7C -> COMMERCIAL AT
- u"'" # 0x7D -> APOSTROPHE
- u'=' # 0x7E -> EQUALS SIGN
- u'"' # 0x7F -> QUOTATION MARK
- u'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
- u'a' # 0x81 -> LATIN SMALL LETTER A
- u'b' # 0x82 -> LATIN SMALL LETTER B
- u'c' # 0x83 -> LATIN SMALL LETTER C
- u'd' # 0x84 -> LATIN SMALL LETTER D
- u'e' # 0x85 -> LATIN SMALL LETTER E
- u'f' # 0x86 -> LATIN SMALL LETTER F
- u'g' # 0x87 -> LATIN SMALL LETTER G
- u'h' # 0x88 -> LATIN SMALL LETTER H
- u'i' # 0x89 -> LATIN SMALL LETTER I
- u'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
- u'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
- u'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
- u'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
- u'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
- u'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
- u'\xb0' # 0x90 -> DEGREE SIGN
- u'j' # 0x91 -> LATIN SMALL LETTER J
- u'k' # 0x92 -> LATIN SMALL LETTER K
- u'l' # 0x93 -> LATIN SMALL LETTER L
- u'm' # 0x94 -> LATIN SMALL LETTER M
- u'n' # 0x95 -> LATIN SMALL LETTER N
- u'o' # 0x96 -> LATIN SMALL LETTER O
- u'p' # 0x97 -> LATIN SMALL LETTER P
- u'q' # 0x98 -> LATIN SMALL LETTER Q
- u'r' # 0x99 -> LATIN SMALL LETTER R
- u'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
- u'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
- u'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
- u'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
- u'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
- u'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
- u'\xb4' # 0xA0 -> ACUTE ACCENT
- u'~' # 0xA1 -> TILDE
- u's' # 0xA2 -> LATIN SMALL LETTER S
- u't' # 0xA3 -> LATIN SMALL LETTER T
- u'u' # 0xA4 -> LATIN SMALL LETTER U
- u'v' # 0xA5 -> LATIN SMALL LETTER V
- u'w' # 0xA6 -> LATIN SMALL LETTER W
- u'x' # 0xA7 -> LATIN SMALL LETTER X
- u'y' # 0xA8 -> LATIN SMALL LETTER Y
- u'z' # 0xA9 -> LATIN SMALL LETTER Z
- u'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
- u'\u03be' # 0xAB -> GREEK SMALL LETTER XI
- u'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
- u'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
- u'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
- u'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
- u'\xa3' # 0xB0 -> POUND SIGN
- u'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
- u'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
- u'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
- u'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
- u'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
- u'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
- u'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
- u'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- u'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
- u'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
- u'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
- u'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
- u'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
- u'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
- u'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
- u'{' # 0xC0 -> LEFT CURLY BRACKET
- u'A' # 0xC1 -> LATIN CAPITAL LETTER A
- u'B' # 0xC2 -> LATIN CAPITAL LETTER B
- u'C' # 0xC3 -> LATIN CAPITAL LETTER C
- u'D' # 0xC4 -> LATIN CAPITAL LETTER D
- u'E' # 0xC5 -> LATIN CAPITAL LETTER E
- u'F' # 0xC6 -> LATIN CAPITAL LETTER F
- u'G' # 0xC7 -> LATIN CAPITAL LETTER G
- u'H' # 0xC8 -> LATIN CAPITAL LETTER H
- u'I' # 0xC9 -> LATIN CAPITAL LETTER I
- u'\xad' # 0xCA -> SOFT HYPHEN
- u'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
- u'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
- u'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
- u'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
- u'\u2015' # 0xCF -> HORIZONTAL BAR
- u'}' # 0xD0 -> RIGHT CURLY BRACKET
- u'J' # 0xD1 -> LATIN CAPITAL LETTER J
- u'K' # 0xD2 -> LATIN CAPITAL LETTER K
- u'L' # 0xD3 -> LATIN CAPITAL LETTER L
- u'M' # 0xD4 -> LATIN CAPITAL LETTER M
- u'N' # 0xD5 -> LATIN CAPITAL LETTER N
- u'O' # 0xD6 -> LATIN CAPITAL LETTER O
- u'P' # 0xD7 -> LATIN CAPITAL LETTER P
- u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
- u'R' # 0xD9 -> LATIN CAPITAL LETTER R
- u'\xb1' # 0xDA -> PLUS-MINUS SIGN
- u'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
- u'\x1a' # 0xDC -> SUBSTITUTE
- u'\u0387' # 0xDD -> GREEK ANO TELEIA
- u'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
- u'\xa6' # 0xDF -> BROKEN BAR
- u'\\' # 0xE0 -> REVERSE SOLIDUS
- u'\x1a' # 0xE1 -> SUBSTITUTE
- u'S' # 0xE2 -> LATIN CAPITAL LETTER S
- u'T' # 0xE3 -> LATIN CAPITAL LETTER T
- u'U' # 0xE4 -> LATIN CAPITAL LETTER U
- u'V' # 0xE5 -> LATIN CAPITAL LETTER V
- u'W' # 0xE6 -> LATIN CAPITAL LETTER W
- u'X' # 0xE7 -> LATIN CAPITAL LETTER X
- u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
- u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
- u'\xb2' # 0xEA -> SUPERSCRIPT TWO
- u'\xa7' # 0xEB -> SECTION SIGN
- u'\x1a' # 0xEC -> SUBSTITUTE
- u'\x1a' # 0xED -> SUBSTITUTE
- u'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xEF -> NOT SIGN
- u'0' # 0xF0 -> DIGIT ZERO
- u'1' # 0xF1 -> DIGIT ONE
- u'2' # 0xF2 -> DIGIT TWO
- u'3' # 0xF3 -> DIGIT THREE
- u'4' # 0xF4 -> DIGIT FOUR
- u'5' # 0xF5 -> DIGIT FIVE
- u'6' # 0xF6 -> DIGIT SIX
- u'7' # 0xF7 -> DIGIT SEVEN
- u'8' # 0xF8 -> DIGIT EIGHT
- u'9' # 0xF9 -> DIGIT NINE
- u'\xb3' # 0xFA -> SUPERSCRIPT THREE
- u'\xa9' # 0xFB -> COPYRIGHT SIGN
- u'\x1a' # 0xFC -> SUBSTITUTE
- u'\x1a' # 0xFD -> SUBSTITUTE
- u'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\x9f' # 0xFF -> CONTROL
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/cp932.py b/sys/lib/python/encodings/cp932.py
deleted file mode 100644
index e01f59b71..000000000
--- a/sys/lib/python/encodings/cp932.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# cp932.py: Python Unicode Codec for CP932
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_jp, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_jp.getcodec('cp932')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp932',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/cp949.py b/sys/lib/python/encodings/cp949.py
deleted file mode 100644
index 627c87125..000000000
--- a/sys/lib/python/encodings/cp949.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# cp949.py: Python Unicode Codec for CP949
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_kr, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_kr.getcodec('cp949')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp949',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/cp950.py b/sys/lib/python/encodings/cp950.py
deleted file mode 100644
index 39eec5ed0..000000000
--- a/sys/lib/python/encodings/cp950.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# cp950.py: Python Unicode Codec for CP950
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_tw, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_tw.getcodec('cp950')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='cp950',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/euc_jis_2004.py b/sys/lib/python/encodings/euc_jis_2004.py
deleted file mode 100644
index 72b87aea6..000000000
--- a/sys/lib/python/encodings/euc_jis_2004.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_jp, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_jp.getcodec('euc_jis_2004')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='euc_jis_2004',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/euc_jisx0213.py b/sys/lib/python/encodings/euc_jisx0213.py
deleted file mode 100644
index cc47d0411..000000000
--- a/sys/lib/python/encodings/euc_jisx0213.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# euc_jisx0213.py: Python Unicode Codec for EUC_JISX0213
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_jp, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_jp.getcodec('euc_jisx0213')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='euc_jisx0213',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/euc_jp.py b/sys/lib/python/encodings/euc_jp.py
deleted file mode 100644
index 7bcbe4147..000000000
--- a/sys/lib/python/encodings/euc_jp.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# euc_jp.py: Python Unicode Codec for EUC_JP
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_jp, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_jp.getcodec('euc_jp')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='euc_jp',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/euc_kr.py b/sys/lib/python/encodings/euc_kr.py
deleted file mode 100644
index c1fb1260e..000000000
--- a/sys/lib/python/encodings/euc_kr.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# euc_kr.py: Python Unicode Codec for EUC_KR
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_kr, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_kr.getcodec('euc_kr')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='euc_kr',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/gb18030.py b/sys/lib/python/encodings/gb18030.py
deleted file mode 100644
index 34fb6c366..000000000
--- a/sys/lib/python/encodings/gb18030.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# gb18030.py: Python Unicode Codec for GB18030
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_cn, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_cn.getcodec('gb18030')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='gb18030',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/gb2312.py b/sys/lib/python/encodings/gb2312.py
deleted file mode 100644
index 3c3b837d6..000000000
--- a/sys/lib/python/encodings/gb2312.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# gb2312.py: Python Unicode Codec for GB2312
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_cn, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_cn.getcodec('gb2312')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='gb2312',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/gbk.py b/sys/lib/python/encodings/gbk.py
deleted file mode 100644
index 1b45db898..000000000
--- a/sys/lib/python/encodings/gbk.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# gbk.py: Python Unicode Codec for GBK
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_cn, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_cn.getcodec('gbk')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='gbk',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/hex_codec.py b/sys/lib/python/encodings/hex_codec.py
deleted file mode 100644
index 91b38d952..000000000
--- a/sys/lib/python/encodings/hex_codec.py
+++ /dev/null
@@ -1,79 +0,0 @@
-""" Python 'hex_codec' Codec - 2-digit hex content transfer encoding
-
- Unlike most of the other codecs which target Unicode, this codec
- will return Python string objects for both encode and decode.
-
- Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-"""
-import codecs, binascii
-
-### Codec APIs
-
-def hex_encode(input,errors='strict'):
-
- """ Encodes the object input and returns a tuple (output
- object, length consumed).
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- output = binascii.b2a_hex(input)
- return (output, len(input))
-
-def hex_decode(input,errors='strict'):
-
- """ Decodes the object input and returns a tuple (output
- object, length consumed).
-
- input must be an object which provides the bf_getreadbuf
- buffer slot. Python strings, buffer objects and memory
- mapped files are examples of objects providing this slot.
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- output = binascii.a2b_hex(input)
- return (output, len(input))
-
-class Codec(codecs.Codec):
-
- def encode(self, input,errors='strict'):
- return hex_encode(input,errors)
- def decode(self, input,errors='strict'):
- return hex_decode(input,errors)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- assert self.errors == 'strict'
- return binascii.b2a_hex(input)
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- assert self.errors == 'strict'
- return binascii.a2b_hex(input)
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='hex',
- encode=hex_encode,
- decode=hex_decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/hp_roman8.py b/sys/lib/python/encodings/hp_roman8.py
deleted file mode 100644
index dbaaa72d7..000000000
--- a/sys/lib/python/encodings/hp_roman8.py
+++ /dev/null
@@ -1,152 +0,0 @@
-""" Python Character Mapping Codec generated from 'hp_roman8.txt' with gencodec.py.
-
- Based on data from ftp://dkuug.dk/i18n/charmaps/HP-ROMAN8 (Keld Simonsen)
-
- Original source: LaserJet IIP Printer User's Manual HP part no
- 33471-90901, Hewlet-Packard, June 1989.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_map)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_map)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='hp-roman8',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x00a1: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
- 0x00a2: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- 0x00a3: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
- 0x00a4: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- 0x00a5: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
- 0x00a6: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- 0x00a7: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
- 0x00a8: 0x00b4, # ACUTE ACCENT
- 0x00a9: 0x02cb, # MODIFIER LETTER GRAVE ACCENT (Mandarin Chinese fourth tone)
- 0x00aa: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
- 0x00ab: 0x00a8, # DIAERESIS
- 0x00ac: 0x02dc, # SMALL TILDE
- 0x00ad: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
- 0x00ae: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- 0x00af: 0x20a4, # LIRA SIGN
- 0x00b0: 0x00af, # MACRON
- 0x00b1: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
- 0x00b2: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
- 0x00b3: 0x00b0, # DEGREE SIGN
- 0x00b4: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00b5: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00b6: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00b7: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
- 0x00b8: 0x00a1, # INVERTED EXCLAMATION MARK
- 0x00b9: 0x00bf, # INVERTED QUESTION MARK
- 0x00ba: 0x00a4, # CURRENCY SIGN
- 0x00bb: 0x00a3, # POUND SIGN
- 0x00bc: 0x00a5, # YEN SIGN
- 0x00bd: 0x00a7, # SECTION SIGN
- 0x00be: 0x0192, # LATIN SMALL LETTER F WITH HOOK
- 0x00bf: 0x00a2, # CENT SIGN
- 0x00c0: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00c1: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00c2: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00c3: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x00c4: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x00c5: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x00c6: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x00c7: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x00c8: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x00c9: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x00ca: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
- 0x00cb: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
- 0x00cc: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00cd: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ce: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00cf: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00d0: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
- 0x00d1: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x00d2: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
- 0x00d3: 0x00c6, # LATIN CAPITAL LETTER AE
- 0x00d4: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
- 0x00d5: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x00d6: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
- 0x00d7: 0x00e6, # LATIN SMALL LETTER AE
- 0x00d8: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00d9: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
- 0x00da: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00db: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00dc: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00dd: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x00de: 0x00df, # LATIN SMALL LETTER SHARP S (German)
- 0x00df: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00e0: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00e1: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
- 0x00e2: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
- 0x00e3: 0x00d0, # LATIN CAPITAL LETTER ETH (Icelandic)
- 0x00e4: 0x00f0, # LATIN SMALL LETTER ETH (Icelandic)
- 0x00e5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00e6: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
- 0x00e7: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00e8: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
- 0x00e9: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00ea: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
- 0x00eb: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
- 0x00ec: 0x0161, # LATIN SMALL LETTER S WITH CARON
- 0x00ed: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00ee: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
- 0x00ef: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
- 0x00f0: 0x00de, # LATIN CAPITAL LETTER THORN (Icelandic)
- 0x00f1: 0x00fe, # LATIN SMALL LETTER THORN (Icelandic)
- 0x00f2: 0x00b7, # MIDDLE DOT
- 0x00f3: 0x00b5, # MICRO SIGN
- 0x00f4: 0x00b6, # PILCROW SIGN
- 0x00f5: 0x00be, # VULGAR FRACTION THREE QUARTERS
- 0x00f6: 0x2014, # EM DASH
- 0x00f7: 0x00bc, # VULGAR FRACTION ONE QUARTER
- 0x00f8: 0x00bd, # VULGAR FRACTION ONE HALF
- 0x00f9: 0x00aa, # FEMININE ORDINAL INDICATOR
- 0x00fa: 0x00ba, # MASCULINE ORDINAL INDICATOR
- 0x00fb: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00fc: 0x25a0, # BLACK SQUARE
- 0x00fd: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00fe: 0x00b1, # PLUS-MINUS SIGN
- 0x00ff: None,
-})
-
-### Encoding Map
-
-encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/sys/lib/python/encodings/hz.py b/sys/lib/python/encodings/hz.py
deleted file mode 100644
index 383442a3c..000000000
--- a/sys/lib/python/encodings/hz.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# hz.py: Python Unicode Codec for HZ
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_cn, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_cn.getcodec('hz')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='hz',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/idna.py b/sys/lib/python/encodings/idna.py
deleted file mode 100644
index ea90d6714..000000000
--- a/sys/lib/python/encodings/idna.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# This module implements the RFCs 3490 (IDNA) and 3491 (Nameprep)
-
-import stringprep, re, codecs
-from unicodedata import ucd_3_2_0 as unicodedata
-
-# IDNA section 3.1
-dots = re.compile(u"[\u002E\u3002\uFF0E\uFF61]")
-
-# IDNA section 5
-ace_prefix = "xn--"
-uace_prefix = unicode(ace_prefix, "ascii")
-
-# This assumes query strings, so AllowUnassigned is true
-def nameprep(label):
- # Map
- newlabel = []
- for c in label:
- if stringprep.in_table_b1(c):
- # Map to nothing
- continue
- newlabel.append(stringprep.map_table_b2(c))
- label = u"".join(newlabel)
-
- # Normalize
- label = unicodedata.normalize("NFKC", label)
-
- # Prohibit
- for c in label:
- if stringprep.in_table_c12(c) or \
- stringprep.in_table_c22(c) or \
- stringprep.in_table_c3(c) or \
- stringprep.in_table_c4(c) or \
- stringprep.in_table_c5(c) or \
- stringprep.in_table_c6(c) or \
- stringprep.in_table_c7(c) or \
- stringprep.in_table_c8(c) or \
- stringprep.in_table_c9(c):
- raise UnicodeError("Invalid character %r" % c)
-
- # Check bidi
- RandAL = map(stringprep.in_table_d1, label)
- for c in RandAL:
- if c:
- # There is a RandAL char in the string. Must perform further
- # tests:
- # 1) The characters in section 5.8 MUST be prohibited.
- # This is table C.8, which was already checked
- # 2) If a string contains any RandALCat character, the string
- # MUST NOT contain any LCat character.
- if filter(stringprep.in_table_d2, label):
- raise UnicodeError("Violation of BIDI requirement 2")
-
- # 3) If a string contains any RandALCat character, a
- # RandALCat character MUST be the first character of the
- # string, and a RandALCat character MUST be the last
- # character of the string.
- if not RandAL[0] or not RandAL[-1]:
- raise UnicodeError("Violation of BIDI requirement 3")
-
- return label
-
-def ToASCII(label):
- try:
- # Step 1: try ASCII
- label = label.encode("ascii")
- except UnicodeError:
- pass
- else:
- # Skip to step 3: UseSTD3ASCIIRules is false, so
- # Skip to step 8.
- if 0 < len(label) < 64:
- return label
- raise UnicodeError("label empty or too long")
-
- # Step 2: nameprep
- label = nameprep(label)
-
- # Step 3: UseSTD3ASCIIRules is false
- # Step 4: try ASCII
- try:
- label = label.encode("ascii")
- except UnicodeError:
- pass
- else:
- # Skip to step 8.
- if 0 < len(label) < 64:
- return label
- raise UnicodeError("label empty or too long")
-
- # Step 5: Check ACE prefix
- if label.startswith(uace_prefix):
- raise UnicodeError("Label starts with ACE prefix")
-
- # Step 6: Encode with PUNYCODE
- label = label.encode("punycode")
-
- # Step 7: Prepend ACE prefix
- label = ace_prefix + label
-
- # Step 8: Check size
- if 0 < len(label) < 64:
- return label
- raise UnicodeError("label empty or too long")
-
-def ToUnicode(label):
- # Step 1: Check for ASCII
- if isinstance(label, str):
- pure_ascii = True
- else:
- try:
- label = label.encode("ascii")
- pure_ascii = True
- except UnicodeError:
- pure_ascii = False
- if not pure_ascii:
- # Step 2: Perform nameprep
- label = nameprep(label)
- # It doesn't say this, but apparently, it should be ASCII now
- try:
- label = label.encode("ascii")
- except UnicodeError:
- raise UnicodeError("Invalid character in IDN label")
- # Step 3: Check for ACE prefix
- if not label.startswith(ace_prefix):
- return unicode(label, "ascii")
-
- # Step 4: Remove ACE prefix
- label1 = label[len(ace_prefix):]
-
- # Step 5: Decode using PUNYCODE
- result = label1.decode("punycode")
-
- # Step 6: Apply ToASCII
- label2 = ToASCII(result)
-
- # Step 7: Compare the result of step 6 with the one of step 3
- # label2 will already be in lower case.
- if label.lower() != label2:
- raise UnicodeError("IDNA does not round-trip", label, label2)
-
- # Step 8: return the result of step 5
- return result
-
-### Codec APIs
-
-class Codec(codecs.Codec):
- def encode(self,input,errors='strict'):
-
- if errors != 'strict':
- # IDNA is quite clear that implementations must be strict
- raise UnicodeError("unsupported error handling "+errors)
-
- if not input:
- return "", 0
-
- result = []
- labels = dots.split(input)
- if labels and len(labels[-1])==0:
- trailing_dot = '.'
- del labels[-1]
- else:
- trailing_dot = ''
- for label in labels:
- result.append(ToASCII(label))
- # Join with U+002E
- return ".".join(result)+trailing_dot, len(input)
-
- def decode(self,input,errors='strict'):
-
- if errors != 'strict':
- raise UnicodeError("Unsupported error handling "+errors)
-
- if not input:
- return u"", 0
-
- # IDNA allows decoding to operate on Unicode strings, too.
- if isinstance(input, unicode):
- labels = dots.split(input)
- else:
- # Must be ASCII string
- input = str(input)
- unicode(input, "ascii")
- labels = input.split(".")
-
- if labels and len(labels[-1]) == 0:
- trailing_dot = u'.'
- del labels[-1]
- else:
- trailing_dot = u''
-
- result = []
- for label in labels:
- result.append(ToUnicode(label))
-
- return u".".join(result)+trailing_dot, len(input)
-
-class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
- def _buffer_encode(self, input, errors, final):
- if errors != 'strict':
- # IDNA is quite clear that implementations must be strict
- raise UnicodeError("unsupported error handling "+errors)
-
- if not input:
- return ("", 0)
-
- labels = dots.split(input)
- trailing_dot = u''
- if labels:
- if not labels[-1]:
- trailing_dot = '.'
- del labels[-1]
- elif not final:
- # Keep potentially unfinished label until the next call
- del labels[-1]
- if labels:
- trailing_dot = '.'
-
- result = []
- size = 0
- for label in labels:
- result.append(ToASCII(label))
- if size:
- size += 1
- size += len(label)
-
- # Join with U+002E
- result = ".".join(result) + trailing_dot
- size += len(trailing_dot)
- return (result, size)
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- def _buffer_decode(self, input, errors, final):
- if errors != 'strict':
- raise UnicodeError("Unsupported error handling "+errors)
-
- if not input:
- return (u"", 0)
-
- # IDNA allows decoding to operate on Unicode strings, too.
- if isinstance(input, unicode):
- labels = dots.split(input)
- else:
- # Must be ASCII string
- input = str(input)
- unicode(input, "ascii")
- labels = input.split(".")
-
- trailing_dot = u''
- if labels:
- if not labels[-1]:
- trailing_dot = u'.'
- del labels[-1]
- elif not final:
- # Keep potentially unfinished label until the next call
- del labels[-1]
- if labels:
- trailing_dot = u'.'
-
- result = []
- size = 0
- for label in labels:
- result.append(ToUnicode(label))
- if size:
- size += 1
- size += len(label)
-
- result = u".".join(result) + trailing_dot
- size += len(trailing_dot)
- return (result, size)
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='idna',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/iso2022_jp.py b/sys/lib/python/encodings/iso2022_jp.py
deleted file mode 100644
index ab0406069..000000000
--- a/sys/lib/python/encodings/iso2022_jp.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# iso2022_jp.py: Python Unicode Codec for ISO2022_JP
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_iso2022, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_iso2022.getcodec('iso2022_jp')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso2022_jp',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/iso2022_jp_1.py b/sys/lib/python/encodings/iso2022_jp_1.py
deleted file mode 100644
index 997044dc3..000000000
--- a/sys/lib/python/encodings/iso2022_jp_1.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# iso2022_jp_1.py: Python Unicode Codec for ISO2022_JP_1
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_iso2022, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_iso2022.getcodec('iso2022_jp_1')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso2022_jp_1',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/iso2022_jp_2.py b/sys/lib/python/encodings/iso2022_jp_2.py
deleted file mode 100644
index 9106bf762..000000000
--- a/sys/lib/python/encodings/iso2022_jp_2.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# iso2022_jp_2.py: Python Unicode Codec for ISO2022_JP_2
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_iso2022, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_iso2022.getcodec('iso2022_jp_2')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso2022_jp_2',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/iso2022_jp_2004.py b/sys/lib/python/encodings/iso2022_jp_2004.py
deleted file mode 100644
index 40198bf09..000000000
--- a/sys/lib/python/encodings/iso2022_jp_2004.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# iso2022_jp_2004.py: Python Unicode Codec for ISO2022_JP_2004
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_iso2022, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_iso2022.getcodec('iso2022_jp_2004')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso2022_jp_2004',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/iso2022_jp_3.py b/sys/lib/python/encodings/iso2022_jp_3.py
deleted file mode 100644
index 346e08bec..000000000
--- a/sys/lib/python/encodings/iso2022_jp_3.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_iso2022, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_iso2022.getcodec('iso2022_jp_3')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso2022_jp_3',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/iso2022_jp_ext.py b/sys/lib/python/encodings/iso2022_jp_ext.py
deleted file mode 100644
index 752bab981..000000000
--- a/sys/lib/python/encodings/iso2022_jp_ext.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_iso2022, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso2022_jp_ext',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/iso2022_kr.py b/sys/lib/python/encodings/iso2022_kr.py
deleted file mode 100644
index bf7018763..000000000
--- a/sys/lib/python/encodings/iso2022_kr.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# iso2022_kr.py: Python Unicode Codec for ISO2022_KR
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_iso2022, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_iso2022.getcodec('iso2022_kr')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso2022_kr',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/iso8859_1.py b/sys/lib/python/encodings/iso8859_1.py
deleted file mode 100644
index 71bc13fcb..000000000
--- a/sys/lib/python/encodings/iso8859_1.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-1',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\xbf' # 0xBF -> INVERTED QUESTION MARK
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
- u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
- u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
- u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_10.py b/sys/lib/python/encodings/iso8859_10.py
deleted file mode 100644
index 757e5c5eb..000000000
--- a/sys/lib/python/encodings/iso8859_10.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_10 generated from 'MAPPINGS/ISO8859/8859-10.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-10',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\u0112' # 0xA2 -> LATIN CAPITAL LETTER E WITH MACRON
- u'\u0122' # 0xA3 -> LATIN CAPITAL LETTER G WITH CEDILLA
- u'\u012a' # 0xA4 -> LATIN CAPITAL LETTER I WITH MACRON
- u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
- u'\u0136' # 0xA6 -> LATIN CAPITAL LETTER K WITH CEDILLA
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\u013b' # 0xA8 -> LATIN CAPITAL LETTER L WITH CEDILLA
- u'\u0110' # 0xA9 -> LATIN CAPITAL LETTER D WITH STROKE
- u'\u0160' # 0xAA -> LATIN CAPITAL LETTER S WITH CARON
- u'\u0166' # 0xAB -> LATIN CAPITAL LETTER T WITH STROKE
- u'\u017d' # 0xAC -> LATIN CAPITAL LETTER Z WITH CARON
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\u016a' # 0xAE -> LATIN CAPITAL LETTER U WITH MACRON
- u'\u014a' # 0xAF -> LATIN CAPITAL LETTER ENG
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u0113' # 0xB2 -> LATIN SMALL LETTER E WITH MACRON
- u'\u0123' # 0xB3 -> LATIN SMALL LETTER G WITH CEDILLA
- u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
- u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
- u'\u0137' # 0xB6 -> LATIN SMALL LETTER K WITH CEDILLA
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\u013c' # 0xB8 -> LATIN SMALL LETTER L WITH CEDILLA
- u'\u0111' # 0xB9 -> LATIN SMALL LETTER D WITH STROKE
- u'\u0161' # 0xBA -> LATIN SMALL LETTER S WITH CARON
- u'\u0167' # 0xBB -> LATIN SMALL LETTER T WITH STROKE
- u'\u017e' # 0xBC -> LATIN SMALL LETTER Z WITH CARON
- u'\u2015' # 0xBD -> HORIZONTAL BAR
- u'\u016b' # 0xBE -> LATIN SMALL LETTER U WITH MACRON
- u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
- u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
- u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
- u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
- u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\u0168' # 0xD7 -> LATIN CAPITAL LETTER U WITH TILDE
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
- u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
- u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
- u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
- u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\u0169' # 0xF7 -> LATIN SMALL LETTER U WITH TILDE
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
- u'\u0138' # 0xFF -> LATIN SMALL LETTER KRA
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_11.py b/sys/lib/python/encodings/iso8859_11.py
deleted file mode 100644
index 27ece8dc7..000000000
--- a/sys/lib/python/encodings/iso8859_11.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-11',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
- u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
- u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
- u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
- u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
- u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
- u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
- u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
- u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
- u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
- u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
- u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
- u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
- u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
- u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
- u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
- u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
- u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
- u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
- u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
- u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
- u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
- u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
- u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
- u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
- u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
- u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
- u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
- u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
- u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
- u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
- u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
- u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
- u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
- u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
- u'\u0e24' # 0xC4 -> THAI CHARACTER RU
- u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
- u'\u0e26' # 0xC6 -> THAI CHARACTER LU
- u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
- u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
- u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
- u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
- u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
- u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
- u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
- u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
- u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
- u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
- u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
- u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
- u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
- u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
- u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
- u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
- u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
- u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
- u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
- u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
- u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
- u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
- u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
- u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
- u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
- u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
- u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
- u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
- u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
- u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
- u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
- u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
- u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
- u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
- u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
- u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
- u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
- u'\u0e51' # 0xF1 -> THAI DIGIT ONE
- u'\u0e52' # 0xF2 -> THAI DIGIT TWO
- u'\u0e53' # 0xF3 -> THAI DIGIT THREE
- u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
- u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
- u'\u0e56' # 0xF6 -> THAI DIGIT SIX
- u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
- u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
- u'\u0e59' # 0xF9 -> THAI DIGIT NINE
- u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
- u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_13.py b/sys/lib/python/encodings/iso8859_13.py
deleted file mode 100644
index 71adb5c19..000000000
--- a/sys/lib/python/encodings/iso8859_13.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_13 generated from 'MAPPINGS/ISO8859/8859-13.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-13',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u201d' # 0xA1 -> RIGHT DOUBLE QUOTATION MARK
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xd8' # 0xA8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u0156' # 0xAA -> LATIN CAPITAL LETTER R WITH CEDILLA
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xc6' # 0xAF -> LATIN CAPITAL LETTER AE
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\u201c' # 0xB4 -> LEFT DOUBLE QUOTATION MARK
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xf8' # 0xB8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\u0157' # 0xBA -> LATIN SMALL LETTER R WITH CEDILLA
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\xe6' # 0xBF -> LATIN SMALL LETTER AE
- u'\u0104' # 0xC0 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\u012e' # 0xC1 -> LATIN CAPITAL LETTER I WITH OGONEK
- u'\u0100' # 0xC2 -> LATIN CAPITAL LETTER A WITH MACRON
- u'\u0106' # 0xC3 -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\u0118' # 0xC6 -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\u0112' # 0xC7 -> LATIN CAPITAL LETTER E WITH MACRON
- u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\u0179' # 0xCA -> LATIN CAPITAL LETTER Z WITH ACUTE
- u'\u0116' # 0xCB -> LATIN CAPITAL LETTER E WITH DOT ABOVE
- u'\u0122' # 0xCC -> LATIN CAPITAL LETTER G WITH CEDILLA
- u'\u0136' # 0xCD -> LATIN CAPITAL LETTER K WITH CEDILLA
- u'\u012a' # 0xCE -> LATIN CAPITAL LETTER I WITH MACRON
- u'\u013b' # 0xCF -> LATIN CAPITAL LETTER L WITH CEDILLA
- u'\u0160' # 0xD0 -> LATIN CAPITAL LETTER S WITH CARON
- u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
- u'\u0145' # 0xD2 -> LATIN CAPITAL LETTER N WITH CEDILLA
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\u014c' # 0xD4 -> LATIN CAPITAL LETTER O WITH MACRON
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\u0172' # 0xD8 -> LATIN CAPITAL LETTER U WITH OGONEK
- u'\u0141' # 0xD9 -> LATIN CAPITAL LETTER L WITH STROKE
- u'\u015a' # 0xDA -> LATIN CAPITAL LETTER S WITH ACUTE
- u'\u016a' # 0xDB -> LATIN CAPITAL LETTER U WITH MACRON
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u017b' # 0xDD -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\u017d' # 0xDE -> LATIN CAPITAL LETTER Z WITH CARON
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
- u'\u0105' # 0xE0 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u012f' # 0xE1 -> LATIN SMALL LETTER I WITH OGONEK
- u'\u0101' # 0xE2 -> LATIN SMALL LETTER A WITH MACRON
- u'\u0107' # 0xE3 -> LATIN SMALL LETTER C WITH ACUTE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\u0119' # 0xE6 -> LATIN SMALL LETTER E WITH OGONEK
- u'\u0113' # 0xE7 -> LATIN SMALL LETTER E WITH MACRON
- u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\u017a' # 0xEA -> LATIN SMALL LETTER Z WITH ACUTE
- u'\u0117' # 0xEB -> LATIN SMALL LETTER E WITH DOT ABOVE
- u'\u0123' # 0xEC -> LATIN SMALL LETTER G WITH CEDILLA
- u'\u0137' # 0xED -> LATIN SMALL LETTER K WITH CEDILLA
- u'\u012b' # 0xEE -> LATIN SMALL LETTER I WITH MACRON
- u'\u013c' # 0xEF -> LATIN SMALL LETTER L WITH CEDILLA
- u'\u0161' # 0xF0 -> LATIN SMALL LETTER S WITH CARON
- u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
- u'\u0146' # 0xF2 -> LATIN SMALL LETTER N WITH CEDILLA
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\u014d' # 0xF4 -> LATIN SMALL LETTER O WITH MACRON
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\u0173' # 0xF8 -> LATIN SMALL LETTER U WITH OGONEK
- u'\u0142' # 0xF9 -> LATIN SMALL LETTER L WITH STROKE
- u'\u015b' # 0xFA -> LATIN SMALL LETTER S WITH ACUTE
- u'\u016b' # 0xFB -> LATIN SMALL LETTER U WITH MACRON
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\u017e' # 0xFE -> LATIN SMALL LETTER Z WITH CARON
- u'\u2019' # 0xFF -> RIGHT SINGLE QUOTATION MARK
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_14.py b/sys/lib/python/encodings/iso8859_14.py
deleted file mode 100644
index 56843d5fd..000000000
--- a/sys/lib/python/encodings/iso8859_14.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_14 generated from 'MAPPINGS/ISO8859/8859-14.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-14',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u1e02' # 0xA1 -> LATIN CAPITAL LETTER B WITH DOT ABOVE
- u'\u1e03' # 0xA2 -> LATIN SMALL LETTER B WITH DOT ABOVE
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\u010a' # 0xA4 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
- u'\u010b' # 0xA5 -> LATIN SMALL LETTER C WITH DOT ABOVE
- u'\u1e0a' # 0xA6 -> LATIN CAPITAL LETTER D WITH DOT ABOVE
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\u1e80' # 0xA8 -> LATIN CAPITAL LETTER W WITH GRAVE
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u1e82' # 0xAA -> LATIN CAPITAL LETTER W WITH ACUTE
- u'\u1e0b' # 0xAB -> LATIN SMALL LETTER D WITH DOT ABOVE
- u'\u1ef2' # 0xAC -> LATIN CAPITAL LETTER Y WITH GRAVE
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\u0178' # 0xAF -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\u1e1e' # 0xB0 -> LATIN CAPITAL LETTER F WITH DOT ABOVE
- u'\u1e1f' # 0xB1 -> LATIN SMALL LETTER F WITH DOT ABOVE
- u'\u0120' # 0xB2 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
- u'\u0121' # 0xB3 -> LATIN SMALL LETTER G WITH DOT ABOVE
- u'\u1e40' # 0xB4 -> LATIN CAPITAL LETTER M WITH DOT ABOVE
- u'\u1e41' # 0xB5 -> LATIN SMALL LETTER M WITH DOT ABOVE
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\u1e56' # 0xB7 -> LATIN CAPITAL LETTER P WITH DOT ABOVE
- u'\u1e81' # 0xB8 -> LATIN SMALL LETTER W WITH GRAVE
- u'\u1e57' # 0xB9 -> LATIN SMALL LETTER P WITH DOT ABOVE
- u'\u1e83' # 0xBA -> LATIN SMALL LETTER W WITH ACUTE
- u'\u1e60' # 0xBB -> LATIN CAPITAL LETTER S WITH DOT ABOVE
- u'\u1ef3' # 0xBC -> LATIN SMALL LETTER Y WITH GRAVE
- u'\u1e84' # 0xBD -> LATIN CAPITAL LETTER W WITH DIAERESIS
- u'\u1e85' # 0xBE -> LATIN SMALL LETTER W WITH DIAERESIS
- u'\u1e61' # 0xBF -> LATIN SMALL LETTER S WITH DOT ABOVE
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\u0174' # 0xD0 -> LATIN CAPITAL LETTER W WITH CIRCUMFLEX
- u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\u1e6a' # 0xD7 -> LATIN CAPITAL LETTER T WITH DOT ABOVE
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\u0176' # 0xDE -> LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\u0175' # 0xF0 -> LATIN SMALL LETTER W WITH CIRCUMFLEX
- u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\u1e6b' # 0xF7 -> LATIN SMALL LETTER T WITH DOT ABOVE
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
- u'\u0177' # 0xFE -> LATIN SMALL LETTER Y WITH CIRCUMFLEX
- u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_15.py b/sys/lib/python/encodings/iso8859_15.py
deleted file mode 100644
index 13b140ca3..000000000
--- a/sys/lib/python/encodings/iso8859_15.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_15 generated from 'MAPPINGS/ISO8859/8859-15.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-15',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\u20ac' # 0xA4 -> EURO SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE
- u'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE
- u'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\xbf' # 0xBF -> INVERTED QUESTION MARK
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH
- u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH
- u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xfe' # 0xFE -> LATIN SMALL LETTER THORN
- u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_16.py b/sys/lib/python/encodings/iso8859_16.py
deleted file mode 100644
index 00b9ac805..000000000
--- a/sys/lib/python/encodings/iso8859_16.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_16 generated from 'MAPPINGS/ISO8859/8859-16.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-16',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\u0105' # 0xA2 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
- u'\u20ac' # 0xA4 -> EURO SIGN
- u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u0218' # 0xAA -> LATIN CAPITAL LETTER S WITH COMMA BELOW
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\u017a' # 0xAE -> LATIN SMALL LETTER Z WITH ACUTE
- u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u010c' # 0xB2 -> LATIN CAPITAL LETTER C WITH CARON
- u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
- u'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON
- u'\u201d' # 0xB5 -> RIGHT DOUBLE QUOTATION MARK
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON
- u'\u010d' # 0xB9 -> LATIN SMALL LETTER C WITH CARON
- u'\u0219' # 0xBA -> LATIN SMALL LETTER S WITH COMMA BELOW
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE
- u'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE
- u'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\u0106' # 0xC5 -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
- u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
- u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\u015a' # 0xD7 -> LATIN CAPITAL LETTER S WITH ACUTE
- u'\u0170' # 0xD8 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u0118' # 0xDD -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u0107' # 0xE5 -> LATIN SMALL LETTER C WITH ACUTE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
- u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
- u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\u015b' # 0xF7 -> LATIN SMALL LETTER S WITH ACUTE
- u'\u0171' # 0xF8 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u0119' # 0xFD -> LATIN SMALL LETTER E WITH OGONEK
- u'\u021b' # 0xFE -> LATIN SMALL LETTER T WITH COMMA BELOW
- u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_2.py b/sys/lib/python/encodings/iso8859_2.py
deleted file mode 100644
index 38e91d8e1..000000000
--- a/sys/lib/python/encodings/iso8859_2.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_2 generated from 'MAPPINGS/ISO8859/8859-2.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-2',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\u02d8' # 0xA2 -> BREVE
- u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\u013d' # 0xA5 -> LATIN CAPITAL LETTER L WITH CARON
- u'\u015a' # 0xA6 -> LATIN CAPITAL LETTER S WITH ACUTE
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
- u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
- u'\u0164' # 0xAB -> LATIN CAPITAL LETTER T WITH CARON
- u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
- u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u02db' # 0xB2 -> OGONEK
- u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\u013e' # 0xB5 -> LATIN SMALL LETTER L WITH CARON
- u'\u015b' # 0xB6 -> LATIN SMALL LETTER S WITH ACUTE
- u'\u02c7' # 0xB7 -> CARON
- u'\xb8' # 0xB8 -> CEDILLA
- u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
- u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
- u'\u0165' # 0xBB -> LATIN SMALL LETTER T WITH CARON
- u'\u017a' # 0xBC -> LATIN SMALL LETTER Z WITH ACUTE
- u'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
- u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
- u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
- u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
- u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
- u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
- u'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
- u'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
- u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
- u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
- u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
- u'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
- u'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
- u'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
- u'\u02d9' # 0xFF -> DOT ABOVE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_3.py b/sys/lib/python/encodings/iso8859_3.py
deleted file mode 100644
index 23daafdbb..000000000
--- a/sys/lib/python/encodings/iso8859_3.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_3 generated from 'MAPPINGS/ISO8859/8859-3.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-3',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0126' # 0xA1 -> LATIN CAPITAL LETTER H WITH STROKE
- u'\u02d8' # 0xA2 -> BREVE
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\ufffe'
- u'\u0124' # 0xA6 -> LATIN CAPITAL LETTER H WITH CIRCUMFLEX
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\u0130' # 0xA9 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
- u'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
- u'\u011e' # 0xAB -> LATIN CAPITAL LETTER G WITH BREVE
- u'\u0134' # 0xAC -> LATIN CAPITAL LETTER J WITH CIRCUMFLEX
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\ufffe'
- u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\u0127' # 0xB1 -> LATIN SMALL LETTER H WITH STROKE
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\u0125' # 0xB6 -> LATIN SMALL LETTER H WITH CIRCUMFLEX
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\u0131' # 0xB9 -> LATIN SMALL LETTER DOTLESS I
- u'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
- u'\u011f' # 0xBB -> LATIN SMALL LETTER G WITH BREVE
- u'\u0135' # 0xBC -> LATIN SMALL LETTER J WITH CIRCUMFLEX
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\ufffe'
- u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\ufffe'
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\u010a' # 0xC5 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
- u'\u0108' # 0xC6 -> LATIN CAPITAL LETTER C WITH CIRCUMFLEX
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\ufffe'
- u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\u0120' # 0xD5 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\u011c' # 0xD8 -> LATIN CAPITAL LETTER G WITH CIRCUMFLEX
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u016c' # 0xDD -> LATIN CAPITAL LETTER U WITH BREVE
- u'\u015c' # 0xDE -> LATIN CAPITAL LETTER S WITH CIRCUMFLEX
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\ufffe'
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u010b' # 0xE5 -> LATIN SMALL LETTER C WITH DOT ABOVE
- u'\u0109' # 0xE6 -> LATIN SMALL LETTER C WITH CIRCUMFLEX
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\ufffe'
- u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\u0121' # 0xF5 -> LATIN SMALL LETTER G WITH DOT ABOVE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\u011d' # 0xF8 -> LATIN SMALL LETTER G WITH CIRCUMFLEX
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u016d' # 0xFD -> LATIN SMALL LETTER U WITH BREVE
- u'\u015d' # 0xFE -> LATIN SMALL LETTER S WITH CIRCUMFLEX
- u'\u02d9' # 0xFF -> DOT ABOVE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_4.py b/sys/lib/python/encodings/iso8859_4.py
deleted file mode 100644
index c8e03b566..000000000
--- a/sys/lib/python/encodings/iso8859_4.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_4 generated from 'MAPPINGS/ISO8859/8859-4.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-4',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\u0138' # 0xA2 -> LATIN SMALL LETTER KRA
- u'\u0156' # 0xA3 -> LATIN CAPITAL LETTER R WITH CEDILLA
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\u0128' # 0xA5 -> LATIN CAPITAL LETTER I WITH TILDE
- u'\u013b' # 0xA6 -> LATIN CAPITAL LETTER L WITH CEDILLA
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
- u'\u0112' # 0xAA -> LATIN CAPITAL LETTER E WITH MACRON
- u'\u0122' # 0xAB -> LATIN CAPITAL LETTER G WITH CEDILLA
- u'\u0166' # 0xAC -> LATIN CAPITAL LETTER T WITH STROKE
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\u0105' # 0xB1 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u02db' # 0xB2 -> OGONEK
- u'\u0157' # 0xB3 -> LATIN SMALL LETTER R WITH CEDILLA
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\u0129' # 0xB5 -> LATIN SMALL LETTER I WITH TILDE
- u'\u013c' # 0xB6 -> LATIN SMALL LETTER L WITH CEDILLA
- u'\u02c7' # 0xB7 -> CARON
- u'\xb8' # 0xB8 -> CEDILLA
- u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
- u'\u0113' # 0xBA -> LATIN SMALL LETTER E WITH MACRON
- u'\u0123' # 0xBB -> LATIN SMALL LETTER G WITH CEDILLA
- u'\u0167' # 0xBC -> LATIN SMALL LETTER T WITH STROKE
- u'\u014a' # 0xBD -> LATIN CAPITAL LETTER ENG
- u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
- u'\u014b' # 0xBF -> LATIN SMALL LETTER ENG
- u'\u0100' # 0xC0 -> LATIN CAPITAL LETTER A WITH MACRON
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\u012e' # 0xC7 -> LATIN CAPITAL LETTER I WITH OGONEK
- u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\u0116' # 0xCC -> LATIN CAPITAL LETTER E WITH DOT ABOVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\u012a' # 0xCF -> LATIN CAPITAL LETTER I WITH MACRON
- u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
- u'\u0145' # 0xD1 -> LATIN CAPITAL LETTER N WITH CEDILLA
- u'\u014c' # 0xD2 -> LATIN CAPITAL LETTER O WITH MACRON
- u'\u0136' # 0xD3 -> LATIN CAPITAL LETTER K WITH CEDILLA
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u0172' # 0xD9 -> LATIN CAPITAL LETTER U WITH OGONEK
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u0168' # 0xDD -> LATIN CAPITAL LETTER U WITH TILDE
- u'\u016a' # 0xDE -> LATIN CAPITAL LETTER U WITH MACRON
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\u0101' # 0xE0 -> LATIN SMALL LETTER A WITH MACRON
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\u012f' # 0xE7 -> LATIN SMALL LETTER I WITH OGONEK
- u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\u0117' # 0xEC -> LATIN SMALL LETTER E WITH DOT ABOVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\u012b' # 0xEF -> LATIN SMALL LETTER I WITH MACRON
- u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
- u'\u0146' # 0xF1 -> LATIN SMALL LETTER N WITH CEDILLA
- u'\u014d' # 0xF2 -> LATIN SMALL LETTER O WITH MACRON
- u'\u0137' # 0xF3 -> LATIN SMALL LETTER K WITH CEDILLA
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\u0173' # 0xF9 -> LATIN SMALL LETTER U WITH OGONEK
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u0169' # 0xFD -> LATIN SMALL LETTER U WITH TILDE
- u'\u016b' # 0xFE -> LATIN SMALL LETTER U WITH MACRON
- u'\u02d9' # 0xFF -> DOT ABOVE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_5.py b/sys/lib/python/encodings/iso8859_5.py
deleted file mode 100644
index c01cd1caa..000000000
--- a/sys/lib/python/encodings/iso8859_5.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_5 generated from 'MAPPINGS/ISO8859/8859-5.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-5',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u0401' # 0xA1 -> CYRILLIC CAPITAL LETTER IO
- u'\u0402' # 0xA2 -> CYRILLIC CAPITAL LETTER DJE
- u'\u0403' # 0xA3 -> CYRILLIC CAPITAL LETTER GJE
- u'\u0404' # 0xA4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
- u'\u0405' # 0xA5 -> CYRILLIC CAPITAL LETTER DZE
- u'\u0406' # 0xA6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\u0407' # 0xA7 -> CYRILLIC CAPITAL LETTER YI
- u'\u0408' # 0xA8 -> CYRILLIC CAPITAL LETTER JE
- u'\u0409' # 0xA9 -> CYRILLIC CAPITAL LETTER LJE
- u'\u040a' # 0xAA -> CYRILLIC CAPITAL LETTER NJE
- u'\u040b' # 0xAB -> CYRILLIC CAPITAL LETTER TSHE
- u'\u040c' # 0xAC -> CYRILLIC CAPITAL LETTER KJE
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\u040e' # 0xAE -> CYRILLIC CAPITAL LETTER SHORT U
- u'\u040f' # 0xAF -> CYRILLIC CAPITAL LETTER DZHE
- u'\u0410' # 0xB0 -> CYRILLIC CAPITAL LETTER A
- u'\u0411' # 0xB1 -> CYRILLIC CAPITAL LETTER BE
- u'\u0412' # 0xB2 -> CYRILLIC CAPITAL LETTER VE
- u'\u0413' # 0xB3 -> CYRILLIC CAPITAL LETTER GHE
- u'\u0414' # 0xB4 -> CYRILLIC CAPITAL LETTER DE
- u'\u0415' # 0xB5 -> CYRILLIC CAPITAL LETTER IE
- u'\u0416' # 0xB6 -> CYRILLIC CAPITAL LETTER ZHE
- u'\u0417' # 0xB7 -> CYRILLIC CAPITAL LETTER ZE
- u'\u0418' # 0xB8 -> CYRILLIC CAPITAL LETTER I
- u'\u0419' # 0xB9 -> CYRILLIC CAPITAL LETTER SHORT I
- u'\u041a' # 0xBA -> CYRILLIC CAPITAL LETTER KA
- u'\u041b' # 0xBB -> CYRILLIC CAPITAL LETTER EL
- u'\u041c' # 0xBC -> CYRILLIC CAPITAL LETTER EM
- u'\u041d' # 0xBD -> CYRILLIC CAPITAL LETTER EN
- u'\u041e' # 0xBE -> CYRILLIC CAPITAL LETTER O
- u'\u041f' # 0xBF -> CYRILLIC CAPITAL LETTER PE
- u'\u0420' # 0xC0 -> CYRILLIC CAPITAL LETTER ER
- u'\u0421' # 0xC1 -> CYRILLIC CAPITAL LETTER ES
- u'\u0422' # 0xC2 -> CYRILLIC CAPITAL LETTER TE
- u'\u0423' # 0xC3 -> CYRILLIC CAPITAL LETTER U
- u'\u0424' # 0xC4 -> CYRILLIC CAPITAL LETTER EF
- u'\u0425' # 0xC5 -> CYRILLIC CAPITAL LETTER HA
- u'\u0426' # 0xC6 -> CYRILLIC CAPITAL LETTER TSE
- u'\u0427' # 0xC7 -> CYRILLIC CAPITAL LETTER CHE
- u'\u0428' # 0xC8 -> CYRILLIC CAPITAL LETTER SHA
- u'\u0429' # 0xC9 -> CYRILLIC CAPITAL LETTER SHCHA
- u'\u042a' # 0xCA -> CYRILLIC CAPITAL LETTER HARD SIGN
- u'\u042b' # 0xCB -> CYRILLIC CAPITAL LETTER YERU
- u'\u042c' # 0xCC -> CYRILLIC CAPITAL LETTER SOFT SIGN
- u'\u042d' # 0xCD -> CYRILLIC CAPITAL LETTER E
- u'\u042e' # 0xCE -> CYRILLIC CAPITAL LETTER YU
- u'\u042f' # 0xCF -> CYRILLIC CAPITAL LETTER YA
- u'\u0430' # 0xD0 -> CYRILLIC SMALL LETTER A
- u'\u0431' # 0xD1 -> CYRILLIC SMALL LETTER BE
- u'\u0432' # 0xD2 -> CYRILLIC SMALL LETTER VE
- u'\u0433' # 0xD3 -> CYRILLIC SMALL LETTER GHE
- u'\u0434' # 0xD4 -> CYRILLIC SMALL LETTER DE
- u'\u0435' # 0xD5 -> CYRILLIC SMALL LETTER IE
- u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
- u'\u0437' # 0xD7 -> CYRILLIC SMALL LETTER ZE
- u'\u0438' # 0xD8 -> CYRILLIC SMALL LETTER I
- u'\u0439' # 0xD9 -> CYRILLIC SMALL LETTER SHORT I
- u'\u043a' # 0xDA -> CYRILLIC SMALL LETTER KA
- u'\u043b' # 0xDB -> CYRILLIC SMALL LETTER EL
- u'\u043c' # 0xDC -> CYRILLIC SMALL LETTER EM
- u'\u043d' # 0xDD -> CYRILLIC SMALL LETTER EN
- u'\u043e' # 0xDE -> CYRILLIC SMALL LETTER O
- u'\u043f' # 0xDF -> CYRILLIC SMALL LETTER PE
- u'\u0440' # 0xE0 -> CYRILLIC SMALL LETTER ER
- u'\u0441' # 0xE1 -> CYRILLIC SMALL LETTER ES
- u'\u0442' # 0xE2 -> CYRILLIC SMALL LETTER TE
- u'\u0443' # 0xE3 -> CYRILLIC SMALL LETTER U
- u'\u0444' # 0xE4 -> CYRILLIC SMALL LETTER EF
- u'\u0445' # 0xE5 -> CYRILLIC SMALL LETTER HA
- u'\u0446' # 0xE6 -> CYRILLIC SMALL LETTER TSE
- u'\u0447' # 0xE7 -> CYRILLIC SMALL LETTER CHE
- u'\u0448' # 0xE8 -> CYRILLIC SMALL LETTER SHA
- u'\u0449' # 0xE9 -> CYRILLIC SMALL LETTER SHCHA
- u'\u044a' # 0xEA -> CYRILLIC SMALL LETTER HARD SIGN
- u'\u044b' # 0xEB -> CYRILLIC SMALL LETTER YERU
- u'\u044c' # 0xEC -> CYRILLIC SMALL LETTER SOFT SIGN
- u'\u044d' # 0xED -> CYRILLIC SMALL LETTER E
- u'\u044e' # 0xEE -> CYRILLIC SMALL LETTER YU
- u'\u044f' # 0xEF -> CYRILLIC SMALL LETTER YA
- u'\u2116' # 0xF0 -> NUMERO SIGN
- u'\u0451' # 0xF1 -> CYRILLIC SMALL LETTER IO
- u'\u0452' # 0xF2 -> CYRILLIC SMALL LETTER DJE
- u'\u0453' # 0xF3 -> CYRILLIC SMALL LETTER GJE
- u'\u0454' # 0xF4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
- u'\u0455' # 0xF5 -> CYRILLIC SMALL LETTER DZE
- u'\u0456' # 0xF6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\u0457' # 0xF7 -> CYRILLIC SMALL LETTER YI
- u'\u0458' # 0xF8 -> CYRILLIC SMALL LETTER JE
- u'\u0459' # 0xF9 -> CYRILLIC SMALL LETTER LJE
- u'\u045a' # 0xFA -> CYRILLIC SMALL LETTER NJE
- u'\u045b' # 0xFB -> CYRILLIC SMALL LETTER TSHE
- u'\u045c' # 0xFC -> CYRILLIC SMALL LETTER KJE
- u'\xa7' # 0xFD -> SECTION SIGN
- u'\u045e' # 0xFE -> CYRILLIC SMALL LETTER SHORT U
- u'\u045f' # 0xFF -> CYRILLIC SMALL LETTER DZHE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_6.py b/sys/lib/python/encodings/iso8859_6.py
deleted file mode 100644
index 16c34a3f6..000000000
--- a/sys/lib/python/encodings/iso8859_6.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_6 generated from 'MAPPINGS/ISO8859/8859-6.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-6',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\u060c' # 0xAC -> ARABIC COMMA
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\u061b' # 0xBB -> ARABIC SEMICOLON
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\u061f' # 0xBF -> ARABIC QUESTION MARK
- u'\ufffe'
- u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
- u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
- u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
- u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
- u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
- u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
- u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
- u'\u0628' # 0xC8 -> ARABIC LETTER BEH
- u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
- u'\u062a' # 0xCA -> ARABIC LETTER TEH
- u'\u062b' # 0xCB -> ARABIC LETTER THEH
- u'\u062c' # 0xCC -> ARABIC LETTER JEEM
- u'\u062d' # 0xCD -> ARABIC LETTER HAH
- u'\u062e' # 0xCE -> ARABIC LETTER KHAH
- u'\u062f' # 0xCF -> ARABIC LETTER DAL
- u'\u0630' # 0xD0 -> ARABIC LETTER THAL
- u'\u0631' # 0xD1 -> ARABIC LETTER REH
- u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
- u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
- u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
- u'\u0635' # 0xD5 -> ARABIC LETTER SAD
- u'\u0636' # 0xD6 -> ARABIC LETTER DAD
- u'\u0637' # 0xD7 -> ARABIC LETTER TAH
- u'\u0638' # 0xD8 -> ARABIC LETTER ZAH
- u'\u0639' # 0xD9 -> ARABIC LETTER AIN
- u'\u063a' # 0xDA -> ARABIC LETTER GHAIN
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\u0640' # 0xE0 -> ARABIC TATWEEL
- u'\u0641' # 0xE1 -> ARABIC LETTER FEH
- u'\u0642' # 0xE2 -> ARABIC LETTER QAF
- u'\u0643' # 0xE3 -> ARABIC LETTER KAF
- u'\u0644' # 0xE4 -> ARABIC LETTER LAM
- u'\u0645' # 0xE5 -> ARABIC LETTER MEEM
- u'\u0646' # 0xE6 -> ARABIC LETTER NOON
- u'\u0647' # 0xE7 -> ARABIC LETTER HEH
- u'\u0648' # 0xE8 -> ARABIC LETTER WAW
- u'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
- u'\u064a' # 0xEA -> ARABIC LETTER YEH
- u'\u064b' # 0xEB -> ARABIC FATHATAN
- u'\u064c' # 0xEC -> ARABIC DAMMATAN
- u'\u064d' # 0xED -> ARABIC KASRATAN
- u'\u064e' # 0xEE -> ARABIC FATHA
- u'\u064f' # 0xEF -> ARABIC DAMMA
- u'\u0650' # 0xF0 -> ARABIC KASRA
- u'\u0651' # 0xF1 -> ARABIC SHADDA
- u'\u0652' # 0xF2 -> ARABIC SUKUN
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_7.py b/sys/lib/python/encodings/iso8859_7.py
deleted file mode 100644
index a560023a0..000000000
--- a/sys/lib/python/encodings/iso8859_7.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_7 generated from 'MAPPINGS/ISO8859/8859-7.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-7',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\u2018' # 0xA1 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xA2 -> RIGHT SINGLE QUOTATION MARK
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\u20ac' # 0xA4 -> EURO SIGN
- u'\u20af' # 0xA5 -> DRACHMA SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u037a' # 0xAA -> GREEK YPOGEGRAMMENI
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\ufffe'
- u'\u2015' # 0xAF -> HORIZONTAL BAR
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\u0384' # 0xB4 -> GREEK TONOS
- u'\u0385' # 0xB5 -> GREEK DIALYTIKA TONOS
- u'\u0386' # 0xB6 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
- u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
- u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
- u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
- u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
- u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
- u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
- u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
- u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
- u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
- u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
- u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
- u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
- u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
- u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
- u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
- u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
- u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
- u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
- u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
- u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
- u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
- u'\ufffe'
- u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
- u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
- u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
- u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
- u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
- u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
- u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
- u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
- u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
- u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
- u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
- u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
- u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
- u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
- u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
- u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
- u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
- u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
- u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
- u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
- u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
- u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
- u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
- u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
- u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
- u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
- u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
- u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
- u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
- u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
- u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
- u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
- u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
- u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
- u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
- u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
- u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
- u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
- u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
- u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
- u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
- u'\ufffe'
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_8.py b/sys/lib/python/encodings/iso8859_8.py
deleted file mode 100644
index 43cf2138b..000000000
--- a/sys/lib/python/encodings/iso8859_8.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_8 generated from 'MAPPINGS/ISO8859/8859-8.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-8',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\ufffe'
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\xd7' # 0xAA -> MULTIPLICATION SIGN
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\xf7' # 0xBA -> DIVISION SIGN
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\u2017' # 0xDF -> DOUBLE LOW LINE
- u'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
- u'\u05d1' # 0xE1 -> HEBREW LETTER BET
- u'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
- u'\u05d3' # 0xE3 -> HEBREW LETTER DALET
- u'\u05d4' # 0xE4 -> HEBREW LETTER HE
- u'\u05d5' # 0xE5 -> HEBREW LETTER VAV
- u'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
- u'\u05d7' # 0xE7 -> HEBREW LETTER HET
- u'\u05d8' # 0xE8 -> HEBREW LETTER TET
- u'\u05d9' # 0xE9 -> HEBREW LETTER YOD
- u'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
- u'\u05db' # 0xEB -> HEBREW LETTER KAF
- u'\u05dc' # 0xEC -> HEBREW LETTER LAMED
- u'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
- u'\u05de' # 0xEE -> HEBREW LETTER MEM
- u'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
- u'\u05e0' # 0xF0 -> HEBREW LETTER NUN
- u'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
- u'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
- u'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
- u'\u05e4' # 0xF4 -> HEBREW LETTER PE
- u'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
- u'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
- u'\u05e7' # 0xF7 -> HEBREW LETTER QOF
- u'\u05e8' # 0xF8 -> HEBREW LETTER RESH
- u'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
- u'\u05ea' # 0xFA -> HEBREW LETTER TAV
- u'\ufffe'
- u'\ufffe'
- u'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
- u'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
- u'\ufffe'
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/iso8859_9.py b/sys/lib/python/encodings/iso8859_9.py
deleted file mode 100644
index b8029382c..000000000
--- a/sys/lib/python/encodings/iso8859_9.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec iso8859_9 generated from 'MAPPINGS/ISO8859/8859-9.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-9',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\xa0' # 0xA0 -> NO-BREAK SPACE
- u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa4' # 0xA4 -> CURRENCY SIGN
- u'\xa5' # 0xA5 -> YEN SIGN
- u'\xa6' # 0xA6 -> BROKEN BAR
- u'\xa7' # 0xA7 -> SECTION SIGN
- u'\xa8' # 0xA8 -> DIAERESIS
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
- u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xac' # 0xAC -> NOT SIGN
- u'\xad' # 0xAD -> SOFT HYPHEN
- u'\xae' # 0xAE -> REGISTERED SIGN
- u'\xaf' # 0xAF -> MACRON
- u'\xb0' # 0xB0 -> DEGREE SIGN
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
- u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
- u'\xb4' # 0xB4 -> ACUTE ACCENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\xb6' # 0xB6 -> PILCROW SIGN
- u'\xb7' # 0xB7 -> MIDDLE DOT
- u'\xb8' # 0xB8 -> CEDILLA
- u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
- u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
- u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
- u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
- u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
- u'\xbf' # 0xBF -> INVERTED QUESTION MARK
- u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
- u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
- u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
- u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
- u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
- u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
- u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
- u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
- u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
- u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
- u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
- u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
- u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
- u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
- u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
- u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
- u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0xF7 -> DIVISION SIGN
- u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
- u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
- u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
- u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
- u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/johab.py b/sys/lib/python/encodings/johab.py
deleted file mode 100644
index 512aeeb73..000000000
--- a/sys/lib/python/encodings/johab.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# johab.py: Python Unicode Codec for JOHAB
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_kr, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_kr.getcodec('johab')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='johab',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/koi8_r.py b/sys/lib/python/encodings/koi8_r.py
deleted file mode 100644
index f9eb82c0d..000000000
--- a/sys/lib/python/encodings/koi8_r.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec koi8_r generated from 'MAPPINGS/VENDORS/MISC/KOI8-R.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='koi8-r',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u2580' # 0x8B -> UPPER HALF BLOCK
- u'\u2584' # 0x8C -> LOWER HALF BLOCK
- u'\u2588' # 0x8D -> FULL BLOCK
- u'\u258c' # 0x8E -> LEFT HALF BLOCK
- u'\u2590' # 0x8F -> RIGHT HALF BLOCK
- u'\u2591' # 0x90 -> LIGHT SHADE
- u'\u2592' # 0x91 -> MEDIUM SHADE
- u'\u2593' # 0x92 -> DARK SHADE
- u'\u2320' # 0x93 -> TOP HALF INTEGRAL
- u'\u25a0' # 0x94 -> BLACK SQUARE
- u'\u2219' # 0x95 -> BULLET OPERATOR
- u'\u221a' # 0x96 -> SQUARE ROOT
- u'\u2248' # 0x97 -> ALMOST EQUAL TO
- u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
- u'\xa0' # 0x9A -> NO-BREAK SPACE
- u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
- u'\xb0' # 0x9C -> DEGREE SIGN
- u'\xb2' # 0x9D -> SUPERSCRIPT TWO
- u'\xb7' # 0x9E -> MIDDLE DOT
- u'\xf7' # 0x9F -> DIVISION SIGN
- u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
- u'\u2553' # 0xA4 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
- u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u2555' # 0xA6 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
- u'\u2556' # 0xA7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
- u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u255c' # 0xAD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
- u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
- u'\u2562' # 0xB4 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
- u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u2564' # 0xB6 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
- u'\u2565' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
- u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u256b' # 0xBD -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
- u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\xa9' # 0xBF -> COPYRIGHT SIGN
- u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
- u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
- u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
- u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
- u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
- u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
- u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
- u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
- u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
- u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
- u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
- u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
- u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
- u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
- u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
- u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
- u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
- u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
- u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
- u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
- u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
- u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
- u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
- u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
- u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
- u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
- u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
- u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
- u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
- u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
- u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
- u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
- u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
- u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
- u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
- u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
- u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
- u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
- u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
- u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
- u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
- u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
- u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
- u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
- u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
- u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
- u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
- u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
- u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
- u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
- u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
- u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
- u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
- u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
- u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
- u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
- u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
- u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
- u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
- u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
- u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
- u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
- u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
- u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/koi8_u.py b/sys/lib/python/encodings/koi8_u.py
deleted file mode 100644
index a9317b12b..000000000
--- a/sys/lib/python/encodings/koi8_u.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='koi8-u',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
- u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
- u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
- u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
- u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
- u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
- u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
- u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
- u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
- u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
- u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
- u'\u2580' # 0x8B -> UPPER HALF BLOCK
- u'\u2584' # 0x8C -> LOWER HALF BLOCK
- u'\u2588' # 0x8D -> FULL BLOCK
- u'\u258c' # 0x8E -> LEFT HALF BLOCK
- u'\u2590' # 0x8F -> RIGHT HALF BLOCK
- u'\u2591' # 0x90 -> LIGHT SHADE
- u'\u2592' # 0x91 -> MEDIUM SHADE
- u'\u2593' # 0x92 -> DARK SHADE
- u'\u2320' # 0x93 -> TOP HALF INTEGRAL
- u'\u25a0' # 0x94 -> BLACK SQUARE
- u'\u2219' # 0x95 -> BULLET OPERATOR
- u'\u221a' # 0x96 -> SQUARE ROOT
- u'\u2248' # 0x97 -> ALMOST EQUAL TO
- u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
- u'\xa0' # 0x9A -> NO-BREAK SPACE
- u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
- u'\xb0' # 0x9C -> DEGREE SIGN
- u'\xb2' # 0x9D -> SUPERSCRIPT TWO
- u'\xb7' # 0x9E -> MIDDLE DOT
- u'\xf7' # 0x9F -> DIVISION SIGN
- u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
- u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
- u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
- u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
- u'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
- u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
- u'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN)
- u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
- u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
- u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
- u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
- u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
- u'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
- u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
- u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
- u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
- u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
- u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
- u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
- u'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
- u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
- u'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
- u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
- u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
- u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
- u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
- u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
- u'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
- u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
- u'\xa9' # 0xBF -> COPYRIGHT SIGN
- u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
- u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
- u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
- u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
- u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
- u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
- u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
- u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
- u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
- u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
- u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
- u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
- u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
- u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
- u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
- u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
- u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
- u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
- u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
- u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
- u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
- u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
- u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
- u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
- u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
- u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
- u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
- u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
- u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
- u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
- u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
- u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
- u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
- u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
- u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
- u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
- u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
- u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
- u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
- u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
- u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
- u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
- u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
- u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
- u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
- u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
- u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
- u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
- u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
- u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
- u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
- u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
- u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
- u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
- u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
- u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
- u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
- u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
- u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
- u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
- u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
- u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
- u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
- u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/latin_1.py b/sys/lib/python/encodings/latin_1.py
deleted file mode 100644
index 370160c0c..000000000
--- a/sys/lib/python/encodings/latin_1.py
+++ /dev/null
@@ -1,50 +0,0 @@
-""" Python 'latin-1' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- # Note: Binding these as C functions will result in the class not
- # converting them to methods. This is intended.
- encode = codecs.latin_1_encode
- decode = codecs.latin_1_decode
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.latin_1_encode(input,self.errors)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.latin_1_decode(input,self.errors)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-class StreamConverter(StreamWriter,StreamReader):
-
- encode = codecs.latin_1_decode
- decode = codecs.latin_1_encode
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='iso8859-1',
- encode=Codec.encode,
- decode=Codec.decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/mac_arabic.py b/sys/lib/python/encodings/mac_arabic.py
deleted file mode 100644
index 7a7d3c5f7..000000000
--- a/sys/lib/python/encodings/mac_arabic.py
+++ /dev/null
@@ -1,698 +0,0 @@
-""" Python Character Mapping Codec generated from 'VENDORS/APPLE/ARABIC.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-arabic',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x0081: 0x00a0, # NO-BREAK SPACE, right-left
- 0x0082: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0084: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
- 0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x0088: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
- 0x0089: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x008b: 0x06ba, # ARABIC LETTER NOON GHUNNA
- 0x008c: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
- 0x008d: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
- 0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x008f: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
- 0x0090: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x0091: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x0093: 0x2026, # HORIZONTAL ELLIPSIS, right-left
- 0x0094: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x0095: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x0096: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
- 0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x0098: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
- 0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x009b: 0x00f7, # DIVISION SIGN, right-left
- 0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x009d: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
- 0x009e: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00a0: 0x0020, # SPACE, right-left
- 0x00a1: 0x0021, # EXCLAMATION MARK, right-left
- 0x00a2: 0x0022, # QUOTATION MARK, right-left
- 0x00a3: 0x0023, # NUMBER SIGN, right-left
- 0x00a4: 0x0024, # DOLLAR SIGN, right-left
- 0x00a5: 0x066a, # ARABIC PERCENT SIGN
- 0x00a6: 0x0026, # AMPERSAND, right-left
- 0x00a7: 0x0027, # APOSTROPHE, right-left
- 0x00a8: 0x0028, # LEFT PARENTHESIS, right-left
- 0x00a9: 0x0029, # RIGHT PARENTHESIS, right-left
- 0x00aa: 0x002a, # ASTERISK, right-left
- 0x00ab: 0x002b, # PLUS SIGN, right-left
- 0x00ac: 0x060c, # ARABIC COMMA
- 0x00ad: 0x002d, # HYPHEN-MINUS, right-left
- 0x00ae: 0x002e, # FULL STOP, right-left
- 0x00af: 0x002f, # SOLIDUS, right-left
- 0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
- 0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE, right-left (need override)
- 0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO, right-left (need override)
- 0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE, right-left (need override)
- 0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
- 0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
- 0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX, right-left (need override)
- 0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
- 0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
- 0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE, right-left (need override)
- 0x00ba: 0x003a, # COLON, right-left
- 0x00bb: 0x061b, # ARABIC SEMICOLON
- 0x00bc: 0x003c, # LESS-THAN SIGN, right-left
- 0x00bd: 0x003d, # EQUALS SIGN, right-left
- 0x00be: 0x003e, # GREATER-THAN SIGN, right-left
- 0x00bf: 0x061f, # ARABIC QUESTION MARK
- 0x00c0: 0x274a, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
- 0x00c1: 0x0621, # ARABIC LETTER HAMZA
- 0x00c2: 0x0622, # ARABIC LETTER ALEF WITH MADDA ABOVE
- 0x00c3: 0x0623, # ARABIC LETTER ALEF WITH HAMZA ABOVE
- 0x00c4: 0x0624, # ARABIC LETTER WAW WITH HAMZA ABOVE
- 0x00c5: 0x0625, # ARABIC LETTER ALEF WITH HAMZA BELOW
- 0x00c6: 0x0626, # ARABIC LETTER YEH WITH HAMZA ABOVE
- 0x00c7: 0x0627, # ARABIC LETTER ALEF
- 0x00c8: 0x0628, # ARABIC LETTER BEH
- 0x00c9: 0x0629, # ARABIC LETTER TEH MARBUTA
- 0x00ca: 0x062a, # ARABIC LETTER TEH
- 0x00cb: 0x062b, # ARABIC LETTER THEH
- 0x00cc: 0x062c, # ARABIC LETTER JEEM
- 0x00cd: 0x062d, # ARABIC LETTER HAH
- 0x00ce: 0x062e, # ARABIC LETTER KHAH
- 0x00cf: 0x062f, # ARABIC LETTER DAL
- 0x00d0: 0x0630, # ARABIC LETTER THAL
- 0x00d1: 0x0631, # ARABIC LETTER REH
- 0x00d2: 0x0632, # ARABIC LETTER ZAIN
- 0x00d3: 0x0633, # ARABIC LETTER SEEN
- 0x00d4: 0x0634, # ARABIC LETTER SHEEN
- 0x00d5: 0x0635, # ARABIC LETTER SAD
- 0x00d6: 0x0636, # ARABIC LETTER DAD
- 0x00d7: 0x0637, # ARABIC LETTER TAH
- 0x00d8: 0x0638, # ARABIC LETTER ZAH
- 0x00d9: 0x0639, # ARABIC LETTER AIN
- 0x00da: 0x063a, # ARABIC LETTER GHAIN
- 0x00db: 0x005b, # LEFT SQUARE BRACKET, right-left
- 0x00dc: 0x005c, # REVERSE SOLIDUS, right-left
- 0x00dd: 0x005d, # RIGHT SQUARE BRACKET, right-left
- 0x00de: 0x005e, # CIRCUMFLEX ACCENT, right-left
- 0x00df: 0x005f, # LOW LINE, right-left
- 0x00e0: 0x0640, # ARABIC TATWEEL
- 0x00e1: 0x0641, # ARABIC LETTER FEH
- 0x00e2: 0x0642, # ARABIC LETTER QAF
- 0x00e3: 0x0643, # ARABIC LETTER KAF
- 0x00e4: 0x0644, # ARABIC LETTER LAM
- 0x00e5: 0x0645, # ARABIC LETTER MEEM
- 0x00e6: 0x0646, # ARABIC LETTER NOON
- 0x00e7: 0x0647, # ARABIC LETTER HEH
- 0x00e8: 0x0648, # ARABIC LETTER WAW
- 0x00e9: 0x0649, # ARABIC LETTER ALEF MAKSURA
- 0x00ea: 0x064a, # ARABIC LETTER YEH
- 0x00eb: 0x064b, # ARABIC FATHATAN
- 0x00ec: 0x064c, # ARABIC DAMMATAN
- 0x00ed: 0x064d, # ARABIC KASRATAN
- 0x00ee: 0x064e, # ARABIC FATHA
- 0x00ef: 0x064f, # ARABIC DAMMA
- 0x00f0: 0x0650, # ARABIC KASRA
- 0x00f1: 0x0651, # ARABIC SHADDA
- 0x00f2: 0x0652, # ARABIC SUKUN
- 0x00f3: 0x067e, # ARABIC LETTER PEH
- 0x00f4: 0x0679, # ARABIC LETTER TTEH
- 0x00f5: 0x0686, # ARABIC LETTER TCHEH
- 0x00f6: 0x06d5, # ARABIC LETTER AE
- 0x00f7: 0x06a4, # ARABIC LETTER VEH
- 0x00f8: 0x06af, # ARABIC LETTER GAF
- 0x00f9: 0x0688, # ARABIC LETTER DDAL
- 0x00fa: 0x0691, # ARABIC LETTER RREH
- 0x00fb: 0x007b, # LEFT CURLY BRACKET, right-left
- 0x00fc: 0x007c, # VERTICAL LINE, right-left
- 0x00fd: 0x007d, # RIGHT CURLY BRACKET, right-left
- 0x00fe: 0x0698, # ARABIC LETTER JEH
- 0x00ff: 0x06d2, # ARABIC LETTER YEH BARREE
-})
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x0000 -> CONTROL CHARACTER
- u'\x01' # 0x0001 -> CONTROL CHARACTER
- u'\x02' # 0x0002 -> CONTROL CHARACTER
- u'\x03' # 0x0003 -> CONTROL CHARACTER
- u'\x04' # 0x0004 -> CONTROL CHARACTER
- u'\x05' # 0x0005 -> CONTROL CHARACTER
- u'\x06' # 0x0006 -> CONTROL CHARACTER
- u'\x07' # 0x0007 -> CONTROL CHARACTER
- u'\x08' # 0x0008 -> CONTROL CHARACTER
- u'\t' # 0x0009 -> CONTROL CHARACTER
- u'\n' # 0x000a -> CONTROL CHARACTER
- u'\x0b' # 0x000b -> CONTROL CHARACTER
- u'\x0c' # 0x000c -> CONTROL CHARACTER
- u'\r' # 0x000d -> CONTROL CHARACTER
- u'\x0e' # 0x000e -> CONTROL CHARACTER
- u'\x0f' # 0x000f -> CONTROL CHARACTER
- u'\x10' # 0x0010 -> CONTROL CHARACTER
- u'\x11' # 0x0011 -> CONTROL CHARACTER
- u'\x12' # 0x0012 -> CONTROL CHARACTER
- u'\x13' # 0x0013 -> CONTROL CHARACTER
- u'\x14' # 0x0014 -> CONTROL CHARACTER
- u'\x15' # 0x0015 -> CONTROL CHARACTER
- u'\x16' # 0x0016 -> CONTROL CHARACTER
- u'\x17' # 0x0017 -> CONTROL CHARACTER
- u'\x18' # 0x0018 -> CONTROL CHARACTER
- u'\x19' # 0x0019 -> CONTROL CHARACTER
- u'\x1a' # 0x001a -> CONTROL CHARACTER
- u'\x1b' # 0x001b -> CONTROL CHARACTER
- u'\x1c' # 0x001c -> CONTROL CHARACTER
- u'\x1d' # 0x001d -> CONTROL CHARACTER
- u'\x1e' # 0x001e -> CONTROL CHARACTER
- u'\x1f' # 0x001f -> CONTROL CHARACTER
- u' ' # 0x0020 -> SPACE, left-right
- u'!' # 0x0021 -> EXCLAMATION MARK, left-right
- u'"' # 0x0022 -> QUOTATION MARK, left-right
- u'#' # 0x0023 -> NUMBER SIGN, left-right
- u'$' # 0x0024 -> DOLLAR SIGN, left-right
- u'%' # 0x0025 -> PERCENT SIGN, left-right
- u'&' # 0x0026 -> AMPERSAND, left-right
- u"'" # 0x0027 -> APOSTROPHE, left-right
- u'(' # 0x0028 -> LEFT PARENTHESIS, left-right
- u')' # 0x0029 -> RIGHT PARENTHESIS, left-right
- u'*' # 0x002a -> ASTERISK, left-right
- u'+' # 0x002b -> PLUS SIGN, left-right
- u',' # 0x002c -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
- u'-' # 0x002d -> HYPHEN-MINUS, left-right
- u'.' # 0x002e -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
- u'/' # 0x002f -> SOLIDUS, left-right
- u'0' # 0x0030 -> DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
- u'1' # 0x0031 -> DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
- u'2' # 0x0032 -> DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
- u'3' # 0x0033 -> DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
- u'4' # 0x0034 -> DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
- u'5' # 0x0035 -> DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
- u'6' # 0x0036 -> DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
- u'7' # 0x0037 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
- u'8' # 0x0038 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
- u'9' # 0x0039 -> DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
- u':' # 0x003a -> COLON, left-right
- u';' # 0x003b -> SEMICOLON, left-right
- u'<' # 0x003c -> LESS-THAN SIGN, left-right
- u'=' # 0x003d -> EQUALS SIGN, left-right
- u'>' # 0x003e -> GREATER-THAN SIGN, left-right
- u'?' # 0x003f -> QUESTION MARK, left-right
- u'@' # 0x0040 -> COMMERCIAL AT
- u'A' # 0x0041 -> LATIN CAPITAL LETTER A
- u'B' # 0x0042 -> LATIN CAPITAL LETTER B
- u'C' # 0x0043 -> LATIN CAPITAL LETTER C
- u'D' # 0x0044 -> LATIN CAPITAL LETTER D
- u'E' # 0x0045 -> LATIN CAPITAL LETTER E
- u'F' # 0x0046 -> LATIN CAPITAL LETTER F
- u'G' # 0x0047 -> LATIN CAPITAL LETTER G
- u'H' # 0x0048 -> LATIN CAPITAL LETTER H
- u'I' # 0x0049 -> LATIN CAPITAL LETTER I
- u'J' # 0x004a -> LATIN CAPITAL LETTER J
- u'K' # 0x004b -> LATIN CAPITAL LETTER K
- u'L' # 0x004c -> LATIN CAPITAL LETTER L
- u'M' # 0x004d -> LATIN CAPITAL LETTER M
- u'N' # 0x004e -> LATIN CAPITAL LETTER N
- u'O' # 0x004f -> LATIN CAPITAL LETTER O
- u'P' # 0x0050 -> LATIN CAPITAL LETTER P
- u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
- u'R' # 0x0052 -> LATIN CAPITAL LETTER R
- u'S' # 0x0053 -> LATIN CAPITAL LETTER S
- u'T' # 0x0054 -> LATIN CAPITAL LETTER T
- u'U' # 0x0055 -> LATIN CAPITAL LETTER U
- u'V' # 0x0056 -> LATIN CAPITAL LETTER V
- u'W' # 0x0057 -> LATIN CAPITAL LETTER W
- u'X' # 0x0058 -> LATIN CAPITAL LETTER X
- u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
- u'[' # 0x005b -> LEFT SQUARE BRACKET, left-right
- u'\\' # 0x005c -> REVERSE SOLIDUS, left-right
- u']' # 0x005d -> RIGHT SQUARE BRACKET, left-right
- u'^' # 0x005e -> CIRCUMFLEX ACCENT, left-right
- u'_' # 0x005f -> LOW LINE, left-right
- u'`' # 0x0060 -> GRAVE ACCENT
- u'a' # 0x0061 -> LATIN SMALL LETTER A
- u'b' # 0x0062 -> LATIN SMALL LETTER B
- u'c' # 0x0063 -> LATIN SMALL LETTER C
- u'd' # 0x0064 -> LATIN SMALL LETTER D
- u'e' # 0x0065 -> LATIN SMALL LETTER E
- u'f' # 0x0066 -> LATIN SMALL LETTER F
- u'g' # 0x0067 -> LATIN SMALL LETTER G
- u'h' # 0x0068 -> LATIN SMALL LETTER H
- u'i' # 0x0069 -> LATIN SMALL LETTER I
- u'j' # 0x006a -> LATIN SMALL LETTER J
- u'k' # 0x006b -> LATIN SMALL LETTER K
- u'l' # 0x006c -> LATIN SMALL LETTER L
- u'm' # 0x006d -> LATIN SMALL LETTER M
- u'n' # 0x006e -> LATIN SMALL LETTER N
- u'o' # 0x006f -> LATIN SMALL LETTER O
- u'p' # 0x0070 -> LATIN SMALL LETTER P
- u'q' # 0x0071 -> LATIN SMALL LETTER Q
- u'r' # 0x0072 -> LATIN SMALL LETTER R
- u's' # 0x0073 -> LATIN SMALL LETTER S
- u't' # 0x0074 -> LATIN SMALL LETTER T
- u'u' # 0x0075 -> LATIN SMALL LETTER U
- u'v' # 0x0076 -> LATIN SMALL LETTER V
- u'w' # 0x0077 -> LATIN SMALL LETTER W
- u'x' # 0x0078 -> LATIN SMALL LETTER X
- u'y' # 0x0079 -> LATIN SMALL LETTER Y
- u'z' # 0x007a -> LATIN SMALL LETTER Z
- u'{' # 0x007b -> LEFT CURLY BRACKET, left-right
- u'|' # 0x007c -> VERTICAL LINE, left-right
- u'}' # 0x007d -> RIGHT CURLY BRACKET, left-right
- u'~' # 0x007e -> TILDE
- u'\x7f' # 0x007f -> CONTROL CHARACTER
- u'\xc4' # 0x0080 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xa0' # 0x0081 -> NO-BREAK SPACE, right-left
- u'\xc7' # 0x0082 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc9' # 0x0083 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xd1' # 0x0084 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd6' # 0x0085 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x0086 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xe1' # 0x0087 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe0' # 0x0088 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe2' # 0x0089 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x008a -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u06ba' # 0x008b -> ARABIC LETTER NOON GHUNNA
- u'\xab' # 0x008c -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
- u'\xe7' # 0x008d -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe9' # 0x008e -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe8' # 0x008f -> LATIN SMALL LETTER E WITH GRAVE
- u'\xea' # 0x0090 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x0091 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xed' # 0x0092 -> LATIN SMALL LETTER I WITH ACUTE
- u'\u2026' # 0x0093 -> HORIZONTAL ELLIPSIS, right-left
- u'\xee' # 0x0094 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x0095 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf1' # 0x0096 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf3' # 0x0097 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xbb' # 0x0098 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
- u'\xf4' # 0x0099 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x009a -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0x009b -> DIVISION SIGN, right-left
- u'\xfa' # 0x009c -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf9' # 0x009d -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfb' # 0x009e -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0x009f -> LATIN SMALL LETTER U WITH DIAERESIS
- u' ' # 0x00a0 -> SPACE, right-left
- u'!' # 0x00a1 -> EXCLAMATION MARK, right-left
- u'"' # 0x00a2 -> QUOTATION MARK, right-left
- u'#' # 0x00a3 -> NUMBER SIGN, right-left
- u'$' # 0x00a4 -> DOLLAR SIGN, right-left
- u'\u066a' # 0x00a5 -> ARABIC PERCENT SIGN
- u'&' # 0x00a6 -> AMPERSAND, right-left
- u"'" # 0x00a7 -> APOSTROPHE, right-left
- u'(' # 0x00a8 -> LEFT PARENTHESIS, right-left
- u')' # 0x00a9 -> RIGHT PARENTHESIS, right-left
- u'*' # 0x00aa -> ASTERISK, right-left
- u'+' # 0x00ab -> PLUS SIGN, right-left
- u'\u060c' # 0x00ac -> ARABIC COMMA
- u'-' # 0x00ad -> HYPHEN-MINUS, right-left
- u'.' # 0x00ae -> FULL STOP, right-left
- u'/' # 0x00af -> SOLIDUS, right-left
- u'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO, right-left (need override)
- u'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE, right-left (need override)
- u'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO, right-left (need override)
- u'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE, right-left (need override)
- u'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR, right-left (need override)
- u'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE, right-left (need override)
- u'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX, right-left (need override)
- u'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN, right-left (need override)
- u'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT, right-left (need override)
- u'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE, right-left (need override)
- u':' # 0x00ba -> COLON, right-left
- u'\u061b' # 0x00bb -> ARABIC SEMICOLON
- u'<' # 0x00bc -> LESS-THAN SIGN, right-left
- u'=' # 0x00bd -> EQUALS SIGN, right-left
- u'>' # 0x00be -> GREATER-THAN SIGN, right-left
- u'\u061f' # 0x00bf -> ARABIC QUESTION MARK
- u'\u274a' # 0x00c0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
- u'\u0621' # 0x00c1 -> ARABIC LETTER HAMZA
- u'\u0622' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
- u'\u0623' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
- u'\u0624' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
- u'\u0625' # 0x00c5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
- u'\u0626' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
- u'\u0627' # 0x00c7 -> ARABIC LETTER ALEF
- u'\u0628' # 0x00c8 -> ARABIC LETTER BEH
- u'\u0629' # 0x00c9 -> ARABIC LETTER TEH MARBUTA
- u'\u062a' # 0x00ca -> ARABIC LETTER TEH
- u'\u062b' # 0x00cb -> ARABIC LETTER THEH
- u'\u062c' # 0x00cc -> ARABIC LETTER JEEM
- u'\u062d' # 0x00cd -> ARABIC LETTER HAH
- u'\u062e' # 0x00ce -> ARABIC LETTER KHAH
- u'\u062f' # 0x00cf -> ARABIC LETTER DAL
- u'\u0630' # 0x00d0 -> ARABIC LETTER THAL
- u'\u0631' # 0x00d1 -> ARABIC LETTER REH
- u'\u0632' # 0x00d2 -> ARABIC LETTER ZAIN
- u'\u0633' # 0x00d3 -> ARABIC LETTER SEEN
- u'\u0634' # 0x00d4 -> ARABIC LETTER SHEEN
- u'\u0635' # 0x00d5 -> ARABIC LETTER SAD
- u'\u0636' # 0x00d6 -> ARABIC LETTER DAD
- u'\u0637' # 0x00d7 -> ARABIC LETTER TAH
- u'\u0638' # 0x00d8 -> ARABIC LETTER ZAH
- u'\u0639' # 0x00d9 -> ARABIC LETTER AIN
- u'\u063a' # 0x00da -> ARABIC LETTER GHAIN
- u'[' # 0x00db -> LEFT SQUARE BRACKET, right-left
- u'\\' # 0x00dc -> REVERSE SOLIDUS, right-left
- u']' # 0x00dd -> RIGHT SQUARE BRACKET, right-left
- u'^' # 0x00de -> CIRCUMFLEX ACCENT, right-left
- u'_' # 0x00df -> LOW LINE, right-left
- u'\u0640' # 0x00e0 -> ARABIC TATWEEL
- u'\u0641' # 0x00e1 -> ARABIC LETTER FEH
- u'\u0642' # 0x00e2 -> ARABIC LETTER QAF
- u'\u0643' # 0x00e3 -> ARABIC LETTER KAF
- u'\u0644' # 0x00e4 -> ARABIC LETTER LAM
- u'\u0645' # 0x00e5 -> ARABIC LETTER MEEM
- u'\u0646' # 0x00e6 -> ARABIC LETTER NOON
- u'\u0647' # 0x00e7 -> ARABIC LETTER HEH
- u'\u0648' # 0x00e8 -> ARABIC LETTER WAW
- u'\u0649' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA
- u'\u064a' # 0x00ea -> ARABIC LETTER YEH
- u'\u064b' # 0x00eb -> ARABIC FATHATAN
- u'\u064c' # 0x00ec -> ARABIC DAMMATAN
- u'\u064d' # 0x00ed -> ARABIC KASRATAN
- u'\u064e' # 0x00ee -> ARABIC FATHA
- u'\u064f' # 0x00ef -> ARABIC DAMMA
- u'\u0650' # 0x00f0 -> ARABIC KASRA
- u'\u0651' # 0x00f1 -> ARABIC SHADDA
- u'\u0652' # 0x00f2 -> ARABIC SUKUN
- u'\u067e' # 0x00f3 -> ARABIC LETTER PEH
- u'\u0679' # 0x00f4 -> ARABIC LETTER TTEH
- u'\u0686' # 0x00f5 -> ARABIC LETTER TCHEH
- u'\u06d5' # 0x00f6 -> ARABIC LETTER AE
- u'\u06a4' # 0x00f7 -> ARABIC LETTER VEH
- u'\u06af' # 0x00f8 -> ARABIC LETTER GAF
- u'\u0688' # 0x00f9 -> ARABIC LETTER DDAL
- u'\u0691' # 0x00fa -> ARABIC LETTER RREH
- u'{' # 0x00fb -> LEFT CURLY BRACKET, right-left
- u'|' # 0x00fc -> VERTICAL LINE, right-left
- u'}' # 0x00fd -> RIGHT CURLY BRACKET, right-left
- u'\u0698' # 0x00fe -> ARABIC LETTER JEH
- u'\u06d2' # 0x00ff -> ARABIC LETTER YEH BARREE
-)
-
-### Encoding Map
-
-encoding_map = {
- 0x0000: 0x0000, # CONTROL CHARACTER
- 0x0001: 0x0001, # CONTROL CHARACTER
- 0x0002: 0x0002, # CONTROL CHARACTER
- 0x0003: 0x0003, # CONTROL CHARACTER
- 0x0004: 0x0004, # CONTROL CHARACTER
- 0x0005: 0x0005, # CONTROL CHARACTER
- 0x0006: 0x0006, # CONTROL CHARACTER
- 0x0007: 0x0007, # CONTROL CHARACTER
- 0x0008: 0x0008, # CONTROL CHARACTER
- 0x0009: 0x0009, # CONTROL CHARACTER
- 0x000a: 0x000a, # CONTROL CHARACTER
- 0x000b: 0x000b, # CONTROL CHARACTER
- 0x000c: 0x000c, # CONTROL CHARACTER
- 0x000d: 0x000d, # CONTROL CHARACTER
- 0x000e: 0x000e, # CONTROL CHARACTER
- 0x000f: 0x000f, # CONTROL CHARACTER
- 0x0010: 0x0010, # CONTROL CHARACTER
- 0x0011: 0x0011, # CONTROL CHARACTER
- 0x0012: 0x0012, # CONTROL CHARACTER
- 0x0013: 0x0013, # CONTROL CHARACTER
- 0x0014: 0x0014, # CONTROL CHARACTER
- 0x0015: 0x0015, # CONTROL CHARACTER
- 0x0016: 0x0016, # CONTROL CHARACTER
- 0x0017: 0x0017, # CONTROL CHARACTER
- 0x0018: 0x0018, # CONTROL CHARACTER
- 0x0019: 0x0019, # CONTROL CHARACTER
- 0x001a: 0x001a, # CONTROL CHARACTER
- 0x001b: 0x001b, # CONTROL CHARACTER
- 0x001c: 0x001c, # CONTROL CHARACTER
- 0x001d: 0x001d, # CONTROL CHARACTER
- 0x001e: 0x001e, # CONTROL CHARACTER
- 0x001f: 0x001f, # CONTROL CHARACTER
- 0x0020: 0x0020, # SPACE, left-right
- 0x0020: 0x00a0, # SPACE, right-left
- 0x0021: 0x0021, # EXCLAMATION MARK, left-right
- 0x0021: 0x00a1, # EXCLAMATION MARK, right-left
- 0x0022: 0x0022, # QUOTATION MARK, left-right
- 0x0022: 0x00a2, # QUOTATION MARK, right-left
- 0x0023: 0x0023, # NUMBER SIGN, left-right
- 0x0023: 0x00a3, # NUMBER SIGN, right-left
- 0x0024: 0x0024, # DOLLAR SIGN, left-right
- 0x0024: 0x00a4, # DOLLAR SIGN, right-left
- 0x0025: 0x0025, # PERCENT SIGN, left-right
- 0x0026: 0x0026, # AMPERSAND, left-right
- 0x0026: 0x00a6, # AMPERSAND, right-left
- 0x0027: 0x0027, # APOSTROPHE, left-right
- 0x0027: 0x00a7, # APOSTROPHE, right-left
- 0x0028: 0x0028, # LEFT PARENTHESIS, left-right
- 0x0028: 0x00a8, # LEFT PARENTHESIS, right-left
- 0x0029: 0x0029, # RIGHT PARENTHESIS, left-right
- 0x0029: 0x00a9, # RIGHT PARENTHESIS, right-left
- 0x002a: 0x002a, # ASTERISK, left-right
- 0x002a: 0x00aa, # ASTERISK, right-left
- 0x002b: 0x002b, # PLUS SIGN, left-right
- 0x002b: 0x00ab, # PLUS SIGN, right-left
- 0x002c: 0x002c, # COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
- 0x002d: 0x002d, # HYPHEN-MINUS, left-right
- 0x002d: 0x00ad, # HYPHEN-MINUS, right-left
- 0x002e: 0x002e, # FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
- 0x002e: 0x00ae, # FULL STOP, right-left
- 0x002f: 0x002f, # SOLIDUS, left-right
- 0x002f: 0x00af, # SOLIDUS, right-left
- 0x0030: 0x0030, # DIGIT ZERO; in Arabic-script context, displayed as 0x0660 ARABIC-INDIC DIGIT ZERO
- 0x0031: 0x0031, # DIGIT ONE; in Arabic-script context, displayed as 0x0661 ARABIC-INDIC DIGIT ONE
- 0x0032: 0x0032, # DIGIT TWO; in Arabic-script context, displayed as 0x0662 ARABIC-INDIC DIGIT TWO
- 0x0033: 0x0033, # DIGIT THREE; in Arabic-script context, displayed as 0x0663 ARABIC-INDIC DIGIT THREE
- 0x0034: 0x0034, # DIGIT FOUR; in Arabic-script context, displayed as 0x0664 ARABIC-INDIC DIGIT FOUR
- 0x0035: 0x0035, # DIGIT FIVE; in Arabic-script context, displayed as 0x0665 ARABIC-INDIC DIGIT FIVE
- 0x0036: 0x0036, # DIGIT SIX; in Arabic-script context, displayed as 0x0666 ARABIC-INDIC DIGIT SIX
- 0x0037: 0x0037, # DIGIT SEVEN; in Arabic-script context, displayed as 0x0667 ARABIC-INDIC DIGIT SEVEN
- 0x0038: 0x0038, # DIGIT EIGHT; in Arabic-script context, displayed as 0x0668 ARABIC-INDIC DIGIT EIGHT
- 0x0039: 0x0039, # DIGIT NINE; in Arabic-script context, displayed as 0x0669 ARABIC-INDIC DIGIT NINE
- 0x003a: 0x003a, # COLON, left-right
- 0x003a: 0x00ba, # COLON, right-left
- 0x003b: 0x003b, # SEMICOLON, left-right
- 0x003c: 0x003c, # LESS-THAN SIGN, left-right
- 0x003c: 0x00bc, # LESS-THAN SIGN, right-left
- 0x003d: 0x003d, # EQUALS SIGN, left-right
- 0x003d: 0x00bd, # EQUALS SIGN, right-left
- 0x003e: 0x003e, # GREATER-THAN SIGN, left-right
- 0x003e: 0x00be, # GREATER-THAN SIGN, right-left
- 0x003f: 0x003f, # QUESTION MARK, left-right
- 0x0040: 0x0040, # COMMERCIAL AT
- 0x0041: 0x0041, # LATIN CAPITAL LETTER A
- 0x0042: 0x0042, # LATIN CAPITAL LETTER B
- 0x0043: 0x0043, # LATIN CAPITAL LETTER C
- 0x0044: 0x0044, # LATIN CAPITAL LETTER D
- 0x0045: 0x0045, # LATIN CAPITAL LETTER E
- 0x0046: 0x0046, # LATIN CAPITAL LETTER F
- 0x0047: 0x0047, # LATIN CAPITAL LETTER G
- 0x0048: 0x0048, # LATIN CAPITAL LETTER H
- 0x0049: 0x0049, # LATIN CAPITAL LETTER I
- 0x004a: 0x004a, # LATIN CAPITAL LETTER J
- 0x004b: 0x004b, # LATIN CAPITAL LETTER K
- 0x004c: 0x004c, # LATIN CAPITAL LETTER L
- 0x004d: 0x004d, # LATIN CAPITAL LETTER M
- 0x004e: 0x004e, # LATIN CAPITAL LETTER N
- 0x004f: 0x004f, # LATIN CAPITAL LETTER O
- 0x0050: 0x0050, # LATIN CAPITAL LETTER P
- 0x0051: 0x0051, # LATIN CAPITAL LETTER Q
- 0x0052: 0x0052, # LATIN CAPITAL LETTER R
- 0x0053: 0x0053, # LATIN CAPITAL LETTER S
- 0x0054: 0x0054, # LATIN CAPITAL LETTER T
- 0x0055: 0x0055, # LATIN CAPITAL LETTER U
- 0x0056: 0x0056, # LATIN CAPITAL LETTER V
- 0x0057: 0x0057, # LATIN CAPITAL LETTER W
- 0x0058: 0x0058, # LATIN CAPITAL LETTER X
- 0x0059: 0x0059, # LATIN CAPITAL LETTER Y
- 0x005a: 0x005a, # LATIN CAPITAL LETTER Z
- 0x005b: 0x005b, # LEFT SQUARE BRACKET, left-right
- 0x005b: 0x00db, # LEFT SQUARE BRACKET, right-left
- 0x005c: 0x005c, # REVERSE SOLIDUS, left-right
- 0x005c: 0x00dc, # REVERSE SOLIDUS, right-left
- 0x005d: 0x005d, # RIGHT SQUARE BRACKET, left-right
- 0x005d: 0x00dd, # RIGHT SQUARE BRACKET, right-left
- 0x005e: 0x005e, # CIRCUMFLEX ACCENT, left-right
- 0x005e: 0x00de, # CIRCUMFLEX ACCENT, right-left
- 0x005f: 0x005f, # LOW LINE, left-right
- 0x005f: 0x00df, # LOW LINE, right-left
- 0x0060: 0x0060, # GRAVE ACCENT
- 0x0061: 0x0061, # LATIN SMALL LETTER A
- 0x0062: 0x0062, # LATIN SMALL LETTER B
- 0x0063: 0x0063, # LATIN SMALL LETTER C
- 0x0064: 0x0064, # LATIN SMALL LETTER D
- 0x0065: 0x0065, # LATIN SMALL LETTER E
- 0x0066: 0x0066, # LATIN SMALL LETTER F
- 0x0067: 0x0067, # LATIN SMALL LETTER G
- 0x0068: 0x0068, # LATIN SMALL LETTER H
- 0x0069: 0x0069, # LATIN SMALL LETTER I
- 0x006a: 0x006a, # LATIN SMALL LETTER J
- 0x006b: 0x006b, # LATIN SMALL LETTER K
- 0x006c: 0x006c, # LATIN SMALL LETTER L
- 0x006d: 0x006d, # LATIN SMALL LETTER M
- 0x006e: 0x006e, # LATIN SMALL LETTER N
- 0x006f: 0x006f, # LATIN SMALL LETTER O
- 0x0070: 0x0070, # LATIN SMALL LETTER P
- 0x0071: 0x0071, # LATIN SMALL LETTER Q
- 0x0072: 0x0072, # LATIN SMALL LETTER R
- 0x0073: 0x0073, # LATIN SMALL LETTER S
- 0x0074: 0x0074, # LATIN SMALL LETTER T
- 0x0075: 0x0075, # LATIN SMALL LETTER U
- 0x0076: 0x0076, # LATIN SMALL LETTER V
- 0x0077: 0x0077, # LATIN SMALL LETTER W
- 0x0078: 0x0078, # LATIN SMALL LETTER X
- 0x0079: 0x0079, # LATIN SMALL LETTER Y
- 0x007a: 0x007a, # LATIN SMALL LETTER Z
- 0x007b: 0x007b, # LEFT CURLY BRACKET, left-right
- 0x007b: 0x00fb, # LEFT CURLY BRACKET, right-left
- 0x007c: 0x007c, # VERTICAL LINE, left-right
- 0x007c: 0x00fc, # VERTICAL LINE, right-left
- 0x007d: 0x007d, # RIGHT CURLY BRACKET, left-right
- 0x007d: 0x00fd, # RIGHT CURLY BRACKET, right-left
- 0x007e: 0x007e, # TILDE
- 0x007f: 0x007f, # CONTROL CHARACTER
- 0x00a0: 0x0081, # NO-BREAK SPACE, right-left
- 0x00ab: 0x008c, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
- 0x00bb: 0x0098, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
- 0x00c4: 0x0080, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x00c7: 0x0082, # LATIN CAPITAL LETTER C WITH CEDILLA
- 0x00c9: 0x0083, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x00d1: 0x0084, # LATIN CAPITAL LETTER N WITH TILDE
- 0x00d6: 0x0085, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x00dc: 0x0086, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x00e0: 0x0088, # LATIN SMALL LETTER A WITH GRAVE
- 0x00e1: 0x0087, # LATIN SMALL LETTER A WITH ACUTE
- 0x00e2: 0x0089, # LATIN SMALL LETTER A WITH CIRCUMFLEX
- 0x00e4: 0x008a, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x00e7: 0x008d, # LATIN SMALL LETTER C WITH CEDILLA
- 0x00e8: 0x008f, # LATIN SMALL LETTER E WITH GRAVE
- 0x00e9: 0x008e, # LATIN SMALL LETTER E WITH ACUTE
- 0x00ea: 0x0090, # LATIN SMALL LETTER E WITH CIRCUMFLEX
- 0x00eb: 0x0091, # LATIN SMALL LETTER E WITH DIAERESIS
- 0x00ed: 0x0092, # LATIN SMALL LETTER I WITH ACUTE
- 0x00ee: 0x0094, # LATIN SMALL LETTER I WITH CIRCUMFLEX
- 0x00ef: 0x0095, # LATIN SMALL LETTER I WITH DIAERESIS
- 0x00f1: 0x0096, # LATIN SMALL LETTER N WITH TILDE
- 0x00f3: 0x0097, # LATIN SMALL LETTER O WITH ACUTE
- 0x00f4: 0x0099, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x00f6: 0x009a, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x00f7: 0x009b, # DIVISION SIGN, right-left
- 0x00f9: 0x009d, # LATIN SMALL LETTER U WITH GRAVE
- 0x00fa: 0x009c, # LATIN SMALL LETTER U WITH ACUTE
- 0x00fb: 0x009e, # LATIN SMALL LETTER U WITH CIRCUMFLEX
- 0x00fc: 0x009f, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x060c: 0x00ac, # ARABIC COMMA
- 0x061b: 0x00bb, # ARABIC SEMICOLON
- 0x061f: 0x00bf, # ARABIC QUESTION MARK
- 0x0621: 0x00c1, # ARABIC LETTER HAMZA
- 0x0622: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE
- 0x0623: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE
- 0x0624: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE
- 0x0625: 0x00c5, # ARABIC LETTER ALEF WITH HAMZA BELOW
- 0x0626: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE
- 0x0627: 0x00c7, # ARABIC LETTER ALEF
- 0x0628: 0x00c8, # ARABIC LETTER BEH
- 0x0629: 0x00c9, # ARABIC LETTER TEH MARBUTA
- 0x062a: 0x00ca, # ARABIC LETTER TEH
- 0x062b: 0x00cb, # ARABIC LETTER THEH
- 0x062c: 0x00cc, # ARABIC LETTER JEEM
- 0x062d: 0x00cd, # ARABIC LETTER HAH
- 0x062e: 0x00ce, # ARABIC LETTER KHAH
- 0x062f: 0x00cf, # ARABIC LETTER DAL
- 0x0630: 0x00d0, # ARABIC LETTER THAL
- 0x0631: 0x00d1, # ARABIC LETTER REH
- 0x0632: 0x00d2, # ARABIC LETTER ZAIN
- 0x0633: 0x00d3, # ARABIC LETTER SEEN
- 0x0634: 0x00d4, # ARABIC LETTER SHEEN
- 0x0635: 0x00d5, # ARABIC LETTER SAD
- 0x0636: 0x00d6, # ARABIC LETTER DAD
- 0x0637: 0x00d7, # ARABIC LETTER TAH
- 0x0638: 0x00d8, # ARABIC LETTER ZAH
- 0x0639: 0x00d9, # ARABIC LETTER AIN
- 0x063a: 0x00da, # ARABIC LETTER GHAIN
- 0x0640: 0x00e0, # ARABIC TATWEEL
- 0x0641: 0x00e1, # ARABIC LETTER FEH
- 0x0642: 0x00e2, # ARABIC LETTER QAF
- 0x0643: 0x00e3, # ARABIC LETTER KAF
- 0x0644: 0x00e4, # ARABIC LETTER LAM
- 0x0645: 0x00e5, # ARABIC LETTER MEEM
- 0x0646: 0x00e6, # ARABIC LETTER NOON
- 0x0647: 0x00e7, # ARABIC LETTER HEH
- 0x0648: 0x00e8, # ARABIC LETTER WAW
- 0x0649: 0x00e9, # ARABIC LETTER ALEF MAKSURA
- 0x064a: 0x00ea, # ARABIC LETTER YEH
- 0x064b: 0x00eb, # ARABIC FATHATAN
- 0x064c: 0x00ec, # ARABIC DAMMATAN
- 0x064d: 0x00ed, # ARABIC KASRATAN
- 0x064e: 0x00ee, # ARABIC FATHA
- 0x064f: 0x00ef, # ARABIC DAMMA
- 0x0650: 0x00f0, # ARABIC KASRA
- 0x0651: 0x00f1, # ARABIC SHADDA
- 0x0652: 0x00f2, # ARABIC SUKUN
- 0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO, right-left (need override)
- 0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE, right-left (need override)
- 0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO, right-left (need override)
- 0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE, right-left (need override)
- 0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR, right-left (need override)
- 0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE, right-left (need override)
- 0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX, right-left (need override)
- 0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN, right-left (need override)
- 0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT, right-left (need override)
- 0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE, right-left (need override)
- 0x066a: 0x00a5, # ARABIC PERCENT SIGN
- 0x0679: 0x00f4, # ARABIC LETTER TTEH
- 0x067e: 0x00f3, # ARABIC LETTER PEH
- 0x0686: 0x00f5, # ARABIC LETTER TCHEH
- 0x0688: 0x00f9, # ARABIC LETTER DDAL
- 0x0691: 0x00fa, # ARABIC LETTER RREH
- 0x0698: 0x00fe, # ARABIC LETTER JEH
- 0x06a4: 0x00f7, # ARABIC LETTER VEH
- 0x06af: 0x00f8, # ARABIC LETTER GAF
- 0x06ba: 0x008b, # ARABIC LETTER NOON GHUNNA
- 0x06d2: 0x00ff, # ARABIC LETTER YEH BARREE
- 0x06d5: 0x00f6, # ARABIC LETTER AE
- 0x2026: 0x0093, # HORIZONTAL ELLIPSIS, right-left
- 0x274a: 0x00c0, # EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
-}
diff --git a/sys/lib/python/encodings/mac_centeuro.py b/sys/lib/python/encodings/mac_centeuro.py
deleted file mode 100644
index 483c8212a..000000000
--- a/sys/lib/python/encodings/mac_centeuro.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-centeuro',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
- u'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
- u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
- u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
- u'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
- u'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
- u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
- u'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
- u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
- u'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
- u'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
- u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
- u'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
- u'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
- u'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
- u'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
- u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
- u'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
- u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
- u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
- u'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
- u'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
- u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u2020' # 0xA0 -> DAGGER
- u'\xb0' # 0xA1 -> DEGREE SIGN
- u'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa7' # 0xA4 -> SECTION SIGN
- u'\u2022' # 0xA5 -> BULLET
- u'\xb6' # 0xA6 -> PILCROW SIGN
- u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
- u'\xae' # 0xA8 -> REGISTERED SIGN
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u2122' # 0xAA -> TRADE MARK SIGN
- u'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
- u'\xa8' # 0xAC -> DIAERESIS
- u'\u2260' # 0xAD -> NOT EQUAL TO
- u'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
- u'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
- u'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
- u'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
- u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
- u'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
- u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
- u'\u2211' # 0xB7 -> N-ARY SUMMATION
- u'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
- u'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
- u'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
- u'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
- u'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
- u'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
- u'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
- u'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
- u'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
- u'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
- u'\xac' # 0xC2 -> NOT SIGN
- u'\u221a' # 0xC3 -> SQUARE ROOT
- u'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
- u'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
- u'\u2206' # 0xC6 -> INCREMENT
- u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- u'\xa0' # 0xCA -> NO-BREAK SPACE
- u'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
- u'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
- u'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
- u'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
- u'\u2013' # 0xD0 -> EN DASH
- u'\u2014' # 0xD1 -> EM DASH
- u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- u'\xf7' # 0xD6 -> DIVISION SIGN
- u'\u25ca' # 0xD7 -> LOZENGE
- u'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
- u'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
- u'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
- u'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
- u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
- u'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
- u'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
- u'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
- u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
- u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
- u'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
- u'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
- u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
- u'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
- u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
- u'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
- u'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
- u'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
- u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
- u'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- u'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
- u'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
- u'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
- u'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
- u'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
- u'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
- u'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
- u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
- u'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
- u'\u02c7' # 0xFF -> CARON
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mac_croatian.py b/sys/lib/python/encodings/mac_croatian.py
deleted file mode 100644
index f57f7b4b3..000000000
--- a/sys/lib/python/encodings/mac_croatian.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-croatian',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
- u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
- u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u2020' # 0xA0 -> DAGGER
- u'\xb0' # 0xA1 -> DEGREE SIGN
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa7' # 0xA4 -> SECTION SIGN
- u'\u2022' # 0xA5 -> BULLET
- u'\xb6' # 0xA6 -> PILCROW SIGN
- u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
- u'\xae' # 0xA8 -> REGISTERED SIGN
- u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
- u'\u2122' # 0xAA -> TRADE MARK SIGN
- u'\xb4' # 0xAB -> ACUTE ACCENT
- u'\xa8' # 0xAC -> DIAERESIS
- u'\u2260' # 0xAD -> NOT EQUAL TO
- u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
- u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u221e' # 0xB0 -> INFINITY
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- u'\u2206' # 0xB4 -> INCREMENT
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
- u'\u2211' # 0xB7 -> N-ARY SUMMATION
- u'\u220f' # 0xB8 -> N-ARY PRODUCT
- u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
- u'\u222b' # 0xBA -> INTEGRAL
- u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
- u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
- u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
- u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
- u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
- u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
- u'\xac' # 0xC2 -> NOT SIGN
- u'\u221a' # 0xC3 -> SQUARE ROOT
- u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
- u'\u2248' # 0xC5 -> ALMOST EQUAL TO
- u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
- u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
- u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- u'\xa0' # 0xCA -> NO-BREAK SPACE
- u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
- u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
- u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
- u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
- u'\u2014' # 0xD1 -> EM DASH
- u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- u'\xf7' # 0xD6 -> DIVISION SIGN
- u'\u25ca' # 0xD7 -> LOZENGE
- u'\uf8ff' # 0xD8 -> Apple logo
- u'\xa9' # 0xD9 -> COPYRIGHT SIGN
- u'\u2044' # 0xDA -> FRACTION SLASH
- u'\u20ac' # 0xDB -> EURO SIGN
- u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
- u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2013' # 0xE0 -> EN DASH
- u'\xb7' # 0xE1 -> MIDDLE DOT
- u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
- u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2030' # 0xE4 -> PER MILLE SIGN
- u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
- u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
- u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
- u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
- u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u02dc' # 0xF7 -> SMALL TILDE
- u'\xaf' # 0xF8 -> MACRON
- u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
- u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\u02da' # 0xFB -> RING ABOVE
- u'\xb8' # 0xFC -> CEDILLA
- u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
- u'\u02c7' # 0xFF -> CARON
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mac_cyrillic.py b/sys/lib/python/encodings/mac_cyrillic.py
deleted file mode 100644
index 63324a14b..000000000
--- a/sys/lib/python/encodings/mac_cyrillic.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-cyrillic',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A
- u'\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE
- u'\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE
- u'\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE
- u'\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE
- u'\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE
- u'\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE
- u'\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE
- u'\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I
- u'\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I
- u'\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA
- u'\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL
- u'\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM
- u'\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN
- u'\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O
- u'\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE
- u'\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER
- u'\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES
- u'\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE
- u'\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U
- u'\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF
- u'\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA
- u'\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE
- u'\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE
- u'\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA
- u'\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA
- u'\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN
- u'\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU
- u'\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN
- u'\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E
- u'\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU
- u'\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA
- u'\u2020' # 0xA0 -> DAGGER
- u'\xb0' # 0xA1 -> DEGREE SIGN
- u'\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa7' # 0xA4 -> SECTION SIGN
- u'\u2022' # 0xA5 -> BULLET
- u'\xb6' # 0xA6 -> PILCROW SIGN
- u'\u0406' # 0xA7 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\xae' # 0xA8 -> REGISTERED SIGN
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u2122' # 0xAA -> TRADE MARK SIGN
- u'\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE
- u'\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE
- u'\u2260' # 0xAD -> NOT EQUAL TO
- u'\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE
- u'\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE
- u'\u221e' # 0xB0 -> INFINITY
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- u'\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
- u'\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE
- u'\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
- u'\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE
- u'\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI
- u'\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI
- u'\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE
- u'\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE
- u'\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE
- u'\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE
- u'\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE
- u'\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE
- u'\xac' # 0xC2 -> NOT SIGN
- u'\u221a' # 0xC3 -> SQUARE ROOT
- u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
- u'\u2248' # 0xC5 -> ALMOST EQUAL TO
- u'\u2206' # 0xC6 -> INCREMENT
- u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- u'\xa0' # 0xCA -> NO-BREAK SPACE
- u'\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE
- u'\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE
- u'\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE
- u'\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE
- u'\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE
- u'\u2013' # 0xD0 -> EN DASH
- u'\u2014' # 0xD1 -> EM DASH
- u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- u'\xf7' # 0xD6 -> DIVISION SIGN
- u'\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U
- u'\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U
- u'\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE
- u'\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE
- u'\u2116' # 0xDC -> NUMERO SIGN
- u'\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO
- u'\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO
- u'\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA
- u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
- u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
- u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
- u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
- u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
- u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
- u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
- u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
- u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
- u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
- u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
- u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
- u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
- u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
- u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
- u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
- u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
- u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
- u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
- u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
- u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
- u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
- u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
- u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
- u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
- u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
- u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
- u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
- u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
- u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
- u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
- u'\u20ac' # 0xFF -> EURO SIGN
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mac_farsi.py b/sys/lib/python/encodings/mac_farsi.py
deleted file mode 100644
index 9dbd76a23..000000000
--- a/sys/lib/python/encodings/mac_farsi.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_farsi generated from 'MAPPINGS/VENDORS/APPLE/FARSI.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-farsi',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE, left-right
- u'!' # 0x21 -> EXCLAMATION MARK, left-right
- u'"' # 0x22 -> QUOTATION MARK, left-right
- u'#' # 0x23 -> NUMBER SIGN, left-right
- u'$' # 0x24 -> DOLLAR SIGN, left-right
- u'%' # 0x25 -> PERCENT SIGN, left-right
- u'&' # 0x26 -> AMPERSAND, left-right
- u"'" # 0x27 -> APOSTROPHE, left-right
- u'(' # 0x28 -> LEFT PARENTHESIS, left-right
- u')' # 0x29 -> RIGHT PARENTHESIS, left-right
- u'*' # 0x2A -> ASTERISK, left-right
- u'+' # 0x2B -> PLUS SIGN, left-right
- u',' # 0x2C -> COMMA, left-right; in Arabic-script context, displayed as 0x066C ARABIC THOUSANDS SEPARATOR
- u'-' # 0x2D -> HYPHEN-MINUS, left-right
- u'.' # 0x2E -> FULL STOP, left-right; in Arabic-script context, displayed as 0x066B ARABIC DECIMAL SEPARATOR
- u'/' # 0x2F -> SOLIDUS, left-right
- u'0' # 0x30 -> DIGIT ZERO; in Arabic-script context, displayed as 0x06F0 EXTENDED ARABIC-INDIC DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE; in Arabic-script context, displayed as 0x06F1 EXTENDED ARABIC-INDIC DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO; in Arabic-script context, displayed as 0x06F2 EXTENDED ARABIC-INDIC DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE; in Arabic-script context, displayed as 0x06F3 EXTENDED ARABIC-INDIC DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR; in Arabic-script context, displayed as 0x06F4 EXTENDED ARABIC-INDIC DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE; in Arabic-script context, displayed as 0x06F5 EXTENDED ARABIC-INDIC DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX; in Arabic-script context, displayed as 0x06F6 EXTENDED ARABIC-INDIC DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN; in Arabic-script context, displayed as 0x06F7 EXTENDED ARABIC-INDIC DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT; in Arabic-script context, displayed as 0x06F8 EXTENDED ARABIC-INDIC DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE; in Arabic-script context, displayed as 0x06F9 EXTENDED ARABIC-INDIC DIGIT NINE
- u':' # 0x3A -> COLON, left-right
- u';' # 0x3B -> SEMICOLON, left-right
- u'<' # 0x3C -> LESS-THAN SIGN, left-right
- u'=' # 0x3D -> EQUALS SIGN, left-right
- u'>' # 0x3E -> GREATER-THAN SIGN, left-right
- u'?' # 0x3F -> QUESTION MARK, left-right
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET, left-right
- u'\\' # 0x5C -> REVERSE SOLIDUS, left-right
- u']' # 0x5D -> RIGHT SQUARE BRACKET, left-right
- u'^' # 0x5E -> CIRCUMFLEX ACCENT, left-right
- u'_' # 0x5F -> LOW LINE, left-right
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET, left-right
- u'|' # 0x7C -> VERTICAL LINE, left-right
- u'}' # 0x7D -> RIGHT CURLY BRACKET, left-right
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xa0' # 0x81 -> NO-BREAK SPACE, right-left
- u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u06ba' # 0x8B -> ARABIC LETTER NOON GHUNNA
- u'\xab' # 0x8C -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
- u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
- u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
- u'\u2026' # 0x93 -> HORIZONTAL ELLIPSIS, right-left
- u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xbb' # 0x98 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK, right-left
- u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf7' # 0x9B -> DIVISION SIGN, right-left
- u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- u' ' # 0xA0 -> SPACE, right-left
- u'!' # 0xA1 -> EXCLAMATION MARK, right-left
- u'"' # 0xA2 -> QUOTATION MARK, right-left
- u'#' # 0xA3 -> NUMBER SIGN, right-left
- u'$' # 0xA4 -> DOLLAR SIGN, right-left
- u'\u066a' # 0xA5 -> ARABIC PERCENT SIGN
- u'&' # 0xA6 -> AMPERSAND, right-left
- u"'" # 0xA7 -> APOSTROPHE, right-left
- u'(' # 0xA8 -> LEFT PARENTHESIS, right-left
- u')' # 0xA9 -> RIGHT PARENTHESIS, right-left
- u'*' # 0xAA -> ASTERISK, right-left
- u'+' # 0xAB -> PLUS SIGN, right-left
- u'\u060c' # 0xAC -> ARABIC COMMA
- u'-' # 0xAD -> HYPHEN-MINUS, right-left
- u'.' # 0xAE -> FULL STOP, right-left
- u'/' # 0xAF -> SOLIDUS, right-left
- u'\u06f0' # 0xB0 -> EXTENDED ARABIC-INDIC DIGIT ZERO, right-left (need override)
- u'\u06f1' # 0xB1 -> EXTENDED ARABIC-INDIC DIGIT ONE, right-left (need override)
- u'\u06f2' # 0xB2 -> EXTENDED ARABIC-INDIC DIGIT TWO, right-left (need override)
- u'\u06f3' # 0xB3 -> EXTENDED ARABIC-INDIC DIGIT THREE, right-left (need override)
- u'\u06f4' # 0xB4 -> EXTENDED ARABIC-INDIC DIGIT FOUR, right-left (need override)
- u'\u06f5' # 0xB5 -> EXTENDED ARABIC-INDIC DIGIT FIVE, right-left (need override)
- u'\u06f6' # 0xB6 -> EXTENDED ARABIC-INDIC DIGIT SIX, right-left (need override)
- u'\u06f7' # 0xB7 -> EXTENDED ARABIC-INDIC DIGIT SEVEN, right-left (need override)
- u'\u06f8' # 0xB8 -> EXTENDED ARABIC-INDIC DIGIT EIGHT, right-left (need override)
- u'\u06f9' # 0xB9 -> EXTENDED ARABIC-INDIC DIGIT NINE, right-left (need override)
- u':' # 0xBA -> COLON, right-left
- u'\u061b' # 0xBB -> ARABIC SEMICOLON
- u'<' # 0xBC -> LESS-THAN SIGN, right-left
- u'=' # 0xBD -> EQUALS SIGN, right-left
- u'>' # 0xBE -> GREATER-THAN SIGN, right-left
- u'\u061f' # 0xBF -> ARABIC QUESTION MARK
- u'\u274a' # 0xC0 -> EIGHT TEARDROP-SPOKED PROPELLER ASTERISK, right-left
- u'\u0621' # 0xC1 -> ARABIC LETTER HAMZA
- u'\u0622' # 0xC2 -> ARABIC LETTER ALEF WITH MADDA ABOVE
- u'\u0623' # 0xC3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE
- u'\u0624' # 0xC4 -> ARABIC LETTER WAW WITH HAMZA ABOVE
- u'\u0625' # 0xC5 -> ARABIC LETTER ALEF WITH HAMZA BELOW
- u'\u0626' # 0xC6 -> ARABIC LETTER YEH WITH HAMZA ABOVE
- u'\u0627' # 0xC7 -> ARABIC LETTER ALEF
- u'\u0628' # 0xC8 -> ARABIC LETTER BEH
- u'\u0629' # 0xC9 -> ARABIC LETTER TEH MARBUTA
- u'\u062a' # 0xCA -> ARABIC LETTER TEH
- u'\u062b' # 0xCB -> ARABIC LETTER THEH
- u'\u062c' # 0xCC -> ARABIC LETTER JEEM
- u'\u062d' # 0xCD -> ARABIC LETTER HAH
- u'\u062e' # 0xCE -> ARABIC LETTER KHAH
- u'\u062f' # 0xCF -> ARABIC LETTER DAL
- u'\u0630' # 0xD0 -> ARABIC LETTER THAL
- u'\u0631' # 0xD1 -> ARABIC LETTER REH
- u'\u0632' # 0xD2 -> ARABIC LETTER ZAIN
- u'\u0633' # 0xD3 -> ARABIC LETTER SEEN
- u'\u0634' # 0xD4 -> ARABIC LETTER SHEEN
- u'\u0635' # 0xD5 -> ARABIC LETTER SAD
- u'\u0636' # 0xD6 -> ARABIC LETTER DAD
- u'\u0637' # 0xD7 -> ARABIC LETTER TAH
- u'\u0638' # 0xD8 -> ARABIC LETTER ZAH
- u'\u0639' # 0xD9 -> ARABIC LETTER AIN
- u'\u063a' # 0xDA -> ARABIC LETTER GHAIN
- u'[' # 0xDB -> LEFT SQUARE BRACKET, right-left
- u'\\' # 0xDC -> REVERSE SOLIDUS, right-left
- u']' # 0xDD -> RIGHT SQUARE BRACKET, right-left
- u'^' # 0xDE -> CIRCUMFLEX ACCENT, right-left
- u'_' # 0xDF -> LOW LINE, right-left
- u'\u0640' # 0xE0 -> ARABIC TATWEEL
- u'\u0641' # 0xE1 -> ARABIC LETTER FEH
- u'\u0642' # 0xE2 -> ARABIC LETTER QAF
- u'\u0643' # 0xE3 -> ARABIC LETTER KAF
- u'\u0644' # 0xE4 -> ARABIC LETTER LAM
- u'\u0645' # 0xE5 -> ARABIC LETTER MEEM
- u'\u0646' # 0xE6 -> ARABIC LETTER NOON
- u'\u0647' # 0xE7 -> ARABIC LETTER HEH
- u'\u0648' # 0xE8 -> ARABIC LETTER WAW
- u'\u0649' # 0xE9 -> ARABIC LETTER ALEF MAKSURA
- u'\u064a' # 0xEA -> ARABIC LETTER YEH
- u'\u064b' # 0xEB -> ARABIC FATHATAN
- u'\u064c' # 0xEC -> ARABIC DAMMATAN
- u'\u064d' # 0xED -> ARABIC KASRATAN
- u'\u064e' # 0xEE -> ARABIC FATHA
- u'\u064f' # 0xEF -> ARABIC DAMMA
- u'\u0650' # 0xF0 -> ARABIC KASRA
- u'\u0651' # 0xF1 -> ARABIC SHADDA
- u'\u0652' # 0xF2 -> ARABIC SUKUN
- u'\u067e' # 0xF3 -> ARABIC LETTER PEH
- u'\u0679' # 0xF4 -> ARABIC LETTER TTEH
- u'\u0686' # 0xF5 -> ARABIC LETTER TCHEH
- u'\u06d5' # 0xF6 -> ARABIC LETTER AE
- u'\u06a4' # 0xF7 -> ARABIC LETTER VEH
- u'\u06af' # 0xF8 -> ARABIC LETTER GAF
- u'\u0688' # 0xF9 -> ARABIC LETTER DDAL
- u'\u0691' # 0xFA -> ARABIC LETTER RREH
- u'{' # 0xFB -> LEFT CURLY BRACKET, right-left
- u'|' # 0xFC -> VERTICAL LINE, right-left
- u'}' # 0xFD -> RIGHT CURLY BRACKET, right-left
- u'\u0698' # 0xFE -> ARABIC LETTER JEH
- u'\u06d2' # 0xFF -> ARABIC LETTER YEH BARREE
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mac_greek.py b/sys/lib/python/encodings/mac_greek.py
deleted file mode 100644
index 68f4fff0d..000000000
--- a/sys/lib/python/encodings/mac_greek.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-greek',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xb9' # 0x81 -> SUPERSCRIPT ONE
- u'\xb2' # 0x82 -> SUPERSCRIPT TWO
- u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xb3' # 0x84 -> SUPERSCRIPT THREE
- u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\u0385' # 0x87 -> GREEK DIALYTIKA TONOS
- u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\u0384' # 0x8B -> GREEK TONOS
- u'\xa8' # 0x8C -> DIAERESIS
- u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
- u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xa3' # 0x92 -> POUND SIGN
- u'\u2122' # 0x93 -> TRADE MARK SIGN
- u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\u2022' # 0x96 -> BULLET
- u'\xbd' # 0x97 -> VULGAR FRACTION ONE HALF
- u'\u2030' # 0x98 -> PER MILLE SIGN
- u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xa6' # 0x9B -> BROKEN BAR
- u'\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN
- u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u2020' # 0xA0 -> DAGGER
- u'\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA
- u'\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA
- u'\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA
- u'\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA
- u'\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI
- u'\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI
- u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
- u'\xae' # 0xA8 -> REGISTERED SIGN
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA
- u'\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
- u'\xa7' # 0xAC -> SECTION SIGN
- u'\u2260' # 0xAD -> NOT EQUAL TO
- u'\xb0' # 0xAE -> DEGREE SIGN
- u'\xb7' # 0xAF -> MIDDLE DOT
- u'\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- u'\xa5' # 0xB4 -> YEN SIGN
- u'\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA
- u'\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON
- u'\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA
- u'\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA
- u'\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA
- u'\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA
- u'\u039c' # 0xBB -> GREEK CAPITAL LETTER MU
- u'\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI
- u'\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
- u'\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI
- u'\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA
- u'\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS
- u'\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU
- u'\xac' # 0xC2 -> NOT SIGN
- u'\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON
- u'\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO
- u'\u2248' # 0xC5 -> ALMOST EQUAL TO
- u'\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU
- u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- u'\xa0' # 0xCA -> NO-BREAK SPACE
- u'\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON
- u'\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI
- u'\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS
- u'\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS
- u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
- u'\u2013' # 0xD0 -> EN DASH
- u'\u2015' # 0xD1 -> HORIZONTAL BAR
- u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- u'\xf7' # 0xD6 -> DIVISION SIGN
- u'\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS
- u'\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS
- u'\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
- u'\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS
- u'\u03ad' # 0xDB -> GREEK SMALL LETTER EPSILON WITH TONOS
- u'\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS
- u'\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS
- u'\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS
- u'\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
- u'\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS
- u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
- u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
- u'\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI
- u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
- u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
- u'\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI
- u'\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA
- u'\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA
- u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
- u'\u03be' # 0xEA -> GREEK SMALL LETTER XI
- u'\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA
- u'\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA
- u'\u03bc' # 0xED -> GREEK SMALL LETTER MU
- u'\u03bd' # 0xEE -> GREEK SMALL LETTER NU
- u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
- u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
- u'\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS
- u'\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO
- u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
- u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
- u'\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA
- u'\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA
- u'\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA
- u'\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI
- u'\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON
- u'\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA
- u'\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
- u'\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
- u'\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
- u'\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
- u'\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mac_iceland.py b/sys/lib/python/encodings/mac_iceland.py
deleted file mode 100644
index c24add2ad..000000000
--- a/sys/lib/python/encodings/mac_iceland.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-iceland',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
- u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
- u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
- u'\xb0' # 0xA1 -> DEGREE SIGN
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa7' # 0xA4 -> SECTION SIGN
- u'\u2022' # 0xA5 -> BULLET
- u'\xb6' # 0xA6 -> PILCROW SIGN
- u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
- u'\xae' # 0xA8 -> REGISTERED SIGN
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u2122' # 0xAA -> TRADE MARK SIGN
- u'\xb4' # 0xAB -> ACUTE ACCENT
- u'\xa8' # 0xAC -> DIAERESIS
- u'\u2260' # 0xAD -> NOT EQUAL TO
- u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
- u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u221e' # 0xB0 -> INFINITY
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- u'\xa5' # 0xB4 -> YEN SIGN
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
- u'\u2211' # 0xB7 -> N-ARY SUMMATION
- u'\u220f' # 0xB8 -> N-ARY PRODUCT
- u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
- u'\u222b' # 0xBA -> INTEGRAL
- u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
- u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
- u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
- u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
- u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
- u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
- u'\xac' # 0xC2 -> NOT SIGN
- u'\u221a' # 0xC3 -> SQUARE ROOT
- u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
- u'\u2248' # 0xC5 -> ALMOST EQUAL TO
- u'\u2206' # 0xC6 -> INCREMENT
- u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- u'\xa0' # 0xCA -> NO-BREAK SPACE
- u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
- u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
- u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
- u'\u2013' # 0xD0 -> EN DASH
- u'\u2014' # 0xD1 -> EM DASH
- u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- u'\xf7' # 0xD6 -> DIVISION SIGN
- u'\u25ca' # 0xD7 -> LOZENGE
- u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\u2044' # 0xDA -> FRACTION SLASH
- u'\u20ac' # 0xDB -> EURO SIGN
- u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
- u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
- u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
- u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
- u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
- u'\xb7' # 0xE1 -> MIDDLE DOT
- u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
- u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2030' # 0xE4 -> PER MILLE SIGN
- u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\uf8ff' # 0xF0 -> Apple logo
- u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
- u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u02dc' # 0xF7 -> SMALL TILDE
- u'\xaf' # 0xF8 -> MACRON
- u'\u02d8' # 0xF9 -> BREVE
- u'\u02d9' # 0xFA -> DOT ABOVE
- u'\u02da' # 0xFB -> RING ABOVE
- u'\xb8' # 0xFC -> CEDILLA
- u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
- u'\u02db' # 0xFE -> OGONEK
- u'\u02c7' # 0xFF -> CARON
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mac_latin2.py b/sys/lib/python/encodings/mac_latin2.py
deleted file mode 100644
index e322be236..000000000
--- a/sys/lib/python/encodings/mac_latin2.py
+++ /dev/null
@@ -1,183 +0,0 @@
-""" Python Character Mapping Codec generated from 'LATIN2.TXT' with gencodec.py.
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-(c) Copyright 2000 Guido van Rossum.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_map)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_map)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-latin2',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
- 0x0081: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
- 0x0082: 0x0101, # LATIN SMALL LETTER A WITH MACRON
- 0x0083: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
- 0x0084: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
- 0x0085: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
- 0x0086: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
- 0x0087: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
- 0x0088: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
- 0x0089: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
- 0x008a: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
- 0x008b: 0x010d, # LATIN SMALL LETTER C WITH CARON
- 0x008c: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
- 0x008d: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
- 0x008e: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
- 0x008f: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
- 0x0090: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
- 0x0091: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
- 0x0092: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
- 0x0093: 0x010f, # LATIN SMALL LETTER D WITH CARON
- 0x0094: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
- 0x0095: 0x0113, # LATIN SMALL LETTER E WITH MACRON
- 0x0096: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
- 0x0097: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
- 0x0098: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
- 0x0099: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
- 0x009a: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
- 0x009b: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
- 0x009c: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
- 0x009d: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
- 0x009e: 0x011b, # LATIN SMALL LETTER E WITH CARON
- 0x009f: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
- 0x00a0: 0x2020, # DAGGER
- 0x00a1: 0x00b0, # DEGREE SIGN
- 0x00a2: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
- 0x00a4: 0x00a7, # SECTION SIGN
- 0x00a5: 0x2022, # BULLET
- 0x00a6: 0x00b6, # PILCROW SIGN
- 0x00a7: 0x00df, # LATIN SMALL LETTER SHARP S
- 0x00a8: 0x00ae, # REGISTERED SIGN
- 0x00aa: 0x2122, # TRADE MARK SIGN
- 0x00ab: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
- 0x00ac: 0x00a8, # DIAERESIS
- 0x00ad: 0x2260, # NOT EQUAL TO
- 0x00ae: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
- 0x00af: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
- 0x00b0: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
- 0x00b1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
- 0x00b2: 0x2264, # LESS-THAN OR EQUAL TO
- 0x00b3: 0x2265, # GREATER-THAN OR EQUAL TO
- 0x00b4: 0x012b, # LATIN SMALL LETTER I WITH MACRON
- 0x00b5: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
- 0x00b6: 0x2202, # PARTIAL DIFFERENTIAL
- 0x00b7: 0x2211, # N-ARY SUMMATION
- 0x00b8: 0x0142, # LATIN SMALL LETTER L WITH STROKE
- 0x00b9: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
- 0x00ba: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
- 0x00bb: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
- 0x00bc: 0x013e, # LATIN SMALL LETTER L WITH CARON
- 0x00bd: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
- 0x00be: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
- 0x00bf: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
- 0x00c0: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
- 0x00c1: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
- 0x00c2: 0x00ac, # NOT SIGN
- 0x00c3: 0x221a, # SQUARE ROOT
- 0x00c4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
- 0x00c5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
- 0x00c6: 0x2206, # INCREMENT
- 0x00c7: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00c8: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- 0x00c9: 0x2026, # HORIZONTAL ELLIPSIS
- 0x00ca: 0x00a0, # NO-BREAK SPACE
- 0x00cb: 0x0148, # LATIN SMALL LETTER N WITH CARON
- 0x00cc: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
- 0x00cd: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
- 0x00ce: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
- 0x00cf: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
- 0x00d0: 0x2013, # EN DASH
- 0x00d1: 0x2014, # EM DASH
- 0x00d2: 0x201c, # LEFT DOUBLE QUOTATION MARK
- 0x00d3: 0x201d, # RIGHT DOUBLE QUOTATION MARK
- 0x00d4: 0x2018, # LEFT SINGLE QUOTATION MARK
- 0x00d5: 0x2019, # RIGHT SINGLE QUOTATION MARK
- 0x00d6: 0x00f7, # DIVISION SIGN
- 0x00d7: 0x25ca, # LOZENGE
- 0x00d8: 0x014d, # LATIN SMALL LETTER O WITH MACRON
- 0x00d9: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
- 0x00da: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
- 0x00db: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
- 0x00dc: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- 0x00dd: 0x203a, # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- 0x00de: 0x0159, # LATIN SMALL LETTER R WITH CARON
- 0x00df: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
- 0x00e0: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
- 0x00e1: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
- 0x00e2: 0x201a, # SINGLE LOW-9 QUOTATION MARK
- 0x00e3: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
- 0x00e4: 0x0161, # LATIN SMALL LETTER S WITH CARON
- 0x00e5: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
- 0x00e6: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
- 0x00e7: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
- 0x00e8: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
- 0x00e9: 0x0165, # LATIN SMALL LETTER T WITH CARON
- 0x00ea: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
- 0x00eb: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
- 0x00ec: 0x017e, # LATIN SMALL LETTER Z WITH CARON
- 0x00ed: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
- 0x00ee: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
- 0x00ef: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- 0x00f0: 0x016b, # LATIN SMALL LETTER U WITH MACRON
- 0x00f1: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
- 0x00f2: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
- 0x00f3: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
- 0x00f4: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
- 0x00f5: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
- 0x00f6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
- 0x00f7: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
- 0x00f8: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
- 0x00f9: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
- 0x00fa: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
- 0x00fb: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
- 0x00fc: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
- 0x00fd: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
- 0x00fe: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
- 0x00ff: 0x02c7, # CARON
-})
-
-### Encoding Map
-
-encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/sys/lib/python/encodings/mac_roman.py b/sys/lib/python/encodings/mac_roman.py
deleted file mode 100644
index 62605ec63..000000000
--- a/sys/lib/python/encodings/mac_roman.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_roman generated from 'MAPPINGS/VENDORS/APPLE/ROMAN.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-roman',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
- u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
- u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u2020' # 0xA0 -> DAGGER
- u'\xb0' # 0xA1 -> DEGREE SIGN
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa7' # 0xA4 -> SECTION SIGN
- u'\u2022' # 0xA5 -> BULLET
- u'\xb6' # 0xA6 -> PILCROW SIGN
- u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
- u'\xae' # 0xA8 -> REGISTERED SIGN
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u2122' # 0xAA -> TRADE MARK SIGN
- u'\xb4' # 0xAB -> ACUTE ACCENT
- u'\xa8' # 0xAC -> DIAERESIS
- u'\u2260' # 0xAD -> NOT EQUAL TO
- u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
- u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u221e' # 0xB0 -> INFINITY
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- u'\xa5' # 0xB4 -> YEN SIGN
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
- u'\u2211' # 0xB7 -> N-ARY SUMMATION
- u'\u220f' # 0xB8 -> N-ARY PRODUCT
- u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
- u'\u222b' # 0xBA -> INTEGRAL
- u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
- u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
- u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
- u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
- u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
- u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
- u'\xac' # 0xC2 -> NOT SIGN
- u'\u221a' # 0xC3 -> SQUARE ROOT
- u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
- u'\u2248' # 0xC5 -> ALMOST EQUAL TO
- u'\u2206' # 0xC6 -> INCREMENT
- u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- u'\xa0' # 0xCA -> NO-BREAK SPACE
- u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
- u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
- u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
- u'\u2013' # 0xD0 -> EN DASH
- u'\u2014' # 0xD1 -> EM DASH
- u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- u'\xf7' # 0xD6 -> DIVISION SIGN
- u'\u25ca' # 0xD7 -> LOZENGE
- u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\u2044' # 0xDA -> FRACTION SLASH
- u'\u20ac' # 0xDB -> EURO SIGN
- u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\ufb01' # 0xDE -> LATIN SMALL LIGATURE FI
- u'\ufb02' # 0xDF -> LATIN SMALL LIGATURE FL
- u'\u2021' # 0xE0 -> DOUBLE DAGGER
- u'\xb7' # 0xE1 -> MIDDLE DOT
- u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
- u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2030' # 0xE4 -> PER MILLE SIGN
- u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\uf8ff' # 0xF0 -> Apple logo
- u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
- u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u02dc' # 0xF7 -> SMALL TILDE
- u'\xaf' # 0xF8 -> MACRON
- u'\u02d8' # 0xF9 -> BREVE
- u'\u02d9' # 0xFA -> DOT ABOVE
- u'\u02da' # 0xFB -> RING ABOVE
- u'\xb8' # 0xFC -> CEDILLA
- u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
- u'\u02db' # 0xFE -> OGONEK
- u'\u02c7' # 0xFF -> CARON
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mac_romanian.py b/sys/lib/python/encodings/mac_romanian.py
deleted file mode 100644
index 5bd5ae862..000000000
--- a/sys/lib/python/encodings/mac_romanian.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_romanian generated from 'MAPPINGS/VENDORS/APPLE/ROMANIAN.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-romanian',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
- u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
- u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u2020' # 0xA0 -> DAGGER
- u'\xb0' # 0xA1 -> DEGREE SIGN
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa7' # 0xA4 -> SECTION SIGN
- u'\u2022' # 0xA5 -> BULLET
- u'\xb6' # 0xA6 -> PILCROW SIGN
- u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
- u'\xae' # 0xA8 -> REGISTERED SIGN
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u2122' # 0xAA -> TRADE MARK SIGN
- u'\xb4' # 0xAB -> ACUTE ACCENT
- u'\xa8' # 0xAC -> DIAERESIS
- u'\u2260' # 0xAD -> NOT EQUAL TO
- u'\u0102' # 0xAE -> LATIN CAPITAL LETTER A WITH BREVE
- u'\u0218' # 0xAF -> LATIN CAPITAL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
- u'\u221e' # 0xB0 -> INFINITY
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- u'\xa5' # 0xB4 -> YEN SIGN
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
- u'\u2211' # 0xB7 -> N-ARY SUMMATION
- u'\u220f' # 0xB8 -> N-ARY PRODUCT
- u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
- u'\u222b' # 0xBA -> INTEGRAL
- u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
- u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
- u'\u0103' # 0xBE -> LATIN SMALL LETTER A WITH BREVE
- u'\u0219' # 0xBF -> LATIN SMALL LETTER S WITH COMMA BELOW # for Unicode 3.0 and later
- u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
- u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
- u'\xac' # 0xC2 -> NOT SIGN
- u'\u221a' # 0xC3 -> SQUARE ROOT
- u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
- u'\u2248' # 0xC5 -> ALMOST EQUAL TO
- u'\u2206' # 0xC6 -> INCREMENT
- u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- u'\xa0' # 0xCA -> NO-BREAK SPACE
- u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
- u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
- u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
- u'\u2013' # 0xD0 -> EN DASH
- u'\u2014' # 0xD1 -> EM DASH
- u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- u'\xf7' # 0xD6 -> DIVISION SIGN
- u'\u25ca' # 0xD7 -> LOZENGE
- u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\u2044' # 0xDA -> FRACTION SLASH
- u'\u20ac' # 0xDB -> EURO SIGN
- u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
- u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
- u'\u021b' # 0xDF -> LATIN SMALL LETTER T WITH COMMA BELOW # for Unicode 3.0 and later
- u'\u2021' # 0xE0 -> DOUBLE DAGGER
- u'\xb7' # 0xE1 -> MIDDLE DOT
- u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
- u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2030' # 0xE4 -> PER MILLE SIGN
- u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\uf8ff' # 0xF0 -> Apple logo
- u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
- u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u02dc' # 0xF7 -> SMALL TILDE
- u'\xaf' # 0xF8 -> MACRON
- u'\u02d8' # 0xF9 -> BREVE
- u'\u02d9' # 0xFA -> DOT ABOVE
- u'\u02da' # 0xFB -> RING ABOVE
- u'\xb8' # 0xFC -> CEDILLA
- u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
- u'\u02db' # 0xFE -> OGONEK
- u'\u02c7' # 0xFF -> CARON
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mac_turkish.py b/sys/lib/python/encodings/mac_turkish.py
deleted file mode 100644
index 0787f4990..000000000
--- a/sys/lib/python/encodings/mac_turkish.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mac-turkish',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> CONTROL CHARACTER
- u'\x01' # 0x01 -> CONTROL CHARACTER
- u'\x02' # 0x02 -> CONTROL CHARACTER
- u'\x03' # 0x03 -> CONTROL CHARACTER
- u'\x04' # 0x04 -> CONTROL CHARACTER
- u'\x05' # 0x05 -> CONTROL CHARACTER
- u'\x06' # 0x06 -> CONTROL CHARACTER
- u'\x07' # 0x07 -> CONTROL CHARACTER
- u'\x08' # 0x08 -> CONTROL CHARACTER
- u'\t' # 0x09 -> CONTROL CHARACTER
- u'\n' # 0x0A -> CONTROL CHARACTER
- u'\x0b' # 0x0B -> CONTROL CHARACTER
- u'\x0c' # 0x0C -> CONTROL CHARACTER
- u'\r' # 0x0D -> CONTROL CHARACTER
- u'\x0e' # 0x0E -> CONTROL CHARACTER
- u'\x0f' # 0x0F -> CONTROL CHARACTER
- u'\x10' # 0x10 -> CONTROL CHARACTER
- u'\x11' # 0x11 -> CONTROL CHARACTER
- u'\x12' # 0x12 -> CONTROL CHARACTER
- u'\x13' # 0x13 -> CONTROL CHARACTER
- u'\x14' # 0x14 -> CONTROL CHARACTER
- u'\x15' # 0x15 -> CONTROL CHARACTER
- u'\x16' # 0x16 -> CONTROL CHARACTER
- u'\x17' # 0x17 -> CONTROL CHARACTER
- u'\x18' # 0x18 -> CONTROL CHARACTER
- u'\x19' # 0x19 -> CONTROL CHARACTER
- u'\x1a' # 0x1A -> CONTROL CHARACTER
- u'\x1b' # 0x1B -> CONTROL CHARACTER
- u'\x1c' # 0x1C -> CONTROL CHARACTER
- u'\x1d' # 0x1D -> CONTROL CHARACTER
- u'\x1e' # 0x1E -> CONTROL CHARACTER
- u'\x1f' # 0x1F -> CONTROL CHARACTER
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> CONTROL CHARACTER
- u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
- u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
- u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
- u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
- u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
- u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
- u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
- u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
- u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
- u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
- u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
- u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
- u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
- u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
- u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
- u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
- u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
- u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
- u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
- u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
- u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
- u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
- u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
- u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
- u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
- u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
- u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
- u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
- u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
- u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
- u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
- u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
- u'\u2020' # 0xA0 -> DAGGER
- u'\xb0' # 0xA1 -> DEGREE SIGN
- u'\xa2' # 0xA2 -> CENT SIGN
- u'\xa3' # 0xA3 -> POUND SIGN
- u'\xa7' # 0xA4 -> SECTION SIGN
- u'\u2022' # 0xA5 -> BULLET
- u'\xb6' # 0xA6 -> PILCROW SIGN
- u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
- u'\xae' # 0xA8 -> REGISTERED SIGN
- u'\xa9' # 0xA9 -> COPYRIGHT SIGN
- u'\u2122' # 0xAA -> TRADE MARK SIGN
- u'\xb4' # 0xAB -> ACUTE ACCENT
- u'\xa8' # 0xAC -> DIAERESIS
- u'\u2260' # 0xAD -> NOT EQUAL TO
- u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
- u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
- u'\u221e' # 0xB0 -> INFINITY
- u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
- u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
- u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
- u'\xa5' # 0xB4 -> YEN SIGN
- u'\xb5' # 0xB5 -> MICRO SIGN
- u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
- u'\u2211' # 0xB7 -> N-ARY SUMMATION
- u'\u220f' # 0xB8 -> N-ARY PRODUCT
- u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
- u'\u222b' # 0xBA -> INTEGRAL
- u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
- u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
- u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
- u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
- u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
- u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
- u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
- u'\xac' # 0xC2 -> NOT SIGN
- u'\u221a' # 0xC3 -> SQUARE ROOT
- u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
- u'\u2248' # 0xC5 -> ALMOST EQUAL TO
- u'\u2206' # 0xC6 -> INCREMENT
- u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
- u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
- u'\xa0' # 0xCA -> NO-BREAK SPACE
- u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
- u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
- u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
- u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
- u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
- u'\u2013' # 0xD0 -> EN DASH
- u'\u2014' # 0xD1 -> EM DASH
- u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
- u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
- u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
- u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
- u'\xf7' # 0xD6 -> DIVISION SIGN
- u'\u25ca' # 0xD7 -> LOZENGE
- u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
- u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
- u'\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE
- u'\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE
- u'\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE
- u'\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I
- u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
- u'\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA
- u'\u2021' # 0xE0 -> DOUBLE DAGGER
- u'\xb7' # 0xE1 -> MIDDLE DOT
- u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
- u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
- u'\u2030' # 0xE4 -> PER MILLE SIGN
- u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
- u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
- u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
- u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
- u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
- u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
- u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
- u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
- u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
- u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
- u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
- u'\uf8ff' # 0xF0 -> Apple logo
- u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
- u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
- u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
- u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
- u'\uf8a0' # 0xF5 -> undefined1
- u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
- u'\u02dc' # 0xF7 -> SMALL TILDE
- u'\xaf' # 0xF8 -> MACRON
- u'\u02d8' # 0xF9 -> BREVE
- u'\u02d9' # 0xFA -> DOT ABOVE
- u'\u02da' # 0xFB -> RING ABOVE
- u'\xb8' # 0xFC -> CEDILLA
- u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
- u'\u02db' # 0xFE -> OGONEK
- u'\u02c7' # 0xFF -> CARON
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/mbcs.py b/sys/lib/python/encodings/mbcs.py
deleted file mode 100644
index baf46cbd4..000000000
--- a/sys/lib/python/encodings/mbcs.py
+++ /dev/null
@@ -1,47 +0,0 @@
-""" Python 'mbcs' Codec for Windows
-
-
-Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py,
-which was written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-# Import them explicitly to cause an ImportError
-# on non-Windows systems
-from codecs import mbcs_encode, mbcs_decode
-# for IncrementalDecoder, IncrementalEncoder, ...
-import codecs
-
-### Codec APIs
-
-encode = mbcs_encode
-
-def decode(input, errors='strict'):
- return mbcs_decode(input, errors, True)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return mbcs_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- _buffer_decode = mbcs_decode
-
-class StreamWriter(codecs.StreamWriter):
- encode = mbcs_encode
-
-class StreamReader(codecs.StreamReader):
- decode = mbcs_decode
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='mbcs',
- encode=encode,
- decode=decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/palmos.py b/sys/lib/python/encodings/palmos.py
deleted file mode 100644
index 4b77e2ba9..000000000
--- a/sys/lib/python/encodings/palmos.py
+++ /dev/null
@@ -1,83 +0,0 @@
-""" Python Character Mapping Codec for PalmOS 3.5.
-
-Written by Sjoerd Mullender (sjoerd@acm.org); based on iso8859_15.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_map)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_map)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='palmos',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-
-# The PalmOS character set is mostly iso-8859-1 with some differences.
-decoding_map.update({
- 0x0080: 0x20ac, # EURO SIGN
- 0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
- 0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
- 0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
- 0x0085: 0x2026, # HORIZONTAL ELLIPSIS
- 0x0086: 0x2020, # DAGGER
- 0x0087: 0x2021, # DOUBLE DAGGER
- 0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
- 0x0089: 0x2030, # PER MILLE SIGN
- 0x008a: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
- 0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
- 0x008c: 0x0152, # LATIN CAPITAL LIGATURE OE
- 0x008d: 0x2666, # BLACK DIAMOND SUIT
- 0x008e: 0x2663, # BLACK CLUB SUIT
- 0x008f: 0x2665, # BLACK HEART SUIT
- 0x0090: 0x2660, # BLACK SPADE SUIT
- 0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
- 0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
- 0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
- 0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
- 0x0095: 0x2022, # BULLET
- 0x0096: 0x2013, # EN DASH
- 0x0097: 0x2014, # EM DASH
- 0x0098: 0x02dc, # SMALL TILDE
- 0x0099: 0x2122, # TRADE MARK SIGN
- 0x009a: 0x0161, # LATIN SMALL LETTER S WITH CARON
- 0x009c: 0x0153, # LATIN SMALL LIGATURE OE
- 0x009f: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
-})
-
-### Encoding Map
-
-encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/sys/lib/python/encodings/ptcp154.py b/sys/lib/python/encodings/ptcp154.py
deleted file mode 100644
index aef897538..000000000
--- a/sys/lib/python/encodings/ptcp154.py
+++ /dev/null
@@ -1,175 +0,0 @@
-""" Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-(c) Copyright 2000 Guido van Rossum.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_map)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_map)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='ptcp154',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0080: 0x0496, # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
- 0x0081: 0x0492, # CYRILLIC CAPITAL LETTER GHE WITH STROKE
- 0x0082: 0x04ee, # CYRILLIC CAPITAL LETTER U WITH MACRON
- 0x0083: 0x0493, # CYRILLIC SMALL LETTER GHE WITH STROKE
- 0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
- 0x0085: 0x2026, # HORIZONTAL ELLIPSIS
- 0x0086: 0x04b6, # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
- 0x0087: 0x04ae, # CYRILLIC CAPITAL LETTER STRAIGHT U
- 0x0088: 0x04b2, # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
- 0x0089: 0x04af, # CYRILLIC SMALL LETTER STRAIGHT U
- 0x008a: 0x04a0, # CYRILLIC CAPITAL LETTER BASHKIR KA
- 0x008b: 0x04e2, # CYRILLIC CAPITAL LETTER I WITH MACRON
- 0x008c: 0x04a2, # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
- 0x008d: 0x049a, # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
- 0x008e: 0x04ba, # CYRILLIC CAPITAL LETTER SHHA
- 0x008f: 0x04b8, # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
- 0x0090: 0x0497, # CYRILLIC SMALL LETTER ZHE WITH DESCENDER
- 0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
- 0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
- 0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
- 0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
- 0x0095: 0x2022, # BULLET
- 0x0096: 0x2013, # EN DASH
- 0x0097: 0x2014, # EM DASH
- 0x0098: 0x04b3, # CYRILLIC SMALL LETTER HA WITH DESCENDER
- 0x0099: 0x04b7, # CYRILLIC SMALL LETTER CHE WITH DESCENDER
- 0x009a: 0x04a1, # CYRILLIC SMALL LETTER BASHKIR KA
- 0x009b: 0x04e3, # CYRILLIC SMALL LETTER I WITH MACRON
- 0x009c: 0x04a3, # CYRILLIC SMALL LETTER EN WITH DESCENDER
- 0x009d: 0x049b, # CYRILLIC SMALL LETTER KA WITH DESCENDER
- 0x009e: 0x04bb, # CYRILLIC SMALL LETTER SHHA
- 0x009f: 0x04b9, # CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
- 0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
- 0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U (Byelorussian)
- 0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE
- 0x00a4: 0x04e8, # CYRILLIC CAPITAL LETTER BARRED O
- 0x00a5: 0x0498, # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
- 0x00a6: 0x04b0, # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
- 0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO
- 0x00aa: 0x04d8, # CYRILLIC CAPITAL LETTER SCHWA
- 0x00ad: 0x04ef, # CYRILLIC SMALL LETTER U WITH MACRON
- 0x00af: 0x049c, # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
- 0x00b1: 0x04b1, # CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
- 0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
- 0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
- 0x00b4: 0x0499, # CYRILLIC SMALL LETTER ZE WITH DESCENDER
- 0x00b5: 0x04e9, # CYRILLIC SMALL LETTER BARRED O
- 0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO
- 0x00b9: 0x2116, # NUMERO SIGN
- 0x00ba: 0x04d9, # CYRILLIC SMALL LETTER SCHWA
- 0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE
- 0x00bd: 0x04aa, # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
- 0x00be: 0x04ab, # CYRILLIC SMALL LETTER ES WITH DESCENDER
- 0x00bf: 0x049d, # CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
- 0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A
- 0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE
- 0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE
- 0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
- 0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE
- 0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE
- 0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
- 0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
- 0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I
- 0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
- 0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA
- 0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL
- 0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM
- 0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN
- 0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O
- 0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE
- 0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER
- 0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES
- 0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE
- 0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U
- 0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF
- 0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA
- 0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
- 0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
- 0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
- 0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
- 0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
- 0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU
- 0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
- 0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E
- 0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU
- 0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA
- 0x00e0: 0x0430, # CYRILLIC SMALL LETTER A
- 0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE
- 0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE
- 0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE
- 0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE
- 0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE
- 0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE
- 0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE
- 0x00e8: 0x0438, # CYRILLIC SMALL LETTER I
- 0x00e9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
- 0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA
- 0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL
- 0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM
- 0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN
- 0x00ee: 0x043e, # CYRILLIC SMALL LETTER O
- 0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE
- 0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER
- 0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES
- 0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE
- 0x00f3: 0x0443, # CYRILLIC SMALL LETTER U
- 0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF
- 0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA
- 0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE
- 0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE
- 0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA
- 0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
- 0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
- 0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU
- 0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
- 0x00fd: 0x044d, # CYRILLIC SMALL LETTER E
- 0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU
- 0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA
-})
-
-### Encoding Map
-
-encoding_map = codecs.make_encoding_map(decoding_map)
diff --git a/sys/lib/python/encodings/punycode.py b/sys/lib/python/encodings/punycode.py
deleted file mode 100644
index d97200fd3..000000000
--- a/sys/lib/python/encodings/punycode.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-""" Codec for the Punicode encoding, as specified in RFC 3492
-
-Written by Martin v. Löwis.
-"""
-
-import codecs
-
-##################### Encoding #####################################
-
-def segregate(str):
- """3.1 Basic code point segregation"""
- base = []
- extended = {}
- for c in str:
- if ord(c) < 128:
- base.append(c)
- else:
- extended[c] = 1
- extended = extended.keys()
- extended.sort()
- return "".join(base).encode("ascii"),extended
-
-def selective_len(str, max):
- """Return the length of str, considering only characters below max."""
- res = 0
- for c in str:
- if ord(c) < max:
- res += 1
- return res
-
-def selective_find(str, char, index, pos):
- """Return a pair (index, pos), indicating the next occurrence of
- char in str. index is the position of the character considering
- only ordinals up to and including char, and pos is the position in
- the full string. index/pos is the starting position in the full
- string."""
-
- l = len(str)
- while 1:
- pos += 1
- if pos == l:
- return (-1, -1)
- c = str[pos]
- if c == char:
- return index+1, pos
- elif c < char:
- index += 1
-
-def insertion_unsort(str, extended):
- """3.2 Insertion unsort coding"""
- oldchar = 0x80
- result = []
- oldindex = -1
- for c in extended:
- index = pos = -1
- char = ord(c)
- curlen = selective_len(str, char)
- delta = (curlen+1) * (char - oldchar)
- while 1:
- index,pos = selective_find(str,c,index,pos)
- if index == -1:
- break
- delta += index - oldindex
- result.append(delta-1)
- oldindex = index
- delta = 0
- oldchar = char
-
- return result
-
-def T(j, bias):
- # Punycode parameters: tmin = 1, tmax = 26, base = 36
- res = 36 * (j + 1) - bias
- if res < 1: return 1
- if res > 26: return 26
- return res
-
-digits = "abcdefghijklmnopqrstuvwxyz0123456789"
-def generate_generalized_integer(N, bias):
- """3.3 Generalized variable-length integers"""
- result = []
- j = 0
- while 1:
- t = T(j, bias)
- if N < t:
- result.append(digits[N])
- return result
- result.append(digits[t + ((N - t) % (36 - t))])
- N = (N - t) // (36 - t)
- j += 1
-
-def adapt(delta, first, numchars):
- if first:
- delta //= 700
- else:
- delta //= 2
- delta += delta // numchars
- # ((base - tmin) * tmax) // 2 == 455
- divisions = 0
- while delta > 455:
- delta = delta // 35 # base - tmin
- divisions += 36
- bias = divisions + (36 * delta // (delta + 38))
- return bias
-
-
-def generate_integers(baselen, deltas):
- """3.4 Bias adaptation"""
- # Punycode parameters: initial bias = 72, damp = 700, skew = 38
- result = []
- bias = 72
- for points, delta in enumerate(deltas):
- s = generate_generalized_integer(delta, bias)
- result.extend(s)
- bias = adapt(delta, points==0, baselen+points+1)
- return "".join(result)
-
-def punycode_encode(text):
- base, extended = segregate(text)
- base = base.encode("ascii")
- deltas = insertion_unsort(text, extended)
- extended = generate_integers(len(base), deltas)
- if base:
- return base + "-" + extended
- return extended
-
-##################### Decoding #####################################
-
-def decode_generalized_number(extended, extpos, bias, errors):
- """3.3 Generalized variable-length integers"""
- result = 0
- w = 1
- j = 0
- while 1:
- try:
- char = ord(extended[extpos])
- except IndexError:
- if errors == "strict":
- raise UnicodeError, "incomplete punicode string"
- return extpos + 1, None
- extpos += 1
- if 0x41 <= char <= 0x5A: # A-Z
- digit = char - 0x41
- elif 0x30 <= char <= 0x39:
- digit = char - 22 # 0x30-26
- elif errors == "strict":
- raise UnicodeError("Invalid extended code point '%s'"
- % extended[extpos])
- else:
- return extpos, None
- t = T(j, bias)
- result += digit * w
- if digit < t:
- return extpos, result
- w = w * (36 - t)
- j += 1
-
-
-def insertion_sort(base, extended, errors):
- """3.2 Insertion unsort coding"""
- char = 0x80
- pos = -1
- bias = 72
- extpos = 0
- while extpos < len(extended):
- newpos, delta = decode_generalized_number(extended, extpos,
- bias, errors)
- if delta is None:
- # There was an error in decoding. We can't continue because
- # synchronization is lost.
- return base
- pos += delta+1
- char += pos // (len(base) + 1)
- if char > 0x10FFFF:
- if errors == "strict":
- raise UnicodeError, ("Invalid character U+%x" % char)
- char = ord('?')
- pos = pos % (len(base) + 1)
- base = base[:pos] + unichr(char) + base[pos:]
- bias = adapt(delta, (extpos == 0), len(base))
- extpos = newpos
- return base
-
-def punycode_decode(text, errors):
- pos = text.rfind("-")
- if pos == -1:
- base = ""
- extended = text
- else:
- base = text[:pos]
- extended = text[pos+1:]
- base = unicode(base, "ascii", errors)
- extended = extended.upper()
- return insertion_sort(base, extended, errors)
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- res = punycode_encode(input)
- return res, len(input)
-
- def decode(self,input,errors='strict'):
- if errors not in ('strict', 'replace', 'ignore'):
- raise UnicodeError, "Unsupported error handling "+errors
- res = punycode_decode(input, errors)
- return res, len(input)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return punycode_encode(input)
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- if self.errors not in ('strict', 'replace', 'ignore'):
- raise UnicodeError, "Unsupported error handling "+self.errors
- return punycode_decode(input, self.errors)
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='punycode',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/quopri_codec.py b/sys/lib/python/encodings/quopri_codec.py
deleted file mode 100644
index b802ae62d..000000000
--- a/sys/lib/python/encodings/quopri_codec.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Codec for quoted-printable encoding.
-
-Like base64 and rot13, this returns Python strings, not Unicode.
-"""
-
-import codecs, quopri
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-def quopri_encode(input, errors='strict'):
- """Encode the input, returning a tuple (output object, length consumed).
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- f = StringIO(input)
- g = StringIO()
- quopri.encode(f, g, 1)
- output = g.getvalue()
- return (output, len(input))
-
-def quopri_decode(input, errors='strict'):
- """Decode the input, returning a tuple (output object, length consumed).
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- f = StringIO(input)
- g = StringIO()
- quopri.decode(f, g)
- output = g.getvalue()
- return (output, len(input))
-
-class Codec(codecs.Codec):
-
- def encode(self, input,errors='strict'):
- return quopri_encode(input,errors)
- def decode(self, input,errors='strict'):
- return quopri_decode(input,errors)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return quopri_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return quopri_decode(input, self.errors)[0]
-
-class StreamWriter(Codec, codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-# encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='quopri',
- encode=quopri_encode,
- decode=quopri_decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/raw_unicode_escape.py b/sys/lib/python/encodings/raw_unicode_escape.py
deleted file mode 100644
index 2b919b40d..000000000
--- a/sys/lib/python/encodings/raw_unicode_escape.py
+++ /dev/null
@@ -1,45 +0,0 @@
-""" Python 'raw-unicode-escape' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- # Note: Binding these as C functions will result in the class not
- # converting them to methods. This is intended.
- encode = codecs.raw_unicode_escape_encode
- decode = codecs.raw_unicode_escape_decode
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.raw_unicode_escape_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.raw_unicode_escape_decode(input, self.errors)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='raw-unicode-escape',
- encode=Codec.encode,
- decode=Codec.decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/rot_13.py b/sys/lib/python/encodings/rot_13.py
deleted file mode 100644
index 52b6431cf..000000000
--- a/sys/lib/python/encodings/rot_13.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python
-""" Python Character Mapping Codec for ROT13.
-
- See http://ucsub.colorado.edu/~kominek/rot13/ for details.
-
- Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_map)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_map)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_map)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_map)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='rot-13',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
-
-### Decoding Map
-
-decoding_map = codecs.make_identity_dict(range(256))
-decoding_map.update({
- 0x0041: 0x004e,
- 0x0042: 0x004f,
- 0x0043: 0x0050,
- 0x0044: 0x0051,
- 0x0045: 0x0052,
- 0x0046: 0x0053,
- 0x0047: 0x0054,
- 0x0048: 0x0055,
- 0x0049: 0x0056,
- 0x004a: 0x0057,
- 0x004b: 0x0058,
- 0x004c: 0x0059,
- 0x004d: 0x005a,
- 0x004e: 0x0041,
- 0x004f: 0x0042,
- 0x0050: 0x0043,
- 0x0051: 0x0044,
- 0x0052: 0x0045,
- 0x0053: 0x0046,
- 0x0054: 0x0047,
- 0x0055: 0x0048,
- 0x0056: 0x0049,
- 0x0057: 0x004a,
- 0x0058: 0x004b,
- 0x0059: 0x004c,
- 0x005a: 0x004d,
- 0x0061: 0x006e,
- 0x0062: 0x006f,
- 0x0063: 0x0070,
- 0x0064: 0x0071,
- 0x0065: 0x0072,
- 0x0066: 0x0073,
- 0x0067: 0x0074,
- 0x0068: 0x0075,
- 0x0069: 0x0076,
- 0x006a: 0x0077,
- 0x006b: 0x0078,
- 0x006c: 0x0079,
- 0x006d: 0x007a,
- 0x006e: 0x0061,
- 0x006f: 0x0062,
- 0x0070: 0x0063,
- 0x0071: 0x0064,
- 0x0072: 0x0065,
- 0x0073: 0x0066,
- 0x0074: 0x0067,
- 0x0075: 0x0068,
- 0x0076: 0x0069,
- 0x0077: 0x006a,
- 0x0078: 0x006b,
- 0x0079: 0x006c,
- 0x007a: 0x006d,
-})
-
-### Encoding Map
-
-encoding_map = codecs.make_encoding_map(decoding_map)
-
-### Filter API
-
-def rot13(infile, outfile):
- outfile.write(infile.read().encode('rot-13'))
-
-if __name__ == '__main__':
- import sys
- rot13(sys.stdin, sys.stdout)
diff --git a/sys/lib/python/encodings/shift_jis.py b/sys/lib/python/encodings/shift_jis.py
deleted file mode 100644
index 833811727..000000000
--- a/sys/lib/python/encodings/shift_jis.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# shift_jis.py: Python Unicode Codec for SHIFT_JIS
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_jp, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_jp.getcodec('shift_jis')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='shift_jis',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/shift_jis_2004.py b/sys/lib/python/encodings/shift_jis_2004.py
deleted file mode 100644
index 161b1e86f..000000000
--- a/sys/lib/python/encodings/shift_jis_2004.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# shift_jis_2004.py: Python Unicode Codec for SHIFT_JIS_2004
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_jp, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_jp.getcodec('shift_jis_2004')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='shift_jis_2004',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/shift_jisx0213.py b/sys/lib/python/encodings/shift_jisx0213.py
deleted file mode 100644
index cb653f530..000000000
--- a/sys/lib/python/encodings/shift_jisx0213.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
-#
-# Written by Hye-Shik Chang <perky@FreeBSD.org>
-#
-
-import _codecs_jp, codecs
-import _multibytecodec as mbc
-
-codec = _codecs_jp.getcodec('shift_jisx0213')
-
-class Codec(codecs.Codec):
- encode = codec.encode
- decode = codec.decode
-
-class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
- codecs.IncrementalEncoder):
- codec = codec
-
-class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
- codecs.IncrementalDecoder):
- codec = codec
-
-class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
- codec = codec
-
-class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
- codec = codec
-
-def getregentry():
- return codecs.CodecInfo(
- name='shift_jisx0213',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/string_escape.py b/sys/lib/python/encodings/string_escape.py
deleted file mode 100644
index e329a2607..000000000
--- a/sys/lib/python/encodings/string_escape.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-""" Python 'escape' Codec
-
-
-Written by Martin v. Löwis (martin@v.loewis.de).
-
-"""
-import codecs
-
-class Codec(codecs.Codec):
-
- encode = codecs.escape_encode
- decode = codecs.escape_decode
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.escape_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.escape_decode(input, self.errors)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-def getregentry():
- return codecs.CodecInfo(
- name='string-escape',
- encode=Codec.encode,
- decode=Codec.decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/tis_620.py b/sys/lib/python/encodings/tis_620.py
deleted file mode 100644
index b2cd22b23..000000000
--- a/sys/lib/python/encodings/tis_620.py
+++ /dev/null
@@ -1,307 +0,0 @@
-""" Python Character Mapping Codec tis_620 generated from 'python-mappings/TIS-620.TXT' with gencodec.py.
-
-"""#"
-
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return codecs.charmap_encode(input,errors,encoding_table)
-
- def decode(self,input,errors='strict'):
- return codecs.charmap_decode(input,errors,decoding_table)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.charmap_encode(input,self.errors,encoding_table)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.charmap_decode(input,self.errors,decoding_table)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='tis-620',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
-
-
-### Decoding Table
-
-decoding_table = (
- u'\x00' # 0x00 -> NULL
- u'\x01' # 0x01 -> START OF HEADING
- u'\x02' # 0x02 -> START OF TEXT
- u'\x03' # 0x03 -> END OF TEXT
- u'\x04' # 0x04 -> END OF TRANSMISSION
- u'\x05' # 0x05 -> ENQUIRY
- u'\x06' # 0x06 -> ACKNOWLEDGE
- u'\x07' # 0x07 -> BELL
- u'\x08' # 0x08 -> BACKSPACE
- u'\t' # 0x09 -> HORIZONTAL TABULATION
- u'\n' # 0x0A -> LINE FEED
- u'\x0b' # 0x0B -> VERTICAL TABULATION
- u'\x0c' # 0x0C -> FORM FEED
- u'\r' # 0x0D -> CARRIAGE RETURN
- u'\x0e' # 0x0E -> SHIFT OUT
- u'\x0f' # 0x0F -> SHIFT IN
- u'\x10' # 0x10 -> DATA LINK ESCAPE
- u'\x11' # 0x11 -> DEVICE CONTROL ONE
- u'\x12' # 0x12 -> DEVICE CONTROL TWO
- u'\x13' # 0x13 -> DEVICE CONTROL THREE
- u'\x14' # 0x14 -> DEVICE CONTROL FOUR
- u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
- u'\x16' # 0x16 -> SYNCHRONOUS IDLE
- u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
- u'\x18' # 0x18 -> CANCEL
- u'\x19' # 0x19 -> END OF MEDIUM
- u'\x1a' # 0x1A -> SUBSTITUTE
- u'\x1b' # 0x1B -> ESCAPE
- u'\x1c' # 0x1C -> FILE SEPARATOR
- u'\x1d' # 0x1D -> GROUP SEPARATOR
- u'\x1e' # 0x1E -> RECORD SEPARATOR
- u'\x1f' # 0x1F -> UNIT SEPARATOR
- u' ' # 0x20 -> SPACE
- u'!' # 0x21 -> EXCLAMATION MARK
- u'"' # 0x22 -> QUOTATION MARK
- u'#' # 0x23 -> NUMBER SIGN
- u'$' # 0x24 -> DOLLAR SIGN
- u'%' # 0x25 -> PERCENT SIGN
- u'&' # 0x26 -> AMPERSAND
- u"'" # 0x27 -> APOSTROPHE
- u'(' # 0x28 -> LEFT PARENTHESIS
- u')' # 0x29 -> RIGHT PARENTHESIS
- u'*' # 0x2A -> ASTERISK
- u'+' # 0x2B -> PLUS SIGN
- u',' # 0x2C -> COMMA
- u'-' # 0x2D -> HYPHEN-MINUS
- u'.' # 0x2E -> FULL STOP
- u'/' # 0x2F -> SOLIDUS
- u'0' # 0x30 -> DIGIT ZERO
- u'1' # 0x31 -> DIGIT ONE
- u'2' # 0x32 -> DIGIT TWO
- u'3' # 0x33 -> DIGIT THREE
- u'4' # 0x34 -> DIGIT FOUR
- u'5' # 0x35 -> DIGIT FIVE
- u'6' # 0x36 -> DIGIT SIX
- u'7' # 0x37 -> DIGIT SEVEN
- u'8' # 0x38 -> DIGIT EIGHT
- u'9' # 0x39 -> DIGIT NINE
- u':' # 0x3A -> COLON
- u';' # 0x3B -> SEMICOLON
- u'<' # 0x3C -> LESS-THAN SIGN
- u'=' # 0x3D -> EQUALS SIGN
- u'>' # 0x3E -> GREATER-THAN SIGN
- u'?' # 0x3F -> QUESTION MARK
- u'@' # 0x40 -> COMMERCIAL AT
- u'A' # 0x41 -> LATIN CAPITAL LETTER A
- u'B' # 0x42 -> LATIN CAPITAL LETTER B
- u'C' # 0x43 -> LATIN CAPITAL LETTER C
- u'D' # 0x44 -> LATIN CAPITAL LETTER D
- u'E' # 0x45 -> LATIN CAPITAL LETTER E
- u'F' # 0x46 -> LATIN CAPITAL LETTER F
- u'G' # 0x47 -> LATIN CAPITAL LETTER G
- u'H' # 0x48 -> LATIN CAPITAL LETTER H
- u'I' # 0x49 -> LATIN CAPITAL LETTER I
- u'J' # 0x4A -> LATIN CAPITAL LETTER J
- u'K' # 0x4B -> LATIN CAPITAL LETTER K
- u'L' # 0x4C -> LATIN CAPITAL LETTER L
- u'M' # 0x4D -> LATIN CAPITAL LETTER M
- u'N' # 0x4E -> LATIN CAPITAL LETTER N
- u'O' # 0x4F -> LATIN CAPITAL LETTER O
- u'P' # 0x50 -> LATIN CAPITAL LETTER P
- u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
- u'R' # 0x52 -> LATIN CAPITAL LETTER R
- u'S' # 0x53 -> LATIN CAPITAL LETTER S
- u'T' # 0x54 -> LATIN CAPITAL LETTER T
- u'U' # 0x55 -> LATIN CAPITAL LETTER U
- u'V' # 0x56 -> LATIN CAPITAL LETTER V
- u'W' # 0x57 -> LATIN CAPITAL LETTER W
- u'X' # 0x58 -> LATIN CAPITAL LETTER X
- u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
- u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
- u'[' # 0x5B -> LEFT SQUARE BRACKET
- u'\\' # 0x5C -> REVERSE SOLIDUS
- u']' # 0x5D -> RIGHT SQUARE BRACKET
- u'^' # 0x5E -> CIRCUMFLEX ACCENT
- u'_' # 0x5F -> LOW LINE
- u'`' # 0x60 -> GRAVE ACCENT
- u'a' # 0x61 -> LATIN SMALL LETTER A
- u'b' # 0x62 -> LATIN SMALL LETTER B
- u'c' # 0x63 -> LATIN SMALL LETTER C
- u'd' # 0x64 -> LATIN SMALL LETTER D
- u'e' # 0x65 -> LATIN SMALL LETTER E
- u'f' # 0x66 -> LATIN SMALL LETTER F
- u'g' # 0x67 -> LATIN SMALL LETTER G
- u'h' # 0x68 -> LATIN SMALL LETTER H
- u'i' # 0x69 -> LATIN SMALL LETTER I
- u'j' # 0x6A -> LATIN SMALL LETTER J
- u'k' # 0x6B -> LATIN SMALL LETTER K
- u'l' # 0x6C -> LATIN SMALL LETTER L
- u'm' # 0x6D -> LATIN SMALL LETTER M
- u'n' # 0x6E -> LATIN SMALL LETTER N
- u'o' # 0x6F -> LATIN SMALL LETTER O
- u'p' # 0x70 -> LATIN SMALL LETTER P
- u'q' # 0x71 -> LATIN SMALL LETTER Q
- u'r' # 0x72 -> LATIN SMALL LETTER R
- u's' # 0x73 -> LATIN SMALL LETTER S
- u't' # 0x74 -> LATIN SMALL LETTER T
- u'u' # 0x75 -> LATIN SMALL LETTER U
- u'v' # 0x76 -> LATIN SMALL LETTER V
- u'w' # 0x77 -> LATIN SMALL LETTER W
- u'x' # 0x78 -> LATIN SMALL LETTER X
- u'y' # 0x79 -> LATIN SMALL LETTER Y
- u'z' # 0x7A -> LATIN SMALL LETTER Z
- u'{' # 0x7B -> LEFT CURLY BRACKET
- u'|' # 0x7C -> VERTICAL LINE
- u'}' # 0x7D -> RIGHT CURLY BRACKET
- u'~' # 0x7E -> TILDE
- u'\x7f' # 0x7F -> DELETE
- u'\x80' # 0x80 -> <control>
- u'\x81' # 0x81 -> <control>
- u'\x82' # 0x82 -> <control>
- u'\x83' # 0x83 -> <control>
- u'\x84' # 0x84 -> <control>
- u'\x85' # 0x85 -> <control>
- u'\x86' # 0x86 -> <control>
- u'\x87' # 0x87 -> <control>
- u'\x88' # 0x88 -> <control>
- u'\x89' # 0x89 -> <control>
- u'\x8a' # 0x8A -> <control>
- u'\x8b' # 0x8B -> <control>
- u'\x8c' # 0x8C -> <control>
- u'\x8d' # 0x8D -> <control>
- u'\x8e' # 0x8E -> <control>
- u'\x8f' # 0x8F -> <control>
- u'\x90' # 0x90 -> <control>
- u'\x91' # 0x91 -> <control>
- u'\x92' # 0x92 -> <control>
- u'\x93' # 0x93 -> <control>
- u'\x94' # 0x94 -> <control>
- u'\x95' # 0x95 -> <control>
- u'\x96' # 0x96 -> <control>
- u'\x97' # 0x97 -> <control>
- u'\x98' # 0x98 -> <control>
- u'\x99' # 0x99 -> <control>
- u'\x9a' # 0x9A -> <control>
- u'\x9b' # 0x9B -> <control>
- u'\x9c' # 0x9C -> <control>
- u'\x9d' # 0x9D -> <control>
- u'\x9e' # 0x9E -> <control>
- u'\x9f' # 0x9F -> <control>
- u'\ufffe'
- u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
- u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
- u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
- u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
- u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
- u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
- u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
- u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
- u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
- u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
- u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
- u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
- u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
- u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
- u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
- u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
- u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
- u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
- u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
- u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
- u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
- u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
- u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
- u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
- u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
- u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
- u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
- u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
- u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
- u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
- u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
- u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
- u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
- u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
- u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
- u'\u0e24' # 0xC4 -> THAI CHARACTER RU
- u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
- u'\u0e26' # 0xC6 -> THAI CHARACTER LU
- u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
- u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
- u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
- u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
- u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
- u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
- u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
- u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
- u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
- u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
- u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
- u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
- u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
- u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
- u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
- u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
- u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
- u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
- u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
- u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
- u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
- u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
- u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
- u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
- u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
- u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
- u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
- u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
- u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
- u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
- u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
- u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
- u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
- u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
- u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
- u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
- u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
- u'\u0e51' # 0xF1 -> THAI DIGIT ONE
- u'\u0e52' # 0xF2 -> THAI DIGIT TWO
- u'\u0e53' # 0xF3 -> THAI DIGIT THREE
- u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
- u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
- u'\u0e56' # 0xF6 -> THAI DIGIT SIX
- u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
- u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
- u'\u0e59' # 0xF9 -> THAI DIGIT NINE
- u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
- u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
- u'\ufffe'
-)
-
-### Encoding table
-encoding_table=codecs.charmap_build(decoding_table)
diff --git a/sys/lib/python/encodings/undefined.py b/sys/lib/python/encodings/undefined.py
deleted file mode 100644
index 469028835..000000000
--- a/sys/lib/python/encodings/undefined.py
+++ /dev/null
@@ -1,49 +0,0 @@
-""" Python 'undefined' Codec
-
- This codec will always raise a ValueError exception when being
- used. It is intended for use by the site.py file to switch off
- automatic string to Unicode coercion.
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- raise UnicodeError("undefined encoding")
-
- def decode(self,input,errors='strict'):
- raise UnicodeError("undefined encoding")
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- raise UnicodeError("undefined encoding")
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- raise UnicodeError("undefined encoding")
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='undefined',
- encode=Codec().encode,
- decode=Codec().decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/unicode_escape.py b/sys/lib/python/encodings/unicode_escape.py
deleted file mode 100644
index 817f93265..000000000
--- a/sys/lib/python/encodings/unicode_escape.py
+++ /dev/null
@@ -1,45 +0,0 @@
-""" Python 'unicode-escape' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- # Note: Binding these as C functions will result in the class not
- # converting them to methods. This is intended.
- encode = codecs.unicode_escape_encode
- decode = codecs.unicode_escape_decode
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.unicode_escape_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.unicode_escape_decode(input, self.errors)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='unicode-escape',
- encode=Codec.encode,
- decode=Codec.decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/unicode_internal.py b/sys/lib/python/encodings/unicode_internal.py
deleted file mode 100644
index df3e7752d..000000000
--- a/sys/lib/python/encodings/unicode_internal.py
+++ /dev/null
@@ -1,45 +0,0 @@
-""" Python 'unicode-internal' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- # Note: Binding these as C functions will result in the class not
- # converting them to methods. This is intended.
- encode = codecs.unicode_internal_encode
- decode = codecs.unicode_internal_decode
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.unicode_internal_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return codecs.unicode_internal_decode(input, self.errors)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='unicode-internal',
- encode=Codec.encode,
- decode=Codec.decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamwriter=StreamWriter,
- streamreader=StreamReader,
- )
diff --git a/sys/lib/python/encodings/utf_16.py b/sys/lib/python/encodings/utf_16.py
deleted file mode 100644
index eff08f387..000000000
--- a/sys/lib/python/encodings/utf_16.py
+++ /dev/null
@@ -1,104 +0,0 @@
-""" Python 'utf-16' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs, sys
-
-### Codec APIs
-
-encode = codecs.utf_16_encode
-
-def decode(input, errors='strict'):
- return codecs.utf_16_decode(input, errors, True)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def __init__(self, errors='strict'):
- codecs.IncrementalEncoder.__init__(self, errors)
- self.encoder = None
-
- def encode(self, input, final=False):
- if self.encoder is None:
- result = codecs.utf_16_encode(input, self.errors)[0]
- if sys.byteorder == 'little':
- self.encoder = codecs.utf_16_le_encode
- else:
- self.encoder = codecs.utf_16_be_encode
- return result
- return self.encoder(input, self.errors)[0]
-
- def reset(self):
- codecs.IncrementalEncoder.reset(self)
- self.encoder = None
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- def __init__(self, errors='strict'):
- codecs.BufferedIncrementalDecoder.__init__(self, errors)
- self.decoder = None
-
- def _buffer_decode(self, input, errors, final):
- if self.decoder is None:
- (output, consumed, byteorder) = \
- codecs.utf_16_ex_decode(input, errors, 0, final)
- if byteorder == -1:
- self.decoder = codecs.utf_16_le_decode
- elif byteorder == 1:
- self.decoder = codecs.utf_16_be_decode
- elif consumed >= 2:
- raise UnicodeError("UTF-16 stream does not start with BOM")
- return (output, consumed)
- return self.decoder(input, self.errors, final)
-
- def reset(self):
- codecs.BufferedIncrementalDecoder.reset(self)
- self.decoder = None
-
-class StreamWriter(codecs.StreamWriter):
- def __init__(self, stream, errors='strict'):
- self.bom_written = False
- codecs.StreamWriter.__init__(self, stream, errors)
-
- def encode(self, input, errors='strict'):
- self.bom_written = True
- result = codecs.utf_16_encode(input, errors)
- if sys.byteorder == 'little':
- self.encode = codecs.utf_16_le_encode
- else:
- self.encode = codecs.utf_16_be_encode
- return result
-
-class StreamReader(codecs.StreamReader):
-
- def reset(self):
- codecs.StreamReader.reset(self)
- try:
- del self.decode
- except AttributeError:
- pass
-
- def decode(self, input, errors='strict'):
- (object, consumed, byteorder) = \
- codecs.utf_16_ex_decode(input, errors, 0, False)
- if byteorder == -1:
- self.decode = codecs.utf_16_le_decode
- elif byteorder == 1:
- self.decode = codecs.utf_16_be_decode
- elif consumed>=2:
- raise UnicodeError,"UTF-16 stream does not start with BOM"
- return (object, consumed)
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='utf-16',
- encode=encode,
- decode=decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/utf_16_be.py b/sys/lib/python/encodings/utf_16_be.py
deleted file mode 100644
index 86b458eb9..000000000
--- a/sys/lib/python/encodings/utf_16_be.py
+++ /dev/null
@@ -1,42 +0,0 @@
-""" Python 'utf-16-be' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-encode = codecs.utf_16_be_encode
-
-def decode(input, errors='strict'):
- return codecs.utf_16_be_decode(input, errors, True)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.utf_16_be_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- _buffer_decode = codecs.utf_16_be_decode
-
-class StreamWriter(codecs.StreamWriter):
- encode = codecs.utf_16_be_encode
-
-class StreamReader(codecs.StreamReader):
- decode = codecs.utf_16_be_decode
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='utf-16-be',
- encode=encode,
- decode=decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/utf_16_le.py b/sys/lib/python/encodings/utf_16_le.py
deleted file mode 100644
index ec454142e..000000000
--- a/sys/lib/python/encodings/utf_16_le.py
+++ /dev/null
@@ -1,42 +0,0 @@
-""" Python 'utf-16-le' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-encode = codecs.utf_16_le_encode
-
-def decode(input, errors='strict'):
- return codecs.utf_16_le_decode(input, errors, True)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.utf_16_le_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- _buffer_decode = codecs.utf_16_le_decode
-
-class StreamWriter(codecs.StreamWriter):
- encode = codecs.utf_16_le_encode
-
-class StreamReader(codecs.StreamReader):
- decode = codecs.utf_16_le_decode
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='utf-16-le',
- encode=encode,
- decode=decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/utf_7.py b/sys/lib/python/encodings/utf_7.py
deleted file mode 100644
index d78d1926b..000000000
--- a/sys/lib/python/encodings/utf_7.py
+++ /dev/null
@@ -1,41 +0,0 @@
-""" Python 'utf-7' Codec
-
-Written by Brian Quinlan (brian@sweetapp.com).
-"""
-import codecs
-
-### Codec APIs
-
-class Codec(codecs.Codec):
-
- # Note: Binding these as C functions will result in the class not
- # converting them to methods. This is intended.
- encode = codecs.utf_7_encode
- decode = codecs.utf_7_decode
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.utf_7_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- def _buffer_decode(self, input, errors, final):
- return codecs.utf_7_decode(input, self.errors)
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='utf-7',
- encode=Codec.encode,
- decode=Codec.decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/utf_8.py b/sys/lib/python/encodings/utf_8.py
deleted file mode 100644
index 1bf633657..000000000
--- a/sys/lib/python/encodings/utf_8.py
+++ /dev/null
@@ -1,42 +0,0 @@
-""" Python 'utf-8' Codec
-
-
-Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
-
-"""
-import codecs
-
-### Codec APIs
-
-encode = codecs.utf_8_encode
-
-def decode(input, errors='strict'):
- return codecs.utf_8_decode(input, errors, True)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return codecs.utf_8_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- _buffer_decode = codecs.utf_8_decode
-
-class StreamWriter(codecs.StreamWriter):
- encode = codecs.utf_8_encode
-
-class StreamReader(codecs.StreamReader):
- decode = codecs.utf_8_decode
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='utf-8',
- encode=encode,
- decode=decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/utf_8_sig.py b/sys/lib/python/encodings/utf_8_sig.py
deleted file mode 100644
index d751da69c..000000000
--- a/sys/lib/python/encodings/utf_8_sig.py
+++ /dev/null
@@ -1,100 +0,0 @@
-""" Python 'utf-8-sig' Codec
-This work similar to UTF-8 with the following changes:
-
-* On encoding/writing a UTF-8 encoded BOM will be prepended/written as the
- first three bytes.
-
-* On decoding/reading if the first three bytes are a UTF-8 encoded BOM, these
- bytes will be skipped.
-"""
-import codecs
-
-### Codec APIs
-
-def encode(input, errors='strict'):
- return (codecs.BOM_UTF8 + codecs.utf_8_encode(input, errors)[0], len(input))
-
-def decode(input, errors='strict'):
- prefix = 0
- if input[:3] == codecs.BOM_UTF8:
- input = input[3:]
- prefix = 3
- (output, consumed) = codecs.utf_8_decode(input, errors, True)
- return (output, consumed+prefix)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def __init__(self, errors='strict'):
- codecs.IncrementalEncoder.__init__(self, errors)
- self.first = True
-
- def encode(self, input, final=False):
- if self.first:
- self.first = False
- return codecs.BOM_UTF8 + codecs.utf_8_encode(input, self.errors)[0]
- else:
- return codecs.utf_8_encode(input, self.errors)[0]
-
- def reset(self):
- codecs.IncrementalEncoder.reset(self)
- self.first = True
-
-class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
- def __init__(self, errors='strict'):
- codecs.BufferedIncrementalDecoder.__init__(self, errors)
- self.first = True
-
- def _buffer_decode(self, input, errors, final):
- if self.first and codecs.BOM_UTF8.startswith(input): # might be a BOM
- if len(input) < 3:
- # not enough data to decide if this really is a BOM
- # => try again on the next call
- return (u"", 0)
- (output, consumed) = codecs.utf_8_decode(input[3:], errors, final)
- self.first = False
- return (output, consumed+3)
- return codecs.utf_8_decode(input, errors, final)
-
- def reset(self):
- codecs.BufferedIncrementalDecoder.reset(self)
- self.first = True
-
-class StreamWriter(codecs.StreamWriter):
- def reset(self):
- codecs.StreamWriter.reset(self)
- try:
- del self.encode
- except AttributeError:
- pass
-
- def encode(self, input, errors='strict'):
- self.encode = codecs.utf_8_encode
- return encode(input, errors)
-
-class StreamReader(codecs.StreamReader):
- def reset(self):
- codecs.StreamReader.reset(self)
- try:
- del self.decode
- except AttributeError:
- pass
-
- def decode(self, input, errors='strict'):
- if len(input) < 3 and codecs.BOM_UTF8.startswith(input):
- # not enough data to decide if this is a BOM
- # => try again on the next call
- return (u"", 0)
- self.decode = codecs.utf_8_decode
- return decode(input, errors)
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='utf-8-sig',
- encode=encode,
- decode=decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/uu_codec.py b/sys/lib/python/encodings/uu_codec.py
deleted file mode 100644
index 43fb93c1b..000000000
--- a/sys/lib/python/encodings/uu_codec.py
+++ /dev/null
@@ -1,128 +0,0 @@
-""" Python 'uu_codec' Codec - UU content transfer encoding
-
- Unlike most of the other codecs which target Unicode, this codec
- will return Python string objects for both encode and decode.
-
- Written by Marc-Andre Lemburg (mal@lemburg.com). Some details were
- adapted from uu.py which was written by Lance Ellinghouse and
- modified by Jack Jansen and Fredrik Lundh.
-
-"""
-import codecs, binascii
-
-### Codec APIs
-
-def uu_encode(input,errors='strict',filename='<data>',mode=0666):
-
- """ Encodes the object input and returns a tuple (output
- object, length consumed).
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- from cStringIO import StringIO
- from binascii import b2a_uu
- infile = StringIO(input)
- outfile = StringIO()
- read = infile.read
- write = outfile.write
-
- # Encode
- write('begin %o %s\n' % (mode & 0777, filename))
- chunk = read(45)
- while chunk:
- write(b2a_uu(chunk))
- chunk = read(45)
- write(' \nend\n')
-
- return (outfile.getvalue(), len(input))
-
-def uu_decode(input,errors='strict'):
-
- """ Decodes the object input and returns a tuple (output
- object, length consumed).
-
- input must be an object which provides the bf_getreadbuf
- buffer slot. Python strings, buffer objects and memory
- mapped files are examples of objects providing this slot.
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- Note: filename and file mode information in the input data is
- ignored.
-
- """
- assert errors == 'strict'
- from cStringIO import StringIO
- from binascii import a2b_uu
- infile = StringIO(input)
- outfile = StringIO()
- readline = infile.readline
- write = outfile.write
-
- # Find start of encoded data
- while 1:
- s = readline()
- if not s:
- raise ValueError, 'Missing "begin" line in input data'
- if s[:5] == 'begin':
- break
-
- # Decode
- while 1:
- s = readline()
- if not s or \
- s == 'end\n':
- break
- try:
- data = a2b_uu(s)
- except binascii.Error, v:
- # Workaround for broken uuencoders by /Fredrik Lundh
- nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
- data = a2b_uu(s[:nbytes])
- #sys.stderr.write("Warning: %s\n" % str(v))
- write(data)
- if not s:
- raise ValueError, 'Truncated input data'
-
- return (outfile.getvalue(), len(input))
-
-class Codec(codecs.Codec):
-
- def encode(self,input,errors='strict'):
- return uu_encode(input,errors)
-
- def decode(self,input,errors='strict'):
- return uu_decode(input,errors)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def encode(self, input, final=False):
- return uu_encode(input, self.errors)[0]
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def decode(self, input, final=False):
- return uu_decode(input, self.errors)[0]
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='uu',
- encode=uu_encode,
- decode=uu_decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/encodings/zlib_codec.py b/sys/lib/python/encodings/zlib_codec.py
deleted file mode 100644
index 3419f9f48..000000000
--- a/sys/lib/python/encodings/zlib_codec.py
+++ /dev/null
@@ -1,102 +0,0 @@
-""" Python 'zlib_codec' Codec - zlib compression encoding
-
- Unlike most of the other codecs which target Unicode, this codec
- will return Python string objects for both encode and decode.
-
- Written by Marc-Andre Lemburg (mal@lemburg.com).
-
-"""
-import codecs
-import zlib # this codec needs the optional zlib module !
-
-### Codec APIs
-
-def zlib_encode(input,errors='strict'):
-
- """ Encodes the object input and returns a tuple (output
- object, length consumed).
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- output = zlib.compress(input)
- return (output, len(input))
-
-def zlib_decode(input,errors='strict'):
-
- """ Decodes the object input and returns a tuple (output
- object, length consumed).
-
- input must be an object which provides the bf_getreadbuf
- buffer slot. Python strings, buffer objects and memory
- mapped files are examples of objects providing this slot.
-
- errors defines the error handling to apply. It defaults to
- 'strict' handling which is the only currently supported
- error handling for this codec.
-
- """
- assert errors == 'strict'
- output = zlib.decompress(input)
- return (output, len(input))
-
-class Codec(codecs.Codec):
-
- def encode(self, input, errors='strict'):
- return zlib_encode(input, errors)
- def decode(self, input, errors='strict'):
- return zlib_decode(input, errors)
-
-class IncrementalEncoder(codecs.IncrementalEncoder):
- def __init__(self, errors='strict'):
- assert errors == 'strict'
- self.errors = errors
- self.compressobj = zlib.compressobj()
-
- def encode(self, input, final=False):
- if final:
- c = self.compressobj.compress(input)
- return c + self.compressobj.flush()
- else:
- return self.compressobj.compress(input)
-
- def reset(self):
- self.compressobj = zlib.compressobj()
-
-class IncrementalDecoder(codecs.IncrementalDecoder):
- def __init__(self, errors='strict'):
- assert errors == 'strict'
- self.errors = errors
- self.decompressobj = zlib.decompressobj()
-
- def decode(self, input, final=False):
- if final:
- c = self.decompressobj.decompress(input)
- return c + self.decompressobj.flush()
- else:
- return self.decompressobj.decompress(input)
-
- def reset(self):
- self.decompressobj = zlib.decompressobj()
-
-class StreamWriter(Codec,codecs.StreamWriter):
- pass
-
-class StreamReader(Codec,codecs.StreamReader):
- pass
-
-### encodings module API
-
-def getregentry():
- return codecs.CodecInfo(
- name='zlib',
- encode=zlib_encode,
- decode=zlib_decode,
- incrementalencoder=IncrementalEncoder,
- incrementaldecoder=IncrementalDecoder,
- streamreader=StreamReader,
- streamwriter=StreamWriter,
- )
diff --git a/sys/lib/python/factotum.py b/sys/lib/python/factotum.py
deleted file mode 100644
index 0a8ebcd3a..000000000
--- a/sys/lib/python/factotum.py
+++ /dev/null
@@ -1,102 +0,0 @@
-'''factotum for py'''
-
-import subprocess
-
-class FactotumError(Exception):
- pass
-
-class PhaseError(Exception):
- pass
-
-class NeedkeyError(Exception):
- pass
-
-class Factotum:
- def start(self, **args):
- self.f = open('/mnt/factotum/rpc', 'r+', 0)
- msg = 'start'
- for k, v in args.iteritems():
- msg += ' ' + k + '=\'' + v + '\''
- while True:
- self.f.write(msg)
- ret = self.f.read(4096)
- if ret[:7] != "needkey": break
- self.needkey(ret[8:])
- if ret == "ok": return
- if ret[:5] == "error": raise FactotumError(ret[6:])
- raise FactotumError("start: unexpected " + ret)
- def needkey(self, string):
- subprocess.call(['/bin/auth/factotum', '-g', string])
- def read(self):
- while True:
- self.f.write('read')
- ret = self.f.read(4096)
- if ret[:7] != "needkey": break
- self.needkey(ret[8:])
- if ret == "ok": return ""
- if ret[:3] == "ok ": return ret[3:]
- if ret[:5] == "error": raise FactotumError(ret[6:])
- if ret[:5] == "phase": raise PhaseError(ret[6:])
- raise FactotumError("read: unexpected " + ret)
- def write(self, data):
- while True:
- self.f.write('write ' + data)
- ret = self.f.read(4096)
- if ret[:7] != "needkey": break
- self.needkey(ret[8:])
- if ret == "ok": return 0
- if ret[:3] == "toosmall ": return int(ret[4:])
- if ret[:5] == "error": raise FactotumError(ret[6:])
- if ret[:5] == "phase": raise PhaseError(ret[6:])
- raise FactotumError("write: unexpected " + ret)
- def attr(self):
- self.f.write('attr')
- ret = self.f.read(4096)
- if ret[:5] == "error": raise FactotumError(ret[6:])
- if ret[:3] == "ok ":
- dict = {}
- ret = ret[3:]
- mode = 0
- key = ""
- value = ""
- while ret != "":
- if mode == 0:
- if ret[0] == '=':
- if ret[1] == '\'':
- mode = 2
- ret = ret[1:]
- else:
- mode = 1
- else:
- key += ret[0]
- elif mode == 1:
- if ret[0] == ' ':
- dict[key] = value
- key = ""
- value = ""
- mode = 0
- else:
- value += ret[0]
- elif mode == 2:
- if ret[0] == '\'':
- ret = ret[1:]
- dict[key] = value
- key = ""
- value = ""
- mode = 0
- else:
- value += ret[0]
- if ret != "": ret = ret[1:]
- if key != "":
- dict[key] = value
- return dict
- raise FactotumError("unexpected " + ret)
- def close(self):
- self.f.close()
- def delkey(self, **args):
- f = open('/mnt/factotum/ctl', 'w', 0)
- msg = 'delkey'
- for k, v in args.iteritems():
- msg += ' ' + k + '=\'' + v + '\''
- f.write(msg)
- f.close()
diff --git a/sys/lib/python/filecmp.py b/sys/lib/python/filecmp.py
deleted file mode 100644
index 35cedef6b..000000000
--- a/sys/lib/python/filecmp.py
+++ /dev/null
@@ -1,297 +0,0 @@
-"""Utilities for comparing files and directories.
-
-Classes:
- dircmp
-
-Functions:
- cmp(f1, f2, shallow=1) -> int
- cmpfiles(a, b, common) -> ([], [], [])
-
-"""
-
-import os
-import stat
-import warnings
-from itertools import ifilter, ifilterfalse, imap, izip
-
-__all__ = ["cmp","dircmp","cmpfiles"]
-
-_cache = {}
-BUFSIZE=8*1024
-
-def cmp(f1, f2, shallow=1):
- """Compare two files.
-
- Arguments:
-
- f1 -- First file name
-
- f2 -- Second file name
-
- shallow -- Just check stat signature (do not read the files).
- defaults to 1.
-
- Return value:
-
- True if the files are the same, False otherwise.
-
- This function uses a cache for past comparisons and the results,
- with a cache invalidation mechanism relying on stale signatures.
-
- """
-
- s1 = _sig(os.stat(f1))
- s2 = _sig(os.stat(f2))
- if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
- return False
- if shallow and s1 == s2:
- return True
- if s1[1] != s2[1]:
- return False
-
- result = _cache.get((f1, f2))
- if result and (s1, s2) == result[:2]:
- return result[2]
- outcome = _do_cmp(f1, f2)
- _cache[f1, f2] = s1, s2, outcome
- return outcome
-
-def _sig(st):
- return (stat.S_IFMT(st.st_mode),
- st.st_size,
- st.st_mtime)
-
-def _do_cmp(f1, f2):
- bufsize = BUFSIZE
- fp1 = open(f1, 'rb')
- fp2 = open(f2, 'rb')
- while True:
- b1 = fp1.read(bufsize)
- b2 = fp2.read(bufsize)
- if b1 != b2:
- return False
- if not b1:
- return True
-
-# Directory comparison class.
-#
-class dircmp:
- """A class that manages the comparison of 2 directories.
-
- dircmp(a,b,ignore=None,hide=None)
- A and B are directories.
- IGNORE is a list of names to ignore,
- defaults to ['RCS', 'CVS', 'tags'].
- HIDE is a list of names to hide,
- defaults to [os.curdir, os.pardir].
-
- High level usage:
- x = dircmp(dir1, dir2)
- x.report() -> prints a report on the differences between dir1 and dir2
- or
- x.report_partial_closure() -> prints report on differences between dir1
- and dir2, and reports on common immediate subdirectories.
- x.report_full_closure() -> like report_partial_closure,
- but fully recursive.
-
- Attributes:
- left_list, right_list: The files in dir1 and dir2,
- filtered by hide and ignore.
- common: a list of names in both dir1 and dir2.
- left_only, right_only: names only in dir1, dir2.
- common_dirs: subdirectories in both dir1 and dir2.
- common_files: files in both dir1 and dir2.
- common_funny: names in both dir1 and dir2 where the type differs between
- dir1 and dir2, or the name is not stat-able.
- same_files: list of identical files.
- diff_files: list of filenames which differ.
- funny_files: list of files which could not be compared.
- subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
- """
-
- def __init__(self, a, b, ignore=None, hide=None): # Initialize
- self.left = a
- self.right = b
- if hide is None:
- self.hide = [os.curdir, os.pardir] # Names never to be shown
- else:
- self.hide = hide
- if ignore is None:
- self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
- else:
- self.ignore = ignore
-
- def phase0(self): # Compare everything except common subdirectories
- self.left_list = _filter(os.listdir(self.left),
- self.hide+self.ignore)
- self.right_list = _filter(os.listdir(self.right),
- self.hide+self.ignore)
- self.left_list.sort()
- self.right_list.sort()
-
- def phase1(self): # Compute common names
- a = dict(izip(imap(os.path.normcase, self.left_list), self.left_list))
- b = dict(izip(imap(os.path.normcase, self.right_list), self.right_list))
- self.common = map(a.__getitem__, ifilter(b.has_key, a))
- self.left_only = map(a.__getitem__, ifilterfalse(b.has_key, a))
- self.right_only = map(b.__getitem__, ifilterfalse(a.has_key, b))
-
- def phase2(self): # Distinguish files, directories, funnies
- self.common_dirs = []
- self.common_files = []
- self.common_funny = []
-
- for x in self.common:
- a_path = os.path.join(self.left, x)
- b_path = os.path.join(self.right, x)
-
- ok = 1
- try:
- a_stat = os.stat(a_path)
- except os.error, why:
- # print 'Can\'t stat', a_path, ':', why[1]
- ok = 0
- try:
- b_stat = os.stat(b_path)
- except os.error, why:
- # print 'Can\'t stat', b_path, ':', why[1]
- ok = 0
-
- if ok:
- a_type = stat.S_IFMT(a_stat.st_mode)
- b_type = stat.S_IFMT(b_stat.st_mode)
- if a_type != b_type:
- self.common_funny.append(x)
- elif stat.S_ISDIR(a_type):
- self.common_dirs.append(x)
- elif stat.S_ISREG(a_type):
- self.common_files.append(x)
- else:
- self.common_funny.append(x)
- else:
- self.common_funny.append(x)
-
- def phase3(self): # Find out differences between common files
- xx = cmpfiles(self.left, self.right, self.common_files)
- self.same_files, self.diff_files, self.funny_files = xx
-
- def phase4(self): # Find out differences between common subdirectories
- # A new dircmp object is created for each common subdirectory,
- # these are stored in a dictionary indexed by filename.
- # The hide and ignore properties are inherited from the parent
- self.subdirs = {}
- for x in self.common_dirs:
- a_x = os.path.join(self.left, x)
- b_x = os.path.join(self.right, x)
- self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
-
- def phase4_closure(self): # Recursively call phase4() on subdirectories
- self.phase4()
- for sd in self.subdirs.itervalues():
- sd.phase4_closure()
-
- def report(self): # Print a report on the differences between a and b
- # Output format is purposely lousy
- print 'diff', self.left, self.right
- if self.left_only:
- self.left_only.sort()
- print 'Only in', self.left, ':', self.left_only
- if self.right_only:
- self.right_only.sort()
- print 'Only in', self.right, ':', self.right_only
- if self.same_files:
- self.same_files.sort()
- print 'Identical files :', self.same_files
- if self.diff_files:
- self.diff_files.sort()
- print 'Differing files :', self.diff_files
- if self.funny_files:
- self.funny_files.sort()
- print 'Trouble with common files :', self.funny_files
- if self.common_dirs:
- self.common_dirs.sort()
- print 'Common subdirectories :', self.common_dirs
- if self.common_funny:
- self.common_funny.sort()
- print 'Common funny cases :', self.common_funny
-
- def report_partial_closure(self): # Print reports on self and on subdirs
- self.report()
- for sd in self.subdirs.itervalues():
- print
- sd.report()
-
- def report_full_closure(self): # Report on self and subdirs recursively
- self.report()
- for sd in self.subdirs.itervalues():
- print
- sd.report_full_closure()
-
- methodmap = dict(subdirs=phase4,
- same_files=phase3, diff_files=phase3, funny_files=phase3,
- common_dirs = phase2, common_files=phase2, common_funny=phase2,
- common=phase1, left_only=phase1, right_only=phase1,
- left_list=phase0, right_list=phase0)
-
- def __getattr__(self, attr):
- if attr not in self.methodmap:
- raise AttributeError, attr
- self.methodmap[attr](self)
- return getattr(self, attr)
-
-def cmpfiles(a, b, common, shallow=1):
- """Compare common files in two directories.
-
- a, b -- directory names
- common -- list of file names found in both directories
- shallow -- if true, do comparison based solely on stat() information
-
- Returns a tuple of three lists:
- files that compare equal
- files that are different
- filenames that aren't regular files.
-
- """
- res = ([], [], [])
- for x in common:
- ax = os.path.join(a, x)
- bx = os.path.join(b, x)
- res[_cmp(ax, bx, shallow)].append(x)
- return res
-
-
-# Compare two files.
-# Return:
-# 0 for equal
-# 1 for different
-# 2 for funny cases (can't stat, etc.)
-#
-def _cmp(a, b, sh, abs=abs, cmp=cmp):
- try:
- return not abs(cmp(a, b, sh))
- except os.error:
- return 2
-
-
-# Return a copy with items that occur in skip removed.
-#
-def _filter(flist, skip):
- return list(ifilterfalse(skip.__contains__, flist))
-
-
-# Demonstration and testing.
-#
-def demo():
- import sys
- import getopt
- options, args = getopt.getopt(sys.argv[1:], 'r')
- if len(args) != 2:
- raise getopt.GetoptError('need exactly two args', None)
- dd = dircmp(args[0], args[1])
- if ('-r', '') in options:
- dd.report_full_closure()
- else:
- dd.report()
-
-if __name__ == '__main__':
- demo()
diff --git a/sys/lib/python/fileinput.py b/sys/lib/python/fileinput.py
deleted file mode 100644
index 19932ca8e..000000000
--- a/sys/lib/python/fileinput.py
+++ /dev/null
@@ -1,413 +0,0 @@
-"""Helper class to quickly write a loop over all standard input files.
-
-Typical use is:
-
- import fileinput
- for line in fileinput.input():
- process(line)
-
-This iterates over the lines of all files listed in sys.argv[1:],
-defaulting to sys.stdin if the list is empty. If a filename is '-' it
-is also replaced by sys.stdin. To specify an alternative list of
-filenames, pass it as the argument to input(). A single file name is
-also allowed.
-
-Functions filename(), lineno() return the filename and cumulative line
-number of the line that has just been read; filelineno() returns its
-line number in the current file; isfirstline() returns true iff the
-line just read is the first line of its file; isstdin() returns true
-iff the line was read from sys.stdin. Function nextfile() closes the
-current file so that the next iteration will read the first line from
-the next file (if any); lines not read from the file will not count
-towards the cumulative line count; the filename is not changed until
-after the first line of the next file has been read. Function close()
-closes the sequence.
-
-Before any lines have been read, filename() returns None and both line
-numbers are zero; nextfile() has no effect. After all lines have been
-read, filename() and the line number functions return the values
-pertaining to the last line read; nextfile() has no effect.
-
-All files are opened in text mode by default, you can override this by
-setting the mode parameter to input() or FileInput.__init__().
-If an I/O error occurs during opening or reading a file, the IOError
-exception is raised.
-
-If sys.stdin is used more than once, the second and further use will
-return no lines, except perhaps for interactive use, or if it has been
-explicitly reset (e.g. using sys.stdin.seek(0)).
-
-Empty files are opened and immediately closed; the only time their
-presence in the list of filenames is noticeable at all is when the
-last file opened is empty.
-
-It is possible that the last line of a file doesn't end in a newline
-character; otherwise lines are returned including the trailing
-newline.
-
-Class FileInput is the implementation; its methods filename(),
-lineno(), fileline(), isfirstline(), isstdin(), nextfile() and close()
-correspond to the functions in the module. In addition it has a
-readline() method which returns the next input line, and a
-__getitem__() method which implements the sequence behavior. The
-sequence must be accessed in strictly sequential order; sequence
-access and readline() cannot be mixed.
-
-Optional in-place filtering: if the keyword argument inplace=1 is
-passed to input() or to the FileInput constructor, the file is moved
-to a backup file and standard output is directed to the input file.
-This makes it possible to write a filter that rewrites its input file
-in place. If the keyword argument backup=".<some extension>" is also
-given, it specifies the extension for the backup file, and the backup
-file remains around; by default, the extension is ".bak" and it is
-deleted when the output file is closed. In-place filtering is
-disabled when standard input is read. XXX The current implementation
-does not work for MS-DOS 8+3 filesystems.
-
-Performance: this module is unfortunately one of the slower ways of
-processing large numbers of input lines. Nevertheless, a significant
-speed-up has been obtained by using readlines(bufsize) instead of
-readline(). A new keyword argument, bufsize=N, is present on the
-input() function and the FileInput() class to override the default
-buffer size.
-
-XXX Possible additions:
-
-- optional getopt argument processing
-- isatty()
-- read(), read(size), even readlines()
-
-"""
-
-import sys, os
-
-__all__ = ["input","close","nextfile","filename","lineno","filelineno",
- "isfirstline","isstdin","FileInput"]
-
-_state = None
-
-DEFAULT_BUFSIZE = 8*1024
-
-def input(files=None, inplace=0, backup="", bufsize=0,
- mode="r", openhook=None):
- """input([files[, inplace[, backup[, mode[, openhook]]]]])
-
- Create an instance of the FileInput class. The instance will be used
- as global state for the functions of this module, and is also returned
- to use during iteration. The parameters to this function will be passed
- along to the constructor of the FileInput class.
- """
- global _state
- if _state and _state._file:
- raise RuntimeError, "input() already active"
- _state = FileInput(files, inplace, backup, bufsize, mode, openhook)
- return _state
-
-def close():
- """Close the sequence."""
- global _state
- state = _state
- _state = None
- if state:
- state.close()
-
-def nextfile():
- """
- Close the current file so that the next iteration will read the first
- line from the next file (if any); lines not read from the file will
- not count towards the cumulative line count. The filename is not
- changed until after the first line of the next file has been read.
- Before the first line has been read, this function has no effect;
- it cannot be used to skip the first file. After the last line of the
- last file has been read, this function has no effect.
- """
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.nextfile()
-
-def filename():
- """
- Return the name of the file currently being read.
- Before the first line has been read, returns None.
- """
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.filename()
-
-def lineno():
- """
- Return the cumulative line number of the line that has just been read.
- Before the first line has been read, returns 0. After the last line
- of the last file has been read, returns the line number of that line.
- """
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.lineno()
-
-def filelineno():
- """
- Return the line number in the current file. Before the first line
- has been read, returns 0. After the last line of the last file has
- been read, returns the line number of that line within the file.
- """
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.filelineno()
-
-def fileno():
- """
- Return the file number of the current file. When no file is currently
- opened, returns -1.
- """
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.fileno()
-
-def isfirstline():
- """
- Returns true the line just read is the first line of its file,
- otherwise returns false.
- """
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.isfirstline()
-
-def isstdin():
- """
- Returns true if the last line was read from sys.stdin,
- otherwise returns false.
- """
- if not _state:
- raise RuntimeError, "no active input()"
- return _state.isstdin()
-
-class FileInput:
- """class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
-
- Class FileInput is the implementation of the module; its methods
- filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(),
- nextfile() and close() correspond to the functions of the same name
- in the module.
- In addition it has a readline() method which returns the next
- input line, and a __getitem__() method which implements the
- sequence behavior. The sequence must be accessed in strictly
- sequential order; random access and readline() cannot be mixed.
- """
-
- def __init__(self, files=None, inplace=0, backup="", bufsize=0,
- mode="r", openhook=None):
- if isinstance(files, basestring):
- files = (files,)
- else:
- if files is None:
- files = sys.argv[1:]
- if not files:
- files = ('-',)
- else:
- files = tuple(files)
- self._files = files
- self._inplace = inplace
- self._backup = backup
- self._bufsize = bufsize or DEFAULT_BUFSIZE
- self._savestdout = None
- self._output = None
- self._filename = None
- self._lineno = 0
- self._filelineno = 0
- self._file = None
- self._isstdin = False
- self._backupfilename = None
- self._buffer = []
- self._bufindex = 0
- # restrict mode argument to reading modes
- if mode not in ('r', 'rU', 'U', 'rb'):
- raise ValueError("FileInput opening mode must be one of "
- "'r', 'rU', 'U' and 'rb'")
- self._mode = mode
- if inplace and openhook:
- raise ValueError("FileInput cannot use an opening hook in inplace mode")
- elif openhook and not callable(openhook):
- raise ValueError("FileInput openhook must be callable")
- self._openhook = openhook
-
- def __del__(self):
- self.close()
-
- def close(self):
- self.nextfile()
- self._files = ()
-
- def __iter__(self):
- return self
-
- def next(self):
- try:
- line = self._buffer[self._bufindex]
- except IndexError:
- pass
- else:
- self._bufindex += 1
- self._lineno += 1
- self._filelineno += 1
- return line
- line = self.readline()
- if not line:
- raise StopIteration
- return line
-
- def __getitem__(self, i):
- if i != self._lineno:
- raise RuntimeError, "accessing lines out of order"
- try:
- return self.next()
- except StopIteration:
- raise IndexError, "end of input reached"
-
- def nextfile(self):
- savestdout = self._savestdout
- self._savestdout = 0
- if savestdout:
- sys.stdout = savestdout
-
- output = self._output
- self._output = 0
- if output:
- output.close()
-
- file = self._file
- self._file = 0
- if file and not self._isstdin:
- file.close()
-
- backupfilename = self._backupfilename
- self._backupfilename = 0
- if backupfilename and not self._backup:
- try: os.unlink(backupfilename)
- except OSError: pass
-
- self._isstdin = False
- self._buffer = []
- self._bufindex = 0
-
- def readline(self):
- try:
- line = self._buffer[self._bufindex]
- except IndexError:
- pass
- else:
- self._bufindex += 1
- self._lineno += 1
- self._filelineno += 1
- return line
- if not self._file:
- if not self._files:
- return ""
- self._filename = self._files[0]
- self._files = self._files[1:]
- self._filelineno = 0
- self._file = None
- self._isstdin = False
- self._backupfilename = 0
- if self._filename == '-':
- self._filename = '<stdin>'
- self._file = sys.stdin
- self._isstdin = True
- else:
- if self._inplace:
- self._backupfilename = (
- self._filename + (self._backup or os.extsep+"bak"))
- try: os.unlink(self._backupfilename)
- except os.error: pass
- # The next few lines may raise IOError
- os.rename(self._filename, self._backupfilename)
- self._file = open(self._backupfilename, self._mode)
- try:
- perm = os.fstat(self._file.fileno()).st_mode
- except OSError:
- self._output = open(self._filename, "w")
- else:
- fd = os.open(self._filename,
- os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
- perm)
- self._output = os.fdopen(fd, "w")
- try:
- if hasattr(os, 'chmod'):
- os.chmod(self._filename, perm)
- except OSError:
- pass
- self._savestdout = sys.stdout
- sys.stdout = self._output
- else:
- # This may raise IOError
- if self._openhook:
- self._file = self._openhook(self._filename, self._mode)
- else:
- self._file = open(self._filename, self._mode)
- self._buffer = self._file.readlines(self._bufsize)
- self._bufindex = 0
- if not self._buffer:
- self.nextfile()
- # Recursive call
- return self.readline()
-
- def filename(self):
- return self._filename
-
- def lineno(self):
- return self._lineno
-
- def filelineno(self):
- return self._filelineno
-
- def fileno(self):
- if self._file:
- try:
- return self._file.fileno()
- except ValueError:
- return -1
- else:
- return -1
-
- def isfirstline(self):
- return self._filelineno == 1
-
- def isstdin(self):
- return self._isstdin
-
-
-def hook_compressed(filename, mode):
- ext = os.path.splitext(filename)[1]
- if ext == '.gz':
- import gzip
- return gzip.open(filename, mode)
- elif ext == '.bz2':
- import bz2
- return bz2.BZ2File(filename, mode)
- else:
- return open(filename, mode)
-
-
-def hook_encoded(encoding):
- import codecs
- def openhook(filename, mode):
- return codecs.open(filename, mode, encoding)
- return openhook
-
-
-def _test():
- import getopt
- inplace = 0
- backup = 0
- opts, args = getopt.getopt(sys.argv[1:], "ib:")
- for o, a in opts:
- if o == '-i': inplace = 1
- if o == '-b': backup = a
- for line in input(args, inplace=inplace, backup=backup):
- if line[-1:] == '\n': line = line[:-1]
- if line[-1:] == '\r': line = line[:-1]
- print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
- isfirstline() and "*" or "", line)
- print "%d: %s[%d]" % (lineno(), filename(), filelineno())
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/fnmatch.py b/sys/lib/python/fnmatch.py
deleted file mode 100644
index 3bf246391..000000000
--- a/sys/lib/python/fnmatch.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""Filename matching with shell patterns.
-
-fnmatch(FILENAME, PATTERN) matches according to the local convention.
-fnmatchcase(FILENAME, PATTERN) always takes case in account.
-
-The functions operate by translating the pattern into a regular
-expression. They cache the compiled regular expressions for speed.
-
-The function translate(PATTERN) returns a regular expression
-corresponding to PATTERN. (It does not compile it.)
-"""
-
-import re
-
-__all__ = ["filter", "fnmatch","fnmatchcase","translate"]
-
-_cache = {}
-
-def fnmatch(name, pat):
- """Test whether FILENAME matches PATTERN.
-
- Patterns are Unix shell style:
-
- * matches everything
- ? matches any single character
- [seq] matches any character in seq
- [!seq] matches any char not in seq
-
- An initial period in FILENAME is not special.
- Both FILENAME and PATTERN are first case-normalized
- if the operating system requires it.
- If you don't want this, use fnmatchcase(FILENAME, PATTERN).
- """
-
- import os
- name = os.path.normcase(name)
- pat = os.path.normcase(pat)
- return fnmatchcase(name, pat)
-
-def filter(names, pat):
- """Return the subset of the list NAMES that match PAT"""
- import os,posixpath
- result=[]
- pat=os.path.normcase(pat)
- if not pat in _cache:
- res = translate(pat)
- _cache[pat] = re.compile(res)
- match=_cache[pat].match
- if os.path is posixpath:
- # normcase on posix is NOP. Optimize it away from the loop.
- for name in names:
- if match(name):
- result.append(name)
- else:
- for name in names:
- if match(os.path.normcase(name)):
- result.append(name)
- return result
-
-def fnmatchcase(name, pat):
- """Test whether FILENAME matches PATTERN, including case.
-
- This is a version of fnmatch() which doesn't case-normalize
- its arguments.
- """
-
- if not pat in _cache:
- res = translate(pat)
- _cache[pat] = re.compile(res)
- return _cache[pat].match(name) is not None
-
-def translate(pat):
- """Translate a shell PATTERN to a regular expression.
-
- There is no way to quote meta-characters.
- """
-
- i, n = 0, len(pat)
- res = ''
- while i < n:
- c = pat[i]
- i = i+1
- if c == '*':
- res = res + '.*'
- elif c == '?':
- res = res + '.'
- elif c == '[':
- j = i
- if j < n and pat[j] == '!':
- j = j+1
- if j < n and pat[j] == ']':
- j = j+1
- while j < n and pat[j] != ']':
- j = j+1
- if j >= n:
- res = res + '\\['
- else:
- stuff = pat[i:j].replace('\\','\\\\')
- i = j+1
- if stuff[0] == '!':
- stuff = '^' + stuff[1:]
- elif stuff[0] == '^':
- stuff = '\\' + stuff
- res = '%s[%s]' % (res, stuff)
- else:
- res = res + re.escape(c)
- return res + "$"
diff --git a/sys/lib/python/formatter.py b/sys/lib/python/formatter.py
deleted file mode 100644
index fa2b38938..000000000
--- a/sys/lib/python/formatter.py
+++ /dev/null
@@ -1,447 +0,0 @@
-"""Generic output formatting.
-
-Formatter objects transform an abstract flow of formatting events into
-specific output events on writer objects. Formatters manage several stack
-structures to allow various properties of a writer object to be changed and
-restored; writers need not be able to handle relative changes nor any sort
-of ``change back'' operation. Specific writer properties which may be
-controlled via formatter objects are horizontal alignment, font, and left
-margin indentations. A mechanism is provided which supports providing
-arbitrary, non-exclusive style settings to a writer as well. Additional
-interfaces facilitate formatting events which are not reversible, such as
-paragraph separation.
-
-Writer objects encapsulate device interfaces. Abstract devices, such as
-file formats, are supported as well as physical devices. The provided
-implementations all work with abstract devices. The interface makes
-available mechanisms for setting the properties which formatter objects
-manage and inserting data into the output.
-"""
-
-import sys
-
-
-AS_IS = None
-
-
-class NullFormatter:
- """A formatter which does nothing.
-
- If the writer parameter is omitted, a NullWriter instance is created.
- No methods of the writer are called by NullFormatter instances.
-
- Implementations should inherit from this class if implementing a writer
- interface but don't need to inherit any implementation.
-
- """
-
- def __init__(self, writer=None):
- if writer is None:
- writer = NullWriter()
- self.writer = writer
- def end_paragraph(self, blankline): pass
- def add_line_break(self): pass
- def add_hor_rule(self, *args, **kw): pass
- def add_label_data(self, format, counter, blankline=None): pass
- def add_flowing_data(self, data): pass
- def add_literal_data(self, data): pass
- def flush_softspace(self): pass
- def push_alignment(self, align): pass
- def pop_alignment(self): pass
- def push_font(self, x): pass
- def pop_font(self): pass
- def push_margin(self, margin): pass
- def pop_margin(self): pass
- def set_spacing(self, spacing): pass
- def push_style(self, *styles): pass
- def pop_style(self, n=1): pass
- def assert_line_data(self, flag=1): pass
-
-
-class AbstractFormatter:
- """The standard formatter.
-
- This implementation has demonstrated wide applicability to many writers,
- and may be used directly in most circumstances. It has been used to
- implement a full-featured World Wide Web browser.
-
- """
-
- # Space handling policy: blank spaces at the boundary between elements
- # are handled by the outermost context. "Literal" data is not checked
- # to determine context, so spaces in literal data are handled directly
- # in all circumstances.
-
- def __init__(self, writer):
- self.writer = writer # Output device
- self.align = None # Current alignment
- self.align_stack = [] # Alignment stack
- self.font_stack = [] # Font state
- self.margin_stack = [] # Margin state
- self.spacing = None # Vertical spacing state
- self.style_stack = [] # Other state, e.g. color
- self.nospace = 1 # Should leading space be suppressed
- self.softspace = 0 # Should a space be inserted
- self.para_end = 1 # Just ended a paragraph
- self.parskip = 0 # Skipped space between paragraphs?
- self.hard_break = 1 # Have a hard break
- self.have_label = 0
-
- def end_paragraph(self, blankline):
- if not self.hard_break:
- self.writer.send_line_break()
- self.have_label = 0
- if self.parskip < blankline and not self.have_label:
- self.writer.send_paragraph(blankline - self.parskip)
- self.parskip = blankline
- self.have_label = 0
- self.hard_break = self.nospace = self.para_end = 1
- self.softspace = 0
-
- def add_line_break(self):
- if not (self.hard_break or self.para_end):
- self.writer.send_line_break()
- self.have_label = self.parskip = 0
- self.hard_break = self.nospace = 1
- self.softspace = 0
-
- def add_hor_rule(self, *args, **kw):
- if not self.hard_break:
- self.writer.send_line_break()
- self.writer.send_hor_rule(*args, **kw)
- self.hard_break = self.nospace = 1
- self.have_label = self.para_end = self.softspace = self.parskip = 0
-
- def add_label_data(self, format, counter, blankline = None):
- if self.have_label or not self.hard_break:
- self.writer.send_line_break()
- if not self.para_end:
- self.writer.send_paragraph((blankline and 1) or 0)
- if isinstance(format, str):
- self.writer.send_label_data(self.format_counter(format, counter))
- else:
- self.writer.send_label_data(format)
- self.nospace = self.have_label = self.hard_break = self.para_end = 1
- self.softspace = self.parskip = 0
-
- def format_counter(self, format, counter):
- label = ''
- for c in format:
- if c == '1':
- label = label + ('%d' % counter)
- elif c in 'aA':
- if counter > 0:
- label = label + self.format_letter(c, counter)
- elif c in 'iI':
- if counter > 0:
- label = label + self.format_roman(c, counter)
- else:
- label = label + c
- return label
-
- def format_letter(self, case, counter):
- label = ''
- while counter > 0:
- counter, x = divmod(counter-1, 26)
- # This makes a strong assumption that lowercase letters
- # and uppercase letters form two contiguous blocks, with
- # letters in order!
- s = chr(ord(case) + x)
- label = s + label
- return label
-
- def format_roman(self, case, counter):
- ones = ['i', 'x', 'c', 'm']
- fives = ['v', 'l', 'd']
- label, index = '', 0
- # This will die of IndexError when counter is too big
- while counter > 0:
- counter, x = divmod(counter, 10)
- if x == 9:
- label = ones[index] + ones[index+1] + label
- elif x == 4:
- label = ones[index] + fives[index] + label
- else:
- if x >= 5:
- s = fives[index]
- x = x-5
- else:
- s = ''
- s = s + ones[index]*x
- label = s + label
- index = index + 1
- if case == 'I':
- return label.upper()
- return label
-
- def add_flowing_data(self, data):
- if not data: return
- prespace = data[:1].isspace()
- postspace = data[-1:].isspace()
- data = " ".join(data.split())
- if self.nospace and not data:
- return
- elif prespace or self.softspace:
- if not data:
- if not self.nospace:
- self.softspace = 1
- self.parskip = 0
- return
- if not self.nospace:
- data = ' ' + data
- self.hard_break = self.nospace = self.para_end = \
- self.parskip = self.have_label = 0
- self.softspace = postspace
- self.writer.send_flowing_data(data)
-
- def add_literal_data(self, data):
- if not data: return
- if self.softspace:
- self.writer.send_flowing_data(" ")
- self.hard_break = data[-1:] == '\n'
- self.nospace = self.para_end = self.softspace = \
- self.parskip = self.have_label = 0
- self.writer.send_literal_data(data)
-
- def flush_softspace(self):
- if self.softspace:
- self.hard_break = self.para_end = self.parskip = \
- self.have_label = self.softspace = 0
- self.nospace = 1
- self.writer.send_flowing_data(' ')
-
- def push_alignment(self, align):
- if align and align != self.align:
- self.writer.new_alignment(align)
- self.align = align
- self.align_stack.append(align)
- else:
- self.align_stack.append(self.align)
-
- def pop_alignment(self):
- if self.align_stack:
- del self.align_stack[-1]
- if self.align_stack:
- self.align = align = self.align_stack[-1]
- self.writer.new_alignment(align)
- else:
- self.align = None
- self.writer.new_alignment(None)
-
- def push_font(self, (size, i, b, tt)):
- if self.softspace:
- self.hard_break = self.para_end = self.softspace = 0
- self.nospace = 1
- self.writer.send_flowing_data(' ')
- if self.font_stack:
- csize, ci, cb, ctt = self.font_stack[-1]
- if size is AS_IS: size = csize
- if i is AS_IS: i = ci
- if b is AS_IS: b = cb
- if tt is AS_IS: tt = ctt
- font = (size, i, b, tt)
- self.font_stack.append(font)
- self.writer.new_font(font)
-
- def pop_font(self):
- if self.font_stack:
- del self.font_stack[-1]
- if self.font_stack:
- font = self.font_stack[-1]
- else:
- font = None
- self.writer.new_font(font)
-
- def push_margin(self, margin):
- self.margin_stack.append(margin)
- fstack = filter(None, self.margin_stack)
- if not margin and fstack:
- margin = fstack[-1]
- self.writer.new_margin(margin, len(fstack))
-
- def pop_margin(self):
- if self.margin_stack:
- del self.margin_stack[-1]
- fstack = filter(None, self.margin_stack)
- if fstack:
- margin = fstack[-1]
- else:
- margin = None
- self.writer.new_margin(margin, len(fstack))
-
- def set_spacing(self, spacing):
- self.spacing = spacing
- self.writer.new_spacing(spacing)
-
- def push_style(self, *styles):
- if self.softspace:
- self.hard_break = self.para_end = self.softspace = 0
- self.nospace = 1
- self.writer.send_flowing_data(' ')
- for style in styles:
- self.style_stack.append(style)
- self.writer.new_styles(tuple(self.style_stack))
-
- def pop_style(self, n=1):
- del self.style_stack[-n:]
- self.writer.new_styles(tuple(self.style_stack))
-
- def assert_line_data(self, flag=1):
- self.nospace = self.hard_break = not flag
- self.para_end = self.parskip = self.have_label = 0
-
-
-class NullWriter:
- """Minimal writer interface to use in testing & inheritance.
-
- A writer which only provides the interface definition; no actions are
- taken on any methods. This should be the base class for all writers
- which do not need to inherit any implementation methods.
-
- """
- def __init__(self): pass
- def flush(self): pass
- def new_alignment(self, align): pass
- def new_font(self, font): pass
- def new_margin(self, margin, level): pass
- def new_spacing(self, spacing): pass
- def new_styles(self, styles): pass
- def send_paragraph(self, blankline): pass
- def send_line_break(self): pass
- def send_hor_rule(self, *args, **kw): pass
- def send_label_data(self, data): pass
- def send_flowing_data(self, data): pass
- def send_literal_data(self, data): pass
-
-
-class AbstractWriter(NullWriter):
- """A writer which can be used in debugging formatters, but not much else.
-
- Each method simply announces itself by printing its name and
- arguments on standard output.
-
- """
-
- def new_alignment(self, align):
- print "new_alignment(%r)" % (align,)
-
- def new_font(self, font):
- print "new_font(%r)" % (font,)
-
- def new_margin(self, margin, level):
- print "new_margin(%r, %d)" % (margin, level)
-
- def new_spacing(self, spacing):
- print "new_spacing(%r)" % (spacing,)
-
- def new_styles(self, styles):
- print "new_styles(%r)" % (styles,)
-
- def send_paragraph(self, blankline):
- print "send_paragraph(%r)" % (blankline,)
-
- def send_line_break(self):
- print "send_line_break()"
-
- def send_hor_rule(self, *args, **kw):
- print "send_hor_rule()"
-
- def send_label_data(self, data):
- print "send_label_data(%r)" % (data,)
-
- def send_flowing_data(self, data):
- print "send_flowing_data(%r)" % (data,)
-
- def send_literal_data(self, data):
- print "send_literal_data(%r)" % (data,)
-
-
-class DumbWriter(NullWriter):
- """Simple writer class which writes output on the file object passed in
- as the file parameter or, if file is omitted, on standard output. The
- output is simply word-wrapped to the number of columns specified by
- the maxcol parameter. This class is suitable for reflowing a sequence
- of paragraphs.
-
- """
-
- def __init__(self, file=None, maxcol=72):
- self.file = file or sys.stdout
- self.maxcol = maxcol
- NullWriter.__init__(self)
- self.reset()
-
- def reset(self):
- self.col = 0
- self.atbreak = 0
-
- def send_paragraph(self, blankline):
- self.file.write('\n'*blankline)
- self.col = 0
- self.atbreak = 0
-
- def send_line_break(self):
- self.file.write('\n')
- self.col = 0
- self.atbreak = 0
-
- def send_hor_rule(self, *args, **kw):
- self.file.write('\n')
- self.file.write('-'*self.maxcol)
- self.file.write('\n')
- self.col = 0
- self.atbreak = 0
-
- def send_literal_data(self, data):
- self.file.write(data)
- i = data.rfind('\n')
- if i >= 0:
- self.col = 0
- data = data[i+1:]
- data = data.expandtabs()
- self.col = self.col + len(data)
- self.atbreak = 0
-
- def send_flowing_data(self, data):
- if not data: return
- atbreak = self.atbreak or data[0].isspace()
- col = self.col
- maxcol = self.maxcol
- write = self.file.write
- for word in data.split():
- if atbreak:
- if col + len(word) >= maxcol:
- write('\n')
- col = 0
- else:
- write(' ')
- col = col + 1
- write(word)
- col = col + len(word)
- atbreak = 1
- self.col = col
- self.atbreak = data[-1].isspace()
-
-
-def test(file = None):
- w = DumbWriter()
- f = AbstractFormatter(w)
- if file is not None:
- fp = open(file)
- elif sys.argv[1:]:
- fp = open(sys.argv[1])
- else:
- fp = sys.stdin
- while 1:
- line = fp.readline()
- if not line:
- break
- if line == '\n':
- f.end_paragraph(1)
- else:
- f.add_flowing_data(line)
- f.end_paragraph(0)
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/fpformat.py b/sys/lib/python/fpformat.py
deleted file mode 100644
index 0ae86a913..000000000
--- a/sys/lib/python/fpformat.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""General floating point formatting functions.
-
-Functions:
-fix(x, digits_behind)
-sci(x, digits_behind)
-
-Each takes a number or a string and a number of digits as arguments.
-
-Parameters:
-x: number to be formatted; or a string resembling a number
-digits_behind: number of digits behind the decimal point
-"""
-
-import re
-
-__all__ = ["fix","sci","NotANumber"]
-
-# Compiled regular expression to "decode" a number
-decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
-# \0 the whole thing
-# \1 leading sign or empty
-# \2 digits left of decimal point
-# \3 fraction (empty or begins with point)
-# \4 exponent part (empty or begins with 'e' or 'E')
-
-try:
- class NotANumber(ValueError):
- pass
-except TypeError:
- NotANumber = 'fpformat.NotANumber'
-
-def extract(s):
- """Return (sign, intpart, fraction, expo) or raise an exception:
- sign is '+' or '-'
- intpart is 0 or more digits beginning with a nonzero
- fraction is 0 or more digits
- expo is an integer"""
- res = decoder.match(s)
- if res is None: raise NotANumber, s
- sign, intpart, fraction, exppart = res.group(1,2,3,4)
- if sign == '+': sign = ''
- if fraction: fraction = fraction[1:]
- if exppart: expo = int(exppart[1:])
- else: expo = 0
- return sign, intpart, fraction, expo
-
-def unexpo(intpart, fraction, expo):
- """Remove the exponent by changing intpart and fraction."""
- if expo > 0: # Move the point left
- f = len(fraction)
- intpart, fraction = intpart + fraction[:expo], fraction[expo:]
- if expo > f:
- intpart = intpart + '0'*(expo-f)
- elif expo < 0: # Move the point right
- i = len(intpart)
- intpart, fraction = intpart[:expo], intpart[expo:] + fraction
- if expo < -i:
- fraction = '0'*(-expo-i) + fraction
- return intpart, fraction
-
-def roundfrac(intpart, fraction, digs):
- """Round or extend the fraction to size digs."""
- f = len(fraction)
- if f <= digs:
- return intpart, fraction + '0'*(digs-f)
- i = len(intpart)
- if i+digs < 0:
- return '0'*-digs, ''
- total = intpart + fraction
- nextdigit = total[i+digs]
- if nextdigit >= '5': # Hard case: increment last digit, may have carry!
- n = i + digs - 1
- while n >= 0:
- if total[n] != '9': break
- n = n-1
- else:
- total = '0' + total
- i = i+1
- n = 0
- total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
- intpart, fraction = total[:i], total[i:]
- if digs >= 0:
- return intpart, fraction[:digs]
- else:
- return intpart[:digs] + '0'*-digs, ''
-
-def fix(x, digs):
- """Format x as [-]ddd.ddd with 'digs' digits after the point
- and at least one digit before.
- If digs <= 0, the point is suppressed."""
- if type(x) != type(''): x = repr(x)
- try:
- sign, intpart, fraction, expo = extract(x)
- except NotANumber:
- return x
- intpart, fraction = unexpo(intpart, fraction, expo)
- intpart, fraction = roundfrac(intpart, fraction, digs)
- while intpart and intpart[0] == '0': intpart = intpart[1:]
- if intpart == '': intpart = '0'
- if digs > 0: return sign + intpart + '.' + fraction
- else: return sign + intpart
-
-def sci(x, digs):
- """Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
- and exactly one digit before.
- If digs is <= 0, one digit is kept and the point is suppressed."""
- if type(x) != type(''): x = repr(x)
- sign, intpart, fraction, expo = extract(x)
- if not intpart:
- while fraction and fraction[0] == '0':
- fraction = fraction[1:]
- expo = expo - 1
- if fraction:
- intpart, fraction = fraction[0], fraction[1:]
- expo = expo - 1
- else:
- intpart = '0'
- else:
- expo = expo + len(intpart) - 1
- intpart, fraction = intpart[0], intpart[1:] + fraction
- digs = max(0, digs)
- intpart, fraction = roundfrac(intpart, fraction, digs)
- if len(intpart) > 1:
- intpart, fraction, expo = \
- intpart[0], intpart[1:] + fraction[:-1], \
- expo + len(intpart) - 1
- s = sign + intpart
- if digs > 0: s = s + '.' + fraction
- e = repr(abs(expo))
- e = '0'*(3-len(e)) + e
- if expo < 0: e = '-' + e
- else: e = '+' + e
- return s + 'e' + e
-
-def test():
- """Interactive test run."""
- try:
- while 1:
- x, digs = input('Enter (x, digs): ')
- print x, fix(x, digs), sci(x, digs)
- except (EOFError, KeyboardInterrupt):
- pass
diff --git a/sys/lib/python/ftplib.py b/sys/lib/python/ftplib.py
deleted file mode 100644
index 9cb67dd55..000000000
--- a/sys/lib/python/ftplib.py
+++ /dev/null
@@ -1,823 +0,0 @@
-"""An FTP client class and some helper functions.
-
-Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
-
-Example:
-
->>> from ftplib import FTP
->>> ftp = FTP('ftp.python.org') # connect to host, default port
->>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
-'230 Guest login ok, access restrictions apply.'
->>> ftp.retrlines('LIST') # list directory contents
-total 9
-drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
-drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
-drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
-drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
-d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
-drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
-drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
-drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
--rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
-'226 Transfer complete.'
->>> ftp.quit()
-'221 Goodbye.'
->>>
-
-A nice test that reveals some of the network dialogue would be:
-python ftplib.py -d localhost -l -p -l
-"""
-
-#
-# Changes and improvements suggested by Steve Majewski.
-# Modified by Jack to work on the mac.
-# Modified by Siebren to support docstrings and PASV.
-#
-
-import os
-import sys
-
-# Import SOCKS module if it exists, else standard socket module socket
-try:
- import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
- from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
-except ImportError:
- import socket
-
-__all__ = ["FTP","Netrc"]
-
-# Magic number from <socket.h>
-MSG_OOB = 0x1 # Process data out of band
-
-
-# The standard FTP server control port
-FTP_PORT = 21
-
-
-# Exception raised when an error or invalid response is received
-class Error(Exception): pass
-class error_reply(Error): pass # unexpected [123]xx reply
-class error_temp(Error): pass # 4xx errors
-class error_perm(Error): pass # 5xx errors
-class error_proto(Error): pass # response does not begin with [1-5]
-
-
-# All exceptions (hopefully) that may be raised here and that aren't
-# (always) programming errors on our side
-all_errors = (Error, socket.error, IOError, EOFError)
-
-
-# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
-CRLF = '\r\n'
-
-
-# The class itself
-class FTP:
-
- '''An FTP client class.
-
- To create a connection, call the class using these argument:
- host, user, passwd, acct
- These are all strings, and have default value ''.
- Then use self.connect() with optional host and port argument.
-
- To download a file, use ftp.retrlines('RETR ' + filename),
- or ftp.retrbinary() with slightly different arguments.
- To upload a file, use ftp.storlines() or ftp.storbinary(),
- which have an open file as argument (see their definitions
- below for details).
- The download/upload functions first issue appropriate TYPE
- and PORT or PASV commands.
-'''
-
- debugging = 0
- host = ''
- port = FTP_PORT
- sock = None
- file = None
- welcome = None
- passiveserver = 1
-
- # Initialization method (called by class instantiation).
- # Initialize host to localhost, port to standard ftp port
- # Optional arguments are host (for connect()),
- # and user, passwd, acct (for login())
- def __init__(self, host='', user='', passwd='', acct=''):
- if host:
- self.connect(host)
- if user: self.login(user, passwd, acct)
-
- def connect(self, host = '', port = 0):
- '''Connect to host. Arguments are:
- - host: hostname to connect to (string, default previous host)
- - port: port to connect to (integer, default previous port)'''
- if host: self.host = host
- if port: self.port = port
- msg = "getaddrinfo returns an empty list"
- for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- try:
- self.sock = socket.socket(af, socktype, proto)
- self.sock.connect(sa)
- except socket.error, msg:
- if self.sock:
- self.sock.close()
- self.sock = None
- continue
- break
- if not self.sock:
- raise socket.error, msg
- self.af = af
- self.file = self.sock.makefile('rb')
- self.welcome = self.getresp()
- return self.welcome
-
- def getwelcome(self):
- '''Get the welcome message from the server.
- (this is read and squirreled away by connect())'''
- if self.debugging:
- print '*welcome*', self.sanitize(self.welcome)
- return self.welcome
-
- def set_debuglevel(self, level):
- '''Set the debugging level.
- The required argument level means:
- 0: no debugging output (default)
- 1: print commands and responses but not body text etc.
- 2: also print raw lines read and sent before stripping CR/LF'''
- self.debugging = level
- debug = set_debuglevel
-
- def set_pasv(self, val):
- '''Use passive or active mode for data transfers.
- With a false argument, use the normal PORT mode,
- With a true argument, use the PASV command.'''
- self.passiveserver = val
-
- # Internal: "sanitize" a string for printing
- def sanitize(self, s):
- if s[:5] == 'pass ' or s[:5] == 'PASS ':
- i = len(s)
- while i > 5 and s[i-1] in '\r\n':
- i = i-1
- s = s[:5] + '*'*(i-5) + s[i:]
- return repr(s)
-
- # Internal: send one line to the server, appending CRLF
- def putline(self, line):
- line = line + CRLF
- if self.debugging > 1: print '*put*', self.sanitize(line)
- self.sock.sendall(line)
-
- # Internal: send one command to the server (through putline())
- def putcmd(self, line):
- if self.debugging: print '*cmd*', self.sanitize(line)
- self.putline(line)
-
- # Internal: return one line from the server, stripping CRLF.
- # Raise EOFError if the connection is closed
- def getline(self):
- line = self.file.readline()
- if self.debugging > 1:
- print '*get*', self.sanitize(line)
- if not line: raise EOFError
- if line[-2:] == CRLF: line = line[:-2]
- elif line[-1:] in CRLF: line = line[:-1]
- return line
-
- # Internal: get a response from the server, which may possibly
- # consist of multiple lines. Return a single string with no
- # trailing CRLF. If the response consists of multiple lines,
- # these are separated by '\n' characters in the string
- def getmultiline(self):
- line = self.getline()
- if line[3:4] == '-':
- code = line[:3]
- while 1:
- nextline = self.getline()
- line = line + ('\n' + nextline)
- if nextline[:3] == code and \
- nextline[3:4] != '-':
- break
- return line
-
- # Internal: get a response from the server.
- # Raise various errors if the response indicates an error
- def getresp(self):
- resp = self.getmultiline()
- if self.debugging: print '*resp*', self.sanitize(resp)
- self.lastresp = resp[:3]
- c = resp[:1]
- if c in ('1', '2', '3'):
- return resp
- if c == '4':
- raise error_temp, resp
- if c == '5':
- raise error_perm, resp
- raise error_proto, resp
-
- def voidresp(self):
- """Expect a response beginning with '2'."""
- resp = self.getresp()
- if resp[0] != '2':
- raise error_reply, resp
- return resp
-
- def abort(self):
- '''Abort a file transfer. Uses out-of-band data.
- This does not follow the procedure from the RFC to send Telnet
- IP and Synch; that doesn't seem to work with the servers I've
- tried. Instead, just send the ABOR command as OOB data.'''
- line = 'ABOR' + CRLF
- if self.debugging > 1: print '*put urgent*', self.sanitize(line)
- self.sock.sendall(line, MSG_OOB)
- resp = self.getmultiline()
- if resp[:3] not in ('426', '226'):
- raise error_proto, resp
-
- def sendcmd(self, cmd):
- '''Send a command and return the response.'''
- self.putcmd(cmd)
- return self.getresp()
-
- def voidcmd(self, cmd):
- """Send a command and expect a response beginning with '2'."""
- self.putcmd(cmd)
- return self.voidresp()
-
- def sendport(self, host, port):
- '''Send a PORT command with the current host and the given
- port number.
- '''
- hbytes = host.split('.')
- pbytes = [repr(port/256), repr(port%256)]
- bytes = hbytes + pbytes
- cmd = 'PORT ' + ','.join(bytes)
- return self.voidcmd(cmd)
-
- def sendeprt(self, host, port):
- '''Send a EPRT command with the current host and the given port number.'''
- af = 0
- if self.af == socket.AF_INET:
- af = 1
- if self.af == socket.AF_INET6:
- af = 2
- if af == 0:
- raise error_proto, 'unsupported address family'
- fields = ['', repr(af), host, repr(port), '']
- cmd = 'EPRT ' + '|'.join(fields)
- return self.voidcmd(cmd)
-
- def makeport(self):
- '''Create a new socket and send a PORT command for it.'''
- msg = "getaddrinfo returns an empty list"
- sock = None
- for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
- af, socktype, proto, canonname, sa = res
- try:
- sock = socket.socket(af, socktype, proto)
- sock.bind(sa)
- except socket.error, msg:
- if sock:
- sock.close()
- sock = None
- continue
- break
- if not sock:
- raise socket.error, msg
- sock.listen(1)
- port = sock.getsockname()[1] # Get proper port
- host = self.sock.getsockname()[0] # Get proper host
- if self.af == socket.AF_INET:
- resp = self.sendport(host, port)
- else:
- resp = self.sendeprt(host, port)
- return sock
-
- def makepasv(self):
- if self.af == socket.AF_INET:
- host, port = parse227(self.sendcmd('PASV'))
- else:
- host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
- return host, port
-
- def ntransfercmd(self, cmd, rest=None):
- """Initiate a transfer over the data connection.
-
- If the transfer is active, send a port command and the
- transfer command, and accept the connection. If the server is
- passive, send a pasv command, connect to it, and start the
- transfer command. Either way, return the socket for the
- connection and the expected size of the transfer. The
- expected size may be None if it could not be determined.
-
- Optional `rest' argument can be a string that is sent as the
- argument to a RESTART command. This is essentially a server
- marker used to tell the server to skip over any data up to the
- given marker.
- """
- size = None
- if self.passiveserver:
- host, port = self.makepasv()
- af, socktype, proto, canon, sa = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)[0]
- conn = socket.socket(af, socktype, proto)
- conn.connect(sa)
- if rest is not None:
- self.sendcmd("REST %s" % rest)
- resp = self.sendcmd(cmd)
- # Some servers apparently send a 200 reply to
- # a LIST or STOR command, before the 150 reply
- # (and way before the 226 reply). This seems to
- # be in violation of the protocol (which only allows
- # 1xx or error messages for LIST), so we just discard
- # this response.
- if resp[0] == '2':
- resp = self.getresp()
- if resp[0] != '1':
- raise error_reply, resp
- else:
- sock = self.makeport()
- if rest is not None:
- self.sendcmd("REST %s" % rest)
- resp = self.sendcmd(cmd)
- # See above.
- if resp[0] == '2':
- resp = self.getresp()
- if resp[0] != '1':
- raise error_reply, resp
- conn, sockaddr = sock.accept()
- if resp[:3] == '150':
- # this is conditional in case we received a 125
- size = parse150(resp)
- return conn, size
-
- def transfercmd(self, cmd, rest=None):
- """Like ntransfercmd() but returns only the socket."""
- return self.ntransfercmd(cmd, rest)[0]
-
- def login(self, user = '', passwd = '', acct = ''):
- '''Login, default anonymous.'''
- if not user: user = 'anonymous'
- if not passwd: passwd = ''
- if not acct: acct = ''
- if user == 'anonymous' and passwd in ('', '-'):
- # If there is no anonymous ftp password specified
- # then we'll just use anonymous@
- # We don't send any other thing because:
- # - We want to remain anonymous
- # - We want to stop SPAM
- # - We don't want to let ftp sites to discriminate by the user,
- # host or country.
- passwd = passwd + 'anonymous@'
- resp = self.sendcmd('USER ' + user)
- if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
- if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
- if resp[0] != '2':
- raise error_reply, resp
- return resp
-
- def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
- """Retrieve data in binary mode.
-
- `cmd' is a RETR command. `callback' is a callback function is
- called for each block. No more than `blocksize' number of
- bytes will be read from the socket. Optional `rest' is passed
- to transfercmd().
-
- A new port is created for you. Return the response code.
- """
- self.voidcmd('TYPE I')
- conn = self.transfercmd(cmd, rest)
- while 1:
- data = conn.recv(blocksize)
- if not data:
- break
- callback(data)
- conn.close()
- return self.voidresp()
-
- def retrlines(self, cmd, callback = None):
- '''Retrieve data in line mode.
- The argument is a RETR or LIST command.
- The callback function (2nd argument) is called for each line,
- with trailing CRLF stripped. This creates a new port for you.
- print_line() is the default callback.'''
- if callback is None: callback = print_line
- resp = self.sendcmd('TYPE A')
- conn = self.transfercmd(cmd)
- fp = conn.makefile('rb')
- while 1:
- line = fp.readline()
- if self.debugging > 2: print '*retr*', repr(line)
- if not line:
- break
- if line[-2:] == CRLF:
- line = line[:-2]
- elif line[-1:] == '\n':
- line = line[:-1]
- callback(line)
- fp.close()
- conn.close()
- return self.voidresp()
-
- def storbinary(self, cmd, fp, blocksize=8192):
- '''Store a file in binary mode.'''
- self.voidcmd('TYPE I')
- conn = self.transfercmd(cmd)
- while 1:
- buf = fp.read(blocksize)
- if not buf: break
- conn.sendall(buf)
- conn.close()
- return self.voidresp()
-
- def storlines(self, cmd, fp):
- '''Store a file in line mode.'''
- self.voidcmd('TYPE A')
- conn = self.transfercmd(cmd)
- while 1:
- buf = fp.readline()
- if not buf: break
- if buf[-2:] != CRLF:
- if buf[-1] in CRLF: buf = buf[:-1]
- buf = buf + CRLF
- conn.sendall(buf)
- conn.close()
- return self.voidresp()
-
- def acct(self, password):
- '''Send new account name.'''
- cmd = 'ACCT ' + password
- return self.voidcmd(cmd)
-
- def nlst(self, *args):
- '''Return a list of files in a given directory (default the current).'''
- cmd = 'NLST'
- for arg in args:
- cmd = cmd + (' ' + arg)
- files = []
- self.retrlines(cmd, files.append)
- return files
-
- def dir(self, *args):
- '''List a directory in long form.
- By default list current directory to stdout.
- Optional last argument is callback function; all
- non-empty arguments before it are concatenated to the
- LIST command. (This *should* only be used for a pathname.)'''
- cmd = 'LIST'
- func = None
- if args[-1:] and type(args[-1]) != type(''):
- args, func = args[:-1], args[-1]
- for arg in args:
- if arg:
- cmd = cmd + (' ' + arg)
- self.retrlines(cmd, func)
-
- def rename(self, fromname, toname):
- '''Rename a file.'''
- resp = self.sendcmd('RNFR ' + fromname)
- if resp[0] != '3':
- raise error_reply, resp
- return self.voidcmd('RNTO ' + toname)
-
- def delete(self, filename):
- '''Delete a file.'''
- resp = self.sendcmd('DELE ' + filename)
- if resp[:3] in ('250', '200'):
- return resp
- elif resp[:1] == '5':
- raise error_perm, resp
- else:
- raise error_reply, resp
-
- def cwd(self, dirname):
- '''Change to a directory.'''
- if dirname == '..':
- try:
- return self.voidcmd('CDUP')
- except error_perm, msg:
- if msg.args[0][:3] != '500':
- raise
- elif dirname == '':
- dirname = '.' # does nothing, but could return error
- cmd = 'CWD ' + dirname
- return self.voidcmd(cmd)
-
- def size(self, filename):
- '''Retrieve the size of a file.'''
- # Note that the RFC doesn't say anything about 'SIZE'
- resp = self.sendcmd('SIZE ' + filename)
- if resp[:3] == '213':
- s = resp[3:].strip()
- try:
- return int(s)
- except (OverflowError, ValueError):
- return long(s)
-
- def mkd(self, dirname):
- '''Make a directory, return its full pathname.'''
- resp = self.sendcmd('MKD ' + dirname)
- return parse257(resp)
-
- def rmd(self, dirname):
- '''Remove a directory.'''
- return self.voidcmd('RMD ' + dirname)
-
- def pwd(self):
- '''Return current working directory.'''
- resp = self.sendcmd('PWD')
- return parse257(resp)
-
- def quit(self):
- '''Quit, and close the connection.'''
- resp = self.voidcmd('QUIT')
- self.close()
- return resp
-
- def close(self):
- '''Close the connection without assuming anything about it.'''
- if self.file:
- self.file.close()
- self.sock.close()
- self.file = self.sock = None
-
-
-_150_re = None
-
-def parse150(resp):
- '''Parse the '150' response for a RETR request.
- Returns the expected transfer size or None; size is not guaranteed to
- be present in the 150 message.
- '''
- if resp[:3] != '150':
- raise error_reply, resp
- global _150_re
- if _150_re is None:
- import re
- _150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
- m = _150_re.match(resp)
- if not m:
- return None
- s = m.group(1)
- try:
- return int(s)
- except (OverflowError, ValueError):
- return long(s)
-
-
-_227_re = None
-
-def parse227(resp):
- '''Parse the '227' response for a PASV request.
- Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
- Return ('host.addr.as.numbers', port#) tuple.'''
-
- if resp[:3] != '227':
- raise error_reply, resp
- global _227_re
- if _227_re is None:
- import re
- _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
- m = _227_re.search(resp)
- if not m:
- raise error_proto, resp
- numbers = m.groups()
- host = '.'.join(numbers[:4])
- port = (int(numbers[4]) << 8) + int(numbers[5])
- return host, port
-
-
-def parse229(resp, peer):
- '''Parse the '229' response for a EPSV request.
- Raises error_proto if it does not contain '(|||port|)'
- Return ('host.addr.as.numbers', port#) tuple.'''
-
- if resp[:3] != '229':
- raise error_reply, resp
- left = resp.find('(')
- if left < 0: raise error_proto, resp
- right = resp.find(')', left + 1)
- if right < 0:
- raise error_proto, resp # should contain '(|||port|)'
- if resp[left + 1] != resp[right - 1]:
- raise error_proto, resp
- parts = resp[left + 1:right].split(resp[left+1])
- if len(parts) != 5:
- raise error_proto, resp
- host = peer[0]
- port = int(parts[3])
- return host, port
-
-
-def parse257(resp):
- '''Parse the '257' response for a MKD or PWD request.
- This is a response to a MKD or PWD request: a directory name.
- Returns the directoryname in the 257 reply.'''
-
- if resp[:3] != '257':
- raise error_reply, resp
- if resp[3:5] != ' "':
- return '' # Not compliant to RFC 959, but UNIX ftpd does this
- dirname = ''
- i = 5
- n = len(resp)
- while i < n:
- c = resp[i]
- i = i+1
- if c == '"':
- if i >= n or resp[i] != '"':
- break
- i = i+1
- dirname = dirname + c
- return dirname
-
-
-def print_line(line):
- '''Default retrlines callback to print a line.'''
- print line
-
-
-def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
- '''Copy file from one FTP-instance to another.'''
- if not targetname: targetname = sourcename
- type = 'TYPE ' + type
- source.voidcmd(type)
- target.voidcmd(type)
- sourcehost, sourceport = parse227(source.sendcmd('PASV'))
- target.sendport(sourcehost, sourceport)
- # RFC 959: the user must "listen" [...] BEFORE sending the
- # transfer request.
- # So: STOR before RETR, because here the target is a "user".
- treply = target.sendcmd('STOR ' + targetname)
- if treply[:3] not in ('125', '150'): raise error_proto # RFC 959
- sreply = source.sendcmd('RETR ' + sourcename)
- if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959
- source.voidresp()
- target.voidresp()
-
-
-class Netrc:
- """Class to parse & provide access to 'netrc' format files.
-
- See the netrc(4) man page for information on the file format.
-
- WARNING: This class is obsolete -- use module netrc instead.
-
- """
- __defuser = None
- __defpasswd = None
- __defacct = None
-
- def __init__(self, filename=None):
- if filename is None:
- if "HOME" in os.environ:
- filename = os.path.join(os.environ["HOME"],
- ".netrc")
- else:
- raise IOError, \
- "specify file to load or set $HOME"
- self.__hosts = {}
- self.__macros = {}
- fp = open(filename, "r")
- in_macro = 0
- while 1:
- line = fp.readline()
- if not line: break
- if in_macro and line.strip():
- macro_lines.append(line)
- continue
- elif in_macro:
- self.__macros[macro_name] = tuple(macro_lines)
- in_macro = 0
- words = line.split()
- host = user = passwd = acct = None
- default = 0
- i = 0
- while i < len(words):
- w1 = words[i]
- if i+1 < len(words):
- w2 = words[i + 1]
- else:
- w2 = None
- if w1 == 'default':
- default = 1
- elif w1 == 'machine' and w2:
- host = w2.lower()
- i = i + 1
- elif w1 == 'login' and w2:
- user = w2
- i = i + 1
- elif w1 == 'password' and w2:
- passwd = w2
- i = i + 1
- elif w1 == 'account' and w2:
- acct = w2
- i = i + 1
- elif w1 == 'macdef' and w2:
- macro_name = w2
- macro_lines = []
- in_macro = 1
- break
- i = i + 1
- if default:
- self.__defuser = user or self.__defuser
- self.__defpasswd = passwd or self.__defpasswd
- self.__defacct = acct or self.__defacct
- if host:
- if host in self.__hosts:
- ouser, opasswd, oacct = \
- self.__hosts[host]
- user = user or ouser
- passwd = passwd or opasswd
- acct = acct or oacct
- self.__hosts[host] = user, passwd, acct
- fp.close()
-
- def get_hosts(self):
- """Return a list of hosts mentioned in the .netrc file."""
- return self.__hosts.keys()
-
- def get_account(self, host):
- """Returns login information for the named host.
-
- The return value is a triple containing userid,
- password, and the accounting field.
-
- """
- host = host.lower()
- user = passwd = acct = None
- if host in self.__hosts:
- user, passwd, acct = self.__hosts[host]
- user = user or self.__defuser
- passwd = passwd or self.__defpasswd
- acct = acct or self.__defacct
- return user, passwd, acct
-
- def get_macros(self):
- """Return a list of all defined macro names."""
- return self.__macros.keys()
-
- def get_macro(self, macro):
- """Return a sequence of lines which define a named macro."""
- return self.__macros[macro]
-
-
-
-def test():
- '''Test program.
- Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
-
- -d dir
- -l list
- -p password
- '''
-
- if len(sys.argv) < 2:
- print test.__doc__
- sys.exit(0)
-
- debugging = 0
- rcfile = None
- while sys.argv[1] == '-d':
- debugging = debugging+1
- del sys.argv[1]
- if sys.argv[1][:2] == '-r':
- # get name of alternate ~/.netrc file:
- rcfile = sys.argv[1][2:]
- del sys.argv[1]
- host = sys.argv[1]
- ftp = FTP(host)
- ftp.set_debuglevel(debugging)
- userid = passwd = acct = ''
- try:
- netrc = Netrc(rcfile)
- except IOError:
- if rcfile is not None:
- sys.stderr.write("Could not open account file"
- " -- using anonymous login.")
- else:
- try:
- userid, passwd, acct = netrc.get_account(host)
- except KeyError:
- # no account for host
- sys.stderr.write(
- "No account -- using anonymous login.")
- ftp.login(userid, passwd, acct)
- for file in sys.argv[2:]:
- if file[:2] == '-l':
- ftp.dir(file[2:])
- elif file[:2] == '-d':
- cmd = 'CWD'
- if file[2:]: cmd = cmd + ' ' + file[2:]
- resp = ftp.sendcmd(cmd)
- elif file == '-p':
- ftp.set_pasv(not ftp.passiveserver)
- else:
- ftp.retrbinary('RETR ' + file, \
- sys.stdout.write, 1024)
- ftp.quit()
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/functools.py b/sys/lib/python/functools.py
deleted file mode 100644
index 96430365c..000000000
--- a/sys/lib/python/functools.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""functools.py - Tools for working with functions and callable objects
-"""
-# Python module wrapper for _functools C module
-# to allow utilities written in Python to be added
-# to the functools module.
-# Written by Nick Coghlan <ncoghlan at gmail.com>
-# Copyright (C) 2006 Python Software Foundation.
-# See C source code for _functools credits/copyright
-
-from _functools import partial
-
-# update_wrapper() and wraps() are tools to help write
-# wrapper functions that can handle naive introspection
-
-WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
-WRAPPER_UPDATES = ('__dict__',)
-def update_wrapper(wrapper,
- wrapped,
- assigned = WRAPPER_ASSIGNMENTS,
- updated = WRAPPER_UPDATES):
- """Update a wrapper function to look like the wrapped function
-
- wrapper is the function to be updated
- wrapped is the original function
- assigned is a tuple naming the attributes assigned directly
- from the wrapped function to the wrapper function (defaults to
- functools.WRAPPER_ASSIGNMENTS)
- updated is a tuple naming the attributes off the wrapper that
- are updated with the corresponding attribute from the wrapped
- function (defaults to functools.WRAPPER_UPDATES)
- """
- for attr in assigned:
- setattr(wrapper, attr, getattr(wrapped, attr))
- for attr in updated:
- getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
- # Return the wrapper so this can be used as a decorator via partial()
- return wrapper
-
-def wraps(wrapped,
- assigned = WRAPPER_ASSIGNMENTS,
- updated = WRAPPER_UPDATES):
- """Decorator factory to apply update_wrapper() to a wrapper function
-
- Returns a decorator that invokes update_wrapper() with the decorated
- function as the wrapper argument and the arguments to wraps() as the
- remaining arguments. Default arguments are as for update_wrapper().
- This is a convenience function to simplify applying partial() to
- update_wrapper().
- """
- return partial(update_wrapper, wrapped=wrapped,
- assigned=assigned, updated=updated)
diff --git a/sys/lib/python/getopt.py b/sys/lib/python/getopt.py
deleted file mode 100644
index 04e881ec7..000000000
--- a/sys/lib/python/getopt.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""Parser for command line options.
-
-This module helps scripts to parse the command line arguments in
-sys.argv. It supports the same conventions as the Unix getopt()
-function (including the special meanings of arguments of the form `-'
-and `--'). Long options similar to those supported by GNU software
-may be used as well via an optional third argument. This module
-provides two functions and an exception:
-
-getopt() -- Parse command line options
-gnu_getopt() -- Like getopt(), but allow option and non-option arguments
-to be intermixed.
-GetoptError -- exception (class) raised with 'opt' attribute, which is the
-option involved with the exception.
-"""
-
-# Long option support added by Lars Wirzenius <liw@iki.fi>.
-#
-# Gerrit Holl <gerrit@nl.linux.org> moved the string-based exceptions
-# to class-based exceptions.
-#
-# Peter Åstrand <astrand@lysator.liu.se> added gnu_getopt().
-#
-# TODO for gnu_getopt():
-#
-# - GNU getopt_long_only mechanism
-# - allow the caller to specify ordering
-# - RETURN_IN_ORDER option
-# - GNU extension with '-' as first character of option string
-# - optional arguments, specified by double colons
-# - a option string with a W followed by semicolon should
-# treat "-W foo" as "--foo"
-
-__all__ = ["GetoptError","error","getopt","gnu_getopt"]
-
-import os
-
-class GetoptError(Exception):
- opt = ''
- msg = ''
- def __init__(self, msg, opt=''):
- self.msg = msg
- self.opt = opt
- Exception.__init__(self, msg, opt)
-
- def __str__(self):
- return self.msg
-
-error = GetoptError # backward compatibility
-
-def getopt(args, shortopts, longopts = []):
- """getopt(args, options[, long_options]) -> opts, args
-
- Parses command line options and parameter list. args is the
- argument list to be parsed, without the leading reference to the
- running program. Typically, this means "sys.argv[1:]". shortopts
- is the string of option letters that the script wants to
- recognize, with options that require an argument followed by a
- colon (i.e., the same format that Unix getopt() uses). If
- specified, longopts is a list of strings with the names of the
- long options which should be supported. The leading '--'
- characters should not be included in the option name. Options
- which require an argument should be followed by an equal sign
- ('=').
-
- The return value consists of two elements: the first is a list of
- (option, value) pairs; the second is the list of program arguments
- left after the option list was stripped (this is a trailing slice
- of the first argument). Each option-and-value pair returned has
- the option as its first element, prefixed with a hyphen (e.g.,
- '-x'), and the option argument as its second element, or an empty
- string if the option has no argument. The options occur in the
- list in the same order in which they were found, thus allowing
- multiple occurrences. Long and short options may be mixed.
-
- """
-
- opts = []
- if type(longopts) == type(""):
- longopts = [longopts]
- else:
- longopts = list(longopts)
- while args and args[0].startswith('-') and args[0] != '-':
- if args[0] == '--':
- args = args[1:]
- break
- if args[0].startswith('--'):
- opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
- else:
- opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
-
- return opts, args
-
-def gnu_getopt(args, shortopts, longopts = []):
- """getopt(args, options[, long_options]) -> opts, args
-
- This function works like getopt(), except that GNU style scanning
- mode is used by default. This means that option and non-option
- arguments may be intermixed. The getopt() function stops
- processing options as soon as a non-option argument is
- encountered.
-
- If the first character of the option string is `+', or if the
- environment variable POSIXLY_CORRECT is set, then option
- processing stops as soon as a non-option argument is encountered.
-
- """
-
- opts = []
- prog_args = []
- if isinstance(longopts, str):
- longopts = [longopts]
- else:
- longopts = list(longopts)
-
- # Allow options after non-option arguments?
- if shortopts.startswith('+'):
- shortopts = shortopts[1:]
- all_options_first = True
- elif os.environ.get("POSIXLY_CORRECT"):
- all_options_first = True
- else:
- all_options_first = False
-
- while args:
- if args[0] == '--':
- prog_args += args[1:]
- break
-
- if args[0][:2] == '--':
- opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
- elif args[0][:1] == '-':
- opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
- else:
- if all_options_first:
- prog_args += args
- break
- else:
- prog_args.append(args[0])
- args = args[1:]
-
- return opts, prog_args
-
-def do_longs(opts, opt, longopts, args):
- try:
- i = opt.index('=')
- except ValueError:
- optarg = None
- else:
- opt, optarg = opt[:i], opt[i+1:]
-
- has_arg, opt = long_has_args(opt, longopts)
- if has_arg:
- if optarg is None:
- if not args:
- raise GetoptError('option --%s requires argument' % opt, opt)
- optarg, args = args[0], args[1:]
- elif optarg:
- raise GetoptError('option --%s must not have an argument' % opt, opt)
- opts.append(('--' + opt, optarg or ''))
- return opts, args
-
-# Return:
-# has_arg?
-# full option name
-def long_has_args(opt, longopts):
- possibilities = [o for o in longopts if o.startswith(opt)]
- if not possibilities:
- raise GetoptError('option --%s not recognized' % opt, opt)
- # Is there an exact match?
- if opt in possibilities:
- return False, opt
- elif opt + '=' in possibilities:
- return True, opt
- # No exact match, so better be unique.
- if len(possibilities) > 1:
- # XXX since possibilities contains all valid continuations, might be
- # nice to work them into the error msg
- raise GetoptError('option --%s not a unique prefix' % opt, opt)
- assert len(possibilities) == 1
- unique_match = possibilities[0]
- has_arg = unique_match.endswith('=')
- if has_arg:
- unique_match = unique_match[:-1]
- return has_arg, unique_match
-
-def do_shorts(opts, optstring, shortopts, args):
- while optstring != '':
- opt, optstring = optstring[0], optstring[1:]
- if short_has_arg(opt, shortopts):
- if optstring == '':
- if not args:
- raise GetoptError('option -%s requires argument' % opt,
- opt)
- optstring, args = args[0], args[1:]
- optarg, optstring = optstring, ''
- else:
- optarg = ''
- opts.append(('-' + opt, optarg))
- return opts, args
-
-def short_has_arg(opt, shortopts):
- for i in range(len(shortopts)):
- if opt == shortopts[i] != ':':
- return shortopts.startswith(':', i+1)
- raise GetoptError('option -%s not recognized' % opt, opt)
-
-if __name__ == '__main__':
- import sys
- print getopt(sys.argv[1:], "a:b", ["alpha=", "beta"])
diff --git a/sys/lib/python/getpass.py b/sys/lib/python/getpass.py
deleted file mode 100644
index 7cc0560c7..000000000
--- a/sys/lib/python/getpass.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""Utilities to get a password and/or the current user name.
-
-getpass(prompt) - prompt for a password, with echo turned off
-getuser() - get the user name from the environment or password database
-
-On Windows, the msvcrt module will be used.
-On the Mac EasyDialogs.AskPassword is used, if available.
-
-"""
-
-# Authors: Piers Lauder (original)
-# Guido van Rossum (Windows support and cleanup)
-
-import sys
-
-__all__ = ["getpass","getuser"]
-
-def unix_getpass(prompt='Password: ', stream=None):
- """Prompt for a password, with echo turned off.
- The prompt is written on stream, by default stdout.
-
- Restore terminal settings at end.
- """
- if stream is None:
- stream = sys.stdout
-
- try:
- fd = sys.stdin.fileno()
- except:
- return default_getpass(prompt)
-
- old = termios.tcgetattr(fd) # a copy to save
- new = old[:]
-
- new[3] = new[3] & ~termios.ECHO # 3 == 'lflags'
- try:
- termios.tcsetattr(fd, termios.TCSADRAIN, new)
- passwd = _raw_input(prompt, stream)
- finally:
- termios.tcsetattr(fd, termios.TCSADRAIN, old)
-
- stream.write('\n')
- return passwd
-
-
-def win_getpass(prompt='Password: ', stream=None):
- """Prompt for password with echo off, using Windows getch()."""
- if sys.stdin is not sys.__stdin__:
- return default_getpass(prompt, stream)
- import msvcrt
- for c in prompt:
- msvcrt.putch(c)
- pw = ""
- while 1:
- c = msvcrt.getch()
- if c == '\r' or c == '\n':
- break
- if c == '\003':
- raise KeyboardInterrupt
- if c == '\b':
- pw = pw[:-1]
- else:
- pw = pw + c
- msvcrt.putch('\r')
- msvcrt.putch('\n')
- return pw
-
-def default_getpass(prompt='Password: ', stream=None):
- try:
- ctl = open("/dev/consctl", "w")
- ctl.write("rawon")
- ctl.flush()
- buf = _raw_input(prompt, stream)
- ctl.write("rawoff")
- ctl.flush()
- ctl.close()
- return buf;
- except:
- buf = _raw_input(prompt, stream)
- return buf
-
-def _raw_input(prompt="", stream=None):
- # A raw_input() replacement that doesn't save the string in the
- # GNU readline history.
- if stream is None:
- stream = sys.stdout
- prompt = str(prompt)
- if prompt:
- stream.write(prompt)
- stream.flush()
- line = sys.stdin.readline()
- if not line:
- raise EOFError
- if line[-1] == '\n':
- line = line[:-1]
- return line
-
-
-def getuser():
- """Get the username from the environment or password database.
-
- First try various environment variables, then the password
- database. This works on Windows as long as USERNAME is set.
-
- """
-
- import os
-
- for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
- user = os.environ.get(name)
- if user:
- return user
-
- # If this fails, the exception will "explain" why
- import pwd
- return pwd.getpwuid(os.getuid())[0]
-
-# Bind the name getpass to the appropriate function
-try:
- import termios
- # it's possible there is an incompatible termios from the
- # McMillan Installer, make sure we have a UNIX-compatible termios
- termios.tcgetattr, termios.tcsetattr
-except (ImportError, AttributeError):
- getpass = default_getpass
-else:
- getpass = unix_getpass
diff --git a/sys/lib/python/gettext.py b/sys/lib/python/gettext.py
deleted file mode 100644
index 90ebc5180..000000000
--- a/sys/lib/python/gettext.py
+++ /dev/null
@@ -1,591 +0,0 @@
-"""Internationalization and localization support.
-
-This module provides internationalization (I18N) and localization (L10N)
-support for your Python programs by providing an interface to the GNU gettext
-message catalog library.
-
-I18N refers to the operation by which a program is made aware of multiple
-languages. L10N refers to the adaptation of your program, once
-internationalized, to the local language and cultural habits.
-
-"""
-
-# This module represents the integration of work, contributions, feedback, and
-# suggestions from the following people:
-#
-# Martin von Loewis, who wrote the initial implementation of the underlying
-# C-based libintlmodule (later renamed _gettext), along with a skeletal
-# gettext.py implementation.
-#
-# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
-# which also included a pure-Python implementation to read .mo files if
-# intlmodule wasn't available.
-#
-# James Henstridge, who also wrote a gettext.py module, which has some
-# interesting, but currently unsupported experimental features: the notion of
-# a Catalog class and instances, and the ability to add to a catalog file via
-# a Python API.
-#
-# Barry Warsaw integrated these modules, wrote the .install() API and code,
-# and conformed all C and Python code to Python's coding standards.
-#
-# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
-# module.
-#
-# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.
-#
-# TODO:
-# - Lazy loading of .mo files. Currently the entire catalog is loaded into
-# memory, but that's probably bad for large translated programs. Instead,
-# the lexical sort of original strings in GNU .mo files should be exploited
-# to do binary searches and lazy initializations. Or you might want to use
-# the undocumented double-hash algorithm for .mo files with hash tables, but
-# you'll need to study the GNU gettext code to do this.
-#
-# - Support Solaris .mo file formats. Unfortunately, we've been unable to
-# find this format documented anywhere.
-
-
-import locale, copy, os, re, struct, sys
-from errno import ENOENT
-
-
-__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
- 'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
- 'dgettext', 'dngettext', 'gettext', 'ngettext',
- ]
-
-_default_localedir = os.path.join(sys.prefix, 'share', 'locale')
-
-
-def test(condition, true, false):
- """
- Implements the C expression:
-
- condition ? true : false
-
- Required to correctly interpret plural forms.
- """
- if condition:
- return true
- else:
- return false
-
-
-def c2py(plural):
- """Gets a C expression as used in PO files for plural forms and returns a
- Python lambda function that implements an equivalent expression.
- """
- # Security check, allow only the "n" identifier
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- import token, tokenize
- tokens = tokenize.generate_tokens(StringIO(plural).readline)
- try:
- danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n']
- except tokenize.TokenError:
- raise ValueError, \
- 'plural forms expression error, maybe unbalanced parenthesis'
- else:
- if danger:
- raise ValueError, 'plural forms expression could be dangerous'
-
- # Replace some C operators by their Python equivalents
- plural = plural.replace('&&', ' and ')
- plural = plural.replace('||', ' or ')
-
- expr = re.compile(r'\!([^=])')
- plural = expr.sub(' not \\1', plural)
-
- # Regular expression and replacement function used to transform
- # "a?b:c" to "test(a,b,c)".
- expr = re.compile(r'(.*?)\?(.*?):(.*)')
- def repl(x):
- return "test(%s, %s, %s)" % (x.group(1), x.group(2),
- expr.sub(repl, x.group(3)))
-
- # Code to transform the plural expression, taking care of parentheses
- stack = ['']
- for c in plural:
- if c == '(':
- stack.append('')
- elif c == ')':
- if len(stack) == 1:
- # Actually, we never reach this code, because unbalanced
- # parentheses get caught in the security check at the
- # beginning.
- raise ValueError, 'unbalanced parenthesis in plural form'
- s = expr.sub(repl, stack.pop())
- stack[-1] += '(%s)' % s
- else:
- stack[-1] += c
- plural = expr.sub(repl, stack.pop())
-
- return eval('lambda n: int(%s)' % plural)
-
-
-
-def _expand_lang(locale):
- from locale import normalize
- locale = normalize(locale)
- COMPONENT_CODESET = 1 << 0
- COMPONENT_TERRITORY = 1 << 1
- COMPONENT_MODIFIER = 1 << 2
- # split up the locale into its base components
- mask = 0
- pos = locale.find('@')
- if pos >= 0:
- modifier = locale[pos:]
- locale = locale[:pos]
- mask |= COMPONENT_MODIFIER
- else:
- modifier = ''
- pos = locale.find('.')
- if pos >= 0:
- codeset = locale[pos:]
- locale = locale[:pos]
- mask |= COMPONENT_CODESET
- else:
- codeset = ''
- pos = locale.find('_')
- if pos >= 0:
- territory = locale[pos:]
- locale = locale[:pos]
- mask |= COMPONENT_TERRITORY
- else:
- territory = ''
- language = locale
- ret = []
- for i in range(mask+1):
- if not (i & ~mask): # if all components for this combo exist ...
- val = language
- if i & COMPONENT_TERRITORY: val += territory
- if i & COMPONENT_CODESET: val += codeset
- if i & COMPONENT_MODIFIER: val += modifier
- ret.append(val)
- ret.reverse()
- return ret
-
-
-
-class NullTranslations:
- def __init__(self, fp=None):
- self._info = {}
- self._charset = None
- self._output_charset = None
- self._fallback = None
- if fp is not None:
- self._parse(fp)
-
- def _parse(self, fp):
- pass
-
- def add_fallback(self, fallback):
- if self._fallback:
- self._fallback.add_fallback(fallback)
- else:
- self._fallback = fallback
-
- def gettext(self, message):
- if self._fallback:
- return self._fallback.gettext(message)
- return message
-
- def lgettext(self, message):
- if self._fallback:
- return self._fallback.lgettext(message)
- return message
-
- def ngettext(self, msgid1, msgid2, n):
- if self._fallback:
- return self._fallback.ngettext(msgid1, msgid2, n)
- if n == 1:
- return msgid1
- else:
- return msgid2
-
- def lngettext(self, msgid1, msgid2, n):
- if self._fallback:
- return self._fallback.lngettext(msgid1, msgid2, n)
- if n == 1:
- return msgid1
- else:
- return msgid2
-
- def ugettext(self, message):
- if self._fallback:
- return self._fallback.ugettext(message)
- return unicode(message)
-
- def ungettext(self, msgid1, msgid2, n):
- if self._fallback:
- return self._fallback.ungettext(msgid1, msgid2, n)
- if n == 1:
- return unicode(msgid1)
- else:
- return unicode(msgid2)
-
- def info(self):
- return self._info
-
- def charset(self):
- return self._charset
-
- def output_charset(self):
- return self._output_charset
-
- def set_output_charset(self, charset):
- self._output_charset = charset
-
- def install(self, unicode=False, names=None):
- import __builtin__
- __builtin__.__dict__['_'] = unicode and self.ugettext or self.gettext
- if hasattr(names, "__contains__"):
- if "gettext" in names:
- __builtin__.__dict__['gettext'] = __builtin__.__dict__['_']
- if "ngettext" in names:
- __builtin__.__dict__['ngettext'] = (unicode and self.ungettext
- or self.ngettext)
- if "lgettext" in names:
- __builtin__.__dict__['lgettext'] = self.lgettext
- if "lngettext" in names:
- __builtin__.__dict__['lngettext'] = self.lngettext
-
-
-class GNUTranslations(NullTranslations):
- # Magic number of .mo files
- LE_MAGIC = 0x950412deL
- BE_MAGIC = 0xde120495L
-
- def _parse(self, fp):
- """Override this method to support alternative .mo formats."""
- unpack = struct.unpack
- filename = getattr(fp, 'name', '')
- # Parse the .mo file header, which consists of 5 little endian 32
- # bit words.
- self._catalog = catalog = {}
- self.plural = lambda n: int(n != 1) # germanic plural by default
- buf = fp.read()
- buflen = len(buf)
- # Are we big endian or little endian?
- magic = unpack('<I', buf[:4])[0]
- if magic == self.LE_MAGIC:
- version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
- ii = '<II'
- elif magic == self.BE_MAGIC:
- version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
- ii = '>II'
- else:
- raise IOError(0, 'Bad magic number', filename)
- # Now put all messages from the .mo file buffer into the catalog
- # dictionary.
- for i in xrange(0, msgcount):
- mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
- mend = moff + mlen
- tlen, toff = unpack(ii, buf[transidx:transidx+8])
- tend = toff + tlen
- if mend < buflen and tend < buflen:
- msg = buf[moff:mend]
- tmsg = buf[toff:tend]
- else:
- raise IOError(0, 'File is corrupt', filename)
- # See if we're looking at GNU .mo conventions for metadata
- if mlen == 0:
- # Catalog description
- lastk = k = None
- for item in tmsg.splitlines():
- item = item.strip()
- if not item:
- continue
- if ':' in item:
- k, v = item.split(':', 1)
- k = k.strip().lower()
- v = v.strip()
- self._info[k] = v
- lastk = k
- elif lastk:
- self._info[lastk] += '\n' + item
- if k == 'content-type':
- self._charset = v.split('charset=')[1]
- elif k == 'plural-forms':
- v = v.split(';')
- plural = v[1].split('plural=')[1]
- self.plural = c2py(plural)
- # Note: we unconditionally convert both msgids and msgstrs to
- # Unicode using the character encoding specified in the charset
- # parameter of the Content-Type header. The gettext documentation
- # strongly encourages msgids to be us-ascii, but some appliations
- # require alternative encodings (e.g. Zope's ZCML and ZPT). For
- # traditional gettext applications, the msgid conversion will
- # cause no problems since us-ascii should always be a subset of
- # the charset encoding. We may want to fall back to 8-bit msgids
- # if the Unicode conversion fails.
- if '\x00' in msg:
- # Plural forms
- msgid1, msgid2 = msg.split('\x00')
- tmsg = tmsg.split('\x00')
- if self._charset:
- msgid1 = unicode(msgid1, self._charset)
- tmsg = [unicode(x, self._charset) for x in tmsg]
- for i in range(len(tmsg)):
- catalog[(msgid1, i)] = tmsg[i]
- else:
- if self._charset:
- msg = unicode(msg, self._charset)
- tmsg = unicode(tmsg, self._charset)
- catalog[msg] = tmsg
- # advance to next entry in the seek tables
- masteridx += 8
- transidx += 8
-
- def gettext(self, message):
- missing = object()
- tmsg = self._catalog.get(message, missing)
- if tmsg is missing:
- if self._fallback:
- return self._fallback.gettext(message)
- return message
- # Encode the Unicode tmsg back to an 8-bit string, if possible
- if self._output_charset:
- return tmsg.encode(self._output_charset)
- elif self._charset:
- return tmsg.encode(self._charset)
- return tmsg
-
- def lgettext(self, message):
- missing = object()
- tmsg = self._catalog.get(message, missing)
- if tmsg is missing:
- if self._fallback:
- return self._fallback.lgettext(message)
- return message
- if self._output_charset:
- return tmsg.encode(self._output_charset)
- return tmsg.encode(locale.getpreferredencoding())
-
- def ngettext(self, msgid1, msgid2, n):
- try:
- tmsg = self._catalog[(msgid1, self.plural(n))]
- if self._output_charset:
- return tmsg.encode(self._output_charset)
- elif self._charset:
- return tmsg.encode(self._charset)
- return tmsg
- except KeyError:
- if self._fallback:
- return self._fallback.ngettext(msgid1, msgid2, n)
- if n == 1:
- return msgid1
- else:
- return msgid2
-
- def lngettext(self, msgid1, msgid2, n):
- try:
- tmsg = self._catalog[(msgid1, self.plural(n))]
- if self._output_charset:
- return tmsg.encode(self._output_charset)
- return tmsg.encode(locale.getpreferredencoding())
- except KeyError:
- if self._fallback:
- return self._fallback.lngettext(msgid1, msgid2, n)
- if n == 1:
- return msgid1
- else:
- return msgid2
-
- def ugettext(self, message):
- missing = object()
- tmsg = self._catalog.get(message, missing)
- if tmsg is missing:
- if self._fallback:
- return self._fallback.ugettext(message)
- return unicode(message)
- return tmsg
-
- def ungettext(self, msgid1, msgid2, n):
- try:
- tmsg = self._catalog[(msgid1, self.plural(n))]
- except KeyError:
- if self._fallback:
- return self._fallback.ungettext(msgid1, msgid2, n)
- if n == 1:
- tmsg = unicode(msgid1)
- else:
- tmsg = unicode(msgid2)
- return tmsg
-
-
-# Locate a .mo file using the gettext strategy
-def find(domain, localedir=None, languages=None, all=0):
- # Get some reasonable defaults for arguments that were not supplied
- if localedir is None:
- localedir = _default_localedir
- if languages is None:
- languages = []
- for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
- val = os.environ.get(envar)
- if val:
- languages = val.split(':')
- break
- if 'C' not in languages:
- languages.append('C')
- # now normalize and expand the languages
- nelangs = []
- for lang in languages:
- for nelang in _expand_lang(lang):
- if nelang not in nelangs:
- nelangs.append(nelang)
- # select a language
- if all:
- result = []
- else:
- result = None
- for lang in nelangs:
- if lang == 'C':
- break
- mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
- if os.path.exists(mofile):
- if all:
- result.append(mofile)
- else:
- return mofile
- return result
-
-
-
-# a mapping between absolute .mo file path and Translation object
-_translations = {}
-
-def translation(domain, localedir=None, languages=None,
- class_=None, fallback=False, codeset=None):
- if class_ is None:
- class_ = GNUTranslations
- mofiles = find(domain, localedir, languages, all=1)
- if not mofiles:
- if fallback:
- return NullTranslations()
- raise IOError(ENOENT, 'No translation file found for domain', domain)
- # TBD: do we need to worry about the file pointer getting collected?
- # Avoid opening, reading, and parsing the .mo file after it's been done
- # once.
- result = None
- for mofile in mofiles:
- key = os.path.abspath(mofile)
- t = _translations.get(key)
- if t is None:
- t = _translations.setdefault(key, class_(open(mofile, 'rb')))
- # Copy the translation object to allow setting fallbacks and
- # output charset. All other instance data is shared with the
- # cached object.
- t = copy.copy(t)
- if codeset:
- t.set_output_charset(codeset)
- if result is None:
- result = t
- else:
- result.add_fallback(t)
- return result
-
-
-def install(domain, localedir=None, unicode=False, codeset=None, names=None):
- t = translation(domain, localedir, fallback=True, codeset=codeset)
- t.install(unicode, names)
-
-
-
-# a mapping b/w domains and locale directories
-_localedirs = {}
-# a mapping b/w domains and codesets
-_localecodesets = {}
-# current global domain, `messages' used for compatibility w/ GNU gettext
-_current_domain = 'messages'
-
-
-def textdomain(domain=None):
- global _current_domain
- if domain is not None:
- _current_domain = domain
- return _current_domain
-
-
-def bindtextdomain(domain, localedir=None):
- global _localedirs
- if localedir is not None:
- _localedirs[domain] = localedir
- return _localedirs.get(domain, _default_localedir)
-
-
-def bind_textdomain_codeset(domain, codeset=None):
- global _localecodesets
- if codeset is not None:
- _localecodesets[domain] = codeset
- return _localecodesets.get(domain)
-
-
-def dgettext(domain, message):
- try:
- t = translation(domain, _localedirs.get(domain, None),
- codeset=_localecodesets.get(domain))
- except IOError:
- return message
- return t.gettext(message)
-
-def ldgettext(domain, message):
- try:
- t = translation(domain, _localedirs.get(domain, None),
- codeset=_localecodesets.get(domain))
- except IOError:
- return message
- return t.lgettext(message)
-
-def dngettext(domain, msgid1, msgid2, n):
- try:
- t = translation(domain, _localedirs.get(domain, None),
- codeset=_localecodesets.get(domain))
- except IOError:
- if n == 1:
- return msgid1
- else:
- return msgid2
- return t.ngettext(msgid1, msgid2, n)
-
-def ldngettext(domain, msgid1, msgid2, n):
- try:
- t = translation(domain, _localedirs.get(domain, None),
- codeset=_localecodesets.get(domain))
- except IOError:
- if n == 1:
- return msgid1
- else:
- return msgid2
- return t.lngettext(msgid1, msgid2, n)
-
-def gettext(message):
- return dgettext(_current_domain, message)
-
-def lgettext(message):
- return ldgettext(_current_domain, message)
-
-def ngettext(msgid1, msgid2, n):
- return dngettext(_current_domain, msgid1, msgid2, n)
-
-def lngettext(msgid1, msgid2, n):
- return ldngettext(_current_domain, msgid1, msgid2, n)
-
-# dcgettext() has been deemed unnecessary and is not implemented.
-
-# James Henstridge's Catalog constructor from GNOME gettext. Documented usage
-# was:
-#
-# import gettext
-# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
-# _ = cat.gettext
-# print _('Hello World')
-
-# The resulting catalog object currently don't support access through a
-# dictionary API, which was supported (but apparently unused) in GNOME
-# gettext.
-
-Catalog = translation
diff --git a/sys/lib/python/glob.py b/sys/lib/python/glob.py
deleted file mode 100644
index 95656cc1a..000000000
--- a/sys/lib/python/glob.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Filename globbing utility."""
-
-import os
-import fnmatch
-import re
-
-__all__ = ["glob", "iglob"]
-
-def glob(pathname):
- """Return a list of paths matching a pathname pattern.
-
- The pattern may contain simple shell-style wildcards a la fnmatch.
-
- """
- return list(iglob(pathname))
-
-def iglob(pathname):
- """Return a list of paths matching a pathname pattern.
-
- The pattern may contain simple shell-style wildcards a la fnmatch.
-
- """
- if not has_magic(pathname):
- if os.path.lexists(pathname):
- yield pathname
- return
- dirname, basename = os.path.split(pathname)
- if not dirname:
- for name in glob1(os.curdir, basename):
- yield name
- return
- if has_magic(dirname):
- dirs = iglob(dirname)
- else:
- dirs = [dirname]
- if has_magic(basename):
- glob_in_dir = glob1
- else:
- glob_in_dir = glob0
- for dirname in dirs:
- for name in glob_in_dir(dirname, basename):
- yield os.path.join(dirname, name)
-
-# These 2 helper functions non-recursively glob inside a literal directory.
-# They return a list of basenames. `glob1` accepts a pattern while `glob0`
-# takes a literal basename (so it only has to check for its existence).
-
-def glob1(dirname, pattern):
- if not dirname:
- dirname = os.curdir
- try:
- names = os.listdir(dirname)
- except os.error:
- return []
- if pattern[0]!='.':
- names=filter(lambda x: x[0]!='.',names)
- return fnmatch.filter(names,pattern)
-
-def glob0(dirname, basename):
- if basename == '':
- # `os.path.split()` returns an empty basename for paths ending with a
- # directory separator. 'q*x/' should match only directories.
- if os.path.isdir(dirname):
- return [basename]
- else:
- if os.path.lexists(os.path.join(dirname, basename)):
- return [basename]
- return []
-
-
-magic_check = re.compile('[*?[]')
-
-def has_magic(s):
- return magic_check.search(s) is not None
diff --git a/sys/lib/python/gopherlib.py b/sys/lib/python/gopherlib.py
deleted file mode 100644
index d789161e6..000000000
--- a/sys/lib/python/gopherlib.py
+++ /dev/null
@@ -1,209 +0,0 @@
-"""Gopher protocol client interface."""
-
-__all__ = ["send_selector","send_query"]
-
-import warnings
-warnings.warn("the gopherlib module is deprecated", DeprecationWarning,
- stacklevel=2)
-
-# Default selector, host and port
-DEF_SELECTOR = '1/'
-DEF_HOST = 'gopher.micro.umn.edu'
-DEF_PORT = 70
-
-# Recognized file types
-A_TEXT = '0'
-A_MENU = '1'
-A_CSO = '2'
-A_ERROR = '3'
-A_MACBINHEX = '4'
-A_PCBINHEX = '5'
-A_UUENCODED = '6'
-A_INDEX = '7'
-A_TELNET = '8'
-A_BINARY = '9'
-A_DUPLICATE = '+'
-A_SOUND = 's'
-A_EVENT = 'e'
-A_CALENDAR = 'c'
-A_HTML = 'h'
-A_TN3270 = 'T'
-A_MIME = 'M'
-A_IMAGE = 'I'
-A_WHOIS = 'w'
-A_QUERY = 'q'
-A_GIF = 'g'
-A_HTML = 'h' # HTML file
-A_WWW = 'w' # WWW address
-A_PLUS_IMAGE = ':'
-A_PLUS_MOVIE = ';'
-A_PLUS_SOUND = '<'
-
-
-_names = dir()
-_type_to_name_map = {}
-def type_to_name(gtype):
- """Map all file types to strings; unknown types become TYPE='x'."""
- global _type_to_name_map
- if _type_to_name_map=={}:
- for name in _names:
- if name[:2] == 'A_':
- _type_to_name_map[eval(name)] = name[2:]
- if gtype in _type_to_name_map:
- return _type_to_name_map[gtype]
- return 'TYPE=%r' % (gtype,)
-
-# Names for characters and strings
-CRLF = '\r\n'
-TAB = '\t'
-
-def send_selector(selector, host, port = 0):
- """Send a selector to a given host and port, return a file with the reply."""
- import socket
- if not port:
- i = host.find(':')
- if i >= 0:
- host, port = host[:i], int(host[i+1:])
- if not port:
- port = DEF_PORT
- elif type(port) == type(''):
- port = int(port)
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect((host, port))
- s.sendall(selector + CRLF)
- s.shutdown(1)
- return s.makefile('rb')
-
-def send_query(selector, query, host, port = 0):
- """Send a selector and a query string."""
- return send_selector(selector + '\t' + query, host, port)
-
-def path_to_selector(path):
- """Takes a path as returned by urlparse and returns the appropriate selector."""
- if path=="/":
- return "/"
- else:
- return path[2:] # Cuts initial slash and data type identifier
-
-def path_to_datatype_name(path):
- """Takes a path as returned by urlparse and maps it to a string.
- See section 3.4 of RFC 1738 for details."""
- if path=="/":
- # No way to tell, although "INDEX" is likely
- return "TYPE='unknown'"
- else:
- return type_to_name(path[1])
-
-# The following functions interpret the data returned by the gopher
-# server according to the expected type, e.g. textfile or directory
-
-def get_directory(f):
- """Get a directory in the form of a list of entries."""
- entries = []
- while 1:
- line = f.readline()
- if not line:
- print '(Unexpected EOF from server)'
- break
- if line[-2:] == CRLF:
- line = line[:-2]
- elif line[-1:] in CRLF:
- line = line[:-1]
- if line == '.':
- break
- if not line:
- print '(Empty line from server)'
- continue
- gtype = line[0]
- parts = line[1:].split(TAB)
- if len(parts) < 4:
- print '(Bad line from server: %r)' % (line,)
- continue
- if len(parts) > 4:
- if parts[4:] != ['+']:
- print '(Extra info from server:',
- print parts[4:], ')'
- else:
- parts.append('')
- parts.insert(0, gtype)
- entries.append(parts)
- return entries
-
-def get_textfile(f):
- """Get a text file as a list of lines, with trailing CRLF stripped."""
- lines = []
- get_alt_textfile(f, lines.append)
- return lines
-
-def get_alt_textfile(f, func):
- """Get a text file and pass each line to a function, with trailing CRLF stripped."""
- while 1:
- line = f.readline()
- if not line:
- print '(Unexpected EOF from server)'
- break
- if line[-2:] == CRLF:
- line = line[:-2]
- elif line[-1:] in CRLF:
- line = line[:-1]
- if line == '.':
- break
- if line[:2] == '..':
- line = line[1:]
- func(line)
-
-def get_binary(f):
- """Get a binary file as one solid data block."""
- data = f.read()
- return data
-
-def get_alt_binary(f, func, blocksize):
- """Get a binary file and pass each block to a function."""
- while 1:
- data = f.read(blocksize)
- if not data:
- break
- func(data)
-
-def test():
- """Trivial test program."""
- import sys
- import getopt
- opts, args = getopt.getopt(sys.argv[1:], '')
- selector = DEF_SELECTOR
- type = selector[0]
- host = DEF_HOST
- if args:
- host = args[0]
- args = args[1:]
- if args:
- type = args[0]
- args = args[1:]
- if len(type) > 1:
- type, selector = type[0], type
- else:
- selector = ''
- if args:
- selector = args[0]
- args = args[1:]
- query = ''
- if args:
- query = args[0]
- args = args[1:]
- if type == A_INDEX:
- f = send_query(selector, query, host)
- else:
- f = send_selector(selector, host)
- if type == A_TEXT:
- lines = get_textfile(f)
- for item in lines: print item
- elif type in (A_MENU, A_INDEX):
- entries = get_directory(f)
- for item in entries: print item
- else:
- data = get_binary(f)
- print 'binary data:', len(data), 'bytes:', repr(data[:100])[:40]
-
-# Run the test when run as script
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/gzip.py b/sys/lib/python/gzip.py
deleted file mode 100644
index 0bf29e86b..000000000
--- a/sys/lib/python/gzip.py
+++ /dev/null
@@ -1,490 +0,0 @@
-"""Functions that read and write gzipped files.
-
-The user of the file doesn't have to worry about the compression,
-but random access is not allowed."""
-
-# based on Andrew Kuchling's minigzip.py distributed with the zlib module
-
-import struct, sys, time
-import zlib
-import __builtin__
-
-__all__ = ["GzipFile","open"]
-
-FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
-
-READ, WRITE = 1, 2
-
-def U32(i):
- """Return i as an unsigned integer, assuming it fits in 32 bits.
-
- If it's >= 2GB when viewed as a 32-bit unsigned int, return a long.
- """
- if i < 0:
- i += 1L << 32
- return i
-
-def LOWU32(i):
- """Return the low-order 32 bits of an int, as a non-negative int."""
- return i & 0xFFFFFFFFL
-
-def write32(output, value):
- output.write(struct.pack("<l", value))
-
-def write32u(output, value):
- # The L format writes the bit pattern correctly whether signed
- # or unsigned.
- output.write(struct.pack("<L", value))
-
-def read32(input):
- return struct.unpack("<l", input.read(4))[0]
-
-def open(filename, mode="rb", compresslevel=9):
- """Shorthand for GzipFile(filename, mode, compresslevel).
-
- The filename argument is required; mode defaults to 'rb'
- and compresslevel defaults to 9.
-
- """
- return GzipFile(filename, mode, compresslevel)
-
-class GzipFile:
- """The GzipFile class simulates most of the methods of a file object with
- the exception of the readinto() and truncate() methods.
-
- """
-
- myfileobj = None
- max_read_chunk = 10 * 1024 * 1024 # 10Mb
-
- def __init__(self, filename=None, mode=None,
- compresslevel=9, fileobj=None):
- """Constructor for the GzipFile class.
-
- At least one of fileobj and filename must be given a
- non-trivial value.
-
- The new class instance is based on fileobj, which can be a regular
- file, a StringIO object, or any other object which simulates a file.
- It defaults to None, in which case filename is opened to provide
- a file object.
-
- When fileobj is not None, the filename argument is only used to be
- included in the gzip file header, which may includes the original
- filename of the uncompressed file. It defaults to the filename of
- fileobj, if discernible; otherwise, it defaults to the empty string,
- and in this case the original filename is not included in the header.
-
- The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
- depending on whether the file will be read or written. The default
- is the mode of fileobj if discernible; otherwise, the default is 'rb'.
- Be aware that only the 'rb', 'ab', and 'wb' values should be used
- for cross-platform portability.
-
- The compresslevel argument is an integer from 1 to 9 controlling the
- level of compression; 1 is fastest and produces the least compression,
- and 9 is slowest and produces the most compression. The default is 9.
-
- """
-
- # guarantee the file is opened in binary mode on platforms
- # that care about that sort of thing
- if mode and 'b' not in mode:
- mode += 'b'
- if fileobj is None:
- fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
- if filename is None:
- if hasattr(fileobj, 'name'): filename = fileobj.name
- else: filename = ''
- if mode is None:
- if hasattr(fileobj, 'mode'): mode = fileobj.mode
- else: mode = 'rb'
-
- if mode[0:1] == 'r':
- self.mode = READ
- # Set flag indicating start of a new member
- self._new_member = True
- self.extrabuf = ""
- self.extrasize = 0
- self.filename = filename
- # Starts small, scales exponentially
- self.min_readsize = 100
-
- elif mode[0:1] == 'w' or mode[0:1] == 'a':
- self.mode = WRITE
- self._init_write(filename)
- self.compress = zlib.compressobj(compresslevel,
- zlib.DEFLATED,
- -zlib.MAX_WBITS,
- zlib.DEF_MEM_LEVEL,
- 0)
- else:
- raise IOError, "Mode " + mode + " not supported"
-
- self.fileobj = fileobj
- self.offset = 0
-
- if self.mode == WRITE:
- self._write_gzip_header()
-
- def __repr__(self):
- s = repr(self.fileobj)
- return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
-
- def _init_write(self, filename):
- if filename[-3:] != '.gz':
- filename = filename + '.gz'
- self.filename = filename
- self.crc = zlib.crc32("")
- self.size = 0
- self.writebuf = []
- self.bufsize = 0
-
- def _write_gzip_header(self):
- self.fileobj.write('\037\213') # magic header
- self.fileobj.write('\010') # compression method
- fname = self.filename[:-3]
- flags = 0
- if fname:
- flags = FNAME
- self.fileobj.write(chr(flags))
- write32u(self.fileobj, long(time.time()))
- self.fileobj.write('\002')
- self.fileobj.write('\377')
- if fname:
- self.fileobj.write(fname + '\000')
-
- def _init_read(self):
- self.crc = zlib.crc32("")
- self.size = 0
-
- def _read_gzip_header(self):
- magic = self.fileobj.read(2)
- if magic != '\037\213':
- raise IOError, 'Not a gzipped file'
- method = ord( self.fileobj.read(1) )
- if method != 8:
- raise IOError, 'Unknown compression method'
- flag = ord( self.fileobj.read(1) )
- # modtime = self.fileobj.read(4)
- # extraflag = self.fileobj.read(1)
- # os = self.fileobj.read(1)
- self.fileobj.read(6)
-
- if flag & FEXTRA:
- # Read & discard the extra field, if present
- xlen = ord(self.fileobj.read(1))
- xlen = xlen + 256*ord(self.fileobj.read(1))
- self.fileobj.read(xlen)
- if flag & FNAME:
- # Read and discard a null-terminated string containing the filename
- while True:
- s = self.fileobj.read(1)
- if not s or s=='\000':
- break
- if flag & FCOMMENT:
- # Read and discard a null-terminated string containing a comment
- while True:
- s = self.fileobj.read(1)
- if not s or s=='\000':
- break
- if flag & FHCRC:
- self.fileobj.read(2) # Read & discard the 16-bit header CRC
-
-
- def write(self,data):
- if self.mode != WRITE:
- import errno
- raise IOError(errno.EBADF, "write() on read-only GzipFile object")
-
- if self.fileobj is None:
- raise ValueError, "write() on closed GzipFile object"
- if len(data) > 0:
- self.size = self.size + len(data)
- self.crc = zlib.crc32(data, self.crc)
- self.fileobj.write( self.compress.compress(data) )
- self.offset += len(data)
-
- def read(self, size=-1):
- if self.mode != READ:
- import errno
- raise IOError(errno.EBADF, "read() on write-only GzipFile object")
-
- if self.extrasize <= 0 and self.fileobj is None:
- return ''
-
- readsize = 1024
- if size < 0: # get the whole thing
- try:
- while True:
- self._read(readsize)
- readsize = min(self.max_read_chunk, readsize * 2)
- except EOFError:
- size = self.extrasize
- else: # just get some more of it
- try:
- while size > self.extrasize:
- self._read(readsize)
- readsize = min(self.max_read_chunk, readsize * 2)
- except EOFError:
- if size > self.extrasize:
- size = self.extrasize
-
- chunk = self.extrabuf[:size]
- self.extrabuf = self.extrabuf[size:]
- self.extrasize = self.extrasize - size
-
- self.offset += size
- return chunk
-
- def _unread(self, buf):
- self.extrabuf = buf + self.extrabuf
- self.extrasize = len(buf) + self.extrasize
- self.offset -= len(buf)
-
- def _read(self, size=1024):
- if self.fileobj is None:
- raise EOFError, "Reached EOF"
-
- if self._new_member:
- # If the _new_member flag is set, we have to
- # jump to the next member, if there is one.
- #
- # First, check if we're at the end of the file;
- # if so, it's time to stop; no more members to read.
- pos = self.fileobj.tell() # Save current position
- self.fileobj.seek(0, 2) # Seek to end of file
- if pos == self.fileobj.tell():
- raise EOFError, "Reached EOF"
- else:
- self.fileobj.seek( pos ) # Return to original position
-
- self._init_read()
- self._read_gzip_header()
- self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
- self._new_member = False
-
- # Read a chunk of data from the file
- buf = self.fileobj.read(size)
-
- # If the EOF has been reached, flush the decompression object
- # and mark this object as finished.
-
- if buf == "":
- uncompress = self.decompress.flush()
- self._read_eof()
- self._add_read_data( uncompress )
- raise EOFError, 'Reached EOF'
-
- uncompress = self.decompress.decompress(buf)
- self._add_read_data( uncompress )
-
- if self.decompress.unused_data != "":
- # Ending case: we've come to the end of a member in the file,
- # so seek back to the start of the unused data, finish up
- # this member, and read a new gzip header.
- # (The number of bytes to seek back is the length of the unused
- # data, minus 8 because _read_eof() will rewind a further 8 bytes)
- self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
-
- # Check the CRC and file size, and set the flag so we read
- # a new member on the next call
- self._read_eof()
- self._new_member = True
-
- def _add_read_data(self, data):
- self.crc = zlib.crc32(data, self.crc)
- self.extrabuf = self.extrabuf + data
- self.extrasize = self.extrasize + len(data)
- self.size = self.size + len(data)
-
- def _read_eof(self):
- # We've read to the end of the file, so we have to rewind in order
- # to reread the 8 bytes containing the CRC and the file size.
- # We check the that the computed CRC and size of the
- # uncompressed data matches the stored values. Note that the size
- # stored is the true file size mod 2**32.
- self.fileobj.seek(-8, 1)
- crc32 = read32(self.fileobj)
- isize = U32(read32(self.fileobj)) # may exceed 2GB
- if U32(crc32) != U32(self.crc):
- raise IOError, "CRC check failed"
- elif isize != LOWU32(self.size):
- raise IOError, "Incorrect length of data produced"
-
- def close(self):
- if self.mode == WRITE:
- self.fileobj.write(self.compress.flush())
- # The native zlib crc is an unsigned 32-bit integer, but
- # the Python wrapper implicitly casts that to a signed C
- # long. So, on a 32-bit box self.crc may "look negative",
- # while the same crc on a 64-bit box may "look positive".
- # To avoid irksome warnings from the `struct` module, force
- # it to look positive on all boxes.
- write32u(self.fileobj, LOWU32(self.crc))
- # self.size may exceed 2GB, or even 4GB
- write32u(self.fileobj, LOWU32(self.size))
- self.fileobj = None
- elif self.mode == READ:
- self.fileobj = None
- if self.myfileobj:
- self.myfileobj.close()
- self.myfileobj = None
-
- def __del__(self):
- try:
- if (self.myfileobj is None and
- self.fileobj is None):
- return
- except AttributeError:
- return
- self.close()
-
- def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
- if self.mode == WRITE:
- # Ensure the compressor's buffer is flushed
- self.fileobj.write(self.compress.flush(zlib_mode))
- self.fileobj.flush()
-
- def fileno(self):
- """Invoke the underlying file object's fileno() method.
-
- This will raise AttributeError if the underlying file object
- doesn't support fileno().
- """
- return self.fileobj.fileno()
-
- def isatty(self):
- return False
-
- def tell(self):
- return self.offset
-
- def rewind(self):
- '''Return the uncompressed stream file position indicator to the
- beginning of the file'''
- if self.mode != READ:
- raise IOError("Can't rewind in write mode")
- self.fileobj.seek(0)
- self._new_member = True
- self.extrabuf = ""
- self.extrasize = 0
- self.offset = 0
-
- def seek(self, offset):
- if self.mode == WRITE:
- if offset < self.offset:
- raise IOError('Negative seek in write mode')
- count = offset - self.offset
- for i in range(count // 1024):
- self.write(1024 * '\0')
- self.write((count % 1024) * '\0')
- elif self.mode == READ:
- if offset < self.offset:
- # for negative seek, rewind and do positive seek
- self.rewind()
- count = offset - self.offset
- for i in range(count // 1024):
- self.read(1024)
- self.read(count % 1024)
-
- def readline(self, size=-1):
- if size < 0:
- size = sys.maxint
- readsize = self.min_readsize
- else:
- readsize = size
- bufs = []
- while size != 0:
- c = self.read(readsize)
- i = c.find('\n')
-
- # We set i=size to break out of the loop under two
- # conditions: 1) there's no newline, and the chunk is
- # larger than size, or 2) there is a newline, but the
- # resulting line would be longer than 'size'.
- if (size <= i) or (i == -1 and len(c) > size):
- i = size - 1
-
- if i >= 0 or c == '':
- bufs.append(c[:i + 1]) # Add portion of last chunk
- self._unread(c[i + 1:]) # Push back rest of chunk
- break
-
- # Append chunk to list, decrease 'size',
- bufs.append(c)
- size = size - len(c)
- readsize = min(size, readsize * 2)
- if readsize > self.min_readsize:
- self.min_readsize = min(readsize, self.min_readsize * 2, 512)
- return ''.join(bufs) # Return resulting line
-
- def readlines(self, sizehint=0):
- # Negative numbers result in reading all the lines
- if sizehint <= 0:
- sizehint = sys.maxint
- L = []
- while sizehint > 0:
- line = self.readline()
- if line == "":
- break
- L.append(line)
- sizehint = sizehint - len(line)
-
- return L
-
- def writelines(self, L):
- for line in L:
- self.write(line)
-
- def __iter__(self):
- return self
-
- def next(self):
- line = self.readline()
- if line:
- return line
- else:
- raise StopIteration
-
-
-def _test():
- # Act like gzip; with -d, act like gunzip.
- # The input file is not deleted, however, nor are any other gzip
- # options or features supported.
- args = sys.argv[1:]
- decompress = args and args[0] == "-d"
- if decompress:
- args = args[1:]
- if not args:
- args = ["-"]
- for arg in args:
- if decompress:
- if arg == "-":
- f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
- g = sys.stdout
- else:
- if arg[-3:] != ".gz":
- print "filename doesn't end in .gz:", repr(arg)
- continue
- f = open(arg, "rb")
- g = __builtin__.open(arg[:-3], "wb")
- else:
- if arg == "-":
- f = sys.stdin
- g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
- else:
- f = __builtin__.open(arg, "rb")
- g = open(arg + ".gz", "wb")
- while True:
- chunk = f.read(1024)
- if not chunk:
- break
- g.write(chunk)
- if g is not sys.stdout:
- g.close()
- if f is not sys.stdin:
- f.close()
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/hashlib.py b/sys/lib/python/hashlib.py
deleted file mode 100644
index 2a09dc344..000000000
--- a/sys/lib/python/hashlib.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# $Id: hashlib.py 52533 2006-10-29 18:01:12Z georg.brandl $
-#
-# Copyright (C) 2005 Gregory P. Smith (greg@electricrain.com)
-# Licensed to PSF under a Contributor Agreement.
-#
-
-__doc__ = """hashlib module - A common interface to many hash functions.
-
-new(name, string='') - returns a new hash object implementing the
- given hash function; initializing the hash
- using the given string data.
-
-Named constructor functions are also available, these are much faster
-than using new():
-
-md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
-
-More algorithms may be available on your platform but the above are
-guaranteed to exist.
-
-Choose your hash function wisely. Some have known collision weaknesses.
-sha384 and sha512 will be slow on 32 bit platforms.
-
-Hash objects have these methods:
- - update(arg): Update the hash object with the string arg. Repeated calls
- are equivalent to a single call with the concatenation of all
- the arguments.
- - digest(): Return the digest of the strings passed to the update() method
- so far. This may contain non-ASCII characters, including
- NUL bytes.
- - hexdigest(): Like digest() except the digest is returned as a string of
- double length, containing only hexadecimal digits.
- - copy(): Return a copy (clone) of the hash object. This can be used to
- efficiently compute the digests of strings that share a common
- initial substring.
-
-For example, to obtain the digest of the string 'Nobody inspects the
-spammish repetition':
-
- >>> import hashlib
- >>> m = hashlib.md5()
- >>> m.update("Nobody inspects")
- >>> m.update(" the spammish repetition")
- >>> m.digest()
- '\xbbd\x9c\x83\xdd\x1e\xa5\xc9\xd9\xde\xc9\xa1\x8d\xf0\xff\xe9'
-
-More condensed:
-
- >>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
- 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
-
-"""
-
-try:
- import _sechash
- md5 = _sechash.md5
- sha1 = _sechash.sha1
- sha224 = _sechash.sha224
- sha256 = _sechash.sha256
- sha384 = _sechash.sha384
- sha512 = _sechash.sha512
-except ImportError:
- import _hashlib
- md5 = _hashlib.openssl_md5
- sha1 = _hashlib.openssl_sha1
- sha224 = _hashlib.openssl_sha224
- sha256 = _hashlib.openssl_sha256
- sha384 = _hashlib.openssl_sha384
- sha512 = _hashlib.openssl_sha512
-
-algs = dict()
-for a in [md5, sha1, sha224, sha256, sha384, sha512]:
- algs[a().name.lower()] = a
-
-def new(name, string=''):
- """new(name, string='') - Return a new hashing object using the named algorithm;
- optionally initialized with a string.
- """
- a = algs[name.lower()]
- if a != None:
- return a(string)
- raise ValueError, "unsupported hash type"
diff --git a/sys/lib/python/heapq.py b/sys/lib/python/heapq.py
deleted file mode 100644
index 753c3b7ec..000000000
--- a/sys/lib/python/heapq.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# -*- coding: Latin-1 -*-
-
-"""Heap queue algorithm (a.k.a. priority queue).
-
-Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
-all k, counting elements from 0. For the sake of comparison,
-non-existing elements are considered to be infinite. The interesting
-property of a heap is that a[0] is always its smallest element.
-
-Usage:
-
-heap = [] # creates an empty heap
-heappush(heap, item) # pushes a new item on the heap
-item = heappop(heap) # pops the smallest item from the heap
-item = heap[0] # smallest item on the heap without popping it
-heapify(x) # transforms list into a heap, in-place, in linear time
-item = heapreplace(heap, item) # pops and returns smallest item, and adds
- # new item; the heap size is unchanged
-
-Our API differs from textbook heap algorithms as follows:
-
-- We use 0-based indexing. This makes the relationship between the
- index for a node and the indexes for its children slightly less
- obvious, but is more suitable since Python uses 0-based indexing.
-
-- Our heappop() method returns the smallest item, not the largest.
-
-These two make it possible to view the heap as a regular Python list
-without surprises: heap[0] is the smallest item, and heap.sort()
-maintains the heap invariant!
-"""
-
-# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
-
-__about__ = """Heap queues
-
-[explanation by François Pinard]
-
-Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
-all k, counting elements from 0. For the sake of comparison,
-non-existing elements are considered to be infinite. The interesting
-property of a heap is that a[0] is always its smallest element.
-
-The strange invariant above is meant to be an efficient memory
-representation for a tournament. The numbers below are `k', not a[k]:
-
- 0
-
- 1 2
-
- 3 4 5 6
-
- 7 8 9 10 11 12 13 14
-
- 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
-
-
-In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
-an usual binary tournament we see in sports, each cell is the winner
-over the two cells it tops, and we can trace the winner down the tree
-to see all opponents s/he had. However, in many computer applications
-of such tournaments, we do not need to trace the history of a winner.
-To be more memory efficient, when a winner is promoted, we try to
-replace it by something else at a lower level, and the rule becomes
-that a cell and the two cells it tops contain three different items,
-but the top cell "wins" over the two topped cells.
-
-If this heap invariant is protected at all time, index 0 is clearly
-the overall winner. The simplest algorithmic way to remove it and
-find the "next" winner is to move some loser (let's say cell 30 in the
-diagram above) into the 0 position, and then percolate this new 0 down
-the tree, exchanging values, until the invariant is re-established.
-This is clearly logarithmic on the total number of items in the tree.
-By iterating over all items, you get an O(n ln n) sort.
-
-A nice feature of this sort is that you can efficiently insert new
-items while the sort is going on, provided that the inserted items are
-not "better" than the last 0'th element you extracted. This is
-especially useful in simulation contexts, where the tree holds all
-incoming events, and the "win" condition means the smallest scheduled
-time. When an event schedule other events for execution, they are
-scheduled into the future, so they can easily go into the heap. So, a
-heap is a good structure for implementing schedulers (this is what I
-used for my MIDI sequencer :-).
-
-Various structures for implementing schedulers have been extensively
-studied, and heaps are good for this, as they are reasonably speedy,
-the speed is almost constant, and the worst case is not much different
-than the average case. However, there are other representations which
-are more efficient overall, yet the worst cases might be terrible.
-
-Heaps are also very useful in big disk sorts. You most probably all
-know that a big sort implies producing "runs" (which are pre-sorted
-sequences, which size is usually related to the amount of CPU memory),
-followed by a merging passes for these runs, which merging is often
-very cleverly organised[1]. It is very important that the initial
-sort produces the longest runs possible. Tournaments are a good way
-to that. If, using all the memory available to hold a tournament, you
-replace and percolate items that happen to fit the current run, you'll
-produce runs which are twice the size of the memory for random input,
-and much better for input fuzzily ordered.
-
-Moreover, if you output the 0'th item on disk and get an input which
-may not fit in the current tournament (because the value "wins" over
-the last output value), it cannot fit in the heap, so the size of the
-heap decreases. The freed memory could be cleverly reused immediately
-for progressively building a second heap, which grows at exactly the
-same rate the first heap is melting. When the first heap completely
-vanishes, you switch heaps and start a new run. Clever and quite
-effective!
-
-In a word, heaps are useful memory structures to know. I use them in
-a few applications, and I think it is good to keep a `heap' module
-around. :-)
-
---------------------
-[1] The disk balancing algorithms which are current, nowadays, are
-more annoying than clever, and this is a consequence of the seeking
-capabilities of the disks. On devices which cannot seek, like big
-tape drives, the story was quite different, and one had to be very
-clever to ensure (far in advance) that each tape movement will be the
-most effective possible (that is, will best participate at
-"progressing" the merge). Some tapes were even able to read
-backwards, and this was also used to avoid the rewinding time.
-Believe me, real good tape sorts were quite spectacular to watch!
-From all times, sorting has always been a Great Art! :-)
-"""
-
-__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'nlargest',
- 'nsmallest']
-
-from itertools import islice, repeat, count, imap, izip, tee
-from operator import itemgetter, neg
-import bisect
-
-def heappush(heap, item):
- """Push item onto heap, maintaining the heap invariant."""
- heap.append(item)
- _siftdown(heap, 0, len(heap)-1)
-
-def heappop(heap):
- """Pop the smallest item off the heap, maintaining the heap invariant."""
- lastelt = heap.pop() # raises appropriate IndexError if heap is empty
- if heap:
- returnitem = heap[0]
- heap[0] = lastelt
- _siftup(heap, 0)
- else:
- returnitem = lastelt
- return returnitem
-
-def heapreplace(heap, item):
- """Pop and return the current smallest value, and add the new item.
-
- This is more efficient than heappop() followed by heappush(), and can be
- more appropriate when using a fixed-size heap. Note that the value
- returned may be larger than item! That constrains reasonable uses of
- this routine unless written as part of a conditional replacement:
-
- if item > heap[0]:
- item = heapreplace(heap, item)
- """
- returnitem = heap[0] # raises appropriate IndexError if heap is empty
- heap[0] = item
- _siftup(heap, 0)
- return returnitem
-
-def heapify(x):
- """Transform list into a heap, in-place, in O(len(heap)) time."""
- n = len(x)
- # Transform bottom-up. The largest index there's any point to looking at
- # is the largest with a child index in-range, so must have 2*i + 1 < n,
- # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
- # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
- # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
- for i in reversed(xrange(n//2)):
- _siftup(x, i)
-
-def nlargest(n, iterable):
- """Find the n largest elements in a dataset.
-
- Equivalent to: sorted(iterable, reverse=True)[:n]
- """
- it = iter(iterable)
- result = list(islice(it, n))
- if not result:
- return result
- heapify(result)
- _heapreplace = heapreplace
- sol = result[0] # sol --> smallest of the nlargest
- for elem in it:
- if elem <= sol:
- continue
- _heapreplace(result, elem)
- sol = result[0]
- result.sort(reverse=True)
- return result
-
-def nsmallest(n, iterable):
- """Find the n smallest elements in a dataset.
-
- Equivalent to: sorted(iterable)[:n]
- """
- if hasattr(iterable, '__len__') and n * 10 <= len(iterable):
- # For smaller values of n, the bisect method is faster than a minheap.
- # It is also memory efficient, consuming only n elements of space.
- it = iter(iterable)
- result = sorted(islice(it, 0, n))
- if not result:
- return result
- insort = bisect.insort
- pop = result.pop
- los = result[-1] # los --> Largest of the nsmallest
- for elem in it:
- if los <= elem:
- continue
- insort(result, elem)
- pop()
- los = result[-1]
- return result
- # An alternative approach manifests the whole iterable in memory but
- # saves comparisons by heapifying all at once. Also, saves time
- # over bisect.insort() which has O(n) data movement time for every
- # insertion. Finding the n smallest of an m length iterable requires
- # O(m) + O(n log m) comparisons.
- h = list(iterable)
- heapify(h)
- return map(heappop, repeat(h, min(n, len(h))))
-
-# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
-# is the index of a leaf with a possibly out-of-order value. Restore the
-# heap invariant.
-def _siftdown(heap, startpos, pos):
- newitem = heap[pos]
- # Follow the path to the root, moving parents down until finding a place
- # newitem fits.
- while pos > startpos:
- parentpos = (pos - 1) >> 1
- parent = heap[parentpos]
- if parent <= newitem:
- break
- heap[pos] = parent
- pos = parentpos
- heap[pos] = newitem
-
-# The child indices of heap index pos are already heaps, and we want to make
-# a heap at index pos too. We do this by bubbling the smaller child of
-# pos up (and so on with that child's children, etc) until hitting a leaf,
-# then using _siftdown to move the oddball originally at index pos into place.
-#
-# We *could* break out of the loop as soon as we find a pos where newitem <=
-# both its children, but turns out that's not a good idea, and despite that
-# many books write the algorithm that way. During a heap pop, the last array
-# element is sifted in, and that tends to be large, so that comparing it
-# against values starting from the root usually doesn't pay (= usually doesn't
-# get us out of the loop early). See Knuth, Volume 3, where this is
-# explained and quantified in an exercise.
-#
-# Cutting the # of comparisons is important, since these routines have no
-# way to extract "the priority" from an array element, so that intelligence
-# is likely to be hiding in custom __cmp__ methods, or in array elements
-# storing (priority, record) tuples. Comparisons are thus potentially
-# expensive.
-#
-# On random arrays of length 1000, making this change cut the number of
-# comparisons made by heapify() a little, and those made by exhaustive
-# heappop() a lot, in accord with theory. Here are typical results from 3
-# runs (3 just to demonstrate how small the variance is):
-#
-# Compares needed by heapify Compares needed by 1000 heappops
-# -------------------------- --------------------------------
-# 1837 cut to 1663 14996 cut to 8680
-# 1855 cut to 1659 14966 cut to 8678
-# 1847 cut to 1660 15024 cut to 8703
-#
-# Building the heap by using heappush() 1000 times instead required
-# 2198, 2148, and 2219 compares: heapify() is more efficient, when
-# you can use it.
-#
-# The total compares needed by list.sort() on the same lists were 8627,
-# 8627, and 8632 (this should be compared to the sum of heapify() and
-# heappop() compares): list.sort() is (unsurprisingly!) more efficient
-# for sorting.
-
-def _siftup(heap, pos):
- endpos = len(heap)
- startpos = pos
- newitem = heap[pos]
- # Bubble up the smaller child until hitting a leaf.
- childpos = 2*pos + 1 # leftmost child position
- while childpos < endpos:
- # Set childpos to index of smaller child.
- rightpos = childpos + 1
- if rightpos < endpos and heap[rightpos] <= heap[childpos]:
- childpos = rightpos
- # Move the smaller child up.
- heap[pos] = heap[childpos]
- pos = childpos
- childpos = 2*pos + 1
- # The leaf at pos is empty now. Put newitem there, and bubble it up
- # to its final resting place (by sifting its parents down).
- heap[pos] = newitem
- _siftdown(heap, startpos, pos)
-
-# If available, use C implementation
-try:
- from _heapq import heappush, heappop, heapify, heapreplace, nlargest, nsmallest
-except ImportError:
- pass
-
-# Extend the implementations of nsmallest and nlargest to use a key= argument
-_nsmallest = nsmallest
-def nsmallest(n, iterable, key=None):
- """Find the n smallest elements in a dataset.
-
- Equivalent to: sorted(iterable, key=key)[:n]
- """
- in1, in2 = tee(iterable)
- it = izip(imap(key, in1), count(), in2) # decorate
- result = _nsmallest(n, it)
- return map(itemgetter(2), result) # undecorate
-
-_nlargest = nlargest
-def nlargest(n, iterable, key=None):
- """Find the n largest elements in a dataset.
-
- Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
- """
- in1, in2 = tee(iterable)
- it = izip(imap(key, in1), imap(neg, count()), in2) # decorate
- result = _nlargest(n, it)
- return map(itemgetter(2), result) # undecorate
-
-if __name__ == "__main__":
- # Simple sanity test
- heap = []
- data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
- for item in data:
- heappush(heap, item)
- sort = []
- while heap:
- sort.append(heappop(heap))
- print sort
diff --git a/sys/lib/python/hgext/__init__.py b/sys/lib/python/hgext/__init__.py
deleted file mode 100644
index fdffa2a0f..000000000
--- a/sys/lib/python/hgext/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# placeholder
diff --git a/sys/lib/python/hgext/acl.py b/sys/lib/python/hgext/acl.py
deleted file mode 100644
index f9b3927af..000000000
--- a/sys/lib/python/hgext/acl.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# acl.py - changeset access control for mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-#
-
-'''hooks for controlling repository access
-
-This hook makes it possible to allow or deny write access to portions
-of a repository when receiving incoming changesets.
-
-The authorization is matched based on the local user name on the
-system where the hook runs, and not the committer of the original
-changeset (since the latter is merely informative).
-
-The acl hook is best used along with a restricted shell like hgsh,
-preventing authenticating users from doing anything other than
-pushing or pulling. The hook is not safe to use if users have
-interactive shell access, as they can then disable the hook.
-Nor is it safe if remote users share an account, because then there
-is no way to distinguish them.
-
-To use this hook, configure the acl extension in your hgrc like this::
-
- [extensions]
- hgext.acl =
-
- [hooks]
- pretxnchangegroup.acl = python:hgext.acl.hook
-
- [acl]
- # Check whether the source of incoming changes is in this list
- # ("serve" == ssh or http, "push", "pull", "bundle")
- sources = serve
-
-The allow and deny sections take a subtree pattern as key (with a glob
-syntax by default), and a comma separated list of users as the
-corresponding value. The deny list is checked before the allow list
-is. ::
-
- [acl.allow]
- # If acl.allow is not present, all users are allowed by default.
- # An empty acl.allow section means no users allowed.
- docs/** = doc_writer
- .hgtags = release_engineer
-
- [acl.deny]
- # If acl.deny is not present, no users are refused by default.
- # An empty acl.deny section means all users allowed.
- glob pattern = user4, user5
- ** = user6
-'''
-
-from mercurial.i18n import _
-from mercurial import util, match
-import getpass, urllib
-
-def buildmatch(ui, repo, user, key):
- '''return tuple of (match function, list enabled).'''
- if not ui.has_section(key):
- ui.debug(_('acl: %s not enabled\n') % key)
- return None
-
- pats = [pat for pat, users in ui.configitems(key)
- if user in users.replace(',', ' ').split()]
- ui.debug(_('acl: %s enabled, %d entries for user %s\n') %
- (key, len(pats), user))
- if pats:
- return match.match(repo.root, '', pats)
- return match.exact(repo.root, '', [])
-
-
-def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
- if hooktype != 'pretxnchangegroup':
- raise util.Abort(_('config error - hook type "%s" cannot stop '
- 'incoming changesets') % hooktype)
- if source not in ui.config('acl', 'sources', 'serve').split():
- ui.debug(_('acl: changes have source "%s" - skipping\n') % source)
- return
-
- user = None
- if source == 'serve' and 'url' in kwargs:
- url = kwargs['url'].split(':')
- if url[0] == 'remote' and url[1].startswith('http'):
- user = urllib.unquote(url[3])
-
- if user is None:
- user = getpass.getuser()
-
- cfg = ui.config('acl', 'config')
- if cfg:
- ui.readconfig(cfg, sections = ['acl.allow', 'acl.deny'])
- allow = buildmatch(ui, repo, user, 'acl.allow')
- deny = buildmatch(ui, repo, user, 'acl.deny')
-
- for rev in xrange(repo[node], len(repo)):
- ctx = repo[rev]
- for f in ctx.files():
- if deny and deny(f):
- ui.debug(_('acl: user %s denied on %s\n') % (user, f))
- raise util.Abort(_('acl: access denied for changeset %s') % ctx)
- if allow and not allow(f):
- ui.debug(_('acl: user %s not allowed on %s\n') % (user, f))
- raise util.Abort(_('acl: access denied for changeset %s') % ctx)
- ui.debug(_('acl: allowing changeset %s\n') % ctx)
diff --git a/sys/lib/python/hgext/bookmarks.py b/sys/lib/python/hgext/bookmarks.py
deleted file mode 100644
index 58aaec4fa..000000000
--- a/sys/lib/python/hgext/bookmarks.py
+++ /dev/null
@@ -1,340 +0,0 @@
-# Mercurial extension to provide the 'hg bookmark' command
-#
-# Copyright 2008 David Soria Parra <dsp@php.net>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''track a line of development with movable markers
-
-Bookmarks are local movable markers to changesets. Every bookmark
-points to a changeset identified by its hash. If you commit a
-changeset that is based on a changeset that has a bookmark on it, the
-bookmark shifts to the new changeset.
-
-It is possible to use bookmark names in every revision lookup (e.g. hg
-merge, hg update).
-
-By default, when several bookmarks point to the same changeset, they
-will all move forward together. It is possible to obtain a more
-git-like experience by adding the following configuration option to
-your .hgrc::
-
- [bookmarks]
- track.current = True
-
-This will cause Mercurial to track the bookmark that you are currently
-using, and only update it. This is similar to git's approach to
-branching.
-'''
-
-from mercurial.i18n import _
-from mercurial.node import nullid, nullrev, hex, short
-from mercurial import util, commands, localrepo, repair, extensions
-import os
-
-def parse(repo):
- '''Parse .hg/bookmarks file and return a dictionary
-
- Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
- in the .hg/bookmarks file. They are read by the parse() method and
- returned as a dictionary with name => hash values.
-
- The parsed dictionary is cached until a write() operation is done.
- '''
- try:
- if repo._bookmarks:
- return repo._bookmarks
- repo._bookmarks = {}
- for line in repo.opener('bookmarks'):
- sha, refspec = line.strip().split(' ', 1)
- repo._bookmarks[refspec] = repo.lookup(sha)
- except:
- pass
- return repo._bookmarks
-
-def write(repo, refs):
- '''Write bookmarks
-
- Write the given bookmark => hash dictionary to the .hg/bookmarks file
- in a format equal to those of localtags.
-
- We also store a backup of the previous state in undo.bookmarks that
- can be copied back on rollback.
- '''
- if os.path.exists(repo.join('bookmarks')):
- util.copyfile(repo.join('bookmarks'), repo.join('undo.bookmarks'))
- if current(repo) not in refs:
- setcurrent(repo, None)
- wlock = repo.wlock()
- try:
- file = repo.opener('bookmarks', 'w', atomictemp=True)
- for refspec, node in refs.iteritems():
- file.write("%s %s\n" % (hex(node), refspec))
- file.rename()
- finally:
- wlock.release()
-
-def current(repo):
- '''Get the current bookmark
-
- If we use gittishsh branches we have a current bookmark that
- we are on. This function returns the name of the bookmark. It
- is stored in .hg/bookmarks.current
- '''
- if repo._bookmarkcurrent:
- return repo._bookmarkcurrent
- mark = None
- if os.path.exists(repo.join('bookmarks.current')):
- file = repo.opener('bookmarks.current')
- # No readline() in posixfile_nt, reading everything is cheap
- mark = (file.readlines() or [''])[0]
- if mark == '':
- mark = None
- file.close()
- repo._bookmarkcurrent = mark
- return mark
-
-def setcurrent(repo, mark):
- '''Set the name of the bookmark that we are currently on
-
- Set the name of the bookmark that we are on (hg update <bookmark>).
- The name is recorded in .hg/bookmarks.current
- '''
- if current(repo) == mark:
- return
-
- refs = parse(repo)
-
- # do not update if we do update to a rev equal to the current bookmark
- if (mark and mark not in refs and
- current(repo) and refs[current(repo)] == repo.changectx('.').node()):
- return
- if mark not in refs:
- mark = ''
- wlock = repo.wlock()
- try:
- file = repo.opener('bookmarks.current', 'w', atomictemp=True)
- file.write(mark)
- file.rename()
- finally:
- wlock.release()
- repo._bookmarkcurrent = mark
-
-def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
- '''track a line of development with movable markers
-
- Bookmarks are pointers to certain commits that move when
- committing. Bookmarks are local. They can be renamed, copied and
- deleted. It is possible to use bookmark names in 'hg merge' and
- 'hg update' to merge and update respectively to a given bookmark.
-
- You can use 'hg bookmark NAME' to set a bookmark on the working
- directory's parent revision with the given name. If you specify
- a revision using -r REV (where REV may be an existing bookmark),
- the bookmark is assigned to that revision.
- '''
- hexfn = ui.debugflag and hex or short
- marks = parse(repo)
- cur = repo.changectx('.').node()
-
- if rename:
- if rename not in marks:
- raise util.Abort(_("a bookmark of this name does not exist"))
- if mark in marks and not force:
- raise util.Abort(_("a bookmark of the same name already exists"))
- if mark is None:
- raise util.Abort(_("new bookmark name required"))
- marks[mark] = marks[rename]
- del marks[rename]
- if current(repo) == rename:
- setcurrent(repo, mark)
- write(repo, marks)
- return
-
- if delete:
- if mark is None:
- raise util.Abort(_("bookmark name required"))
- if mark not in marks:
- raise util.Abort(_("a bookmark of this name does not exist"))
- if mark == current(repo):
- setcurrent(repo, None)
- del marks[mark]
- write(repo, marks)
- return
-
- if mark != None:
- if "\n" in mark:
- raise util.Abort(_("bookmark name cannot contain newlines"))
- mark = mark.strip()
- if mark in marks and not force:
- raise util.Abort(_("a bookmark of the same name already exists"))
- if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
- and not force):
- raise util.Abort(
- _("a bookmark cannot have the name of an existing branch"))
- if rev:
- marks[mark] = repo.lookup(rev)
- else:
- marks[mark] = repo.changectx('.').node()
- setcurrent(repo, mark)
- write(repo, marks)
- return
-
- if mark is None:
- if rev:
- raise util.Abort(_("bookmark name required"))
- if len(marks) == 0:
- ui.status("no bookmarks set\n")
- else:
- for bmark, n in marks.iteritems():
- if ui.configbool('bookmarks', 'track.current'):
- prefix = (bmark == current(repo) and n == cur) and '*' or ' '
- else:
- prefix = (n == cur) and '*' or ' '
-
- ui.write(" %s %-25s %d:%s\n" % (
- prefix, bmark, repo.changelog.rev(n), hexfn(n)))
- return
-
-def _revstostrip(changelog, node):
- srev = changelog.rev(node)
- tostrip = [srev]
- saveheads = []
- for r in xrange(srev, len(changelog)):
- parents = changelog.parentrevs(r)
- if parents[0] in tostrip or parents[1] in tostrip:
- tostrip.append(r)
- if parents[1] != nullrev:
- for p in parents:
- if p not in tostrip and p > srev:
- saveheads.append(p)
- return [r for r in tostrip if r not in saveheads]
-
-def strip(oldstrip, ui, repo, node, backup="all"):
- """Strip bookmarks if revisions are stripped using
- the mercurial.strip method. This usually happens during
- qpush and qpop"""
- revisions = _revstostrip(repo.changelog, node)
- marks = parse(repo)
- update = []
- for mark, n in marks.iteritems():
- if repo.changelog.rev(n) in revisions:
- update.append(mark)
- oldstrip(ui, repo, node, backup)
- if len(update) > 0:
- for m in update:
- marks[m] = repo.changectx('.').node()
- write(repo, marks)
-
-def reposetup(ui, repo):
- if not isinstance(repo, localrepo.localrepository):
- return
-
- # init a bookmark cache as otherwise we would get a infinite reading
- # in lookup()
- repo._bookmarks = None
- repo._bookmarkcurrent = None
-
- class bookmark_repo(repo.__class__):
- def rollback(self):
- if os.path.exists(self.join('undo.bookmarks')):
- util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
- return super(bookmark_repo, self).rollback()
-
- def lookup(self, key):
- if self._bookmarks is None:
- self._bookmarks = parse(self)
- if key in self._bookmarks:
- key = self._bookmarks[key]
- return super(bookmark_repo, self).lookup(key)
-
- def commitctx(self, ctx, error=False):
- """Add a revision to the repository and
- move the bookmark"""
- wlock = self.wlock() # do both commit and bookmark with lock held
- try:
- node = super(bookmark_repo, self).commitctx(ctx, error)
- if node is None:
- return None
- parents = self.changelog.parents(node)
- if parents[1] == nullid:
- parents = (parents[0],)
- marks = parse(self)
- update = False
- if ui.configbool('bookmarks', 'track.current'):
- mark = current(self)
- if mark and marks[mark] in parents:
- marks[mark] = node
- update = True
- else:
- for mark, n in marks.items():
- if n in parents:
- marks[mark] = node
- update = True
- if update:
- write(self, marks)
- return node
- finally:
- wlock.release()
-
- def addchangegroup(self, source, srctype, url, emptyok=False):
- parents = self.dirstate.parents()
-
- result = super(bookmark_repo, self).addchangegroup(
- source, srctype, url, emptyok)
- if result > 1:
- # We have more heads than before
- return result
- node = self.changelog.tip()
- marks = parse(self)
- update = False
- if ui.configbool('bookmarks', 'track.current'):
- mark = current(self)
- if mark and marks[mark] in parents:
- marks[mark] = node
- update = True
- else:
- for mark, n in marks.items():
- if n in parents:
- marks[mark] = node
- update = True
- if update:
- write(self, marks)
- return result
-
- def _findtags(self):
- """Merge bookmarks with normal tags"""
- (tags, tagtypes) = super(bookmark_repo, self)._findtags()
- tags.update(parse(self))
- return (tags, tagtypes)
-
- repo.__class__ = bookmark_repo
-
-def uisetup(ui):
- extensions.wrapfunction(repair, "strip", strip)
- if ui.configbool('bookmarks', 'track.current'):
- extensions.wrapcommand(commands.table, 'update', updatecurbookmark)
-
-def updatecurbookmark(orig, ui, repo, *args, **opts):
- '''Set the current bookmark
-
- If the user updates to a bookmark we update the .hg/bookmarks.current
- file.
- '''
- res = orig(ui, repo, *args, **opts)
- rev = opts['rev']
- if not rev and len(args) > 0:
- rev = args[0]
- setcurrent(repo, rev)
- return res
-
-cmdtable = {
- "bookmarks":
- (bookmark,
- [('f', 'force', False, _('force')),
- ('r', 'rev', '', _('revision')),
- ('d', 'delete', False, _('delete a given bookmark')),
- ('m', 'rename', '', _('rename a given bookmark'))],
- _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
-}
diff --git a/sys/lib/python/hgext/bugzilla.py b/sys/lib/python/hgext/bugzilla.py
deleted file mode 100644
index 774ed3385..000000000
--- a/sys/lib/python/hgext/bugzilla.py
+++ /dev/null
@@ -1,439 +0,0 @@
-# bugzilla.py - bugzilla integration for mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''hooks for integrating with the Bugzilla bug tracker
-
-This hook extension adds comments on bugs in Bugzilla when changesets
-that refer to bugs by Bugzilla ID are seen. The hook does not change
-bug status.
-
-The hook updates the Bugzilla database directly. Only Bugzilla
-installations using MySQL are supported.
-
-The hook relies on a Bugzilla script to send bug change notification
-emails. That script changes between Bugzilla versions; the
-'processmail' script used prior to 2.18 is replaced in 2.18 and
-subsequent versions by 'config/sendbugmail.pl'. Note that these will
-be run by Mercurial as the user pushing the change; you will need to
-ensure the Bugzilla install file permissions are set appropriately.
-
-The extension is configured through three different configuration
-sections. These keys are recognized in the [bugzilla] section:
-
-host
- Hostname of the MySQL server holding the Bugzilla database.
-
-db
- Name of the Bugzilla database in MySQL. Default 'bugs'.
-
-user
- Username to use to access MySQL server. Default 'bugs'.
-
-password
- Password to use to access MySQL server.
-
-timeout
- Database connection timeout (seconds). Default 5.
-
-version
- Bugzilla version. Specify '3.0' for Bugzilla versions 3.0 and later,
- '2.18' for Bugzilla versions from 2.18 and '2.16' for versions prior
- to 2.18.
-
-bzuser
- Fallback Bugzilla user name to record comments with, if changeset
- committer cannot be found as a Bugzilla user.
-
-bzdir
- Bugzilla install directory. Used by default notify. Default
- '/var/www/html/bugzilla'.
-
-notify
- The command to run to get Bugzilla to send bug change notification
- emails. Substitutes from a map with 3 keys, 'bzdir', 'id' (bug id)
- and 'user' (committer bugzilla email). Default depends on version;
- from 2.18 it is "cd %(bzdir)s && perl -T contrib/sendbugmail.pl
- %(id)s %(user)s".
-
-regexp
- Regular expression to match bug IDs in changeset commit message.
- Must contain one "()" group. The default expression matches 'Bug
- 1234', 'Bug no. 1234', 'Bug number 1234', 'Bugs 1234,5678', 'Bug
- 1234 and 5678' and variations thereof. Matching is case insensitive.
-
-style
- The style file to use when formatting comments.
-
-template
- Template to use when formatting comments. Overrides style if
- specified. In addition to the usual Mercurial keywords, the
- extension specifies::
-
- {bug} The Bugzilla bug ID.
- {root} The full pathname of the Mercurial repository.
- {webroot} Stripped pathname of the Mercurial repository.
- {hgweb} Base URL for browsing Mercurial repositories.
-
- Default 'changeset {node|short} in repo {root} refers '
- 'to bug {bug}.\\ndetails:\\n\\t{desc|tabindent}'
-
-strip
- The number of slashes to strip from the front of {root} to produce
- {webroot}. Default 0.
-
-usermap
- Path of file containing Mercurial committer ID to Bugzilla user ID
- mappings. If specified, the file should contain one mapping per
- line, "committer"="Bugzilla user". See also the [usermap] section.
-
-The [usermap] section is used to specify mappings of Mercurial
-committer ID to Bugzilla user ID. See also [bugzilla].usermap.
-"committer"="Bugzilla user"
-
-Finally, the [web] section supports one entry:
-
-baseurl
- Base URL for browsing Mercurial repositories. Reference from
- templates as {hgweb}.
-
-Activating the extension::
-
- [extensions]
- hgext.bugzilla =
-
- [hooks]
- # run bugzilla hook on every change pulled or pushed in here
- incoming.bugzilla = python:hgext.bugzilla.hook
-
-Example configuration:
-
-This example configuration is for a collection of Mercurial
-repositories in /var/local/hg/repos/ used with a local Bugzilla 3.2
-installation in /opt/bugzilla-3.2. ::
-
- [bugzilla]
- host=localhost
- password=XYZZY
- version=3.0
- bzuser=unknown@domain.com
- bzdir=/opt/bugzilla-3.2
- template=Changeset {node|short} in {root|basename}.
- {hgweb}/{webroot}/rev/{node|short}\\n
- {desc}\\n
- strip=5
-
- [web]
- baseurl=http://dev.domain.com/hg
-
- [usermap]
- user@emaildomain.com=user.name@bugzilladomain.com
-
-Commits add a comment to the Bugzilla bug record of the form::
-
- Changeset 3b16791d6642 in repository-name.
- http://dev.domain.com/hg/repository-name/rev/3b16791d6642
-
- Changeset commit comment. Bug 1234.
-'''
-
-from mercurial.i18n import _
-from mercurial.node import short
-from mercurial import cmdutil, templater, util
-import re, time
-
-MySQLdb = None
-
-def buglist(ids):
- return '(' + ','.join(map(str, ids)) + ')'
-
-class bugzilla_2_16(object):
- '''support for bugzilla version 2.16.'''
-
- def __init__(self, ui):
- self.ui = ui
- host = self.ui.config('bugzilla', 'host', 'localhost')
- user = self.ui.config('bugzilla', 'user', 'bugs')
- passwd = self.ui.config('bugzilla', 'password')
- db = self.ui.config('bugzilla', 'db', 'bugs')
- timeout = int(self.ui.config('bugzilla', 'timeout', 5))
- usermap = self.ui.config('bugzilla', 'usermap')
- if usermap:
- self.ui.readconfig(usermap, sections=['usermap'])
- self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
- (host, db, user, '*' * len(passwd)))
- self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd,
- db=db, connect_timeout=timeout)
- self.cursor = self.conn.cursor()
- self.longdesc_id = self.get_longdesc_id()
- self.user_ids = {}
- self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
-
- def run(self, *args, **kwargs):
- '''run a query.'''
- self.ui.note(_('query: %s %s\n') % (args, kwargs))
- try:
- self.cursor.execute(*args, **kwargs)
- except MySQLdb.MySQLError:
- self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
- raise
-
- def get_longdesc_id(self):
- '''get identity of longdesc field'''
- self.run('select fieldid from fielddefs where name = "longdesc"')
- ids = self.cursor.fetchall()
- if len(ids) != 1:
- raise util.Abort(_('unknown database schema'))
- return ids[0][0]
-
- def filter_real_bug_ids(self, ids):
- '''filter not-existing bug ids from list.'''
- self.run('select bug_id from bugs where bug_id in %s' % buglist(ids))
- return sorted([c[0] for c in self.cursor.fetchall()])
-
- def filter_unknown_bug_ids(self, node, ids):
- '''filter bug ids from list that already refer to this changeset.'''
-
- self.run('''select bug_id from longdescs where
- bug_id in %s and thetext like "%%%s%%"''' %
- (buglist(ids), short(node)))
- unknown = set(ids)
- for (id,) in self.cursor.fetchall():
- self.ui.status(_('bug %d already knows about changeset %s\n') %
- (id, short(node)))
- unknown.discard(id)
- return sorted(unknown)
-
- def notify(self, ids, committer):
- '''tell bugzilla to send mail.'''
-
- self.ui.status(_('telling bugzilla to send mail:\n'))
- (user, userid) = self.get_bugzilla_user(committer)
- for id in ids:
- self.ui.status(_(' bug %s\n') % id)
- cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
- bzdir = self.ui.config('bugzilla', 'bzdir', '/var/www/html/bugzilla')
- try:
- # Backwards-compatible with old notify string, which
- # took one string. This will throw with a new format
- # string.
- cmd = cmdfmt % id
- except TypeError:
- cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
- self.ui.note(_('running notify command %s\n') % cmd)
- fp = util.popen('(%s) 2>&1' % cmd)
- out = fp.read()
- ret = fp.close()
- if ret:
- self.ui.warn(out)
- raise util.Abort(_('bugzilla notify command %s') %
- util.explain_exit(ret)[0])
- self.ui.status(_('done\n'))
-
- def get_user_id(self, user):
- '''look up numeric bugzilla user id.'''
- try:
- return self.user_ids[user]
- except KeyError:
- try:
- userid = int(user)
- except ValueError:
- self.ui.note(_('looking up user %s\n') % user)
- self.run('''select userid from profiles
- where login_name like %s''', user)
- all = self.cursor.fetchall()
- if len(all) != 1:
- raise KeyError(user)
- userid = int(all[0][0])
- self.user_ids[user] = userid
- return userid
-
- def map_committer(self, user):
- '''map name of committer to bugzilla user name.'''
- for committer, bzuser in self.ui.configitems('usermap'):
- if committer.lower() == user.lower():
- return bzuser
- return user
-
- def get_bugzilla_user(self, committer):
- '''see if committer is a registered bugzilla user. Return
- bugzilla username and userid if so. If not, return default
- bugzilla username and userid.'''
- user = self.map_committer(committer)
- try:
- userid = self.get_user_id(user)
- except KeyError:
- try:
- defaultuser = self.ui.config('bugzilla', 'bzuser')
- if not defaultuser:
- raise util.Abort(_('cannot find bugzilla user id for %s') %
- user)
- userid = self.get_user_id(defaultuser)
- user = defaultuser
- except KeyError:
- raise util.Abort(_('cannot find bugzilla user id for %s or %s') %
- (user, defaultuser))
- return (user, userid)
-
- def add_comment(self, bugid, text, committer):
- '''add comment to bug. try adding comment as committer of
- changeset, otherwise as default bugzilla user.'''
- (user, userid) = self.get_bugzilla_user(committer)
- now = time.strftime('%Y-%m-%d %H:%M:%S')
- self.run('''insert into longdescs
- (bug_id, who, bug_when, thetext)
- values (%s, %s, %s, %s)''',
- (bugid, userid, now, text))
- self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
- values (%s, %s, %s, %s)''',
- (bugid, userid, now, self.longdesc_id))
- self.conn.commit()
-
-class bugzilla_2_18(bugzilla_2_16):
- '''support for bugzilla 2.18 series.'''
-
- def __init__(self, ui):
- bugzilla_2_16.__init__(self, ui)
- self.default_notify = "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
-
-class bugzilla_3_0(bugzilla_2_18):
- '''support for bugzilla 3.0 series.'''
-
- def __init__(self, ui):
- bugzilla_2_18.__init__(self, ui)
-
- def get_longdesc_id(self):
- '''get identity of longdesc field'''
- self.run('select id from fielddefs where name = "longdesc"')
- ids = self.cursor.fetchall()
- if len(ids) != 1:
- raise util.Abort(_('unknown database schema'))
- return ids[0][0]
-
-class bugzilla(object):
- # supported versions of bugzilla. different versions have
- # different schemas.
- _versions = {
- '2.16': bugzilla_2_16,
- '2.18': bugzilla_2_18,
- '3.0': bugzilla_3_0
- }
-
- _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
- r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)')
-
- _bz = None
-
- def __init__(self, ui, repo):
- self.ui = ui
- self.repo = repo
-
- def bz(self):
- '''return object that knows how to talk to bugzilla version in
- use.'''
-
- if bugzilla._bz is None:
- bzversion = self.ui.config('bugzilla', 'version')
- try:
- bzclass = bugzilla._versions[bzversion]
- except KeyError:
- raise util.Abort(_('bugzilla version %s not supported') %
- bzversion)
- bugzilla._bz = bzclass(self.ui)
- return bugzilla._bz
-
- def __getattr__(self, key):
- return getattr(self.bz(), key)
-
- _bug_re = None
- _split_re = None
-
- def find_bug_ids(self, ctx):
- '''find valid bug ids that are referred to in changeset
- comments and that do not already have references to this
- changeset.'''
-
- if bugzilla._bug_re is None:
- bugzilla._bug_re = re.compile(
- self.ui.config('bugzilla', 'regexp', bugzilla._default_bug_re),
- re.IGNORECASE)
- bugzilla._split_re = re.compile(r'\D+')
- start = 0
- ids = set()
- while True:
- m = bugzilla._bug_re.search(ctx.description(), start)
- if not m:
- break
- start = m.end()
- for id in bugzilla._split_re.split(m.group(1)):
- if not id: continue
- ids.add(int(id))
- if ids:
- ids = self.filter_real_bug_ids(ids)
- if ids:
- ids = self.filter_unknown_bug_ids(ctx.node(), ids)
- return ids
-
- def update(self, bugid, ctx):
- '''update bugzilla bug with reference to changeset.'''
-
- def webroot(root):
- '''strip leading prefix of repo root and turn into
- url-safe path.'''
- count = int(self.ui.config('bugzilla', 'strip', 0))
- root = util.pconvert(root)
- while count > 0:
- c = root.find('/')
- if c == -1:
- break
- root = root[c+1:]
- count -= 1
- return root
-
- mapfile = self.ui.config('bugzilla', 'style')
- tmpl = self.ui.config('bugzilla', 'template')
- t = cmdutil.changeset_templater(self.ui, self.repo,
- False, None, mapfile, False)
- if not mapfile and not tmpl:
- tmpl = _('changeset {node|short} in repo {root} refers '
- 'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
- if tmpl:
- tmpl = templater.parsestring(tmpl, quoted=False)
- t.use_template(tmpl)
- self.ui.pushbuffer()
- t.show(ctx, changes=ctx.changeset(),
- bug=str(bugid),
- hgweb=self.ui.config('web', 'baseurl'),
- root=self.repo.root,
- webroot=webroot(self.repo.root))
- data = self.ui.popbuffer()
- self.add_comment(bugid, data, util.email(ctx.user()))
-
-def hook(ui, repo, hooktype, node=None, **kwargs):
- '''add comment to bugzilla for each changeset that refers to a
- bugzilla bug id. only add a comment once per bug, so same change
- seen multiple times does not fill bug with duplicate data.'''
- try:
- import MySQLdb as mysql
- global MySQLdb
- MySQLdb = mysql
- except ImportError, err:
- raise util.Abort(_('python mysql support not available: %s') % err)
-
- if node is None:
- raise util.Abort(_('hook type %s does not pass a changeset id') %
- hooktype)
- try:
- bz = bugzilla(ui, repo)
- ctx = repo[node]
- ids = bz.find_bug_ids(ctx)
- if ids:
- for id in ids:
- bz.update(id, ctx)
- bz.notify(ids, util.email(ctx.user()))
- except MySQLdb.MySQLError, err:
- raise util.Abort(_('database error: %s') % err[1])
-
diff --git a/sys/lib/python/hgext/children.py b/sys/lib/python/hgext/children.py
deleted file mode 100644
index 35ddeca43..000000000
--- a/sys/lib/python/hgext/children.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Mercurial extension to provide the 'hg children' command
-#
-# Copyright 2007 by Intevation GmbH <intevation@intevation.de>
-#
-# Author(s):
-# Thomas Arendsen Hein <thomas@intevation.de>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''command to display child changesets'''
-
-from mercurial import cmdutil
-from mercurial.commands import templateopts
-from mercurial.i18n import _
-
-
-def children(ui, repo, file_=None, **opts):
- """show the children of the given or working directory revision
-
- Print the children of the working directory's revisions. If a
- revision is given via -r/--rev, the children of that revision will
- be printed. If a file argument is given, revision in which the
- file was last changed (after the working directory revision or the
- argument to --rev if given) is printed.
- """
- rev = opts.get('rev')
- if file_:
- ctx = repo.filectx(file_, changeid=rev)
- else:
- ctx = repo[rev]
-
- displayer = cmdutil.show_changeset(ui, repo, opts)
- for cctx in ctx.children():
- displayer.show(cctx)
-
-
-cmdtable = {
- "children":
- (children,
- [('r', 'rev', '', _('show children of the specified revision')),
- ] + templateopts,
- _('hg children [-r REV] [FILE]')),
-}
diff --git a/sys/lib/python/hgext/churn.py b/sys/lib/python/hgext/churn.py
deleted file mode 100644
index 930009ab3..000000000
--- a/sys/lib/python/hgext/churn.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# churn.py - create a graph of revisions count grouped by template
-#
-# Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
-# Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''command to display statistics about repository history'''
-
-from mercurial.i18n import _
-from mercurial import patch, cmdutil, util, templater
-import sys, os
-import time, datetime
-
-def maketemplater(ui, repo, tmpl):
- tmpl = templater.parsestring(tmpl, quoted=False)
- try:
- t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
- except SyntaxError, inst:
- raise util.Abort(inst.args[0])
- t.use_template(tmpl)
- return t
-
-def changedlines(ui, repo, ctx1, ctx2, fns):
- lines = 0
- fmatch = cmdutil.matchfiles(repo, fns)
- diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
- for l in diff.split('\n'):
- if (l.startswith("+") and not l.startswith("+++ ") or
- l.startswith("-") and not l.startswith("--- ")):
- lines += 1
- return lines
-
-def countrate(ui, repo, amap, *pats, **opts):
- """Calculate stats"""
- if opts.get('dateformat'):
- def getkey(ctx):
- t, tz = ctx.date()
- date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
- return date.strftime(opts['dateformat'])
- else:
- tmpl = opts.get('template', '{author|email}')
- tmpl = maketemplater(ui, repo, tmpl)
- def getkey(ctx):
- ui.pushbuffer()
- tmpl.show(ctx)
- return ui.popbuffer()
-
- count = pct = 0
- rate = {}
- df = False
- if opts.get('date'):
- df = util.matchdate(opts['date'])
-
- get = util.cachefunc(lambda r: repo[r].changeset())
- changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
- for st, rev, fns in changeiter:
- if not st == 'add':
- continue
- if df and not df(get(rev)[2][0]): # doesn't match date format
- continue
-
- ctx = repo[rev]
- key = getkey(ctx)
- key = amap.get(key, key) # alias remap
- if opts.get('changesets'):
- rate[key] = rate.get(key, 0) + 1
- else:
- parents = ctx.parents()
- if len(parents) > 1:
- ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
- continue
-
- ctx1 = parents[0]
- lines = changedlines(ui, repo, ctx1, ctx, fns)
- rate[key] = rate.get(key, 0) + lines
-
- if opts.get('progress'):
- count += 1
- newpct = int(100.0 * count / max(len(repo), 1))
- if pct < newpct:
- pct = newpct
- ui.write("\r" + _("generating stats: %d%%") % pct)
- sys.stdout.flush()
-
- if opts.get('progress'):
- ui.write("\r")
- sys.stdout.flush()
-
- return rate
-
-
-def churn(ui, repo, *pats, **opts):
- '''histogram of changes to the repository
-
- This command will display a histogram representing the number
- of changed lines or revisions, grouped according to the given
- template. The default template will group changes by author.
- The --dateformat option may be used to group the results by
- date instead.
-
- Statistics are based on the number of changed lines, or
- alternatively the number of matching revisions if the
- --changesets option is specified.
-
- Examples::
-
- # display count of changed lines for every committer
- hg churn -t '{author|email}'
-
- # display daily activity graph
- hg churn -f '%H' -s -c
-
- # display activity of developers by month
- hg churn -f '%Y-%m' -s -c
-
- # display count of lines changed in every year
- hg churn -f '%Y' -s
-
- It is possible to map alternate email addresses to a main address
- by providing a file using the following format::
-
- <alias email> <actual email>
-
- Such a file may be specified with the --aliases option, otherwise
- a .hgchurn file will be looked for in the working directory root.
- '''
- def pad(s, l):
- return (s + " " * l)[:l]
-
- amap = {}
- aliases = opts.get('aliases')
- if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
- aliases = repo.wjoin('.hgchurn')
- if aliases:
- for l in open(aliases, "r"):
- l = l.strip()
- alias, actual = l.split()
- amap[alias] = actual
-
- rate = countrate(ui, repo, amap, *pats, **opts).items()
- if not rate:
- return
-
- sortkey = ((not opts.get('sort')) and (lambda x: -x[1]) or None)
- rate.sort(key=sortkey)
-
- maxcount = float(max([v for k, v in rate]))
- maxname = max([len(k) for k, v in rate])
-
- ttywidth = util.termwidth()
- ui.debug(_("assuming %i character terminal\n") % ttywidth)
- width = ttywidth - maxname - 2 - 6 - 2 - 2
-
- for date, count in rate:
- print "%s %6d %s" % (pad(date, maxname), count,
- "*" * int(count * width / maxcount))
-
-
-cmdtable = {
- "churn":
- (churn,
- [('r', 'rev', [], _('count rate for the specified revision or range')),
- ('d', 'date', '', _('count rate for revisions matching date spec')),
- ('t', 'template', '{author|email}', _('template to group changesets')),
- ('f', 'dateformat', '',
- _('strftime-compatible format for grouping by date')),
- ('c', 'changesets', False, _('count rate by number of changesets')),
- ('s', 'sort', False, _('sort by key (default: sort by count)')),
- ('', 'aliases', '', _('file with email aliases')),
- ('', 'progress', None, _('show progress'))],
- _("hg churn [-d DATE] [-r REV] [--aliases FILE] [--progress] [FILE]")),
-}
diff --git a/sys/lib/python/hgext/color.py b/sys/lib/python/hgext/color.py
deleted file mode 100644
index 4a736db43..000000000
--- a/sys/lib/python/hgext/color.py
+++ /dev/null
@@ -1,286 +0,0 @@
-# color.py color output for the status and qseries commands
-#
-# Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation; either version 2 of the License, or (at your
-# option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
-# Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-'''colorize output from some commands
-
-This extension modifies the status command to add color to its output
-to reflect file status, the qseries command to add color to reflect
-patch status (applied, unapplied, missing), and to diff-related
-commands to highlight additions, removals, diff headers, and trailing
-whitespace.
-
-Other effects in addition to color, like bold and underlined text, are
-also available. Effects are rendered with the ECMA-48 SGR control
-function (aka ANSI escape codes). This module also provides the
-render_text function, which can be used to add effects to any text.
-
-Default effects may be overridden from the .hgrc file::
-
- [color]
- status.modified = blue bold underline red_background
- status.added = green bold
- status.removed = red bold blue_background
- status.deleted = cyan bold underline
- status.unknown = magenta bold underline
- status.ignored = black bold
-
- # 'none' turns off all effects
- status.clean = none
- status.copied = none
-
- qseries.applied = blue bold underline
- qseries.unapplied = black bold
- qseries.missing = red bold
-
- diff.diffline = bold
- diff.extended = cyan bold
- diff.file_a = red bold
- diff.file_b = green bold
- diff.hunk = magenta
- diff.deleted = red
- diff.inserted = green
- diff.changed = white
- diff.trailingwhitespace = bold red_background
-'''
-
-import os, sys
-import itertools
-
-from mercurial import cmdutil, commands, extensions, error
-from mercurial.i18n import _
-
-# start and stop parameters for effects
-_effect_params = {'none': 0,
- 'black': 30,
- 'red': 31,
- 'green': 32,
- 'yellow': 33,
- 'blue': 34,
- 'magenta': 35,
- 'cyan': 36,
- 'white': 37,
- 'bold': 1,
- 'italic': 3,
- 'underline': 4,
- 'inverse': 7,
- 'black_background': 40,
- 'red_background': 41,
- 'green_background': 42,
- 'yellow_background': 43,
- 'blue_background': 44,
- 'purple_background': 45,
- 'cyan_background': 46,
- 'white_background': 47}
-
-def render_effects(text, effects):
- 'Wrap text in commands to turn on each effect.'
- start = [str(_effect_params[e]) for e in ['none'] + effects]
- start = '\033[' + ';'.join(start) + 'm'
- stop = '\033[' + str(_effect_params['none']) + 'm'
- return ''.join([start, text, stop])
-
-def colorstatus(orig, ui, repo, *pats, **opts):
- '''run the status command with colored output'''
-
- delimiter = opts['print0'] and '\0' or '\n'
-
- nostatus = opts.get('no_status')
- opts['no_status'] = False
- # run status and capture its output
- ui.pushbuffer()
- retval = orig(ui, repo, *pats, **opts)
- # filter out empty strings
- lines_with_status = [ line for line in ui.popbuffer().split(delimiter) if line ]
-
- if nostatus:
- lines = [l[2:] for l in lines_with_status]
- else:
- lines = lines_with_status
-
- # apply color to output and display it
- for i in xrange(len(lines)):
- status = _status_abbreviations[lines_with_status[i][0]]
- effects = _status_effects[status]
- if effects:
- lines[i] = render_effects(lines[i], effects)
- ui.write(lines[i] + delimiter)
- return retval
-
-_status_abbreviations = { 'M': 'modified',
- 'A': 'added',
- 'R': 'removed',
- '!': 'deleted',
- '?': 'unknown',
- 'I': 'ignored',
- 'C': 'clean',
- ' ': 'copied', }
-
-_status_effects = { 'modified': ['blue', 'bold'],
- 'added': ['green', 'bold'],
- 'removed': ['red', 'bold'],
- 'deleted': ['cyan', 'bold', 'underline'],
- 'unknown': ['magenta', 'bold', 'underline'],
- 'ignored': ['black', 'bold'],
- 'clean': ['none'],
- 'copied': ['none'], }
-
-def colorqseries(orig, ui, repo, *dummy, **opts):
- '''run the qseries command with colored output'''
- ui.pushbuffer()
- retval = orig(ui, repo, **opts)
- patchlines = ui.popbuffer().splitlines()
- patchnames = repo.mq.series
-
- for patch, patchname in itertools.izip(patchlines, patchnames):
- if opts['missing']:
- effects = _patch_effects['missing']
- # Determine if patch is applied.
- elif [ applied for applied in repo.mq.applied
- if patchname == applied.name ]:
- effects = _patch_effects['applied']
- else:
- effects = _patch_effects['unapplied']
-
- patch = patch.replace(patchname, render_effects(patchname, effects), 1)
- ui.write(patch + '\n')
- return retval
-
-_patch_effects = { 'applied': ['blue', 'bold', 'underline'],
- 'missing': ['red', 'bold'],
- 'unapplied': ['black', 'bold'], }
-
-def colorwrap(orig, s):
- '''wrap ui.write for colored diff output'''
- lines = s.split('\n')
- for i, line in enumerate(lines):
- stripline = line
- if line and line[0] in '+-':
- # highlight trailing whitespace, but only in changed lines
- stripline = line.rstrip()
- for prefix, style in _diff_prefixes:
- if stripline.startswith(prefix):
- lines[i] = render_effects(stripline, _diff_effects[style])
- break
- if line != stripline:
- lines[i] += render_effects(
- line[len(stripline):], _diff_effects['trailingwhitespace'])
- orig('\n'.join(lines))
-
-def colorshowpatch(orig, self, node):
- '''wrap cmdutil.changeset_printer.showpatch with colored output'''
- oldwrite = extensions.wrapfunction(self.ui, 'write', colorwrap)
- try:
- orig(self, node)
- finally:
- self.ui.write = oldwrite
-
-def colordiff(orig, ui, repo, *pats, **opts):
- '''run the diff command with colored output'''
- oldwrite = extensions.wrapfunction(ui, 'write', colorwrap)
- try:
- orig(ui, repo, *pats, **opts)
- finally:
- ui.write = oldwrite
-
-_diff_prefixes = [('diff', 'diffline'),
- ('copy', 'extended'),
- ('rename', 'extended'),
- ('old', 'extended'),
- ('new', 'extended'),
- ('deleted', 'extended'),
- ('---', 'file_a'),
- ('+++', 'file_b'),
- ('@', 'hunk'),
- ('-', 'deleted'),
- ('+', 'inserted')]
-
-_diff_effects = {'diffline': ['bold'],
- 'extended': ['cyan', 'bold'],
- 'file_a': ['red', 'bold'],
- 'file_b': ['green', 'bold'],
- 'hunk': ['magenta'],
- 'deleted': ['red'],
- 'inserted': ['green'],
- 'changed': ['white'],
- 'trailingwhitespace': ['bold', 'red_background']}
-
-_ui = None
-
-def uisetup(ui):
- '''Initialize the extension.'''
- global _ui
- _ui = ui
- _setupcmd(ui, 'diff', commands.table, colordiff, _diff_effects)
- _setupcmd(ui, 'incoming', commands.table, None, _diff_effects)
- _setupcmd(ui, 'log', commands.table, None, _diff_effects)
- _setupcmd(ui, 'outgoing', commands.table, None, _diff_effects)
- _setupcmd(ui, 'tip', commands.table, None, _diff_effects)
- _setupcmd(ui, 'status', commands.table, colorstatus, _status_effects)
-
-def extsetup():
- try:
- mq = extensions.find('mq')
- try:
- # If we are loaded after mq, we must wrap commands.table
- _setupcmd(_ui, 'qdiff', commands.table, colordiff, _diff_effects)
- _setupcmd(_ui, 'qseries', commands.table, colorqseries, _patch_effects)
- except error.UnknownCommand:
- # Otherwise we wrap mq.cmdtable
- _setupcmd(_ui, 'qdiff', mq.cmdtable, colordiff, _diff_effects)
- _setupcmd(_ui, 'qseries', mq.cmdtable, colorqseries, _patch_effects)
- except KeyError:
- # The mq extension is not enabled
- pass
-
-def _setupcmd(ui, cmd, table, func, effectsmap):
- '''patch in command to command table and load effect map'''
- def nocolor(orig, *args, **opts):
-
- if (opts['no_color'] or opts['color'] == 'never' or
- (opts['color'] == 'auto' and (os.environ.get('TERM') == 'dumb'
- or not sys.__stdout__.isatty()))):
- return orig(*args, **opts)
-
- oldshowpatch = extensions.wrapfunction(cmdutil.changeset_printer,
- 'showpatch', colorshowpatch)
- try:
- if func is not None:
- return func(orig, *args, **opts)
- return orig(*args, **opts)
- finally:
- cmdutil.changeset_printer.showpatch = oldshowpatch
-
- entry = extensions.wrapcommand(table, cmd, nocolor)
- entry[1].extend([
- ('', 'color', 'auto', _("when to colorize (always, auto, or never)")),
- ('', 'no-color', None, _("don't colorize output")),
- ])
-
- for status in effectsmap:
- configkey = cmd + '.' + status
- effects = ui.configlist('color', configkey)
- if effects:
- good = []
- for e in effects:
- if e in _effect_params:
- good.append(e)
- else:
- ui.warn(_("ignoring unknown color/effect %r "
- "(configured in color.%s)\n")
- % (e, configkey))
- effectsmap[status] = good
diff --git a/sys/lib/python/hgext/convert/__init__.py b/sys/lib/python/hgext/convert/__init__.py
deleted file mode 100644
index 2d04dc34a..000000000
--- a/sys/lib/python/hgext/convert/__init__.py
+++ /dev/null
@@ -1,296 +0,0 @@
-# convert.py Foreign SCM converter
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''import revisions from foreign VCS repositories into Mercurial'''
-
-import convcmd
-import cvsps
-import subversion
-from mercurial import commands
-from mercurial.i18n import _
-
-# Commands definition was moved elsewhere to ease demandload job.
-
-def convert(ui, src, dest=None, revmapfile=None, **opts):
- """convert a foreign SCM repository to a Mercurial one.
-
- Accepted source formats [identifiers]:
-
- - Mercurial [hg]
- - CVS [cvs]
- - Darcs [darcs]
- - git [git]
- - Subversion [svn]
- - Monotone [mtn]
- - GNU Arch [gnuarch]
- - Bazaar [bzr]
- - Perforce [p4]
-
- Accepted destination formats [identifiers]:
-
- - Mercurial [hg]
- - Subversion [svn] (history on branches is not preserved)
-
- If no revision is given, all revisions will be converted.
- Otherwise, convert will only import up to the named revision
- (given in a format understood by the source).
-
- If no destination directory name is specified, it defaults to the
- basename of the source with '-hg' appended. If the destination
- repository doesn't exist, it will be created.
-
- By default, all sources except Mercurial will use --branchsort.
- Mercurial uses --sourcesort to preserve original revision numbers
- order. Sort modes have the following effects:
-
- --branchsort convert from parent to child revision when possible,
- which means branches are usually converted one after
- the other. It generates more compact repositories.
-
- --datesort sort revisions by date. Converted repositories have
- good-looking changelogs but are often an order of
- magnitude larger than the same ones generated by
- --branchsort.
-
- --sourcesort try to preserve source revisions order, only
- supported by Mercurial sources.
-
- If <REVMAP> isn't given, it will be put in a default location
- (<dest>/.hg/shamap by default). The <REVMAP> is a simple text file
- that maps each source commit ID to the destination ID for that
- revision, like so::
-
- <source ID> <destination ID>
-
- If the file doesn't exist, it's automatically created. It's
- updated on each commit copied, so convert-repo can be interrupted
- and can be run repeatedly to copy new commits.
-
- The [username mapping] file is a simple text file that maps each
- source commit author to a destination commit author. It is handy
- for source SCMs that use unix logins to identify authors (eg:
- CVS). One line per author mapping and the line format is:
- srcauthor=whatever string you want
-
- The filemap is a file that allows filtering and remapping of files
- and directories. Comment lines start with '#'. Each line can
- contain one of the following directives::
-
- include path/to/file
-
- exclude path/to/file
-
- rename from/file to/file
-
- The 'include' directive causes a file, or all files under a
- directory, to be included in the destination repository, and the
- exclusion of all other files and directories not explicitly
- included. The 'exclude' directive causes files or directories to
- be omitted. The 'rename' directive renames a file or directory. To
- rename from a subdirectory into the root of the repository, use
- '.' as the path to rename to.
-
- The splicemap is a file that allows insertion of synthetic
- history, letting you specify the parents of a revision. This is
- useful if you want to e.g. give a Subversion merge two parents, or
- graft two disconnected series of history together. Each entry
- contains a key, followed by a space, followed by one or two
- comma-separated values. The key is the revision ID in the source
- revision control system whose parents should be modified (same
- format as a key in .hg/shamap). The values are the revision IDs
- (in either the source or destination revision control system) that
- should be used as the new parents for that node.
-
- The branchmap is a file that allows you to rename a branch when it is
- being brought in from whatever external repository. When used in
- conjunction with a splicemap, it allows for a powerful combination
- to help fix even the most badly mismanaged repositories and turn them
- into nicely structured Mercurial repositories. The branchmap contains
- lines of the form "original_branch_name new_branch_name".
- "original_branch_name" is the name of the branch in the source
- repository, and "new_branch_name" is the name of the branch is the
- destination repository. This can be used to (for instance) move code
- in one repository from "default" to a named branch.
-
- Mercurial Source
- ----------------
-
- --config convert.hg.ignoreerrors=False (boolean)
- ignore integrity errors when reading. Use it to fix Mercurial
- repositories with missing revlogs, by converting from and to
- Mercurial.
- --config convert.hg.saverev=False (boolean)
- store original revision ID in changeset (forces target IDs to
- change)
- --config convert.hg.startrev=0 (hg revision identifier)
- convert start revision and its descendants
-
- CVS Source
- ----------
-
- CVS source will use a sandbox (i.e. a checked-out copy) from CVS
- to indicate the starting point of what will be converted. Direct
- access to the repository files is not needed, unless of course the
- repository is :local:. The conversion uses the top level directory
- in the sandbox to find the CVS repository, and then uses CVS rlog
- commands to find files to convert. This means that unless a
- filemap is given, all files under the starting directory will be
- converted, and that any directory reorganization in the CVS
- sandbox is ignored.
-
- Because CVS does not have changesets, it is necessary to collect
- individual commits to CVS and merge them into changesets. CVS
- source uses its internal changeset merging code by default but can
- be configured to call the external 'cvsps' program by setting::
-
- --config convert.cvsps='cvsps -A -u --cvs-direct -q'
-
- This option is deprecated and will be removed in Mercurial 1.4.
-
- The options shown are the defaults.
-
- Internal cvsps is selected by setting ::
-
- --config convert.cvsps=builtin
-
- and has a few more configurable options:
-
- --config convert.cvsps.cache=True (boolean)
- Set to False to disable remote log caching, for testing and
- debugging purposes.
- --config convert.cvsps.fuzz=60 (integer)
- Specify the maximum time (in seconds) that is allowed between
- commits with identical user and log message in a single
- changeset. When very large files were checked in as part of a
- changeset then the default may not be long enough.
- --config convert.cvsps.mergeto='{{mergetobranch ([-\\w]+)}}'
- Specify a regular expression to which commit log messages are
- matched. If a match occurs, then the conversion process will
- insert a dummy revision merging the branch on which this log
- message occurs to the branch indicated in the regex.
- --config convert.cvsps.mergefrom='{{mergefrombranch ([-\\w]+)}}'
- Specify a regular expression to which commit log messages are
- matched. If a match occurs, then the conversion process will
- add the most recent revision on the branch indicated in the
- regex as the second parent of the changeset.
-
- The hgext/convert/cvsps wrapper script allows the builtin
- changeset merging code to be run without doing a conversion. Its
- parameters and output are similar to that of cvsps 2.1.
-
- Subversion Source
- -----------------
-
- Subversion source detects classical trunk/branches/tags layouts.
- By default, the supplied "svn://repo/path/" source URL is
- converted as a single branch. If "svn://repo/path/trunk" exists it
- replaces the default branch. If "svn://repo/path/branches" exists,
- its subdirectories are listed as possible branches. If
- "svn://repo/path/tags" exists, it is looked for tags referencing
- converted branches. Default "trunk", "branches" and "tags" values
- can be overridden with following options. Set them to paths
- relative to the source URL, or leave them blank to disable auto
- detection.
-
- --config convert.svn.branches=branches (directory name)
- specify the directory containing branches
- --config convert.svn.tags=tags (directory name)
- specify the directory containing tags
- --config convert.svn.trunk=trunk (directory name)
- specify the name of the trunk branch
-
- Source history can be retrieved starting at a specific revision,
- instead of being integrally converted. Only single branch
- conversions are supported.
-
- --config convert.svn.startrev=0 (svn revision number)
- specify start Subversion revision.
-
- Perforce Source
- ---------------
-
- The Perforce (P4) importer can be given a p4 depot path or a
- client specification as source. It will convert all files in the
- source to a flat Mercurial repository, ignoring labels, branches
- and integrations. Note that when a depot path is given you then
- usually should specify a target directory, because otherwise the
- target may be named ...-hg.
-
- It is possible to limit the amount of source history to be
- converted by specifying an initial Perforce revision.
-
- --config convert.p4.startrev=0 (perforce changelist number)
- specify initial Perforce revision.
-
- Mercurial Destination
- ---------------------
-
- --config convert.hg.clonebranches=False (boolean)
- dispatch source branches in separate clones.
- --config convert.hg.tagsbranch=default (branch name)
- tag revisions branch name
- --config convert.hg.usebranchnames=True (boolean)
- preserve branch names
-
- """
- return convcmd.convert(ui, src, dest, revmapfile, **opts)
-
-def debugsvnlog(ui, **opts):
- return subversion.debugsvnlog(ui, **opts)
-
-def debugcvsps(ui, *args, **opts):
- '''create changeset information from CVS
-
- This command is intended as a debugging tool for the CVS to
- Mercurial converter, and can be used as a direct replacement for
- cvsps.
-
- Hg debugcvsps reads the CVS rlog for current directory (or any
- named directory) in the CVS repository, and converts the log to a
- series of changesets based on matching commit log entries and
- dates.'''
- return cvsps.debugcvsps(ui, *args, **opts)
-
-commands.norepo += " convert debugsvnlog debugcvsps"
-
-cmdtable = {
- "convert":
- (convert,
- [('A', 'authors', '', _('username mapping filename')),
- ('d', 'dest-type', '', _('destination repository type')),
- ('', 'filemap', '', _('remap file names using contents of file')),
- ('r', 'rev', '', _('import up to target revision REV')),
- ('s', 'source-type', '', _('source repository type')),
- ('', 'splicemap', '', _('splice synthesized history into place')),
- ('', 'branchmap', '', _('change branch names while converting')),
- ('', 'branchsort', None, _('try to sort changesets by branches')),
- ('', 'datesort', None, _('try to sort changesets by date')),
- ('', 'sourcesort', None, _('preserve source changesets order'))],
- _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
- "debugsvnlog":
- (debugsvnlog,
- [],
- 'hg debugsvnlog'),
- "debugcvsps":
- (debugcvsps,
- [
- # Main options shared with cvsps-2.1
- ('b', 'branches', [], _('only return changes on specified branches')),
- ('p', 'prefix', '', _('prefix to remove from file names')),
- ('r', 'revisions', [], _('only return changes after or between specified tags')),
- ('u', 'update-cache', None, _("update cvs log cache")),
- ('x', 'new-cache', None, _("create new cvs log cache")),
- ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
- ('', 'root', '', _('specify cvsroot')),
- # Options specific to builtin cvsps
- ('', 'parents', '', _('show parent changesets')),
- ('', 'ancestors', '', _('show current changeset in ancestor branches')),
- # Options that are ignored for compatibility with cvsps-2.1
- ('A', 'cvs-direct', None, _('ignored for compatibility')),
- ],
- _('hg debugcvsps [OPTION]... [PATH]...')),
-}
diff --git a/sys/lib/python/hgext/convert/bzr.py b/sys/lib/python/hgext/convert/bzr.py
deleted file mode 100644
index 6d2abe0bb..000000000
--- a/sys/lib/python/hgext/convert/bzr.py
+++ /dev/null
@@ -1,259 +0,0 @@
-# bzr.py - bzr support for the convert extension
-#
-# Copyright 2008, 2009 Marek Kubica <marek@xivilization.net> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-# This module is for handling 'bzr', that was formerly known as Bazaar-NG;
-# it cannot access 'bar' repositories, but they were never used very much
-
-import os
-from mercurial import demandimport
-# these do not work with demandimport, blacklist
-demandimport.ignore.extend([
- 'bzrlib.transactions',
- 'bzrlib.urlutils',
- ])
-
-from mercurial.i18n import _
-from mercurial import util
-from common import NoRepo, commit, converter_source
-
-try:
- # bazaar imports
- from bzrlib import branch, revision, errors
- from bzrlib.revisionspec import RevisionSpec
-except ImportError:
- pass
-
-supportedkinds = ('file', 'symlink')
-
-class bzr_source(converter_source):
- """Reads Bazaar repositories by using the Bazaar Python libraries"""
-
- def __init__(self, ui, path, rev=None):
- super(bzr_source, self).__init__(ui, path, rev=rev)
-
- if not os.path.exists(os.path.join(path, '.bzr')):
- raise NoRepo('%s does not look like a Bazaar repo' % path)
-
- try:
- # access bzrlib stuff
- branch
- except NameError:
- raise NoRepo('Bazaar modules could not be loaded')
-
- path = os.path.abspath(path)
- self._checkrepotype(path)
- self.branch = branch.Branch.open(path)
- self.sourcerepo = self.branch.repository
- self._parentids = {}
-
- def _checkrepotype(self, path):
- # Lightweight checkouts detection is informational but probably
- # fragile at API level. It should not terminate the conversion.
- try:
- from bzrlib import bzrdir
- dir = bzrdir.BzrDir.open_containing(path)[0]
- try:
- tree = dir.open_workingtree(recommend_upgrade=False)
- branch = tree.branch
- except (errors.NoWorkingTree, errors.NotLocalUrl), e:
- tree = None
- branch = dir.open_branch()
- if (tree is not None and tree.bzrdir.root_transport.base !=
- branch.bzrdir.root_transport.base):
- self.ui.warn(_('warning: lightweight checkouts may cause '
- 'conversion failures, try with a regular '
- 'branch instead.\n'))
- except:
- self.ui.note(_('bzr source type could not be determined\n'))
-
- def before(self):
- """Before the conversion begins, acquire a read lock
- for all the operations that might need it. Fortunately
- read locks don't block other reads or writes to the
- repository, so this shouldn't have any impact on the usage of
- the source repository.
-
- The alternative would be locking on every operation that
- needs locks (there are currently two: getting the file and
- getting the parent map) and releasing immediately after,
- but this approach can take even 40% longer."""
- self.sourcerepo.lock_read()
-
- def after(self):
- self.sourcerepo.unlock()
-
- def getheads(self):
- if not self.rev:
- return [self.branch.last_revision()]
- try:
- r = RevisionSpec.from_string(self.rev)
- info = r.in_history(self.branch)
- except errors.BzrError:
- raise util.Abort(_('%s is not a valid revision in current branch')
- % self.rev)
- return [info.rev_id]
-
- def getfile(self, name, rev):
- revtree = self.sourcerepo.revision_tree(rev)
- fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
- kind = None
- if fileid is not None:
- kind = revtree.kind(fileid)
- if kind not in supportedkinds:
- # the file is not available anymore - was deleted
- raise IOError(_('%s is not available in %s anymore') %
- (name, rev))
- if kind == 'symlink':
- target = revtree.get_symlink_target(fileid)
- if target is None:
- raise util.Abort(_('%s.%s symlink has no target')
- % (name, rev))
- return target
- else:
- sio = revtree.get_file(fileid)
- return sio.read()
-
- def getmode(self, name, rev):
- return self._modecache[(name, rev)]
-
- def getchanges(self, version):
- # set up caches: modecache and revtree
- self._modecache = {}
- self._revtree = self.sourcerepo.revision_tree(version)
- # get the parentids from the cache
- parentids = self._parentids.pop(version)
- # only diff against first parent id
- prevtree = self.sourcerepo.revision_tree(parentids[0])
- return self._gettreechanges(self._revtree, prevtree)
-
- def getcommit(self, version):
- rev = self.sourcerepo.get_revision(version)
- # populate parent id cache
- if not rev.parent_ids:
- parents = []
- self._parentids[version] = (revision.NULL_REVISION,)
- else:
- parents = self._filterghosts(rev.parent_ids)
- self._parentids[version] = parents
-
- return commit(parents=parents,
- date='%d %d' % (rev.timestamp, -rev.timezone),
- author=self.recode(rev.committer),
- # bzr returns bytestrings or unicode, depending on the content
- desc=self.recode(rev.message),
- rev=version)
-
- def gettags(self):
- if not self.branch.supports_tags():
- return {}
- tagdict = self.branch.tags.get_tag_dict()
- bytetags = {}
- for name, rev in tagdict.iteritems():
- bytetags[self.recode(name)] = rev
- return bytetags
-
- def getchangedfiles(self, rev, i):
- self._modecache = {}
- curtree = self.sourcerepo.revision_tree(rev)
- if i is not None:
- parentid = self._parentids[rev][i]
- else:
- # no parent id, get the empty revision
- parentid = revision.NULL_REVISION
-
- prevtree = self.sourcerepo.revision_tree(parentid)
- changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
- return changes
-
- def _gettreechanges(self, current, origin):
- revid = current._revision_id;
- changes = []
- renames = {}
- for (fileid, paths, changed_content, versioned, parent, name,
- kind, executable) in current.iter_changes(origin):
-
- if paths[0] == u'' or paths[1] == u'':
- # ignore changes to tree root
- continue
-
- # bazaar tracks directories, mercurial does not, so
- # we have to rename the directory contents
- if kind[1] == 'directory':
- if kind[0] not in (None, 'directory'):
- # Replacing 'something' with a directory, record it
- # so it can be removed.
- changes.append((self.recode(paths[0]), revid))
-
- if None not in paths and paths[0] != paths[1]:
- # neither an add nor an delete - a move
- # rename all directory contents manually
- subdir = origin.inventory.path2id(paths[0])
- # get all child-entries of the directory
- for name, entry in origin.inventory.iter_entries(subdir):
- # hg does not track directory renames
- if entry.kind == 'directory':
- continue
- frompath = self.recode(paths[0] + '/' + name)
- topath = self.recode(paths[1] + '/' + name)
- # register the files as changed
- changes.append((frompath, revid))
- changes.append((topath, revid))
- # add to mode cache
- mode = ((entry.executable and 'x') or (entry.kind == 'symlink' and 's')
- or '')
- self._modecache[(topath, revid)] = mode
- # register the change as move
- renames[topath] = frompath
-
- # no futher changes, go to the next change
- continue
-
- # we got unicode paths, need to convert them
- path, topath = [self.recode(part) for part in paths]
-
- if topath is None:
- # file deleted
- changes.append((path, revid))
- continue
-
- # renamed
- if path and path != topath:
- renames[topath] = path
- changes.append((path, revid))
-
- # populate the mode cache
- kind, executable = [e[1] for e in (kind, executable)]
- mode = ((executable and 'x') or (kind == 'symlink' and 'l')
- or '')
- self._modecache[(topath, revid)] = mode
- changes.append((topath, revid))
-
- return changes, renames
-
- def _filterghosts(self, ids):
- """Filters out ghost revisions which hg does not support, see
- <http://bazaar-vcs.org/GhostRevision>
- """
- parentmap = self.sourcerepo.get_parent_map(ids)
- parents = tuple([parent for parent in ids if parent in parentmap])
- return parents
-
- def recode(self, s, encoding=None):
- """This version of recode tries to encode unicode to bytecode,
- and preferably using the UTF-8 codec.
- Other types than Unicode are silently returned, this is by
- intention, e.g. the None-type is not going to be encoded but instead
- just passed through
- """
- if not encoding:
- encoding = self.encoding or 'utf-8'
-
- if isinstance(s, unicode):
- return s.encode(encoding)
- else:
- # leave it alone
- return s
diff --git a/sys/lib/python/hgext/convert/common.py b/sys/lib/python/hgext/convert/common.py
deleted file mode 100644
index 0519d99a0..000000000
--- a/sys/lib/python/hgext/convert/common.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# common.py - common code for the convert extension
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import base64, errno
-import os
-import cPickle as pickle
-from mercurial import util
-from mercurial.i18n import _
-
-def encodeargs(args):
- def encodearg(s):
- lines = base64.encodestring(s)
- lines = [l.splitlines()[0] for l in lines]
- return ''.join(lines)
-
- s = pickle.dumps(args)
- return encodearg(s)
-
-def decodeargs(s):
- s = base64.decodestring(s)
- return pickle.loads(s)
-
-class MissingTool(Exception): pass
-
-def checktool(exe, name=None, abort=True):
- name = name or exe
- if not util.find_exe(exe):
- exc = abort and util.Abort or MissingTool
- raise exc(_('cannot find required "%s" tool') % name)
-
-class NoRepo(Exception): pass
-
-SKIPREV = 'SKIP'
-
-class commit(object):
- def __init__(self, author, date, desc, parents, branch=None, rev=None,
- extra={}, sortkey=None):
- self.author = author or 'unknown'
- self.date = date or '0 0'
- self.desc = desc
- self.parents = parents
- self.branch = branch
- self.rev = rev
- self.extra = extra
- self.sortkey = sortkey
-
-class converter_source(object):
- """Conversion source interface"""
-
- def __init__(self, ui, path=None, rev=None):
- """Initialize conversion source (or raise NoRepo("message")
- exception if path is not a valid repository)"""
- self.ui = ui
- self.path = path
- self.rev = rev
-
- self.encoding = 'utf-8'
-
- def before(self):
- pass
-
- def after(self):
- pass
-
- def setrevmap(self, revmap):
- """set the map of already-converted revisions"""
- pass
-
- def getheads(self):
- """Return a list of this repository's heads"""
- raise NotImplementedError()
-
- def getfile(self, name, rev):
- """Return file contents as a string. rev is the identifier returned
- by a previous call to getchanges(). Raise IOError to indicate that
- name was deleted in rev.
- """
- raise NotImplementedError()
-
- def getmode(self, name, rev):
- """Return file mode, eg. '', 'x', or 'l'. rev is the identifier
- returned by a previous call to getchanges().
- """
- raise NotImplementedError()
-
- def getchanges(self, version):
- """Returns a tuple of (files, copies).
-
- files is a sorted list of (filename, id) tuples for all files
- changed between version and its first parent returned by
- getcommit(). id is the source revision id of the file.
-
- copies is a dictionary of dest: source
- """
- raise NotImplementedError()
-
- def getcommit(self, version):
- """Return the commit object for version"""
- raise NotImplementedError()
-
- def gettags(self):
- """Return the tags as a dictionary of name: revision
-
- Tag names must be UTF-8 strings.
- """
- raise NotImplementedError()
-
- def recode(self, s, encoding=None):
- if not encoding:
- encoding = self.encoding or 'utf-8'
-
- if isinstance(s, unicode):
- return s.encode("utf-8")
- try:
- return s.decode(encoding).encode("utf-8")
- except:
- try:
- return s.decode("latin-1").encode("utf-8")
- except:
- return s.decode(encoding, "replace").encode("utf-8")
-
- def getchangedfiles(self, rev, i):
- """Return the files changed by rev compared to parent[i].
-
- i is an index selecting one of the parents of rev. The return
- value should be the list of files that are different in rev and
- this parent.
-
- If rev has no parents, i is None.
-
- This function is only needed to support --filemap
- """
- raise NotImplementedError()
-
- def converted(self, rev, sinkrev):
- '''Notify the source that a revision has been converted.'''
- pass
-
- def hasnativeorder(self):
- """Return true if this source has a meaningful, native revision
- order. For instance, Mercurial revisions are store sequentially
- while there is no such global ordering with Darcs.
- """
- return False
-
- def lookuprev(self, rev):
- """If rev is a meaningful revision reference in source, return
- the referenced identifier in the same format used by getcommit().
- return None otherwise.
- """
- return None
-
-class converter_sink(object):
- """Conversion sink (target) interface"""
-
- def __init__(self, ui, path):
- """Initialize conversion sink (or raise NoRepo("message")
- exception if path is not a valid repository)
-
- created is a list of paths to remove if a fatal error occurs
- later"""
- self.ui = ui
- self.path = path
- self.created = []
-
- def getheads(self):
- """Return a list of this repository's heads"""
- raise NotImplementedError()
-
- def revmapfile(self):
- """Path to a file that will contain lines
- source_rev_id sink_rev_id
- mapping equivalent revision identifiers for each system."""
- raise NotImplementedError()
-
- def authorfile(self):
- """Path to a file that will contain lines
- srcauthor=dstauthor
- mapping equivalent authors identifiers for each system."""
- return None
-
- def putcommit(self, files, copies, parents, commit, source, revmap):
- """Create a revision with all changed files listed in 'files'
- and having listed parents. 'commit' is a commit object
- containing at a minimum the author, date, and message for this
- changeset. 'files' is a list of (path, version) tuples,
- 'copies' is a dictionary mapping destinations to sources,
- 'source' is the source repository, and 'revmap' is a mapfile
- of source revisions to converted revisions. Only getfile(),
- getmode(), and lookuprev() should be called on 'source'.
-
- Note that the sink repository is not told to update itself to
- a particular revision (or even what that revision would be)
- before it receives the file data.
- """
- raise NotImplementedError()
-
- def puttags(self, tags):
- """Put tags into sink.
-
- tags: {tagname: sink_rev_id, ...} where tagname is an UTF-8 string.
- """
- raise NotImplementedError()
-
- def setbranch(self, branch, pbranches):
- """Set the current branch name. Called before the first putcommit
- on the branch.
- branch: branch name for subsequent commits
- pbranches: (converted parent revision, parent branch) tuples"""
- pass
-
- def setfilemapmode(self, active):
- """Tell the destination that we're using a filemap
-
- Some converter_sources (svn in particular) can claim that a file
- was changed in a revision, even if there was no change. This method
- tells the destination that we're using a filemap and that it should
- filter empty revisions.
- """
- pass
-
- def before(self):
- pass
-
- def after(self):
- pass
-
-
-class commandline(object):
- def __init__(self, ui, command):
- self.ui = ui
- self.command = command
-
- def prerun(self):
- pass
-
- def postrun(self):
- pass
-
- def _cmdline(self, cmd, *args, **kwargs):
- cmdline = [self.command, cmd] + list(args)
- for k, v in kwargs.iteritems():
- if len(k) == 1:
- cmdline.append('-' + k)
- else:
- cmdline.append('--' + k.replace('_', '-'))
- try:
- if len(k) == 1:
- cmdline.append('' + v)
- else:
- cmdline[-1] += '=' + v
- except TypeError:
- pass
- cmdline = [util.shellquote(arg) for arg in cmdline]
- if not self.ui.debugflag:
- cmdline += ['2>', util.nulldev]
- cmdline += ['<', util.nulldev]
- cmdline = ' '.join(cmdline)
- return cmdline
-
- def _run(self, cmd, *args, **kwargs):
- cmdline = self._cmdline(cmd, *args, **kwargs)
- self.ui.debug(_('running: %s\n') % (cmdline,))
- self.prerun()
- try:
- return util.popen(cmdline)
- finally:
- self.postrun()
-
- def run(self, cmd, *args, **kwargs):
- fp = self._run(cmd, *args, **kwargs)
- output = fp.read()
- self.ui.debug(output)
- return output, fp.close()
-
- def runlines(self, cmd, *args, **kwargs):
- fp = self._run(cmd, *args, **kwargs)
- output = fp.readlines()
- self.ui.debug(''.join(output))
- return output, fp.close()
-
- def checkexit(self, status, output=''):
- if status:
- if output:
- self.ui.warn(_('%s error:\n') % self.command)
- self.ui.warn(output)
- msg = util.explain_exit(status)[0]
- raise util.Abort('%s %s' % (self.command, msg))
-
- def run0(self, cmd, *args, **kwargs):
- output, status = self.run(cmd, *args, **kwargs)
- self.checkexit(status, output)
- return output
-
- def runlines0(self, cmd, *args, **kwargs):
- output, status = self.runlines(cmd, *args, **kwargs)
- self.checkexit(status, ''.join(output))
- return output
-
- def getargmax(self):
- if '_argmax' in self.__dict__:
- return self._argmax
-
- # POSIX requires at least 4096 bytes for ARG_MAX
- self._argmax = 4096
- try:
- self._argmax = os.sysconf("SC_ARG_MAX")
- except:
- pass
-
- # Windows shells impose their own limits on command line length,
- # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
- # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
- # details about cmd.exe limitations.
-
- # Since ARG_MAX is for command line _and_ environment, lower our limit
- # (and make happy Windows shells while doing this).
-
- self._argmax = self._argmax/2 - 1
- return self._argmax
-
- def limit_arglist(self, arglist, cmd, *args, **kwargs):
- limit = self.getargmax() - len(self._cmdline(cmd, *args, **kwargs))
- bytes = 0
- fl = []
- for fn in arglist:
- b = len(fn) + 3
- if bytes + b < limit or len(fl) == 0:
- fl.append(fn)
- bytes += b
- else:
- yield fl
- fl = [fn]
- bytes = b
- if fl:
- yield fl
-
- def xargs(self, arglist, cmd, *args, **kwargs):
- for l in self.limit_arglist(arglist, cmd, *args, **kwargs):
- self.run0(cmd, *(list(args) + l), **kwargs)
-
-class mapfile(dict):
- def __init__(self, ui, path):
- super(mapfile, self).__init__()
- self.ui = ui
- self.path = path
- self.fp = None
- self.order = []
- self._read()
-
- def _read(self):
- if not self.path:
- return
- try:
- fp = open(self.path, 'r')
- except IOError, err:
- if err.errno != errno.ENOENT:
- raise
- return
- for i, line in enumerate(fp):
- try:
- key, value = line[:-1].rsplit(' ', 1)
- except ValueError:
- raise util.Abort(_('syntax error in %s(%d): key/value pair expected')
- % (self.path, i+1))
- if key not in self:
- self.order.append(key)
- super(mapfile, self).__setitem__(key, value)
- fp.close()
-
- def __setitem__(self, key, value):
- if self.fp is None:
- try:
- self.fp = open(self.path, 'a')
- except IOError, err:
- raise util.Abort(_('could not open map file %r: %s') %
- (self.path, err.strerror))
- self.fp.write('%s %s\n' % (key, value))
- self.fp.flush()
- super(mapfile, self).__setitem__(key, value)
-
- def close(self):
- if self.fp:
- self.fp.close()
- self.fp = None
diff --git a/sys/lib/python/hgext/convert/convcmd.py b/sys/lib/python/hgext/convert/convcmd.py
deleted file mode 100644
index 50be03af0..000000000
--- a/sys/lib/python/hgext/convert/convcmd.py
+++ /dev/null
@@ -1,396 +0,0 @@
-# convcmd - convert extension commands definition
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from common import NoRepo, MissingTool, SKIPREV, mapfile
-from cvs import convert_cvs
-from darcs import darcs_source
-from git import convert_git
-from hg import mercurial_source, mercurial_sink
-from subversion import svn_source, svn_sink
-from monotone import monotone_source
-from gnuarch import gnuarch_source
-from bzr import bzr_source
-from p4 import p4_source
-import filemap
-
-import os, shutil
-from mercurial import hg, util, encoding
-from mercurial.i18n import _
-
-orig_encoding = 'ascii'
-
-def recode(s):
- if isinstance(s, unicode):
- return s.encode(orig_encoding, 'replace')
- else:
- return s.decode('utf-8').encode(orig_encoding, 'replace')
-
-source_converters = [
- ('cvs', convert_cvs, 'branchsort'),
- ('git', convert_git, 'branchsort'),
- ('svn', svn_source, 'branchsort'),
- ('hg', mercurial_source, 'sourcesort'),
- ('darcs', darcs_source, 'branchsort'),
- ('mtn', monotone_source, 'branchsort'),
- ('gnuarch', gnuarch_source, 'branchsort'),
- ('bzr', bzr_source, 'branchsort'),
- ('p4', p4_source, 'branchsort'),
- ]
-
-sink_converters = [
- ('hg', mercurial_sink),
- ('svn', svn_sink),
- ]
-
-def convertsource(ui, path, type, rev):
- exceptions = []
- for name, source, sortmode in source_converters:
- try:
- if not type or name == type:
- return source(ui, path, rev), sortmode
- except (NoRepo, MissingTool), inst:
- exceptions.append(inst)
- if not ui.quiet:
- for inst in exceptions:
- ui.write("%s\n" % inst)
- raise util.Abort(_('%s: missing or unsupported repository') % path)
-
-def convertsink(ui, path, type):
- for name, sink in sink_converters:
- try:
- if not type or name == type:
- return sink(ui, path)
- except NoRepo, inst:
- ui.note(_("convert: %s\n") % inst)
- raise util.Abort(_('%s: unknown repository type') % path)
-
-class converter(object):
- def __init__(self, ui, source, dest, revmapfile, opts):
-
- self.source = source
- self.dest = dest
- self.ui = ui
- self.opts = opts
- self.commitcache = {}
- self.authors = {}
- self.authorfile = None
-
- # Record converted revisions persistently: maps source revision
- # ID to target revision ID (both strings). (This is how
- # incremental conversions work.)
- self.map = mapfile(ui, revmapfile)
-
- # Read first the dst author map if any
- authorfile = self.dest.authorfile()
- if authorfile and os.path.exists(authorfile):
- self.readauthormap(authorfile)
- # Extend/Override with new author map if necessary
- if opts.get('authors'):
- self.readauthormap(opts.get('authors'))
- self.authorfile = self.dest.authorfile()
-
- self.splicemap = mapfile(ui, opts.get('splicemap'))
- self.branchmap = mapfile(ui, opts.get('branchmap'))
-
- def walktree(self, heads):
- '''Return a mapping that identifies the uncommitted parents of every
- uncommitted changeset.'''
- visit = heads
- known = set()
- parents = {}
- while visit:
- n = visit.pop(0)
- if n in known or n in self.map: continue
- known.add(n)
- commit = self.cachecommit(n)
- parents[n] = []
- for p in commit.parents:
- parents[n].append(p)
- visit.append(p)
-
- return parents
-
- def toposort(self, parents, sortmode):
- '''Return an ordering such that every uncommitted changeset is
- preceeded by all its uncommitted ancestors.'''
-
- def mapchildren(parents):
- """Return a (children, roots) tuple where 'children' maps parent
- revision identifiers to children ones, and 'roots' is the list of
- revisions without parents. 'parents' must be a mapping of revision
- identifier to its parents ones.
- """
- visit = parents.keys()
- seen = set()
- children = {}
- roots = []
-
- while visit:
- n = visit.pop(0)
- if n in seen:
- continue
- seen.add(n)
- # Ensure that nodes without parents are present in the
- # 'children' mapping.
- children.setdefault(n, [])
- hasparent = False
- for p in parents[n]:
- if not p in self.map:
- visit.append(p)
- hasparent = True
- children.setdefault(p, []).append(n)
- if not hasparent:
- roots.append(n)
-
- return children, roots
-
- # Sort functions are supposed to take a list of revisions which
- # can be converted immediately and pick one
-
- def makebranchsorter():
- """If the previously converted revision has a child in the
- eligible revisions list, pick it. Return the list head
- otherwise. Branch sort attempts to minimize branch
- switching, which is harmful for Mercurial backend
- compression.
- """
- prev = [None]
- def picknext(nodes):
- next = nodes[0]
- for n in nodes:
- if prev[0] in parents[n]:
- next = n
- break
- prev[0] = next
- return next
- return picknext
-
- def makesourcesorter():
- """Source specific sort."""
- keyfn = lambda n: self.commitcache[n].sortkey
- def picknext(nodes):
- return sorted(nodes, key=keyfn)[0]
- return picknext
-
- def makedatesorter():
- """Sort revisions by date."""
- dates = {}
- def getdate(n):
- if n not in dates:
- dates[n] = util.parsedate(self.commitcache[n].date)
- return dates[n]
-
- def picknext(nodes):
- return min([(getdate(n), n) for n in nodes])[1]
-
- return picknext
-
- if sortmode == 'branchsort':
- picknext = makebranchsorter()
- elif sortmode == 'datesort':
- picknext = makedatesorter()
- elif sortmode == 'sourcesort':
- picknext = makesourcesorter()
- else:
- raise util.Abort(_('unknown sort mode: %s') % sortmode)
-
- children, actives = mapchildren(parents)
-
- s = []
- pendings = {}
- while actives:
- n = picknext(actives)
- actives.remove(n)
- s.append(n)
-
- # Update dependents list
- for c in children.get(n, []):
- if c not in pendings:
- pendings[c] = [p for p in parents[c] if p not in self.map]
- try:
- pendings[c].remove(n)
- except ValueError:
- raise util.Abort(_('cycle detected between %s and %s')
- % (recode(c), recode(n)))
- if not pendings[c]:
- # Parents are converted, node is eligible
- actives.insert(0, c)
- pendings[c] = None
-
- if len(s) != len(parents):
- raise util.Abort(_("not all revisions were sorted"))
-
- return s
-
- def writeauthormap(self):
- authorfile = self.authorfile
- if authorfile:
- self.ui.status(_('Writing author map file %s\n') % authorfile)
- ofile = open(authorfile, 'w+')
- for author in self.authors:
- ofile.write("%s=%s\n" % (author, self.authors[author]))
- ofile.close()
-
- def readauthormap(self, authorfile):
- afile = open(authorfile, 'r')
- for line in afile:
-
- line = line.strip()
- if not line or line.startswith('#'):
- continue
-
- try:
- srcauthor, dstauthor = line.split('=', 1)
- except ValueError:
- msg = _('Ignoring bad line in author map file %s: %s\n')
- self.ui.warn(msg % (authorfile, line.rstrip()))
- continue
-
- srcauthor = srcauthor.strip()
- dstauthor = dstauthor.strip()
- if self.authors.get(srcauthor) in (None, dstauthor):
- msg = _('mapping author %s to %s\n')
- self.ui.debug(msg % (srcauthor, dstauthor))
- self.authors[srcauthor] = dstauthor
- continue
-
- m = _('overriding mapping for author %s, was %s, will be %s\n')
- self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
-
- afile.close()
-
- def cachecommit(self, rev):
- commit = self.source.getcommit(rev)
- commit.author = self.authors.get(commit.author, commit.author)
- commit.branch = self.branchmap.get(commit.branch, commit.branch)
- self.commitcache[rev] = commit
- return commit
-
- def copy(self, rev):
- commit = self.commitcache[rev]
-
- changes = self.source.getchanges(rev)
- if isinstance(changes, basestring):
- if changes == SKIPREV:
- dest = SKIPREV
- else:
- dest = self.map[changes]
- self.map[rev] = dest
- return
- files, copies = changes
- pbranches = []
- if commit.parents:
- for prev in commit.parents:
- if prev not in self.commitcache:
- self.cachecommit(prev)
- pbranches.append((self.map[prev],
- self.commitcache[prev].branch))
- self.dest.setbranch(commit.branch, pbranches)
- try:
- parents = self.splicemap[rev].replace(',', ' ').split()
- self.ui.status(_('spliced in %s as parents of %s\n') %
- (parents, rev))
- parents = [self.map.get(p, p) for p in parents]
- except KeyError:
- parents = [b[0] for b in pbranches]
- newnode = self.dest.putcommit(files, copies, parents, commit,
- self.source, self.map)
- self.source.converted(rev, newnode)
- self.map[rev] = newnode
-
- def convert(self, sortmode):
- try:
- self.source.before()
- self.dest.before()
- self.source.setrevmap(self.map)
- self.ui.status(_("scanning source...\n"))
- heads = self.source.getheads()
- parents = self.walktree(heads)
- self.ui.status(_("sorting...\n"))
- t = self.toposort(parents, sortmode)
- num = len(t)
- c = None
-
- self.ui.status(_("converting...\n"))
- for c in t:
- num -= 1
- desc = self.commitcache[c].desc
- if "\n" in desc:
- desc = desc.splitlines()[0]
- # convert log message to local encoding without using
- # tolocal() because encoding.encoding conver() use it as
- # 'utf-8'
- self.ui.status("%d %s\n" % (num, recode(desc)))
- self.ui.note(_("source: %s\n") % recode(c))
- self.copy(c)
-
- tags = self.source.gettags()
- ctags = {}
- for k in tags:
- v = tags[k]
- if self.map.get(v, SKIPREV) != SKIPREV:
- ctags[k] = self.map[v]
-
- if c and ctags:
- nrev = self.dest.puttags(ctags)
- # write another hash correspondence to override the previous
- # one so we don't end up with extra tag heads
- if nrev:
- self.map[c] = nrev
-
- self.writeauthormap()
- finally:
- self.cleanup()
-
- def cleanup(self):
- try:
- self.dest.after()
- finally:
- self.source.after()
- self.map.close()
-
-def convert(ui, src, dest=None, revmapfile=None, **opts):
- global orig_encoding
- orig_encoding = encoding.encoding
- encoding.encoding = 'UTF-8'
-
- if not dest:
- dest = hg.defaultdest(src) + "-hg"
- ui.status(_("assuming destination %s\n") % dest)
-
- destc = convertsink(ui, dest, opts.get('dest_type'))
-
- try:
- srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
- opts.get('rev'))
- except Exception:
- for path in destc.created:
- shutil.rmtree(path, True)
- raise
-
- sortmodes = ('branchsort', 'datesort', 'sourcesort')
- sortmode = [m for m in sortmodes if opts.get(m)]
- if len(sortmode) > 1:
- raise util.Abort(_('more than one sort mode specified'))
- sortmode = sortmode and sortmode[0] or defaultsort
- if sortmode == 'sourcesort' and not srcc.hasnativeorder():
- raise util.Abort(_('--sourcesort is not supported by this data source'))
-
- fmap = opts.get('filemap')
- if fmap:
- srcc = filemap.filemap_source(ui, srcc, fmap)
- destc.setfilemapmode(True)
-
- if not revmapfile:
- try:
- revmapfile = destc.revmapfile()
- except:
- revmapfile = os.path.join(destc, "map")
-
- c = converter(ui, srcc, destc, revmapfile, opts)
- c.convert(sortmode)
-
diff --git a/sys/lib/python/hgext/convert/cvs.py b/sys/lib/python/hgext/convert/cvs.py
deleted file mode 100644
index c215747be..000000000
--- a/sys/lib/python/hgext/convert/cvs.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os, locale, re, socket, errno
-from cStringIO import StringIO
-from mercurial import util
-from mercurial.i18n import _
-
-from common import NoRepo, commit, converter_source, checktool
-import cvsps
-
-class convert_cvs(converter_source):
- def __init__(self, ui, path, rev=None):
- super(convert_cvs, self).__init__(ui, path, rev=rev)
-
- cvs = os.path.join(path, "CVS")
- if not os.path.exists(cvs):
- raise NoRepo("%s does not look like a CVS checkout" % path)
-
- checktool('cvs')
- self.cmd = ui.config('convert', 'cvsps', 'builtin')
- cvspsexe = self.cmd.split(None, 1)[0]
- self.builtin = cvspsexe == 'builtin'
- if not self.builtin:
- ui.warn(_('warning: support for external cvsps is deprecated and '
- 'will be removed in Mercurial 1.4\n'))
-
- if not self.builtin:
- checktool(cvspsexe)
-
- self.changeset = None
- self.files = {}
- self.tags = {}
- self.lastbranch = {}
- self.parent = {}
- self.socket = None
- self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
- self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
- self.encoding = locale.getpreferredencoding()
-
- self._connect()
-
- def _parse(self):
- if self.changeset is not None:
- return
- self.changeset = {}
-
- maxrev = 0
- cmd = self.cmd
- if self.rev:
- # TODO: handle tags
- try:
- # patchset number?
- maxrev = int(self.rev)
- except ValueError:
- try:
- # date
- util.parsedate(self.rev, ['%Y/%m/%d %H:%M:%S'])
- cmd = '%s -d "1970/01/01 00:00:01" -d "%s"' % (cmd, self.rev)
- except util.Abort:
- raise util.Abort(_('revision %s is not a patchset number or date') % self.rev)
-
- d = os.getcwd()
- try:
- os.chdir(self.path)
- id = None
- state = 0
- filerevids = {}
-
- if self.builtin:
- # builtin cvsps code
- self.ui.status(_('using builtin cvsps\n'))
-
- cache = 'update'
- if not self.ui.configbool('convert', 'cvsps.cache', True):
- cache = None
- db = cvsps.createlog(self.ui, cache=cache)
- db = cvsps.createchangeset(self.ui, db,
- fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
- mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
- mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))
-
- for cs in db:
- if maxrev and cs.id>maxrev:
- break
- id = str(cs.id)
- cs.author = self.recode(cs.author)
- self.lastbranch[cs.branch] = id
- cs.comment = self.recode(cs.comment)
- date = util.datestr(cs.date)
- self.tags.update(dict.fromkeys(cs.tags, id))
-
- files = {}
- for f in cs.entries:
- files[f.file] = "%s%s" % ('.'.join([str(x) for x in f.revision]),
- ['', '(DEAD)'][f.dead])
-
- # add current commit to set
- c = commit(author=cs.author, date=date,
- parents=[str(p.id) for p in cs.parents],
- desc=cs.comment, branch=cs.branch or '')
- self.changeset[id] = c
- self.files[id] = files
- else:
- # external cvsps
- for l in util.popen(cmd):
- if state == 0: # header
- if l.startswith("PatchSet"):
- id = l[9:-2]
- if maxrev and int(id) > maxrev:
- # ignore everything
- state = 3
- elif l.startswith("Date:"):
- date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
- date = util.datestr(date)
- elif l.startswith("Branch:"):
- branch = l[8:-1]
- self.parent[id] = self.lastbranch.get(branch, 'bad')
- self.lastbranch[branch] = id
- elif l.startswith("Ancestor branch:"):
- ancestor = l[17:-1]
- # figure out the parent later
- self.parent[id] = self.lastbranch[ancestor]
- elif l.startswith("Author:"):
- author = self.recode(l[8:-1])
- elif l.startswith("Tag:") or l.startswith("Tags:"):
- t = l[l.index(':')+1:]
- t = [ut.strip() for ut in t.split(',')]
- if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
- self.tags.update(dict.fromkeys(t, id))
- elif l.startswith("Log:"):
- # switch to gathering log
- state = 1
- log = ""
- elif state == 1: # log
- if l == "Members: \n":
- # switch to gathering members
- files = {}
- oldrevs = []
- log = self.recode(log[:-1])
- state = 2
- else:
- # gather log
- log += l
- elif state == 2: # members
- if l == "\n": # start of next entry
- state = 0
- p = [self.parent[id]]
- if id == "1":
- p = []
- if branch == "HEAD":
- branch = ""
- if branch:
- latest = 0
- # the last changeset that contains a base
- # file is our parent
- for r in oldrevs:
- latest = max(filerevids.get(r, 0), latest)
- if latest:
- p = [latest]
-
- # add current commit to set
- c = commit(author=author, date=date, parents=p,
- desc=log, branch=branch)
- self.changeset[id] = c
- self.files[id] = files
- else:
- colon = l.rfind(':')
- file = l[1:colon]
- rev = l[colon+1:-2]
- oldrev, rev = rev.split("->")
- files[file] = rev
-
- # save some information for identifying branch points
- oldrevs.append("%s:%s" % (oldrev, file))
- filerevids["%s:%s" % (rev, file)] = id
- elif state == 3:
- # swallow all input
- continue
-
- self.heads = self.lastbranch.values()
- finally:
- os.chdir(d)
-
- def _connect(self):
- root = self.cvsroot
- conntype = None
- user, host = None, None
- cmd = ['cvs', 'server']
-
- self.ui.status(_("connecting to %s\n") % root)
-
- if root.startswith(":pserver:"):
- root = root[9:]
- m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
- root)
- if m:
- conntype = "pserver"
- user, passw, serv, port, root = m.groups()
- if not user:
- user = "anonymous"
- if not port:
- port = 2401
- else:
- port = int(port)
- format0 = ":pserver:%s@%s:%s" % (user, serv, root)
- format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
-
- if not passw:
- passw = "A"
- cvspass = os.path.expanduser("~/.cvspass")
- try:
- pf = open(cvspass)
- for line in pf.read().splitlines():
- part1, part2 = line.split(' ', 1)
- if part1 == '/1':
- # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
- part1, part2 = part2.split(' ', 1)
- format = format1
- else:
- # :pserver:user@example.com:/cvsroot/foo Ah<Z
- format = format0
- if part1 == format:
- passw = part2
- break
- pf.close()
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- if not getattr(inst, 'filename', None):
- inst.filename = cvspass
- raise
-
- sck = socket.socket()
- sck.connect((serv, port))
- sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
- "END AUTH REQUEST", ""]))
- if sck.recv(128) != "I LOVE YOU\n":
- raise util.Abort(_("CVS pserver authentication failed"))
-
- self.writep = self.readp = sck.makefile('r+')
-
- if not conntype and root.startswith(":local:"):
- conntype = "local"
- root = root[7:]
-
- if not conntype:
- # :ext:user@host/home/user/path/to/cvsroot
- if root.startswith(":ext:"):
- root = root[5:]
- m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
- # Do not take Windows path "c:\foo\bar" for a connection strings
- if os.path.isdir(root) or not m:
- conntype = "local"
- else:
- conntype = "rsh"
- user, host, root = m.group(1), m.group(2), m.group(3)
-
- if conntype != "pserver":
- if conntype == "rsh":
- rsh = os.environ.get("CVS_RSH") or "ssh"
- if user:
- cmd = [rsh, '-l', user, host] + cmd
- else:
- cmd = [rsh, host] + cmd
-
- # popen2 does not support argument lists under Windows
- cmd = [util.shellquote(arg) for arg in cmd]
- cmd = util.quotecommand(' '.join(cmd))
- self.writep, self.readp = util.popen2(cmd)
-
- self.realroot = root
-
- self.writep.write("Root %s\n" % root)
- self.writep.write("Valid-responses ok error Valid-requests Mode"
- " M Mbinary E Checked-in Created Updated"
- " Merged Removed\n")
- self.writep.write("valid-requests\n")
- self.writep.flush()
- r = self.readp.readline()
- if not r.startswith("Valid-requests"):
- raise util.Abort(_("unexpected response from CVS server "
- "(expected \"Valid-requests\", but got %r)")
- % r)
- if "UseUnchanged" in r:
- self.writep.write("UseUnchanged\n")
- self.writep.flush()
- r = self.readp.readline()
-
- def getheads(self):
- self._parse()
- return self.heads
-
- def _getfile(self, name, rev):
-
- def chunkedread(fp, count):
- # file-objects returned by socked.makefile() do not handle
- # large read() requests very well.
- chunksize = 65536
- output = StringIO()
- while count > 0:
- data = fp.read(min(count, chunksize))
- if not data:
- raise util.Abort(_("%d bytes missing from remote file") % count)
- count -= len(data)
- output.write(data)
- return output.getvalue()
-
- if rev.endswith("(DEAD)"):
- raise IOError
-
- args = ("-N -P -kk -r %s --" % rev).split()
- args.append(self.cvsrepo + '/' + name)
- for x in args:
- self.writep.write("Argument %s\n" % x)
- self.writep.write("Directory .\n%s\nco\n" % self.realroot)
- self.writep.flush()
-
- data = ""
- while 1:
- line = self.readp.readline()
- if line.startswith("Created ") or line.startswith("Updated "):
- self.readp.readline() # path
- self.readp.readline() # entries
- mode = self.readp.readline()[:-1]
- count = int(self.readp.readline()[:-1])
- data = chunkedread(self.readp, count)
- elif line.startswith(" "):
- data += line[1:]
- elif line.startswith("M "):
- pass
- elif line.startswith("Mbinary "):
- count = int(self.readp.readline()[:-1])
- data = chunkedread(self.readp, count)
- else:
- if line == "ok\n":
- return (data, "x" in mode and "x" or "")
- elif line.startswith("E "):
- self.ui.warn(_("cvs server: %s\n") % line[2:])
- elif line.startswith("Remove"):
- self.readp.readline()
- else:
- raise util.Abort(_("unknown CVS response: %s") % line)
-
- def getfile(self, file, rev):
- self._parse()
- data, mode = self._getfile(file, rev)
- self.modecache[(file, rev)] = mode
- return data
-
- def getmode(self, file, rev):
- return self.modecache[(file, rev)]
-
- def getchanges(self, rev):
- self._parse()
- self.modecache = {}
- return sorted(self.files[rev].iteritems()), {}
-
- def getcommit(self, rev):
- self._parse()
- return self.changeset[rev]
-
- def gettags(self):
- self._parse()
- return self.tags
-
- def getchangedfiles(self, rev, i):
- self._parse()
- return sorted(self.files[rev])
diff --git a/sys/lib/python/hgext/convert/cvsps.py b/sys/lib/python/hgext/convert/cvsps.py
deleted file mode 100644
index 02db47e25..000000000
--- a/sys/lib/python/hgext/convert/cvsps.py
+++ /dev/null
@@ -1,831 +0,0 @@
-#
-# Mercurial built-in replacement for cvsps.
-#
-# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os
-import re
-import cPickle as pickle
-from mercurial import util
-from mercurial.i18n import _
-
-class logentry(object):
- '''Class logentry has the following attributes:
- .author - author name as CVS knows it
- .branch - name of branch this revision is on
- .branches - revision tuple of branches starting at this revision
- .comment - commit message
- .date - the commit date as a (time, tz) tuple
- .dead - true if file revision is dead
- .file - Name of file
- .lines - a tuple (+lines, -lines) or None
- .parent - Previous revision of this entry
- .rcs - name of file as returned from CVS
- .revision - revision number as tuple
- .tags - list of tags on the file
- .synthetic - is this a synthetic "file ... added on ..." revision?
- .mergepoint- the branch that has been merged from
- (if present in rlog output)
- .branchpoints- the branches that start at the current entry
- '''
- def __init__(self, **entries):
- self.__dict__.update(entries)
-
- def __repr__(self):
- return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
- id(self),
- self.file,
- ".".join(map(str, self.revision)))
-
-class logerror(Exception):
- pass
-
-def getrepopath(cvspath):
- """Return the repository path from a CVS path.
-
- >>> getrepopath('/foo/bar')
- '/foo/bar'
- >>> getrepopath('c:/foo/bar')
- 'c:/foo/bar'
- >>> getrepopath(':pserver:10/foo/bar')
- '/foo/bar'
- >>> getrepopath(':pserver:10c:/foo/bar')
- '/foo/bar'
- >>> getrepopath(':pserver:/foo/bar')
- '/foo/bar'
- >>> getrepopath(':pserver:c:/foo/bar')
- 'c:/foo/bar'
- >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
- '/foo/bar'
- >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
- 'c:/foo/bar'
- """
- # According to CVS manual, CVS paths are expressed like:
- # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
- #
- # Unfortunately, Windows absolute paths start with a drive letter
- # like 'c:' making it harder to parse. Here we assume that drive
- # letters are only one character long and any CVS component before
- # the repository path is at least 2 characters long, and use this
- # to disambiguate.
- parts = cvspath.split(':')
- if len(parts) == 1:
- return parts[0]
- # Here there is an ambiguous case if we have a port number
- # immediately followed by a Windows driver letter. We assume this
- # never happens and decide it must be CVS path component,
- # therefore ignoring it.
- if len(parts[-2]) > 1:
- return parts[-1].lstrip('0123456789')
- return parts[-2] + ':' + parts[-1]
-
-def createlog(ui, directory=None, root="", rlog=True, cache=None):
- '''Collect the CVS rlog'''
-
- # Because we store many duplicate commit log messages, reusing strings
- # saves a lot of memory and pickle storage space.
- _scache = {}
- def scache(s):
- "return a shared version of a string"
- return _scache.setdefault(s, s)
-
- ui.status(_('collecting CVS rlog\n'))
-
- log = [] # list of logentry objects containing the CVS state
-
- # patterns to match in CVS (r)log output, by state of use
- re_00 = re.compile('RCS file: (.+)$')
- re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
- re_02 = re.compile('cvs (r?log|server): (.+)\n$')
- re_03 = re.compile("(Cannot access.+CVSROOT)|"
- "(can't create temporary directory.+)$")
- re_10 = re.compile('Working file: (.+)$')
- re_20 = re.compile('symbolic names:')
- re_30 = re.compile('\t(.+): ([\\d.]+)$')
- re_31 = re.compile('----------------------------$')
- re_32 = re.compile('======================================='
- '======================================$')
- re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
- re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
- r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
- r'(.*mergepoint:\s+([^;]+);)?')
- re_70 = re.compile('branches: (.+);$')
-
- file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
-
- prefix = '' # leading path to strip of what we get from CVS
-
- if directory is None:
- # Current working directory
-
- # Get the real directory in the repository
- try:
- prefix = open(os.path.join('CVS','Repository')).read().strip()
- if prefix == ".":
- prefix = ""
- directory = prefix
- except IOError:
- raise logerror('Not a CVS sandbox')
-
- if prefix and not prefix.endswith(os.sep):
- prefix += os.sep
-
- # Use the Root file in the sandbox, if it exists
- try:
- root = open(os.path.join('CVS','Root')).read().strip()
- except IOError:
- pass
-
- if not root:
- root = os.environ.get('CVSROOT', '')
-
- # read log cache if one exists
- oldlog = []
- date = None
-
- if cache:
- cachedir = os.path.expanduser('~/.hg.cvsps')
- if not os.path.exists(cachedir):
- os.mkdir(cachedir)
-
- # The cvsps cache pickle needs a uniquified name, based on the
- # repository location. The address may have all sort of nasties
- # in it, slashes, colons and such. So here we take just the
- # alphanumerics, concatenated in a way that does not mix up the
- # various components, so that
- # :pserver:user@server:/path
- # and
- # /pserver/user/server/path
- # are mapped to different cache file names.
- cachefile = root.split(":") + [directory, "cache"]
- cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
- cachefile = os.path.join(cachedir,
- '.'.join([s for s in cachefile if s]))
-
- if cache == 'update':
- try:
- ui.note(_('reading cvs log cache %s\n') % cachefile)
- oldlog = pickle.load(open(cachefile))
- ui.note(_('cache has %d log entries\n') % len(oldlog))
- except Exception, e:
- ui.note(_('error reading cache: %r\n') % e)
-
- if oldlog:
- date = oldlog[-1].date # last commit date as a (time,tz) tuple
- date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
-
- # build the CVS commandline
- cmd = ['cvs', '-q']
- if root:
- cmd.append('-d%s' % root)
- p = util.normpath(getrepopath(root))
- if not p.endswith('/'):
- p += '/'
- prefix = p + util.normpath(prefix)
- cmd.append(['log', 'rlog'][rlog])
- if date:
- # no space between option and date string
- cmd.append('-d>%s' % date)
- cmd.append(directory)
-
- # state machine begins here
- tags = {} # dictionary of revisions on current file with their tags
- branchmap = {} # mapping between branch names and revision numbers
- state = 0
- store = False # set when a new record can be appended
-
- cmd = [util.shellquote(arg) for arg in cmd]
- ui.note(_("running %s\n") % (' '.join(cmd)))
- ui.debug(_("prefix=%r directory=%r root=%r\n") % (prefix, directory, root))
-
- pfp = util.popen(' '.join(cmd))
- peek = pfp.readline()
- while True:
- line = peek
- if line == '':
- break
- peek = pfp.readline()
- if line.endswith('\n'):
- line = line[:-1]
- #ui.debug('state=%d line=%r\n' % (state, line))
-
- if state == 0:
- # initial state, consume input until we see 'RCS file'
- match = re_00.match(line)
- if match:
- rcs = match.group(1)
- tags = {}
- if rlog:
- filename = util.normpath(rcs[:-2])
- if filename.startswith(prefix):
- filename = filename[len(prefix):]
- if filename.startswith('/'):
- filename = filename[1:]
- if filename.startswith('Attic/'):
- filename = filename[6:]
- else:
- filename = filename.replace('/Attic/', '/')
- state = 2
- continue
- state = 1
- continue
- match = re_01.match(line)
- if match:
- raise Exception(match.group(1))
- match = re_02.match(line)
- if match:
- raise Exception(match.group(2))
- if re_03.match(line):
- raise Exception(line)
-
- elif state == 1:
- # expect 'Working file' (only when using log instead of rlog)
- match = re_10.match(line)
- assert match, _('RCS file must be followed by working file')
- filename = util.normpath(match.group(1))
- state = 2
-
- elif state == 2:
- # expect 'symbolic names'
- if re_20.match(line):
- branchmap = {}
- state = 3
-
- elif state == 3:
- # read the symbolic names and store as tags
- match = re_30.match(line)
- if match:
- rev = [int(x) for x in match.group(2).split('.')]
-
- # Convert magic branch number to an odd-numbered one
- revn = len(rev)
- if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
- rev = rev[:-2] + rev[-1:]
- rev = tuple(rev)
-
- if rev not in tags:
- tags[rev] = []
- tags[rev].append(match.group(1))
- branchmap[match.group(1)] = match.group(2)
-
- elif re_31.match(line):
- state = 5
- elif re_32.match(line):
- state = 0
-
- elif state == 4:
- # expecting '------' separator before first revision
- if re_31.match(line):
- state = 5
- else:
- assert not re_32.match(line), _('must have at least '
- 'some revisions')
-
- elif state == 5:
- # expecting revision number and possibly (ignored) lock indication
- # we create the logentry here from values stored in states 0 to 4,
- # as this state is re-entered for subsequent revisions of a file.
- match = re_50.match(line)
- assert match, _('expected revision number')
- e = logentry(rcs=scache(rcs), file=scache(filename),
- revision=tuple([int(x) for x in match.group(1).split('.')]),
- branches=[], parent=None,
- synthetic=False)
- state = 6
-
- elif state == 6:
- # expecting date, author, state, lines changed
- match = re_60.match(line)
- assert match, _('revision must be followed by date line')
- d = match.group(1)
- if d[2] == '/':
- # Y2K
- d = '19' + d
-
- if len(d.split()) != 3:
- # cvs log dates always in GMT
- d = d + ' UTC'
- e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
- '%Y/%m/%d %H:%M:%S',
- '%Y-%m-%d %H:%M:%S'])
- e.author = scache(match.group(2))
- e.dead = match.group(3).lower() == 'dead'
-
- if match.group(5):
- if match.group(6):
- e.lines = (int(match.group(5)), int(match.group(6)))
- else:
- e.lines = (int(match.group(5)), 0)
- elif match.group(6):
- e.lines = (0, int(match.group(6)))
- else:
- e.lines = None
-
- if match.group(7): # cvsnt mergepoint
- myrev = match.group(8).split('.')
- if len(myrev) == 2: # head
- e.mergepoint = 'HEAD'
- else:
- myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
- branches = [b for b in branchmap if branchmap[b] == myrev]
- assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
- e.mergepoint = branches[0]
- else:
- e.mergepoint = None
- e.comment = []
- state = 7
-
- elif state == 7:
- # read the revision numbers of branches that start at this revision
- # or store the commit log message otherwise
- m = re_70.match(line)
- if m:
- e.branches = [tuple([int(y) for y in x.strip().split('.')])
- for x in m.group(1).split(';')]
- state = 8
- elif re_31.match(line) and re_50.match(peek):
- state = 5
- store = True
- elif re_32.match(line):
- state = 0
- store = True
- else:
- e.comment.append(line)
-
- elif state == 8:
- # store commit log message
- if re_31.match(line):
- state = 5
- store = True
- elif re_32.match(line):
- state = 0
- store = True
- else:
- e.comment.append(line)
-
- # When a file is added on a branch B1, CVS creates a synthetic
- # dead trunk revision 1.1 so that the branch has a root.
- # Likewise, if you merge such a file to a later branch B2 (one
- # that already existed when the file was added on B1), CVS
- # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
- # these revisions now, but mark them synthetic so
- # createchangeset() can take care of them.
- if (store and
- e.dead and
- e.revision[-1] == 1 and # 1.1 or 1.1.x.1
- len(e.comment) == 1 and
- file_added_re.match(e.comment[0])):
- ui.debug(_('found synthetic revision in %s: %r\n')
- % (e.rcs, e.comment[0]))
- e.synthetic = True
-
- if store:
- # clean up the results and save in the log.
- store = False
- e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
- e.comment = scache('\n'.join(e.comment))
-
- revn = len(e.revision)
- if revn > 3 and (revn % 2) == 0:
- e.branch = tags.get(e.revision[:-1], [None])[0]
- else:
- e.branch = None
-
- # find the branches starting from this revision
- branchpoints = set()
- for branch, revision in branchmap.iteritems():
- revparts = tuple([int(i) for i in revision.split('.')])
- if revparts[-2] == 0 and revparts[-1] % 2 == 0:
- # normal branch
- if revparts[:-2] == e.revision:
- branchpoints.add(branch)
- elif revparts == (1,1,1): # vendor branch
- if revparts in e.branches:
- branchpoints.add(branch)
- e.branchpoints = branchpoints
-
- log.append(e)
-
- if len(log) % 100 == 0:
- ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
-
- log.sort(key=lambda x: (x.rcs, x.revision))
-
- # find parent revisions of individual files
- versions = {}
- for e in log:
- branch = e.revision[:-1]
- p = versions.get((e.rcs, branch), None)
- if p is None:
- p = e.revision[:-2]
- e.parent = p
- versions[(e.rcs, branch)] = e.revision
-
- # update the log cache
- if cache:
- if log:
- # join up the old and new logs
- log.sort(key=lambda x: x.date)
-
- if oldlog and oldlog[-1].date >= log[0].date:
- raise logerror('Log cache overlaps with new log entries,'
- ' re-run without cache.')
-
- log = oldlog + log
-
- # write the new cachefile
- ui.note(_('writing cvs log cache %s\n') % cachefile)
- pickle.dump(log, open(cachefile, 'w'))
- else:
- log = oldlog
-
- ui.status(_('%d log entries\n') % len(log))
-
- return log
-
-
-class changeset(object):
- '''Class changeset has the following attributes:
- .id - integer identifying this changeset (list index)
- .author - author name as CVS knows it
- .branch - name of branch this changeset is on, or None
- .comment - commit message
- .date - the commit date as a (time,tz) tuple
- .entries - list of logentry objects in this changeset
- .parents - list of one or two parent changesets
- .tags - list of tags on this changeset
- .synthetic - from synthetic revision "file ... added on branch ..."
- .mergepoint- the branch that has been merged from
- (if present in rlog output)
- .branchpoints- the branches that start at the current entry
- '''
- def __init__(self, **entries):
- self.__dict__.update(entries)
-
- def __repr__(self):
- return "<%s at 0x%x: %s>" % (self.__class__.__name__,
- id(self),
- getattr(self, 'id', "(no id)"))
-
-def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
- '''Convert log into changesets.'''
-
- ui.status(_('creating changesets\n'))
-
- # Merge changesets
-
- log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
-
- changesets = []
- files = set()
- c = None
- for i, e in enumerate(log):
-
- # Check if log entry belongs to the current changeset or not.
-
- # Since CVS is file centric, two different file revisions with
- # different branchpoints should be treated as belonging to two
- # different changesets (and the ordering is important and not
- # honoured by cvsps at this point).
- #
- # Consider the following case:
- # foo 1.1 branchpoints: [MYBRANCH]
- # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
- #
- # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
- # later version of foo may be in MYBRANCH2, so foo should be the
- # first changeset and bar the next and MYBRANCH and MYBRANCH2
- # should both start off of the bar changeset. No provisions are
- # made to ensure that this is, in fact, what happens.
- if not (c and
- e.comment == c.comment and
- e.author == c.author and
- e.branch == c.branch and
- (not hasattr(e, 'branchpoints') or
- not hasattr (c, 'branchpoints') or
- e.branchpoints == c.branchpoints) and
- ((c.date[0] + c.date[1]) <=
- (e.date[0] + e.date[1]) <=
- (c.date[0] + c.date[1]) + fuzz) and
- e.file not in files):
- c = changeset(comment=e.comment, author=e.author,
- branch=e.branch, date=e.date, entries=[],
- mergepoint=getattr(e, 'mergepoint', None),
- branchpoints=getattr(e, 'branchpoints', set()))
- changesets.append(c)
- files = set()
- if len(changesets) % 100 == 0:
- t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
- ui.status(util.ellipsis(t, 80) + '\n')
-
- c.entries.append(e)
- files.add(e.file)
- c.date = e.date # changeset date is date of latest commit in it
-
- # Mark synthetic changesets
-
- for c in changesets:
- # Synthetic revisions always get their own changeset, because
- # the log message includes the filename. E.g. if you add file3
- # and file4 on a branch, you get four log entries and three
- # changesets:
- # "File file3 was added on branch ..." (synthetic, 1 entry)
- # "File file4 was added on branch ..." (synthetic, 1 entry)
- # "Add file3 and file4 to fix ..." (real, 2 entries)
- # Hence the check for 1 entry here.
- synth = getattr(c.entries[0], 'synthetic', None)
- c.synthetic = (len(c.entries) == 1 and synth)
-
- # Sort files in each changeset
-
- for c in changesets:
- def pathcompare(l, r):
- 'Mimic cvsps sorting order'
- l = l.split('/')
- r = r.split('/')
- nl = len(l)
- nr = len(r)
- n = min(nl, nr)
- for i in range(n):
- if i + 1 == nl and nl < nr:
- return -1
- elif i + 1 == nr and nl > nr:
- return +1
- elif l[i] < r[i]:
- return -1
- elif l[i] > r[i]:
- return +1
- return 0
- def entitycompare(l, r):
- return pathcompare(l.file, r.file)
-
- c.entries.sort(entitycompare)
-
- # Sort changesets by date
-
- def cscmp(l, r):
- d = sum(l.date) - sum(r.date)
- if d:
- return d
-
- # detect vendor branches and initial commits on a branch
- le = {}
- for e in l.entries:
- le[e.rcs] = e.revision
- re = {}
- for e in r.entries:
- re[e.rcs] = e.revision
-
- d = 0
- for e in l.entries:
- if re.get(e.rcs, None) == e.parent:
- assert not d
- d = 1
- break
-
- for e in r.entries:
- if le.get(e.rcs, None) == e.parent:
- assert not d
- d = -1
- break
-
- return d
-
- changesets.sort(cscmp)
-
- # Collect tags
-
- globaltags = {}
- for c in changesets:
- for e in c.entries:
- for tag in e.tags:
- # remember which is the latest changeset to have this tag
- globaltags[tag] = c
-
- for c in changesets:
- tags = set()
- for e in c.entries:
- tags.update(e.tags)
- # remember tags only if this is the latest changeset to have it
- c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
-
- # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
- # by inserting dummy changesets with two parents, and handle
- # {{mergefrombranch BRANCHNAME}} by setting two parents.
-
- if mergeto is None:
- mergeto = r'{{mergetobranch ([-\w]+)}}'
- if mergeto:
- mergeto = re.compile(mergeto)
-
- if mergefrom is None:
- mergefrom = r'{{mergefrombranch ([-\w]+)}}'
- if mergefrom:
- mergefrom = re.compile(mergefrom)
-
- versions = {} # changeset index where we saw any particular file version
- branches = {} # changeset index where we saw a branch
- n = len(changesets)
- i = 0
- while i<n:
- c = changesets[i]
-
- for f in c.entries:
- versions[(f.rcs, f.revision)] = i
-
- p = None
- if c.branch in branches:
- p = branches[c.branch]
- else:
- # first changeset on a new branch
- # the parent is a changeset with the branch in its
- # branchpoints such that it is the latest possible
- # commit without any intervening, unrelated commits.
-
- for candidate in xrange(i):
- if c.branch not in changesets[candidate].branchpoints:
- if p is not None:
- break
- continue
- p = candidate
-
- c.parents = []
- if p is not None:
- p = changesets[p]
-
- # Ensure no changeset has a synthetic changeset as a parent.
- while p.synthetic:
- assert len(p.parents) <= 1, \
- _('synthetic changeset cannot have multiple parents')
- if p.parents:
- p = p.parents[0]
- else:
- p = None
- break
-
- if p is not None:
- c.parents.append(p)
-
- if c.mergepoint:
- if c.mergepoint == 'HEAD':
- c.mergepoint = None
- c.parents.append(changesets[branches[c.mergepoint]])
-
- if mergefrom:
- m = mergefrom.search(c.comment)
- if m:
- m = m.group(1)
- if m == 'HEAD':
- m = None
- try:
- candidate = changesets[branches[m]]
- except KeyError:
- ui.warn(_("warning: CVS commit message references "
- "non-existent branch %r:\n%s\n")
- % (m, c.comment))
- if m in branches and c.branch != m and not candidate.synthetic:
- c.parents.append(candidate)
-
- if mergeto:
- m = mergeto.search(c.comment)
- if m:
- try:
- m = m.group(1)
- if m == 'HEAD':
- m = None
- except:
- m = None # if no group found then merge to HEAD
- if m in branches and c.branch != m:
- # insert empty changeset for merge
- cc = changeset(author=c.author, branch=m, date=c.date,
- comment='convert-repo: CVS merge from branch %s' % c.branch,
- entries=[], tags=[], parents=[changesets[branches[m]], c])
- changesets.insert(i + 1, cc)
- branches[m] = i + 1
-
- # adjust our loop counters now we have inserted a new entry
- n += 1
- i += 2
- continue
-
- branches[c.branch] = i
- i += 1
-
- # Drop synthetic changesets (safe now that we have ensured no other
- # changesets can have them as parents).
- i = 0
- while i < len(changesets):
- if changesets[i].synthetic:
- del changesets[i]
- else:
- i += 1
-
- # Number changesets
-
- for i, c in enumerate(changesets):
- c.id = i + 1
-
- ui.status(_('%d changeset entries\n') % len(changesets))
-
- return changesets
-
-
-def debugcvsps(ui, *args, **opts):
- '''Read CVS rlog for current directory or named path in
- repository, and convert the log to changesets based on matching
- commit log entries and dates.
- '''
- if opts["new_cache"]:
- cache = "write"
- elif opts["update_cache"]:
- cache = "update"
- else:
- cache = None
-
- revisions = opts["revisions"]
-
- try:
- if args:
- log = []
- for d in args:
- log += createlog(ui, d, root=opts["root"], cache=cache)
- else:
- log = createlog(ui, root=opts["root"], cache=cache)
- except logerror, e:
- ui.write("%r\n"%e)
- return
-
- changesets = createchangeset(ui, log, opts["fuzz"])
- del log
-
- # Print changesets (optionally filtered)
-
- off = len(revisions)
- branches = {} # latest version number in each branch
- ancestors = {} # parent branch
- for cs in changesets:
-
- if opts["ancestors"]:
- if cs.branch not in branches and cs.parents and cs.parents[0].id:
- ancestors[cs.branch] = (changesets[cs.parents[0].id-1].branch,
- cs.parents[0].id)
- branches[cs.branch] = cs.id
-
- # limit by branches
- if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
- continue
-
- if not off:
- # Note: trailing spaces on several lines here are needed to have
- # bug-for-bug compatibility with cvsps.
- ui.write('---------------------\n')
- ui.write('PatchSet %d \n' % cs.id)
- ui.write('Date: %s\n' % util.datestr(cs.date,
- '%Y/%m/%d %H:%M:%S %1%2'))
- ui.write('Author: %s\n' % cs.author)
- ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
- ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags)>1],
- ','.join(cs.tags) or '(none)'))
- branchpoints = getattr(cs, 'branchpoints', None)
- if branchpoints:
- ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
- if opts["parents"] and cs.parents:
- if len(cs.parents)>1:
- ui.write('Parents: %s\n' % (','.join([str(p.id) for p in cs.parents])))
- else:
- ui.write('Parent: %d\n' % cs.parents[0].id)
-
- if opts["ancestors"]:
- b = cs.branch
- r = []
- while b:
- b, c = ancestors[b]
- r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
- if r:
- ui.write('Ancestors: %s\n' % (','.join(r)))
-
- ui.write('Log:\n')
- ui.write('%s\n\n' % cs.comment)
- ui.write('Members: \n')
- for f in cs.entries:
- fn = f.file
- if fn.startswith(opts["prefix"]):
- fn = fn[len(opts["prefix"]):]
- ui.write('\t%s:%s->%s%s \n' % (fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
- '.'.join([str(x) for x in f.revision]), ['', '(DEAD)'][f.dead]))
- ui.write('\n')
-
- # have we seen the start tag?
- if revisions and off:
- if revisions[0] == str(cs.id) or \
- revisions[0] in cs.tags:
- off = False
-
- # see if we reached the end tag
- if len(revisions)>1 and not off:
- if revisions[1] == str(cs.id) or \
- revisions[1] in cs.tags:
- break
diff --git a/sys/lib/python/hgext/convert/darcs.py b/sys/lib/python/hgext/convert/darcs.py
deleted file mode 100644
index fd51f38bd..000000000
--- a/sys/lib/python/hgext/convert/darcs.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# darcs.py - darcs support for the convert extension
-#
-# Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from common import NoRepo, checktool, commandline, commit, converter_source
-from mercurial.i18n import _
-from mercurial import util
-import os, shutil, tempfile
-
-# The naming drift of ElementTree is fun!
-
-try: from xml.etree.cElementTree import ElementTree
-except ImportError:
- try: from xml.etree.ElementTree import ElementTree
- except ImportError:
- try: from elementtree.cElementTree import ElementTree
- except ImportError:
- try: from elementtree.ElementTree import ElementTree
- except ImportError: ElementTree = None
-
-
-class darcs_source(converter_source, commandline):
- def __init__(self, ui, path, rev=None):
- converter_source.__init__(self, ui, path, rev=rev)
- commandline.__init__(self, ui, 'darcs')
-
- # check for _darcs, ElementTree, _darcs/inventory so that we can
- # easily skip test-convert-darcs if ElementTree is not around
- if not os.path.exists(os.path.join(path, '_darcs', 'inventories')):
- raise NoRepo("%s does not look like a darcs repo" % path)
-
- if not os.path.exists(os.path.join(path, '_darcs')):
- raise NoRepo("%s does not look like a darcs repo" % path)
-
- checktool('darcs')
- version = self.run0('--version').splitlines()[0].strip()
- if version < '2.1':
- raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
- version)
-
- if ElementTree is None:
- raise util.Abort(_("Python ElementTree module is not available"))
-
- self.path = os.path.realpath(path)
-
- self.lastrev = None
- self.changes = {}
- self.parents = {}
- self.tags = {}
-
- def before(self):
- self.tmppath = tempfile.mkdtemp(
- prefix='convert-' + os.path.basename(self.path) + '-')
- output, status = self.run('init', repodir=self.tmppath)
- self.checkexit(status)
-
- tree = self.xml('changes', xml_output=True, summary=True,
- repodir=self.path)
- tagname = None
- child = None
- for elt in tree.findall('patch'):
- node = elt.get('hash')
- name = elt.findtext('name', '')
- if name.startswith('TAG '):
- tagname = name[4:].strip()
- elif tagname is not None:
- self.tags[tagname] = node
- tagname = None
- self.changes[node] = elt
- self.parents[child] = [node]
- child = node
- self.parents[child] = []
-
- def after(self):
- self.ui.debug(_('cleaning up %s\n') % self.tmppath)
- shutil.rmtree(self.tmppath, ignore_errors=True)
-
- def xml(self, cmd, **kwargs):
- etree = ElementTree()
- fp = self._run(cmd, **kwargs)
- etree.parse(fp)
- self.checkexit(fp.close())
- return etree.getroot()
-
- def getheads(self):
- return self.parents[None]
-
- def getcommit(self, rev):
- elt = self.changes[rev]
- date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
- desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
- return commit(author=elt.get('author'), date=util.datestr(date),
- desc=desc.strip(), parents=self.parents[rev])
-
- def pull(self, rev):
- output, status = self.run('pull', self.path, all=True,
- match='hash %s' % rev,
- no_test=True, no_posthook=True,
- external_merge='/bin/false',
- repodir=self.tmppath)
- if status:
- if output.find('We have conflicts in') == -1:
- self.checkexit(status, output)
- output, status = self.run('revert', all=True, repodir=self.tmppath)
- self.checkexit(status, output)
-
- def getchanges(self, rev):
- self.pull(rev)
- copies = {}
- changes = []
- for elt in self.changes[rev].find('summary').getchildren():
- if elt.tag in ('add_directory', 'remove_directory'):
- continue
- if elt.tag == 'move':
- changes.append((elt.get('from'), rev))
- copies[elt.get('from')] = elt.get('to')
- else:
- changes.append((elt.text.strip(), rev))
- self.lastrev = rev
- return sorted(changes), copies
-
- def getfile(self, name, rev):
- if rev != self.lastrev:
- raise util.Abort(_('internal calling inconsistency'))
- return open(os.path.join(self.tmppath, name), 'rb').read()
-
- def getmode(self, name, rev):
- mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
- return (mode & 0111) and 'x' or ''
-
- def gettags(self):
- return self.tags
diff --git a/sys/lib/python/hgext/convert/filemap.py b/sys/lib/python/hgext/convert/filemap.py
deleted file mode 100644
index 3c8307ae8..000000000
--- a/sys/lib/python/hgext/convert/filemap.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import shlex
-from mercurial.i18n import _
-from mercurial import util
-from common import SKIPREV, converter_source
-
-def rpairs(name):
- yield '.', name
- e = len(name)
- while e != -1:
- yield name[:e], name[e+1:]
- e = name.rfind('/', 0, e)
-
-class filemapper(object):
- '''Map and filter filenames when importing.
- A name can be mapped to itself, a new name, or None (omit from new
- repository).'''
-
- def __init__(self, ui, path=None):
- self.ui = ui
- self.include = {}
- self.exclude = {}
- self.rename = {}
- if path:
- if self.parse(path):
- raise util.Abort(_('errors in filemap'))
-
- def parse(self, path):
- errs = 0
- def check(name, mapping, listname):
- if name in mapping:
- self.ui.warn(_('%s:%d: %r already in %s list\n') %
- (lex.infile, lex.lineno, name, listname))
- return 1
- return 0
- lex = shlex.shlex(open(path), path, True)
- lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
- cmd = lex.get_token()
- while cmd:
- if cmd == 'include':
- name = lex.get_token()
- errs += check(name, self.exclude, 'exclude')
- self.include[name] = name
- elif cmd == 'exclude':
- name = lex.get_token()
- errs += check(name, self.include, 'include')
- errs += check(name, self.rename, 'rename')
- self.exclude[name] = name
- elif cmd == 'rename':
- src = lex.get_token()
- dest = lex.get_token()
- errs += check(src, self.exclude, 'exclude')
- self.rename[src] = dest
- elif cmd == 'source':
- errs += self.parse(lex.get_token())
- else:
- self.ui.warn(_('%s:%d: unknown directive %r\n') %
- (lex.infile, lex.lineno, cmd))
- errs += 1
- cmd = lex.get_token()
- return errs
-
- def lookup(self, name, mapping):
- for pre, suf in rpairs(name):
- try:
- return mapping[pre], pre, suf
- except KeyError:
- pass
- return '', name, ''
-
- def __call__(self, name):
- if self.include:
- inc = self.lookup(name, self.include)[0]
- else:
- inc = name
- if self.exclude:
- exc = self.lookup(name, self.exclude)[0]
- else:
- exc = ''
- if not inc or exc:
- return None
- newpre, pre, suf = self.lookup(name, self.rename)
- if newpre:
- if newpre == '.':
- return suf
- if suf:
- return newpre + '/' + suf
- return newpre
- return name
-
- def active(self):
- return bool(self.include or self.exclude or self.rename)
-
-# This class does two additional things compared to a regular source:
-#
-# - Filter and rename files. This is mostly wrapped by the filemapper
-# class above. We hide the original filename in the revision that is
-# returned by getchanges to be able to find things later in getfile
-# and getmode.
-#
-# - Return only revisions that matter for the files we're interested in.
-# This involves rewriting the parents of the original revision to
-# create a graph that is restricted to those revisions.
-#
-# This set of revisions includes not only revisions that directly
-# touch files we're interested in, but also merges that merge two
-# or more interesting revisions.
-
-class filemap_source(converter_source):
- def __init__(self, ui, baseconverter, filemap):
- super(filemap_source, self).__init__(ui)
- self.base = baseconverter
- self.filemapper = filemapper(ui, filemap)
- self.commits = {}
- # if a revision rev has parent p in the original revision graph, then
- # rev will have parent self.parentmap[p] in the restricted graph.
- self.parentmap = {}
- # self.wantedancestors[rev] is the set of all ancestors of rev that
- # are in the restricted graph.
- self.wantedancestors = {}
- self.convertedorder = None
- self._rebuilt = False
- self.origparents = {}
- self.children = {}
- self.seenchildren = {}
-
- def before(self):
- self.base.before()
-
- def after(self):
- self.base.after()
-
- def setrevmap(self, revmap):
- # rebuild our state to make things restartable
- #
- # To avoid calling getcommit for every revision that has already
- # been converted, we rebuild only the parentmap, delaying the
- # rebuild of wantedancestors until we need it (i.e. until a
- # merge).
- #
- # We assume the order argument lists the revisions in
- # topological order, so that we can infer which revisions were
- # wanted by previous runs.
- self._rebuilt = not revmap
- seen = {SKIPREV: SKIPREV}
- dummyset = set()
- converted = []
- for rev in revmap.order:
- mapped = revmap[rev]
- wanted = mapped not in seen
- if wanted:
- seen[mapped] = rev
- self.parentmap[rev] = rev
- else:
- self.parentmap[rev] = seen[mapped]
- self.wantedancestors[rev] = dummyset
- arg = seen[mapped]
- if arg == SKIPREV:
- arg = None
- converted.append((rev, wanted, arg))
- self.convertedorder = converted
- return self.base.setrevmap(revmap)
-
- def rebuild(self):
- if self._rebuilt:
- return True
- self._rebuilt = True
- self.parentmap.clear()
- self.wantedancestors.clear()
- self.seenchildren.clear()
- for rev, wanted, arg in self.convertedorder:
- if rev not in self.origparents:
- self.origparents[rev] = self.getcommit(rev).parents
- if arg is not None:
- self.children[arg] = self.children.get(arg, 0) + 1
-
- for rev, wanted, arg in self.convertedorder:
- parents = self.origparents[rev]
- if wanted:
- self.mark_wanted(rev, parents)
- else:
- self.mark_not_wanted(rev, arg)
- self._discard(arg, *parents)
-
- return True
-
- def getheads(self):
- return self.base.getheads()
-
- def getcommit(self, rev):
- # We want to save a reference to the commit objects to be able
- # to rewrite their parents later on.
- c = self.commits[rev] = self.base.getcommit(rev)
- for p in c.parents:
- self.children[p] = self.children.get(p, 0) + 1
- return c
-
- def _discard(self, *revs):
- for r in revs:
- if r is None:
- continue
- self.seenchildren[r] = self.seenchildren.get(r, 0) + 1
- if self.seenchildren[r] == self.children[r]:
- del self.wantedancestors[r]
- del self.parentmap[r]
- del self.seenchildren[r]
- if self._rebuilt:
- del self.children[r]
-
- def wanted(self, rev, i):
- # Return True if we're directly interested in rev.
- #
- # i is an index selecting one of the parents of rev (if rev
- # has no parents, i is None). getchangedfiles will give us
- # the list of files that are different in rev and in the parent
- # indicated by i. If we're interested in any of these files,
- # we're interested in rev.
- try:
- files = self.base.getchangedfiles(rev, i)
- except NotImplementedError:
- raise util.Abort(_("source repository doesn't support --filemap"))
- for f in files:
- if self.filemapper(f):
- return True
- return False
-
- def mark_not_wanted(self, rev, p):
- # Mark rev as not interesting and update data structures.
-
- if p is None:
- # A root revision. Use SKIPREV to indicate that it doesn't
- # map to any revision in the restricted graph. Put SKIPREV
- # in the set of wanted ancestors to simplify code elsewhere
- self.parentmap[rev] = SKIPREV
- self.wantedancestors[rev] = set((SKIPREV,))
- return
-
- # Reuse the data from our parent.
- self.parentmap[rev] = self.parentmap[p]
- self.wantedancestors[rev] = self.wantedancestors[p]
-
- def mark_wanted(self, rev, parents):
- # Mark rev ss wanted and update data structures.
-
- # rev will be in the restricted graph, so children of rev in
- # the original graph should still have rev as a parent in the
- # restricted graph.
- self.parentmap[rev] = rev
-
- # The set of wanted ancestors of rev is the union of the sets
- # of wanted ancestors of its parents. Plus rev itself.
- wrev = set()
- for p in parents:
- wrev.update(self.wantedancestors[p])
- wrev.add(rev)
- self.wantedancestors[rev] = wrev
-
- def getchanges(self, rev):
- parents = self.commits[rev].parents
- if len(parents) > 1:
- self.rebuild()
-
- # To decide whether we're interested in rev we:
- #
- # - calculate what parents rev will have if it turns out we're
- # interested in it. If it's going to have more than 1 parent,
- # we're interested in it.
- #
- # - otherwise, we'll compare it with the single parent we found.
- # If any of the files we're interested in is different in the
- # the two revisions, we're interested in rev.
-
- # A parent p is interesting if its mapped version (self.parentmap[p]):
- # - is not SKIPREV
- # - is still not in the list of parents (we don't want duplicates)
- # - is not an ancestor of the mapped versions of the other parents
- mparents = []
- wp = None
- for i, p1 in enumerate(parents):
- mp1 = self.parentmap[p1]
- if mp1 == SKIPREV or mp1 in mparents:
- continue
- for p2 in parents:
- if p1 == p2 or mp1 == self.parentmap[p2]:
- continue
- if mp1 in self.wantedancestors[p2]:
- break
- else:
- mparents.append(mp1)
- wp = i
-
- if wp is None and parents:
- wp = 0
-
- self.origparents[rev] = parents
-
- if len(mparents) < 2 and not self.wanted(rev, wp):
- # We don't want this revision.
- # Update our state and tell the convert process to map this
- # revision to the same revision its parent as mapped to.
- p = None
- if parents:
- p = parents[wp]
- self.mark_not_wanted(rev, p)
- self.convertedorder.append((rev, False, p))
- self._discard(*parents)
- return self.parentmap[rev]
-
- # We want this revision.
- # Rewrite the parents of the commit object
- self.commits[rev].parents = mparents
- self.mark_wanted(rev, parents)
- self.convertedorder.append((rev, True, None))
- self._discard(*parents)
-
- # Get the real changes and do the filtering/mapping.
- # To be able to get the files later on in getfile and getmode,
- # we hide the original filename in the rev part of the return
- # value.
- changes, copies = self.base.getchanges(rev)
- newnames = {}
- files = []
- for f, r in changes:
- newf = self.filemapper(f)
- if newf:
- files.append((newf, (f, r)))
- newnames[f] = newf
-
- ncopies = {}
- for c in copies:
- newc = self.filemapper(c)
- if newc:
- newsource = self.filemapper(copies[c])
- if newsource:
- ncopies[newc] = newsource
-
- return files, ncopies
-
- def getfile(self, name, rev):
- realname, realrev = rev
- return self.base.getfile(realname, realrev)
-
- def getmode(self, name, rev):
- realname, realrev = rev
- return self.base.getmode(realname, realrev)
-
- def gettags(self):
- return self.base.gettags()
-
- def hasnativeorder(self):
- return self.base.hasnativeorder()
-
- def lookuprev(self, rev):
- return self.base.lookuprev(rev)
diff --git a/sys/lib/python/hgext/convert/git.py b/sys/lib/python/hgext/convert/git.py
deleted file mode 100644
index d529744ac..000000000
--- a/sys/lib/python/hgext/convert/git.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# git.py - git support for the convert extension
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os
-from mercurial import util
-
-from common import NoRepo, commit, converter_source, checktool
-
-class convert_git(converter_source):
- # Windows does not support GIT_DIR= construct while other systems
- # cannot remove environment variable. Just assume none have
- # both issues.
- if hasattr(os, 'unsetenv'):
- def gitcmd(self, s):
- prevgitdir = os.environ.get('GIT_DIR')
- os.environ['GIT_DIR'] = self.path
- try:
- return util.popen(s, 'rb')
- finally:
- if prevgitdir is None:
- del os.environ['GIT_DIR']
- else:
- os.environ['GIT_DIR'] = prevgitdir
- else:
- def gitcmd(self, s):
- return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')
-
- def __init__(self, ui, path, rev=None):
- super(convert_git, self).__init__(ui, path, rev=rev)
-
- if os.path.isdir(path + "/.git"):
- path += "/.git"
- if not os.path.exists(path + "/objects"):
- raise NoRepo("%s does not look like a Git repo" % path)
-
- checktool('git', 'git')
-
- self.path = path
-
- def getheads(self):
- if not self.rev:
- return self.gitcmd('git rev-parse --branches --remotes').read().splitlines()
- else:
- fh = self.gitcmd("git rev-parse --verify %s" % self.rev)
- return [fh.read()[:-1]]
-
- def catfile(self, rev, type):
- if rev == "0" * 40: raise IOError()
- fh = self.gitcmd("git cat-file %s %s" % (type, rev))
- return fh.read()
-
- def getfile(self, name, rev):
- return self.catfile(rev, "blob")
-
- def getmode(self, name, rev):
- return self.modecache[(name, rev)]
-
- def getchanges(self, version):
- self.modecache = {}
- fh = self.gitcmd("git diff-tree -z --root -m -r %s" % version)
- changes = []
- seen = set()
- entry = None
- for l in fh.read().split('\x00'):
- if not entry:
- if not l.startswith(':'):
- continue
- entry = l
- continue
- f = l
- if f not in seen:
- seen.add(f)
- entry = entry.split()
- h = entry[3]
- p = (entry[1] == "100755")
- s = (entry[1] == "120000")
- self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
- changes.append((f, h))
- entry = None
- return (changes, {})
-
- def getcommit(self, version):
- c = self.catfile(version, "commit") # read the commit hash
- end = c.find("\n\n")
- message = c[end+2:]
- message = self.recode(message)
- l = c[:end].splitlines()
- parents = []
- author = committer = None
- for e in l[1:]:
- n, v = e.split(" ", 1)
- if n == "author":
- p = v.split()
- tm, tz = p[-2:]
- author = " ".join(p[:-2])
- if author[0] == "<": author = author[1:-1]
- author = self.recode(author)
- if n == "committer":
- p = v.split()
- tm, tz = p[-2:]
- committer = " ".join(p[:-2])
- if committer[0] == "<": committer = committer[1:-1]
- committer = self.recode(committer)
- if n == "parent": parents.append(v)
-
- if committer and committer != author:
- message += "\ncommitter: %s\n" % committer
- tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
- tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
- date = tm + " " + str(tz)
-
- c = commit(parents=parents, date=date, author=author, desc=message,
- rev=version)
- return c
-
- def gettags(self):
- tags = {}
- fh = self.gitcmd('git ls-remote --tags "%s"' % self.path)
- prefix = 'refs/tags/'
- for line in fh:
- line = line.strip()
- if not line.endswith("^{}"):
- continue
- node, tag = line.split(None, 1)
- if not tag.startswith(prefix):
- continue
- tag = tag[len(prefix):-3]
- tags[tag] = node
-
- return tags
-
- def getchangedfiles(self, version, i):
- changes = []
- if i is None:
- fh = self.gitcmd("git diff-tree --root -m -r %s" % version)
- for l in fh:
- if "\t" not in l:
- continue
- m, f = l[:-1].split("\t")
- changes.append(f)
- fh.close()
- else:
- fh = self.gitcmd('git diff-tree --name-only --root -r %s "%s^%s" --'
- % (version, version, i+1))
- changes = [f.rstrip('\n') for f in fh]
- fh.close()
-
- return changes
diff --git a/sys/lib/python/hgext/convert/gnuarch.py b/sys/lib/python/hgext/convert/gnuarch.py
deleted file mode 100644
index 8d2475e18..000000000
--- a/sys/lib/python/hgext/convert/gnuarch.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# gnuarch.py - GNU Arch support for the convert extension
-#
-# Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
-# and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from common import NoRepo, commandline, commit, converter_source
-from mercurial.i18n import _
-from mercurial import util
-import os, shutil, tempfile, stat, locale
-from email.Parser import Parser
-
-class gnuarch_source(converter_source, commandline):
-
- class gnuarch_rev(object):
- def __init__(self, rev):
- self.rev = rev
- self.summary = ''
- self.date = None
- self.author = ''
- self.continuationof = None
- self.add_files = []
- self.mod_files = []
- self.del_files = []
- self.ren_files = {}
- self.ren_dirs = {}
-
- def __init__(self, ui, path, rev=None):
- super(gnuarch_source, self).__init__(ui, path, rev=rev)
-
- if not os.path.exists(os.path.join(path, '{arch}')):
- raise NoRepo(_("%s does not look like a GNU Arch repo") % path)
-
- # Could use checktool, but we want to check for baz or tla.
- self.execmd = None
- if util.find_exe('baz'):
- self.execmd = 'baz'
- else:
- if util.find_exe('tla'):
- self.execmd = 'tla'
- else:
- raise util.Abort(_('cannot find a GNU Arch tool'))
-
- commandline.__init__(self, ui, self.execmd)
-
- self.path = os.path.realpath(path)
- self.tmppath = None
-
- self.treeversion = None
- self.lastrev = None
- self.changes = {}
- self.parents = {}
- self.tags = {}
- self.modecache = {}
- self.catlogparser = Parser()
- self.locale = locale.getpreferredencoding()
- self.archives = []
-
- def before(self):
- # Get registered archives
- self.archives = [i.rstrip('\n')
- for i in self.runlines0('archives', '-n')]
-
- if self.execmd == 'tla':
- output = self.run0('tree-version', self.path)
- else:
- output = self.run0('tree-version', '-d', self.path)
- self.treeversion = output.strip()
-
- # Get name of temporary directory
- version = self.treeversion.split('/')
- self.tmppath = os.path.join(tempfile.gettempdir(),
- 'hg-%s' % version[1])
-
- # Generate parents dictionary
- self.parents[None] = []
- treeversion = self.treeversion
- child = None
- while treeversion:
- self.ui.status(_('analyzing tree version %s...\n') % treeversion)
-
- archive = treeversion.split('/')[0]
- if archive not in self.archives:
- self.ui.status(_('tree analysis stopped because it points to '
- 'an unregistered archive %s...\n') % archive)
- break
-
- # Get the complete list of revisions for that tree version
- output, status = self.runlines('revisions', '-r', '-f', treeversion)
- self.checkexit(status, 'failed retrieveing revisions for %s' % treeversion)
-
- # No new iteration unless a revision has a continuation-of header
- treeversion = None
-
- for l in output:
- rev = l.strip()
- self.changes[rev] = self.gnuarch_rev(rev)
- self.parents[rev] = []
-
- # Read author, date and summary
- catlog, status = self.run('cat-log', '-d', self.path, rev)
- if status:
- catlog = self.run0('cat-archive-log', rev)
- self._parsecatlog(catlog, rev)
-
- # Populate the parents map
- self.parents[child].append(rev)
-
- # Keep track of the current revision as the child of the next
- # revision scanned
- child = rev
-
- # Check if we have to follow the usual incremental history
- # or if we have to 'jump' to a different treeversion given
- # by the continuation-of header.
- if self.changes[rev].continuationof:
- treeversion = '--'.join(self.changes[rev].continuationof.split('--')[:-1])
- break
-
- # If we reached a base-0 revision w/o any continuation-of
- # header, it means the tree history ends here.
- if rev[-6:] == 'base-0':
- break
-
- def after(self):
- self.ui.debug(_('cleaning up %s\n') % self.tmppath)
- shutil.rmtree(self.tmppath, ignore_errors=True)
-
- def getheads(self):
- return self.parents[None]
-
- def getfile(self, name, rev):
- if rev != self.lastrev:
- raise util.Abort(_('internal calling inconsistency'))
-
- # Raise IOError if necessary (i.e. deleted files).
- if not os.path.exists(os.path.join(self.tmppath, name)):
- raise IOError
-
- data, mode = self._getfile(name, rev)
- self.modecache[(name, rev)] = mode
-
- return data
-
- def getmode(self, name, rev):
- return self.modecache[(name, rev)]
-
- def getchanges(self, rev):
- self.modecache = {}
- self._update(rev)
- changes = []
- copies = {}
-
- for f in self.changes[rev].add_files:
- changes.append((f, rev))
-
- for f in self.changes[rev].mod_files:
- changes.append((f, rev))
-
- for f in self.changes[rev].del_files:
- changes.append((f, rev))
-
- for src in self.changes[rev].ren_files:
- to = self.changes[rev].ren_files[src]
- changes.append((src, rev))
- changes.append((to, rev))
- copies[to] = src
-
- for src in self.changes[rev].ren_dirs:
- to = self.changes[rev].ren_dirs[src]
- chgs, cps = self._rendirchanges(src, to);
- changes += [(f, rev) for f in chgs]
- copies.update(cps)
-
- self.lastrev = rev
- return sorted(set(changes)), copies
-
- def getcommit(self, rev):
- changes = self.changes[rev]
- return commit(author=changes.author, date=changes.date,
- desc=changes.summary, parents=self.parents[rev], rev=rev)
-
- def gettags(self):
- return self.tags
-
- def _execute(self, cmd, *args, **kwargs):
- cmdline = [self.execmd, cmd]
- cmdline += args
- cmdline = [util.shellquote(arg) for arg in cmdline]
- cmdline += ['>', util.nulldev, '2>', util.nulldev]
- cmdline = util.quotecommand(' '.join(cmdline))
- self.ui.debug(cmdline, '\n')
- return os.system(cmdline)
-
- def _update(self, rev):
- self.ui.debug(_('applying revision %s...\n') % rev)
- changeset, status = self.runlines('replay', '-d', self.tmppath,
- rev)
- if status:
- # Something went wrong while merging (baz or tla
- # issue?), get latest revision and try from there
- shutil.rmtree(self.tmppath, ignore_errors=True)
- self._obtainrevision(rev)
- else:
- old_rev = self.parents[rev][0]
- self.ui.debug(_('computing changeset between %s and %s...\n')
- % (old_rev, rev))
- self._parsechangeset(changeset, rev)
-
- def _getfile(self, name, rev):
- mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
- if stat.S_ISLNK(mode):
- data = os.readlink(os.path.join(self.tmppath, name))
- mode = mode and 'l' or ''
- else:
- data = open(os.path.join(self.tmppath, name), 'rb').read()
- mode = (mode & 0111) and 'x' or ''
- return data, mode
-
- def _exclude(self, name):
- exclude = [ '{arch}', '.arch-ids', '.arch-inventory' ]
- for exc in exclude:
- if name.find(exc) != -1:
- return True
- return False
-
- def _readcontents(self, path):
- files = []
- contents = os.listdir(path)
- while len(contents) > 0:
- c = contents.pop()
- p = os.path.join(path, c)
- # os.walk could be used, but here we avoid internal GNU
- # Arch files and directories, thus saving a lot time.
- if not self._exclude(p):
- if os.path.isdir(p):
- contents += [os.path.join(c, f) for f in os.listdir(p)]
- else:
- files.append(c)
- return files
-
- def _rendirchanges(self, src, dest):
- changes = []
- copies = {}
- files = self._readcontents(os.path.join(self.tmppath, dest))
- for f in files:
- s = os.path.join(src, f)
- d = os.path.join(dest, f)
- changes.append(s)
- changes.append(d)
- copies[d] = s
- return changes, copies
-
- def _obtainrevision(self, rev):
- self.ui.debug(_('obtaining revision %s...\n') % rev)
- output = self._execute('get', rev, self.tmppath)
- self.checkexit(output)
- self.ui.debug(_('analyzing revision %s...\n') % rev)
- files = self._readcontents(self.tmppath)
- self.changes[rev].add_files += files
-
- def _stripbasepath(self, path):
- if path.startswith('./'):
- return path[2:]
- return path
-
- def _parsecatlog(self, data, rev):
- try:
- catlog = self.catlogparser.parsestr(data)
-
- # Commit date
- self.changes[rev].date = util.datestr(
- util.strdate(catlog['Standard-date'],
- '%Y-%m-%d %H:%M:%S'))
-
- # Commit author
- self.changes[rev].author = self.recode(catlog['Creator'])
-
- # Commit description
- self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
- catlog.get_payload()))
- self.changes[rev].summary = self.recode(self.changes[rev].summary)
-
- # Commit revision origin when dealing with a branch or tag
- if catlog.has_key('Continuation-of'):
- self.changes[rev].continuationof = self.recode(catlog['Continuation-of'])
- except Exception:
- raise util.Abort(_('could not parse cat-log of %s') % rev)
-
- def _parsechangeset(self, data, rev):
- for l in data:
- l = l.strip()
- # Added file (ignore added directory)
- if l.startswith('A') and not l.startswith('A/'):
- file = self._stripbasepath(l[1:].strip())
- if not self._exclude(file):
- self.changes[rev].add_files.append(file)
- # Deleted file (ignore deleted directory)
- elif l.startswith('D') and not l.startswith('D/'):
- file = self._stripbasepath(l[1:].strip())
- if not self._exclude(file):
- self.changes[rev].del_files.append(file)
- # Modified binary file
- elif l.startswith('Mb'):
- file = self._stripbasepath(l[2:].strip())
- if not self._exclude(file):
- self.changes[rev].mod_files.append(file)
- # Modified link
- elif l.startswith('M->'):
- file = self._stripbasepath(l[3:].strip())
- if not self._exclude(file):
- self.changes[rev].mod_files.append(file)
- # Modified file
- elif l.startswith('M'):
- file = self._stripbasepath(l[1:].strip())
- if not self._exclude(file):
- self.changes[rev].mod_files.append(file)
- # Renamed file (or link)
- elif l.startswith('=>'):
- files = l[2:].strip().split(' ')
- if len(files) == 1:
- files = l[2:].strip().split('\t')
- src = self._stripbasepath(files[0])
- dst = self._stripbasepath(files[1])
- if not self._exclude(src) and not self._exclude(dst):
- self.changes[rev].ren_files[src] = dst
- # Conversion from file to link or from link to file (modified)
- elif l.startswith('ch'):
- file = self._stripbasepath(l[2:].strip())
- if not self._exclude(file):
- self.changes[rev].mod_files.append(file)
- # Renamed directory
- elif l.startswith('/>'):
- dirs = l[2:].strip().split(' ')
- if len(dirs) == 1:
- dirs = l[2:].strip().split('\t')
- src = self._stripbasepath(dirs[0])
- dst = self._stripbasepath(dirs[1])
- if not self._exclude(src) and not self._exclude(dst):
- self.changes[rev].ren_dirs[src] = dst
diff --git a/sys/lib/python/hgext/convert/hg.py b/sys/lib/python/hgext/convert/hg.py
deleted file mode 100644
index 060c1430a..000000000
--- a/sys/lib/python/hgext/convert/hg.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# hg.py - hg backend for convert extension
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-# Notes for hg->hg conversion:
-#
-# * Old versions of Mercurial didn't trim the whitespace from the ends
-# of commit messages, but new versions do. Changesets created by
-# those older versions, then converted, may thus have different
-# hashes for changesets that are otherwise identical.
-#
-# * Using "--config convert.hg.saverev=true" will make the source
-# identifier to be stored in the converted revision. This will cause
-# the converted revision to have a different identity than the
-# source.
-
-
-import os, time, cStringIO
-from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
-from mercurial import hg, util, context, error
-
-from common import NoRepo, commit, converter_source, converter_sink
-
-class mercurial_sink(converter_sink):
- def __init__(self, ui, path):
- converter_sink.__init__(self, ui, path)
- self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
- self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
- self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
- self.lastbranch = None
- if os.path.isdir(path) and len(os.listdir(path)) > 0:
- try:
- self.repo = hg.repository(self.ui, path)
- if not self.repo.local():
- raise NoRepo(_('%s is not a local Mercurial repo') % path)
- except error.RepoError, err:
- ui.traceback()
- raise NoRepo(err.args[0])
- else:
- try:
- ui.status(_('initializing destination %s repository\n') % path)
- self.repo = hg.repository(self.ui, path, create=True)
- if not self.repo.local():
- raise NoRepo(_('%s is not a local Mercurial repo') % path)
- self.created.append(path)
- except error.RepoError:
- ui.traceback()
- raise NoRepo("could not create hg repo %s as sink" % path)
- self.lock = None
- self.wlock = None
- self.filemapmode = False
-
- def before(self):
- self.ui.debug(_('run hg sink pre-conversion action\n'))
- self.wlock = self.repo.wlock()
- self.lock = self.repo.lock()
-
- def after(self):
- self.ui.debug(_('run hg sink post-conversion action\n'))
- self.lock.release()
- self.wlock.release()
-
- def revmapfile(self):
- return os.path.join(self.path, ".hg", "shamap")
-
- def authorfile(self):
- return os.path.join(self.path, ".hg", "authormap")
-
- def getheads(self):
- h = self.repo.changelog.heads()
- return [ hex(x) for x in h ]
-
- def setbranch(self, branch, pbranches):
- if not self.clonebranches:
- return
-
- setbranch = (branch != self.lastbranch)
- self.lastbranch = branch
- if not branch:
- branch = 'default'
- pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
- pbranch = pbranches and pbranches[0][1] or 'default'
-
- branchpath = os.path.join(self.path, branch)
- if setbranch:
- self.after()
- try:
- self.repo = hg.repository(self.ui, branchpath)
- except:
- self.repo = hg.repository(self.ui, branchpath, create=True)
- self.before()
-
- # pbranches may bring revisions from other branches (merge parents)
- # Make sure we have them, or pull them.
- missings = {}
- for b in pbranches:
- try:
- self.repo.lookup(b[0])
- except:
- missings.setdefault(b[1], []).append(b[0])
-
- if missings:
- self.after()
- for pbranch, heads in missings.iteritems():
- pbranchpath = os.path.join(self.path, pbranch)
- prepo = hg.repository(self.ui, pbranchpath)
- self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
- self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
- self.before()
-
- def _rewritetags(self, source, revmap, data):
- fp = cStringIO.StringIO()
- for line in data.splitlines():
- s = line.split(' ', 1)
- if len(s) != 2:
- continue
- revid = revmap.get(source.lookuprev(s[0]))
- if not revid:
- continue
- fp.write('%s %s\n' % (revid, s[1]))
- return fp.getvalue()
-
- def putcommit(self, files, copies, parents, commit, source, revmap):
-
- files = dict(files)
- def getfilectx(repo, memctx, f):
- v = files[f]
- data = source.getfile(f, v)
- e = source.getmode(f, v)
- if f == '.hgtags':
- data = self._rewritetags(source, revmap, data)
- return context.memfilectx(f, data, 'l' in e, 'x' in e, copies.get(f))
-
- pl = []
- for p in parents:
- if p not in pl:
- pl.append(p)
- parents = pl
- nparents = len(parents)
- if self.filemapmode and nparents == 1:
- m1node = self.repo.changelog.read(bin(parents[0]))[0]
- parent = parents[0]
-
- if len(parents) < 2: parents.append(nullid)
- if len(parents) < 2: parents.append(nullid)
- p2 = parents.pop(0)
-
- text = commit.desc
- extra = commit.extra.copy()
- if self.branchnames and commit.branch:
- extra['branch'] = commit.branch
- if commit.rev:
- extra['convert_revision'] = commit.rev
-
- while parents:
- p1 = p2
- p2 = parents.pop(0)
- ctx = context.memctx(self.repo, (p1, p2), text, files.keys(), getfilectx,
- commit.author, commit.date, extra)
- self.repo.commitctx(ctx)
- text = "(octopus merge fixup)\n"
- p2 = hex(self.repo.changelog.tip())
-
- if self.filemapmode and nparents == 1:
- man = self.repo.manifest
- mnode = self.repo.changelog.read(bin(p2))[0]
- if not man.cmp(m1node, man.revision(mnode)):
- self.ui.status(_("filtering out empty revision\n"))
- self.repo.rollback()
- return parent
- return p2
-
- def puttags(self, tags):
- try:
- parentctx = self.repo[self.tagsbranch]
- tagparent = parentctx.node()
- except error.RepoError:
- parentctx = None
- tagparent = nullid
-
- try:
- oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
- except:
- oldlines = []
-
- newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
- if newlines == oldlines:
- return None
- data = "".join(newlines)
- def getfilectx(repo, memctx, f):
- return context.memfilectx(f, data, False, False, None)
-
- self.ui.status(_("updating tags\n"))
- date = "%s 0" % int(time.mktime(time.gmtime()))
- extra = {'branch': self.tagsbranch}
- ctx = context.memctx(self.repo, (tagparent, None), "update tags",
- [".hgtags"], getfilectx, "convert-repo", date,
- extra)
- self.repo.commitctx(ctx)
- return hex(self.repo.changelog.tip())
-
- def setfilemapmode(self, active):
- self.filemapmode = active
-
-class mercurial_source(converter_source):
- def __init__(self, ui, path, rev=None):
- converter_source.__init__(self, ui, path, rev)
- self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
- self.ignored = set()
- self.saverev = ui.configbool('convert', 'hg.saverev', False)
- try:
- self.repo = hg.repository(self.ui, path)
- # try to provoke an exception if this isn't really a hg
- # repo, but some other bogus compatible-looking url
- if not self.repo.local():
- raise error.RepoError()
- except error.RepoError:
- ui.traceback()
- raise NoRepo("%s is not a local Mercurial repo" % path)
- self.lastrev = None
- self.lastctx = None
- self._changescache = None
- self.convertfp = None
- # Restrict converted revisions to startrev descendants
- startnode = ui.config('convert', 'hg.startrev')
- if startnode is not None:
- try:
- startnode = self.repo.lookup(startnode)
- except error.RepoError:
- raise util.Abort(_('%s is not a valid start revision')
- % startnode)
- startrev = self.repo.changelog.rev(startnode)
- children = {startnode: 1}
- for rev in self.repo.changelog.descendants(startrev):
- children[self.repo.changelog.node(rev)] = 1
- self.keep = children.__contains__
- else:
- self.keep = util.always
-
- def changectx(self, rev):
- if self.lastrev != rev:
- self.lastctx = self.repo[rev]
- self.lastrev = rev
- return self.lastctx
-
- def parents(self, ctx):
- return [p.node() for p in ctx.parents()
- if p and self.keep(p.node())]
-
- def getheads(self):
- if self.rev:
- heads = [self.repo[self.rev].node()]
- else:
- heads = self.repo.heads()
- return [hex(h) for h in heads if self.keep(h)]
-
- def getfile(self, name, rev):
- try:
- return self.changectx(rev)[name].data()
- except error.LookupError, err:
- raise IOError(err)
-
- def getmode(self, name, rev):
- return self.changectx(rev).manifest().flags(name)
-
- def getchanges(self, rev):
- ctx = self.changectx(rev)
- parents = self.parents(ctx)
- if not parents:
- files = sorted(ctx.manifest())
- if self.ignoreerrors:
- # calling getcopies() is a simple way to detect missing
- # revlogs and populate self.ignored
- self.getcopies(ctx, files)
- return [(f, rev) for f in files if f not in self.ignored], {}
- if self._changescache and self._changescache[0] == rev:
- m, a, r = self._changescache[1]
- else:
- m, a, r = self.repo.status(parents[0], ctx.node())[:3]
- # getcopies() detects missing revlogs early, run it before
- # filtering the changes.
- copies = self.getcopies(ctx, m + a)
- changes = [(name, rev) for name in m + a + r
- if name not in self.ignored]
- return sorted(changes), copies
-
- def getcopies(self, ctx, files):
- copies = {}
- for name in files:
- if name in self.ignored:
- continue
- try:
- copysource, copynode = ctx.filectx(name).renamed()
- if copysource in self.ignored or not self.keep(copynode):
- continue
- copies[name] = copysource
- except TypeError:
- pass
- except error.LookupError, e:
- if not self.ignoreerrors:
- raise
- self.ignored.add(name)
- self.ui.warn(_('ignoring: %s\n') % e)
- return copies
-
- def getcommit(self, rev):
- ctx = self.changectx(rev)
- parents = [hex(p) for p in self.parents(ctx)]
- if self.saverev:
- crev = rev
- else:
- crev = None
- return commit(author=ctx.user(), date=util.datestr(ctx.date()),
- desc=ctx.description(), rev=crev, parents=parents,
- branch=ctx.branch(), extra=ctx.extra(),
- sortkey=ctx.rev())
-
- def gettags(self):
- tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
- return dict([(name, hex(node)) for name, node in tags
- if self.keep(node)])
-
- def getchangedfiles(self, rev, i):
- ctx = self.changectx(rev)
- parents = self.parents(ctx)
- if not parents and i is None:
- i = 0
- changes = [], ctx.manifest().keys(), []
- else:
- i = i or 0
- changes = self.repo.status(parents[i], ctx.node())[:3]
- changes = [[f for f in l if f not in self.ignored] for l in changes]
-
- if i == 0:
- self._changescache = (rev, changes)
-
- return changes[0] + changes[1] + changes[2]
-
- def converted(self, rev, destrev):
- if self.convertfp is None:
- self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
- 'a')
- self.convertfp.write('%s %s\n' % (destrev, rev))
- self.convertfp.flush()
-
- def before(self):
- self.ui.debug(_('run hg source pre-conversion action\n'))
-
- def after(self):
- self.ui.debug(_('run hg source post-conversion action\n'))
-
- def hasnativeorder(self):
- return True
-
- def lookuprev(self, rev):
- try:
- return hex(self.repo.lookup(rev))
- except error.RepoError:
- return None
diff --git a/sys/lib/python/hgext/convert/monotone.py b/sys/lib/python/hgext/convert/monotone.py
deleted file mode 100644
index 085510ce9..000000000
--- a/sys/lib/python/hgext/convert/monotone.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# monotone.py - monotone support for the convert extension
-#
-# Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
-# others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os, re
-from mercurial import util
-from common import NoRepo, commit, converter_source, checktool
-from common import commandline
-from mercurial.i18n import _
-
-class monotone_source(converter_source, commandline):
- def __init__(self, ui, path=None, rev=None):
- converter_source.__init__(self, ui, path, rev)
- commandline.__init__(self, ui, 'mtn')
-
- self.ui = ui
- self.path = path
-
- norepo = NoRepo (_("%s does not look like a monotone repo") % path)
- if not os.path.exists(os.path.join(path, '_MTN')):
- # Could be a monotone repository (SQLite db file)
- try:
- header = file(path, 'rb').read(16)
- except:
- header = ''
- if header != 'SQLite format 3\x00':
- raise norepo
-
- # regular expressions for parsing monotone output
- space = r'\s*'
- name = r'\s+"((?:\\"|[^"])*)"\s*'
- value = name
- revision = r'\s+\[(\w+)\]\s*'
- lines = r'(?:.|\n)+'
-
- self.dir_re = re.compile(space + "dir" + name)
- self.file_re = re.compile(space + "file" + name + "content" + revision)
- self.add_file_re = re.compile(space + "add_file" + name + "content" + revision)
- self.patch_re = re.compile(space + "patch" + name + "from" + revision + "to" + revision)
- self.rename_re = re.compile(space + "rename" + name + "to" + name)
- self.delete_re = re.compile(space + "delete" + name)
- self.tag_re = re.compile(space + "tag" + name + "revision" + revision)
- self.cert_re = re.compile(lines + space + "name" + name + "value" + value)
-
- attr = space + "file" + lines + space + "attr" + space
- self.attr_execute_re = re.compile(attr + '"mtn:execute"' + space + '"true"')
-
- # cached data
- self.manifest_rev = None
- self.manifest = None
- self.files = None
- self.dirs = None
-
- checktool('mtn', abort=False)
-
- # test if there are any revisions
- self.rev = None
- try:
- self.getheads()
- except:
- raise norepo
- self.rev = rev
-
- def mtnrun(self, *args, **kwargs):
- kwargs['d'] = self.path
- return self.run0('automate', *args, **kwargs)
-
- def mtnloadmanifest(self, rev):
- if self.manifest_rev == rev:
- return
- self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
- self.manifest_rev = rev
- self.files = {}
- self.dirs = {}
-
- for e in self.manifest:
- m = self.file_re.match(e)
- if m:
- attr = ""
- name = m.group(1)
- node = m.group(2)
- if self.attr_execute_re.match(e):
- attr += "x"
- self.files[name] = (node, attr)
- m = self.dir_re.match(e)
- if m:
- self.dirs[m.group(1)] = True
-
- def mtnisfile(self, name, rev):
- # a non-file could be a directory or a deleted or renamed file
- self.mtnloadmanifest(rev)
- return name in self.files
-
- def mtnisdir(self, name, rev):
- self.mtnloadmanifest(rev)
- return name in self.dirs
-
- def mtngetcerts(self, rev):
- certs = {"author":"<missing>", "date":"<missing>",
- "changelog":"<missing>", "branch":"<missing>"}
- cert_list = self.mtnrun("certs", rev).split('\n\n key "')
- for e in cert_list:
- m = self.cert_re.match(e)
- if m:
- name, value = m.groups()
- value = value.replace(r'\"', '"')
- value = value.replace(r'\\', '\\')
- certs[name] = value
- # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
- # and all times are stored in UTC
- certs["date"] = certs["date"].split('.')[0] + " UTC"
- return certs
-
- # implement the converter_source interface:
-
- def getheads(self):
- if not self.rev:
- return self.mtnrun("leaves").splitlines()
- else:
- return [self.rev]
-
- def getchanges(self, rev):
- #revision = self.mtncmd("get_revision %s" % rev).split("\n\n")
- revision = self.mtnrun("get_revision", rev).split("\n\n")
- files = {}
- ignoremove = {}
- renameddirs = []
- copies = {}
- for e in revision:
- m = self.add_file_re.match(e)
- if m:
- files[m.group(1)] = rev
- ignoremove[m.group(1)] = rev
- m = self.patch_re.match(e)
- if m:
- files[m.group(1)] = rev
- # Delete/rename is handled later when the convert engine
- # discovers an IOError exception from getfile,
- # but only if we add the "from" file to the list of changes.
- m = self.delete_re.match(e)
- if m:
- files[m.group(1)] = rev
- m = self.rename_re.match(e)
- if m:
- toname = m.group(2)
- fromname = m.group(1)
- if self.mtnisfile(toname, rev):
- ignoremove[toname] = 1
- copies[toname] = fromname
- files[toname] = rev
- files[fromname] = rev
- elif self.mtnisdir(toname, rev):
- renameddirs.append((fromname, toname))
-
- # Directory renames can be handled only once we have recorded
- # all new files
- for fromdir, todir in renameddirs:
- renamed = {}
- for tofile in self.files:
- if tofile in ignoremove:
- continue
- if tofile.startswith(todir + '/'):
- renamed[tofile] = fromdir + tofile[len(todir):]
- # Avoid chained moves like:
- # d1(/a) => d3/d1(/a)
- # d2 => d3
- ignoremove[tofile] = 1
- for tofile, fromfile in renamed.items():
- self.ui.debug (_("copying file in renamed directory "
- "from '%s' to '%s'")
- % (fromfile, tofile), '\n')
- files[tofile] = rev
- copies[tofile] = fromfile
- for fromfile in renamed.values():
- files[fromfile] = rev
-
- return (files.items(), copies)
-
- def getmode(self, name, rev):
- self.mtnloadmanifest(rev)
- node, attr = self.files.get(name, (None, ""))
- return attr
-
- def getfile(self, name, rev):
- if not self.mtnisfile(name, rev):
- raise IOError() # file was deleted or renamed
- try:
- return self.mtnrun("get_file_of", name, r=rev)
- except:
- raise IOError() # file was deleted or renamed
-
- def getcommit(self, rev):
- certs = self.mtngetcerts(rev)
- return commit(
- author=certs["author"],
- date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
- desc=certs["changelog"],
- rev=rev,
- parents=self.mtnrun("parents", rev).splitlines(),
- branch=certs["branch"])
-
- def gettags(self):
- tags = {}
- for e in self.mtnrun("tags").split("\n\n"):
- m = self.tag_re.match(e)
- if m:
- tags[m.group(1)] = m.group(2)
- return tags
-
- def getchangedfiles(self, rev, i):
- # This function is only needed to support --filemap
- # ... and we don't support that
- raise NotImplementedError()
diff --git a/sys/lib/python/hgext/convert/p4.py b/sys/lib/python/hgext/convert/p4.py
deleted file mode 100644
index d65867126..000000000
--- a/sys/lib/python/hgext/convert/p4.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#
-# Perforce source for convert extension.
-#
-# Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-#
-
-from mercurial import util
-from mercurial.i18n import _
-
-from common import commit, converter_source, checktool, NoRepo
-import marshal
-import re
-
-def loaditer(f):
- "Yield the dictionary objects generated by p4"
- try:
- while True:
- d = marshal.load(f)
- if not d:
- break
- yield d
- except EOFError:
- pass
-
-class p4_source(converter_source):
- def __init__(self, ui, path, rev=None):
- super(p4_source, self).__init__(ui, path, rev=rev)
-
- if "/" in path and not path.startswith('//'):
- raise NoRepo('%s does not look like a P4 repo' % path)
-
- checktool('p4', abort=False)
-
- self.p4changes = {}
- self.heads = {}
- self.changeset = {}
- self.files = {}
- self.tags = {}
- self.lastbranch = {}
- self.parent = {}
- self.encoding = "latin_1"
- self.depotname = {} # mapping from local name to depot name
- self.modecache = {}
- self.re_type = re.compile("([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)(\+\w+)?$")
- self.re_keywords = re.compile(r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author):[^$\n]*\$")
- self.re_keywords_old = re.compile("\$(Id|Header):[^$\n]*\$")
-
- self._parse(ui, path)
-
- def _parse_view(self, path):
- "Read changes affecting the path"
- cmd = 'p4 -G changes -s submitted "%s"' % path
- stdout = util.popen(cmd)
- for d in loaditer(stdout):
- c = d.get("change", None)
- if c:
- self.p4changes[c] = True
-
- def _parse(self, ui, path):
- "Prepare list of P4 filenames and revisions to import"
- ui.status(_('reading p4 views\n'))
-
- # read client spec or view
- if "/" in path:
- self._parse_view(path)
- if path.startswith("//") and path.endswith("/..."):
- views = {path[:-3]:""}
- else:
- views = {"//": ""}
- else:
- cmd = 'p4 -G client -o "%s"' % path
- clientspec = marshal.load(util.popen(cmd))
-
- views = {}
- for client in clientspec:
- if client.startswith("View"):
- sview, cview = clientspec[client].split()
- self._parse_view(sview)
- if sview.endswith("...") and cview.endswith("..."):
- sview = sview[:-3]
- cview = cview[:-3]
- cview = cview[2:]
- cview = cview[cview.find("/") + 1:]
- views[sview] = cview
-
- # list of changes that affect our source files
- self.p4changes = self.p4changes.keys()
- self.p4changes.sort(key=int)
-
- # list with depot pathnames, longest first
- vieworder = views.keys()
- vieworder.sort(key=len, reverse=True)
-
- # handle revision limiting
- startrev = self.ui.config('convert', 'p4.startrev', default=0)
- self.p4changes = [x for x in self.p4changes
- if ((not startrev or int(x) >= int(startrev)) and
- (not self.rev or int(x) <= int(self.rev)))]
-
- # now read the full changelists to get the list of file revisions
- ui.status(_('collecting p4 changelists\n'))
- lastid = None
- for change in self.p4changes:
- cmd = "p4 -G describe %s" % change
- stdout = util.popen(cmd)
- d = marshal.load(stdout)
-
- desc = self.recode(d["desc"])
- shortdesc = desc.split("\n", 1)[0]
- t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
- ui.status(util.ellipsis(t, 80) + '\n')
-
- if lastid:
- parents = [lastid]
- else:
- parents = []
-
- date = (int(d["time"]), 0) # timezone not set
- c = commit(author=self.recode(d["user"]), date=util.datestr(date),
- parents=parents, desc=desc, branch='', extra={"p4": change})
-
- files = []
- i = 0
- while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
- oldname = d["depotFile%d" % i]
- filename = None
- for v in vieworder:
- if oldname.startswith(v):
- filename = views[v] + oldname[len(v):]
- break
- if filename:
- files.append((filename, d["rev%d" % i]))
- self.depotname[filename] = oldname
- i += 1
- self.changeset[change] = c
- self.files[change] = files
- lastid = change
-
- if lastid:
- self.heads = [lastid]
-
- def getheads(self):
- return self.heads
-
- def getfile(self, name, rev):
- cmd = 'p4 -G print "%s#%s"' % (self.depotname[name], rev)
- stdout = util.popen(cmd)
-
- mode = None
- contents = ""
- keywords = None
-
- for d in loaditer(stdout):
- code = d["code"]
- data = d.get("data")
-
- if code == "error":
- raise IOError(d["generic"], data)
-
- elif code == "stat":
- p4type = self.re_type.match(d["type"])
- if p4type:
- mode = ""
- flags = (p4type.group(1) or "") + (p4type.group(3) or "")
- if "x" in flags:
- mode = "x"
- if p4type.group(2) == "symlink":
- mode = "l"
- if "ko" in flags:
- keywords = self.re_keywords_old
- elif "k" in flags:
- keywords = self.re_keywords
-
- elif code == "text" or code == "binary":
- contents += data
-
- if mode is None:
- raise IOError(0, "bad stat")
-
- self.modecache[(name, rev)] = mode
-
- if keywords:
- contents = keywords.sub("$\\1$", contents)
- if mode == "l" and contents.endswith("\n"):
- contents = contents[:-1]
-
- return contents
-
- def getmode(self, name, rev):
- return self.modecache[(name, rev)]
-
- def getchanges(self, rev):
- return self.files[rev], {}
-
- def getcommit(self, rev):
- return self.changeset[rev]
-
- def gettags(self):
- return self.tags
-
- def getchangedfiles(self, rev, i):
- return sorted([x[0] for x in self.files[rev]])
diff --git a/sys/lib/python/hgext/convert/subversion.py b/sys/lib/python/hgext/convert/subversion.py
deleted file mode 100644
index 5a0367485..000000000
--- a/sys/lib/python/hgext/convert/subversion.py
+++ /dev/null
@@ -1,1136 +0,0 @@
-# Subversion 1.4/1.5 Python API backend
-#
-# Copyright(C) 2007 Daniel Holth et al
-
-import os
-import re
-import sys
-import cPickle as pickle
-import tempfile
-import urllib
-
-from mercurial import strutil, util, encoding
-from mercurial.i18n import _
-
-# Subversion stuff. Works best with very recent Python SVN bindings
-# e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
-# these bindings.
-
-from cStringIO import StringIO
-
-from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
-from common import commandline, converter_source, converter_sink, mapfile
-
-try:
- from svn.core import SubversionException, Pool
- import svn
- import svn.client
- import svn.core
- import svn.ra
- import svn.delta
- import transport
- import warnings
- warnings.filterwarnings('ignore',
- module='svn.core',
- category=DeprecationWarning)
-
-except ImportError:
- pass
-
-class SvnPathNotFound(Exception):
- pass
-
-def geturl(path):
- try:
- return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
- except SubversionException:
- pass
- if os.path.isdir(path):
- path = os.path.normpath(os.path.abspath(path))
- if os.name == 'nt':
- path = '/' + util.normpath(path)
- # Module URL is later compared with the repository URL returned
- # by svn API, which is UTF-8.
- path = encoding.tolocal(path)
- return 'file://%s' % urllib.quote(path)
- return path
-
-def optrev(number):
- optrev = svn.core.svn_opt_revision_t()
- optrev.kind = svn.core.svn_opt_revision_number
- optrev.value.number = number
- return optrev
-
-class changedpath(object):
- def __init__(self, p):
- self.copyfrom_path = p.copyfrom_path
- self.copyfrom_rev = p.copyfrom_rev
- self.action = p.action
-
-def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
- strict_node_history=False):
- protocol = -1
- def receiver(orig_paths, revnum, author, date, message, pool):
- if orig_paths is not None:
- for k, v in orig_paths.iteritems():
- orig_paths[k] = changedpath(v)
- pickle.dump((orig_paths, revnum, author, date, message),
- fp, protocol)
-
- try:
- # Use an ra of our own so that our parent can consume
- # our results without confusing the server.
- t = transport.SvnRaTransport(url=url)
- svn.ra.get_log(t.ra, paths, start, end, limit,
- discover_changed_paths,
- strict_node_history,
- receiver)
- except SubversionException, (inst, num):
- pickle.dump(num, fp, protocol)
- except IOError:
- # Caller may interrupt the iteration
- pickle.dump(None, fp, protocol)
- else:
- pickle.dump(None, fp, protocol)
- fp.close()
- # With large history, cleanup process goes crazy and suddenly
- # consumes *huge* amount of memory. The output file being closed,
- # there is no need for clean termination.
- os._exit(0)
-
-def debugsvnlog(ui, **opts):
- """Fetch SVN log in a subprocess and channel them back to parent to
- avoid memory collection issues.
- """
- util.set_binary(sys.stdin)
- util.set_binary(sys.stdout)
- args = decodeargs(sys.stdin.read())
- get_log_child(sys.stdout, *args)
-
-class logstream(object):
- """Interruptible revision log iterator."""
- def __init__(self, stdout):
- self._stdout = stdout
-
- def __iter__(self):
- while True:
- entry = pickle.load(self._stdout)
- try:
- orig_paths, revnum, author, date, message = entry
- except:
- if entry is None:
- break
- raise SubversionException("child raised exception", entry)
- yield entry
-
- def close(self):
- if self._stdout:
- self._stdout.close()
- self._stdout = None
-
-
-# Check to see if the given path is a local Subversion repo. Verify this by
-# looking for several svn-specific files and directories in the given
-# directory.
-def filecheck(path, proto):
- for x in ('locks', 'hooks', 'format', 'db', ):
- if not os.path.exists(os.path.join(path, x)):
- return False
- return True
-
-# Check to see if a given path is the root of an svn repo over http. We verify
-# this by requesting a version-controlled URL we know can't exist and looking
-# for the svn-specific "not found" XML.
-def httpcheck(path, proto):
- return ('<m:human-readable errcode="160013">' in
- urllib.urlopen('%s://%s/!svn/ver/0/.svn' % (proto, path)).read())
-
-protomap = {'http': httpcheck,
- 'https': httpcheck,
- 'file': filecheck,
- }
-def issvnurl(url):
- try:
- proto, path = url.split('://', 1)
- path = urllib.url2pathname(path)
- except ValueError:
- proto = 'file'
- path = os.path.abspath(url)
- path = path.replace(os.sep, '/')
- check = protomap.get(proto, lambda p, p2: False)
- while '/' in path:
- if check(path, proto):
- return True
- path = path.rsplit('/', 1)[0]
- return False
-
-# SVN conversion code stolen from bzr-svn and tailor
-#
-# Subversion looks like a versioned filesystem, branches structures
-# are defined by conventions and not enforced by the tool. First,
-# we define the potential branches (modules) as "trunk" and "branches"
-# children directories. Revisions are then identified by their
-# module and revision number (and a repository identifier).
-#
-# The revision graph is really a tree (or a forest). By default, a
-# revision parent is the previous revision in the same module. If the
-# module directory is copied/moved from another module then the
-# revision is the module root and its parent the source revision in
-# the parent module. A revision has at most one parent.
-#
-class svn_source(converter_source):
- def __init__(self, ui, url, rev=None):
- super(svn_source, self).__init__(ui, url, rev=rev)
-
- if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
- (os.path.exists(url) and
- os.path.exists(os.path.join(url, '.svn'))) or
- issvnurl(url)):
- raise NoRepo("%s does not look like a Subversion repo" % url)
-
- try:
- SubversionException
- except NameError:
- raise MissingTool(_('Subversion python bindings could not be loaded'))
-
- try:
- version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
- if version < (1, 4):
- raise MissingTool(_('Subversion python bindings %d.%d found, '
- '1.4 or later required') % version)
- except AttributeError:
- raise MissingTool(_('Subversion python bindings are too old, 1.4 '
- 'or later required'))
-
- self.lastrevs = {}
-
- latest = None
- try:
- # Support file://path@rev syntax. Useful e.g. to convert
- # deleted branches.
- at = url.rfind('@')
- if at >= 0:
- latest = int(url[at+1:])
- url = url[:at]
- except ValueError:
- pass
- self.url = geturl(url)
- self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
- try:
- self.transport = transport.SvnRaTransport(url=self.url)
- self.ra = self.transport.ra
- self.ctx = self.transport.client
- self.baseurl = svn.ra.get_repos_root(self.ra)
- # Module is either empty or a repository path starting with
- # a slash and not ending with a slash.
- self.module = urllib.unquote(self.url[len(self.baseurl):])
- self.prevmodule = None
- self.rootmodule = self.module
- self.commits = {}
- self.paths = {}
- self.uuid = svn.ra.get_uuid(self.ra)
- except SubversionException:
- ui.traceback()
- raise NoRepo("%s does not look like a Subversion repo" % self.url)
-
- if rev:
- try:
- latest = int(rev)
- except ValueError:
- raise util.Abort(_('svn: revision %s is not an integer') % rev)
-
- self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
- try:
- self.startrev = int(self.startrev)
- if self.startrev < 0:
- self.startrev = 0
- except ValueError:
- raise util.Abort(_('svn: start revision %s is not an integer')
- % self.startrev)
-
- self.head = self.latest(self.module, latest)
- if not self.head:
- raise util.Abort(_('no revision found in module %s')
- % self.module)
- self.last_changed = self.revnum(self.head)
-
- self._changescache = None
-
- if os.path.exists(os.path.join(url, '.svn/entries')):
- self.wc = url
- else:
- self.wc = None
- self.convertfp = None
-
- def setrevmap(self, revmap):
- lastrevs = {}
- for revid in revmap.iterkeys():
- uuid, module, revnum = self.revsplit(revid)
- lastrevnum = lastrevs.setdefault(module, revnum)
- if revnum > lastrevnum:
- lastrevs[module] = revnum
- self.lastrevs = lastrevs
-
- def exists(self, path, optrev):
- try:
- svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
- optrev, False, self.ctx)
- return True
- except SubversionException:
- return False
-
- def getheads(self):
-
- def isdir(path, revnum):
- kind = self._checkpath(path, revnum)
- return kind == svn.core.svn_node_dir
-
- def getcfgpath(name, rev):
- cfgpath = self.ui.config('convert', 'svn.' + name)
- if cfgpath is not None and cfgpath.strip() == '':
- return None
- path = (cfgpath or name).strip('/')
- if not self.exists(path, rev):
- if cfgpath:
- raise util.Abort(_('expected %s to be at %r, but not found')
- % (name, path))
- return None
- self.ui.note(_('found %s at %r\n') % (name, path))
- return path
-
- rev = optrev(self.last_changed)
- oldmodule = ''
- trunk = getcfgpath('trunk', rev)
- self.tags = getcfgpath('tags', rev)
- branches = getcfgpath('branches', rev)
-
- # If the project has a trunk or branches, we will extract heads
- # from them. We keep the project root otherwise.
- if trunk:
- oldmodule = self.module or ''
- self.module += '/' + trunk
- self.head = self.latest(self.module, self.last_changed)
- if not self.head:
- raise util.Abort(_('no revision found in module %s')
- % self.module)
-
- # First head in the list is the module's head
- self.heads = [self.head]
- if self.tags is not None:
- self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))
-
- # Check if branches bring a few more heads to the list
- if branches:
- rpath = self.url.strip('/')
- branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
- rev, False, self.ctx)
- for branch in branchnames.keys():
- module = '%s/%s/%s' % (oldmodule, branches, branch)
- if not isdir(module, self.last_changed):
- continue
- brevid = self.latest(module, self.last_changed)
- if not brevid:
- self.ui.note(_('ignoring empty branch %s\n') % branch)
- continue
- self.ui.note(_('found branch %s at %d\n') %
- (branch, self.revnum(brevid)))
- self.heads.append(brevid)
-
- if self.startrev and self.heads:
- if len(self.heads) > 1:
- raise util.Abort(_('svn: start revision is not supported '
- 'with more than one branch'))
- revnum = self.revnum(self.heads[0])
- if revnum < self.startrev:
- raise util.Abort(_('svn: no revision found after start revision %d')
- % self.startrev)
-
- return self.heads
-
- def getfile(self, file, rev):
- data, mode = self._getfile(file, rev)
- self.modecache[(file, rev)] = mode
- return data
-
- def getmode(self, file, rev):
- return self.modecache[(file, rev)]
-
- def getchanges(self, rev):
- if self._changescache and self._changescache[0] == rev:
- return self._changescache[1]
- self._changescache = None
- self.modecache = {}
- (paths, parents) = self.paths[rev]
- if parents:
- files, copies = self.expandpaths(rev, paths, parents)
- else:
- # Perform a full checkout on roots
- uuid, module, revnum = self.revsplit(rev)
- entries = svn.client.ls(self.baseurl + urllib.quote(module),
- optrev(revnum), True, self.ctx)
- files = [n for n,e in entries.iteritems()
- if e.kind == svn.core.svn_node_file]
- copies = {}
-
- files.sort()
- files = zip(files, [rev] * len(files))
-
- # caller caches the result, so free it here to release memory
- del self.paths[rev]
- return (files, copies)
-
- def getchangedfiles(self, rev, i):
- changes = self.getchanges(rev)
- self._changescache = (rev, changes)
- return [f[0] for f in changes[0]]
-
- def getcommit(self, rev):
- if rev not in self.commits:
- uuid, module, revnum = self.revsplit(rev)
- self.module = module
- self.reparent(module)
- # We assume that:
- # - requests for revisions after "stop" come from the
- # revision graph backward traversal. Cache all of them
- # down to stop, they will be used eventually.
- # - requests for revisions before "stop" come to get
- # isolated branches parents. Just fetch what is needed.
- stop = self.lastrevs.get(module, 0)
- if revnum < stop:
- stop = revnum + 1
- self._fetch_revisions(revnum, stop)
- commit = self.commits[rev]
- # caller caches the result, so free it here to release memory
- del self.commits[rev]
- return commit
-
- def gettags(self):
- tags = {}
- if self.tags is None:
- return tags
-
- # svn tags are just a convention, project branches left in a
- # 'tags' directory. There is no other relationship than
- # ancestry, which is expensive to discover and makes them hard
- # to update incrementally. Worse, past revisions may be
- # referenced by tags far away in the future, requiring a deep
- # history traversal on every calculation. Current code
- # performs a single backward traversal, tracking moves within
- # the tags directory (tag renaming) and recording a new tag
- # everytime a project is copied from outside the tags
- # directory. It also lists deleted tags, this behaviour may
- # change in the future.
- pendings = []
- tagspath = self.tags
- start = svn.ra.get_latest_revnum(self.ra)
- try:
- for entry in self._getlog([self.tags], start, self.startrev):
- origpaths, revnum, author, date, message = entry
- copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
- in origpaths.iteritems() if e.copyfrom_path]
- # Apply moves/copies from more specific to general
- copies.sort(reverse=True)
-
- srctagspath = tagspath
- if copies and copies[-1][2] == tagspath:
- # Track tags directory moves
- srctagspath = copies.pop()[0]
-
- for source, sourcerev, dest in copies:
- if not dest.startswith(tagspath + '/'):
- continue
- for tag in pendings:
- if tag[0].startswith(dest):
- tagpath = source + tag[0][len(dest):]
- tag[:2] = [tagpath, sourcerev]
- break
- else:
- pendings.append([source, sourcerev, dest])
-
- # Filter out tags with children coming from different
- # parts of the repository like:
- # /tags/tag.1 (from /trunk:10)
- # /tags/tag.1/foo (from /branches/foo:12)
- # Here/tags/tag.1 discarded as well as its children.
- # It happens with tools like cvs2svn. Such tags cannot
- # be represented in mercurial.
- addeds = dict((p, e.copyfrom_path) for p, e
- in origpaths.iteritems()
- if e.action == 'A' and e.copyfrom_path)
- badroots = set()
- for destroot in addeds:
- for source, sourcerev, dest in pendings:
- if (not dest.startswith(destroot + '/')
- or source.startswith(addeds[destroot] + '/')):
- continue
- badroots.add(destroot)
- break
-
- for badroot in badroots:
- pendings = [p for p in pendings if p[2] != badroot
- and not p[2].startswith(badroot + '/')]
-
- # Tell tag renamings from tag creations
- remainings = []
- for source, sourcerev, dest in pendings:
- tagname = dest.split('/')[-1]
- if source.startswith(srctagspath):
- remainings.append([source, sourcerev, tagname])
- continue
- if tagname in tags:
- # Keep the latest tag value
- continue
- # From revision may be fake, get one with changes
- try:
- tagid = self.latest(source, sourcerev)
- if tagid and tagname not in tags:
- tags[tagname] = tagid
- except SvnPathNotFound:
- # It happens when we are following directories
- # we assumed were copied with their parents
- # but were really created in the tag
- # directory.
- pass
- pendings = remainings
- tagspath = srctagspath
-
- except SubversionException:
- self.ui.note(_('no tags found at revision %d\n') % start)
- return tags
-
- def converted(self, rev, destrev):
- if not self.wc:
- return
- if self.convertfp is None:
- self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
- 'a')
- self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
- self.convertfp.flush()
-
- def revid(self, revnum, module=None):
- return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
-
- def revnum(self, rev):
- return int(rev.split('@')[-1])
-
- def revsplit(self, rev):
- url, revnum = rev.rsplit('@', 1)
- revnum = int(revnum)
- parts = url.split('/', 1)
- uuid = parts.pop(0)[4:]
- mod = ''
- if parts:
- mod = '/' + parts[0]
- return uuid, mod, revnum
-
- def latest(self, path, stop=0):
- """Find the latest revid affecting path, up to stop. It may return
- a revision in a different module, since a branch may be moved without
- a change being reported. Return None if computed module does not
- belong to rootmodule subtree.
- """
- if not path.startswith(self.rootmodule):
- # Requests on foreign branches may be forbidden at server level
- self.ui.debug(_('ignoring foreign branch %r\n') % path)
- return None
-
- if not stop:
- stop = svn.ra.get_latest_revnum(self.ra)
- try:
- prevmodule = self.reparent('')
- dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
- self.reparent(prevmodule)
- except SubversionException:
- dirent = None
- if not dirent:
- raise SvnPathNotFound(_('%s not found up to revision %d') % (path, stop))
-
- # stat() gives us the previous revision on this line of
- # development, but it might be in *another module*. Fetch the
- # log and detect renames down to the latest revision.
- stream = self._getlog([path], stop, dirent.created_rev)
- try:
- for entry in stream:
- paths, revnum, author, date, message = entry
- if revnum <= dirent.created_rev:
- break
-
- for p in paths:
- if not path.startswith(p) or not paths[p].copyfrom_path:
- continue
- newpath = paths[p].copyfrom_path + path[len(p):]
- self.ui.debug(_("branch renamed from %s to %s at %d\n") %
- (path, newpath, revnum))
- path = newpath
- break
- finally:
- stream.close()
-
- if not path.startswith(self.rootmodule):
- self.ui.debug(_('ignoring foreign branch %r\n') % path)
- return None
- return self.revid(dirent.created_rev, path)
-
- def reparent(self, module):
- """Reparent the svn transport and return the previous parent."""
- if self.prevmodule == module:
- return module
- svnurl = self.baseurl + urllib.quote(module)
- prevmodule = self.prevmodule
- if prevmodule is None:
- prevmodule = ''
- self.ui.debug(_("reparent to %s\n") % svnurl)
- svn.ra.reparent(self.ra, svnurl)
- self.prevmodule = module
- return prevmodule
-
- def expandpaths(self, rev, paths, parents):
- entries = []
- # Map of entrypath, revision for finding source of deleted
- # revisions.
- copyfrom = {}
- copies = {}
-
- new_module, revnum = self.revsplit(rev)[1:]
- if new_module != self.module:
- self.module = new_module
- self.reparent(self.module)
-
- for path, ent in paths:
- entrypath = self.getrelpath(path)
-
- kind = self._checkpath(entrypath, revnum)
- if kind == svn.core.svn_node_file:
- entries.append(self.recode(entrypath))
- if not ent.copyfrom_path or not parents:
- continue
- # Copy sources not in parent revisions cannot be
- # represented, ignore their origin for now
- pmodule, prevnum = self.revsplit(parents[0])[1:]
- if ent.copyfrom_rev < prevnum:
- continue
- copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
- if not copyfrom_path:
- continue
- self.ui.debug(_("copied to %s from %s@%s\n") %
- (entrypath, copyfrom_path, ent.copyfrom_rev))
- copies[self.recode(entrypath)] = self.recode(copyfrom_path)
- elif kind == 0: # gone, but had better be a deleted *file*
- self.ui.debug(_("gone from %s\n") % ent.copyfrom_rev)
- pmodule, prevnum = self.revsplit(parents[0])[1:]
- parentpath = pmodule + "/" + entrypath
- self.ui.debug(_("entry %s\n") % parentpath)
-
- # We can avoid the reparent calls if the module has
- # not changed but it probably does not worth the pain.
- prevmodule = self.reparent('')
- fromkind = svn.ra.check_path(self.ra, parentpath.strip('/'), prevnum)
- self.reparent(prevmodule)
-
- if fromkind == svn.core.svn_node_file:
- entries.append(self.recode(entrypath))
- elif fromkind == svn.core.svn_node_dir:
- if ent.action == 'C':
- children = self._find_children(path, prevnum)
- else:
- oroot = parentpath.strip('/')
- nroot = path.strip('/')
- children = self._find_children(oroot, prevnum)
- children = [s.replace(oroot,nroot) for s in children]
-
- for child in children:
- childpath = self.getrelpath("/" + child, pmodule)
- if not childpath:
- continue
- if childpath in copies:
- del copies[childpath]
- entries.append(childpath)
- else:
- self.ui.debug(_('unknown path in revision %d: %s\n') % \
- (revnum, path))
- elif kind == svn.core.svn_node_dir:
- # If the directory just had a prop change,
- # then we shouldn't need to look for its children.
- if ent.action == 'M':
- continue
-
- children = sorted(self._find_children(path, revnum))
- for child in children:
- # Can we move a child directory and its
- # parent in the same commit? (probably can). Could
- # cause problems if instead of revnum -1,
- # we have to look in (copyfrom_path, revnum - 1)
- entrypath = self.getrelpath("/" + child)
- if entrypath:
- # Need to filter out directories here...
- kind = self._checkpath(entrypath, revnum)
- if kind != svn.core.svn_node_dir:
- entries.append(self.recode(entrypath))
-
- # Handle directory copies
- if not ent.copyfrom_path or not parents:
- continue
- # Copy sources not in parent revisions cannot be
- # represented, ignore their origin for now
- pmodule, prevnum = self.revsplit(parents[0])[1:]
- if ent.copyfrom_rev < prevnum:
- continue
- copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
- if not copyfrompath:
- continue
- copyfrom[path] = ent
- self.ui.debug(_("mark %s came from %s:%d\n")
- % (path, copyfrompath, ent.copyfrom_rev))
- children = self._find_children(ent.copyfrom_path, ent.copyfrom_rev)
- children.sort()
- for child in children:
- entrypath = self.getrelpath("/" + child, pmodule)
- if not entrypath:
- continue
- copytopath = path + entrypath[len(copyfrompath):]
- copytopath = self.getrelpath(copytopath)
- copies[self.recode(copytopath)] = self.recode(entrypath)
-
- return (list(set(entries)), copies)
-
- def _fetch_revisions(self, from_revnum, to_revnum):
- if from_revnum < to_revnum:
- from_revnum, to_revnum = to_revnum, from_revnum
-
- self.child_cset = None
-
- def parselogentry(orig_paths, revnum, author, date, message):
- """Return the parsed commit object or None, and True if
- the revision is a branch root.
- """
- self.ui.debug(_("parsing revision %d (%d changes)\n") %
- (revnum, len(orig_paths)))
-
- branched = False
- rev = self.revid(revnum)
- # branch log might return entries for a parent we already have
-
- if rev in self.commits or revnum < to_revnum:
- return None, branched
-
- parents = []
- # check whether this revision is the start of a branch or part
- # of a branch renaming
- orig_paths = sorted(orig_paths.iteritems())
- root_paths = [(p,e) for p,e in orig_paths if self.module.startswith(p)]
- if root_paths:
- path, ent = root_paths[-1]
- if ent.copyfrom_path:
- branched = True
- newpath = ent.copyfrom_path + self.module[len(path):]
- # ent.copyfrom_rev may not be the actual last revision
- previd = self.latest(newpath, ent.copyfrom_rev)
- if previd is not None:
- prevmodule, prevnum = self.revsplit(previd)[1:]
- if prevnum >= self.startrev:
- parents = [previd]
- self.ui.note(_('found parent of branch %s at %d: %s\n') %
- (self.module, prevnum, prevmodule))
- else:
- self.ui.debug(_("no copyfrom path, don't know what to do.\n"))
-
- paths = []
- # filter out unrelated paths
- for path, ent in orig_paths:
- if self.getrelpath(path) is None:
- continue
- paths.append((path, ent))
-
- # Example SVN datetime. Includes microseconds.
- # ISO-8601 conformant
- # '2007-01-04T17:35:00.902377Z'
- date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
-
- log = message and self.recode(message) or ''
- author = author and self.recode(author) or ''
- try:
- branch = self.module.split("/")[-1]
- if branch == 'trunk':
- branch = ''
- except IndexError:
- branch = None
-
- cset = commit(author=author,
- date=util.datestr(date),
- desc=log,
- parents=parents,
- branch=branch,
- rev=rev)
-
- self.commits[rev] = cset
- # The parents list is *shared* among self.paths and the
- # commit object. Both will be updated below.
- self.paths[rev] = (paths, cset.parents)
- if self.child_cset and not self.child_cset.parents:
- self.child_cset.parents[:] = [rev]
- self.child_cset = cset
- return cset, branched
-
- self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
- (self.module, from_revnum, to_revnum))
-
- try:
- firstcset = None
- lastonbranch = False
- stream = self._getlog([self.module], from_revnum, to_revnum)
- try:
- for entry in stream:
- paths, revnum, author, date, message = entry
- if revnum < self.startrev:
- lastonbranch = True
- break
- if not paths:
- self.ui.debug(_('revision %d has no entries\n') % revnum)
- continue
- cset, lastonbranch = parselogentry(paths, revnum, author,
- date, message)
- if cset:
- firstcset = cset
- if lastonbranch:
- break
- finally:
- stream.close()
-
- if not lastonbranch and firstcset and not firstcset.parents:
- # The first revision of the sequence (the last fetched one)
- # has invalid parents if not a branch root. Find the parent
- # revision now, if any.
- try:
- firstrevnum = self.revnum(firstcset.rev)
- if firstrevnum > 1:
- latest = self.latest(self.module, firstrevnum - 1)
- if latest:
- firstcset.parents.append(latest)
- except SvnPathNotFound:
- pass
- except SubversionException, (inst, num):
- if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
- raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
- raise
-
- def _getfile(self, file, rev):
- # TODO: ra.get_file transmits the whole file instead of diffs.
- mode = ''
- try:
- new_module, revnum = self.revsplit(rev)[1:]
- if self.module != new_module:
- self.module = new_module
- self.reparent(self.module)
- io = StringIO()
- info = svn.ra.get_file(self.ra, file, revnum, io)
- data = io.getvalue()
- # ra.get_files() seems to keep a reference on the input buffer
- # preventing collection. Release it explicitely.
- io.close()
- if isinstance(info, list):
- info = info[-1]
- mode = ("svn:executable" in info) and 'x' or ''
- mode = ("svn:special" in info) and 'l' or mode
- except SubversionException, e:
- notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
- svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
- if e.apr_err in notfound: # File not found
- raise IOError()
- raise
- if mode == 'l':
- link_prefix = "link "
- if data.startswith(link_prefix):
- data = data[len(link_prefix):]
- return data, mode
-
- def _find_children(self, path, revnum):
- path = path.strip('/')
- pool = Pool()
- rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
- return ['%s/%s' % (path, x) for x in
- svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool).keys()]
-
- def getrelpath(self, path, module=None):
- if module is None:
- module = self.module
- # Given the repository url of this wc, say
- # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
- # extract the "entry" portion (a relative path) from what
- # svn log --xml says, ie
- # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
- # that is to say "tests/PloneTestCase.py"
- if path.startswith(module):
- relative = path.rstrip('/')[len(module):]
- if relative.startswith('/'):
- return relative[1:]
- elif relative == '':
- return relative
-
- # The path is outside our tracked tree...
- self.ui.debug(_('%r is not under %r, ignoring\n') % (path, module))
- return None
-
- def _checkpath(self, path, revnum):
- # ra.check_path does not like leading slashes very much, it leads
- # to PROPFIND subversion errors
- return svn.ra.check_path(self.ra, path.strip('/'), revnum)
-
- def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
- strict_node_history=False):
- # Normalize path names, svn >= 1.5 only wants paths relative to
- # supplied URL
- relpaths = []
- for p in paths:
- if not p.startswith('/'):
- p = self.module + '/' + p
- relpaths.append(p.strip('/'))
- args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
- strict_node_history]
- arg = encodeargs(args)
- hgexe = util.hgexecutable()
- cmd = '%s debugsvnlog' % util.shellquote(hgexe)
- stdin, stdout = util.popen2(cmd)
- stdin.write(arg)
- stdin.close()
- return logstream(stdout)
-
-pre_revprop_change = '''#!/bin/sh
-
-REPOS="$1"
-REV="$2"
-USER="$3"
-PROPNAME="$4"
-ACTION="$5"
-
-if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
-
-echo "Changing prohibited revision property" >&2
-exit 1
-'''
-
-class svn_sink(converter_sink, commandline):
- commit_re = re.compile(r'Committed revision (\d+).', re.M)
-
- def prerun(self):
- if self.wc:
- os.chdir(self.wc)
-
- def postrun(self):
- if self.wc:
- os.chdir(self.cwd)
-
- def join(self, name):
- return os.path.join(self.wc, '.svn', name)
-
- def revmapfile(self):
- return self.join('hg-shamap')
-
- def authorfile(self):
- return self.join('hg-authormap')
-
- def __init__(self, ui, path):
- converter_sink.__init__(self, ui, path)
- commandline.__init__(self, ui, 'svn')
- self.delete = []
- self.setexec = []
- self.delexec = []
- self.copies = []
- self.wc = None
- self.cwd = os.getcwd()
-
- path = os.path.realpath(path)
-
- created = False
- if os.path.isfile(os.path.join(path, '.svn', 'entries')):
- self.wc = path
- self.run0('update')
- else:
- wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
-
- if os.path.isdir(os.path.dirname(path)):
- if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
- ui.status(_('initializing svn repo %r\n') %
- os.path.basename(path))
- commandline(ui, 'svnadmin').run0('create', path)
- created = path
- path = util.normpath(path)
- if not path.startswith('/'):
- path = '/' + path
- path = 'file://' + path
-
- ui.status(_('initializing svn wc %r\n') % os.path.basename(wcpath))
- self.run0('checkout', path, wcpath)
-
- self.wc = wcpath
- self.opener = util.opener(self.wc)
- self.wopener = util.opener(self.wc)
- self.childmap = mapfile(ui, self.join('hg-childmap'))
- self.is_exec = util.checkexec(self.wc) and util.is_exec or None
-
- if created:
- hook = os.path.join(created, 'hooks', 'pre-revprop-change')
- fp = open(hook, 'w')
- fp.write(pre_revprop_change)
- fp.close()
- util.set_flags(hook, False, True)
-
- xport = transport.SvnRaTransport(url=geturl(path))
- self.uuid = svn.ra.get_uuid(xport.ra)
-
- def wjoin(self, *names):
- return os.path.join(self.wc, *names)
-
- def putfile(self, filename, flags, data):
- if 'l' in flags:
- self.wopener.symlink(data, filename)
- else:
- try:
- if os.path.islink(self.wjoin(filename)):
- os.unlink(filename)
- except OSError:
- pass
- self.wopener(filename, 'w').write(data)
-
- if self.is_exec:
- was_exec = self.is_exec(self.wjoin(filename))
- else:
- # On filesystems not supporting execute-bit, there is no way
- # to know if it is set but asking subversion. Setting it
- # systematically is just as expensive and much simpler.
- was_exec = 'x' not in flags
-
- util.set_flags(self.wjoin(filename), False, 'x' in flags)
- if was_exec:
- if 'x' not in flags:
- self.delexec.append(filename)
- else:
- if 'x' in flags:
- self.setexec.append(filename)
-
- def _copyfile(self, source, dest):
- # SVN's copy command pukes if the destination file exists, but
- # our copyfile method expects to record a copy that has
- # already occurred. Cross the semantic gap.
- wdest = self.wjoin(dest)
- exists = os.path.exists(wdest)
- if exists:
- fd, tempname = tempfile.mkstemp(
- prefix='hg-copy-', dir=os.path.dirname(wdest))
- os.close(fd)
- os.unlink(tempname)
- os.rename(wdest, tempname)
- try:
- self.run0('copy', source, dest)
- finally:
- if exists:
- try:
- os.unlink(wdest)
- except OSError:
- pass
- os.rename(tempname, wdest)
-
- def dirs_of(self, files):
- dirs = set()
- for f in files:
- if os.path.isdir(self.wjoin(f)):
- dirs.add(f)
- for i in strutil.rfindall(f, '/'):
- dirs.add(f[:i])
- return dirs
-
- def add_dirs(self, files):
- add_dirs = [d for d in sorted(self.dirs_of(files))
- if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
- if add_dirs:
- self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
- return add_dirs
-
- def add_files(self, files):
- if files:
- self.xargs(files, 'add', quiet=True)
- return files
-
- def tidy_dirs(self, names):
- deleted = []
- for d in sorted(self.dirs_of(names), reverse=True):
- wd = self.wjoin(d)
- if os.listdir(wd) == '.svn':
- self.run0('delete', d)
- deleted.append(d)
- return deleted
-
- def addchild(self, parent, child):
- self.childmap[parent] = child
-
- def revid(self, rev):
- return u"svn:%s@%s" % (self.uuid, rev)
-
- def putcommit(self, files, copies, parents, commit, source, revmap):
- # Apply changes to working copy
- for f, v in files:
- try:
- data = source.getfile(f, v)
- except IOError:
- self.delete.append(f)
- else:
- e = source.getmode(f, v)
- self.putfile(f, e, data)
- if f in copies:
- self.copies.append([copies[f], f])
- files = [f[0] for f in files]
-
- for parent in parents:
- try:
- return self.revid(self.childmap[parent])
- except KeyError:
- pass
- entries = set(self.delete)
- files = frozenset(files)
- entries.update(self.add_dirs(files.difference(entries)))
- if self.copies:
- for s, d in self.copies:
- self._copyfile(s, d)
- self.copies = []
- if self.delete:
- self.xargs(self.delete, 'delete')
- self.delete = []
- entries.update(self.add_files(files.difference(entries)))
- entries.update(self.tidy_dirs(entries))
- if self.delexec:
- self.xargs(self.delexec, 'propdel', 'svn:executable')
- self.delexec = []
- if self.setexec:
- self.xargs(self.setexec, 'propset', 'svn:executable', '*')
- self.setexec = []
-
- fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
- fp = os.fdopen(fd, 'w')
- fp.write(commit.desc)
- fp.close()
- try:
- output = self.run0('commit',
- username=util.shortuser(commit.author),
- file=messagefile,
- encoding='utf-8')
- try:
- rev = self.commit_re.search(output).group(1)
- except AttributeError:
- self.ui.warn(_('unexpected svn output:\n'))
- self.ui.warn(output)
- raise util.Abort(_('unable to cope with svn output'))
- if commit.rev:
- self.run('propset', 'hg:convert-rev', commit.rev,
- revprop=True, revision=rev)
- if commit.branch and commit.branch != 'default':
- self.run('propset', 'hg:convert-branch', commit.branch,
- revprop=True, revision=rev)
- for parent in parents:
- self.addchild(parent, rev)
- return self.revid(rev)
- finally:
- os.unlink(messagefile)
-
- def puttags(self, tags):
- self.ui.warn(_('XXX TAGS NOT IMPLEMENTED YET\n'))
diff --git a/sys/lib/python/hgext/convert/transport.py b/sys/lib/python/hgext/convert/transport.py
deleted file mode 100644
index 0d77cca4d..000000000
--- a/sys/lib/python/hgext/convert/transport.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
-# This is a stripped-down version of the original bzr-svn transport.py,
-# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-from svn.core import SubversionException, Pool
-import svn.ra
-import svn.client
-import svn.core
-
-# Some older versions of the Python bindings need to be
-# explicitly initialized. But what we want to do probably
-# won't work worth a darn against those libraries anyway!
-svn.ra.initialize()
-
-svn_config = svn.core.svn_config_get_config(None)
-
-
-def _create_auth_baton(pool):
- """Create a Subversion authentication baton. """
- import svn.client
- # Give the client context baton a suite of authentication
- # providers.h
- providers = [
- svn.client.get_simple_provider(pool),
- svn.client.get_username_provider(pool),
- svn.client.get_ssl_client_cert_file_provider(pool),
- svn.client.get_ssl_client_cert_pw_file_provider(pool),
- svn.client.get_ssl_server_trust_file_provider(pool),
- ]
- # Platform-dependant authentication methods
- getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
- None)
- if getprovider:
- # Available in svn >= 1.6
- for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
- for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
- p = getprovider(name, type, pool)
- if p:
- providers.append(p)
- else:
- if hasattr(svn.client, 'get_windows_simple_provider'):
- providers.append(svn.client.get_windows_simple_provider(pool))
-
- return svn.core.svn_auth_open(providers, pool)
-
-class NotBranchError(SubversionException):
- pass
-
-class SvnRaTransport(object):
- """
- Open an ra connection to a Subversion repository.
- """
- def __init__(self, url="", ra=None):
- self.pool = Pool()
- self.svn_url = url
- self.username = ''
- self.password = ''
-
- # Only Subversion 1.4 has reparent()
- if ra is None or not hasattr(svn.ra, 'reparent'):
- self.client = svn.client.create_context(self.pool)
- ab = _create_auth_baton(self.pool)
- if False:
- svn.core.svn_auth_set_parameter(
- ab, svn.core.SVN_AUTH_PARAM_DEFAULT_USERNAME, self.username)
- svn.core.svn_auth_set_parameter(
- ab, svn.core.SVN_AUTH_PARAM_DEFAULT_PASSWORD, self.password)
- self.client.auth_baton = ab
- self.client.config = svn_config
- try:
- self.ra = svn.client.open_ra_session(
- self.svn_url.encode('utf8'),
- self.client, self.pool)
- except SubversionException, (inst, num):
- if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
- svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
- svn.core.SVN_ERR_BAD_URL):
- raise NotBranchError(url)
- raise
- else:
- self.ra = ra
- svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
-
- class Reporter(object):
- def __init__(self, (reporter, report_baton)):
- self._reporter = reporter
- self._baton = report_baton
-
- def set_path(self, path, revnum, start_empty, lock_token, pool=None):
- svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
- path, revnum, start_empty, lock_token, pool)
-
- def delete_path(self, path, pool=None):
- svn.ra.reporter2_invoke_delete_path(self._reporter, self._baton,
- path, pool)
-
- def link_path(self, path, url, revision, start_empty, lock_token,
- pool=None):
- svn.ra.reporter2_invoke_link_path(self._reporter, self._baton,
- path, url, revision, start_empty, lock_token,
- pool)
-
- def finish_report(self, pool=None):
- svn.ra.reporter2_invoke_finish_report(self._reporter,
- self._baton, pool)
-
- def abort_report(self, pool=None):
- svn.ra.reporter2_invoke_abort_report(self._reporter,
- self._baton, pool)
-
- def do_update(self, revnum, path, *args, **kwargs):
- return self.Reporter(svn.ra.do_update(self.ra, revnum, path, *args, **kwargs))
diff --git a/sys/lib/python/hgext/extdiff.py b/sys/lib/python/hgext/extdiff.py
deleted file mode 100644
index 56e29f4df..000000000
--- a/sys/lib/python/hgext/extdiff.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# extdiff.py - external diff program support for mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''command to allow external programs to compare revisions
-
-The extdiff Mercurial extension allows you to use external programs
-to compare revisions, or revision with working directory. The external
-diff programs are called with a configurable set of options and two
-non-option arguments: paths to directories containing snapshots of
-files to compare.
-
-The extdiff extension also allows to configure new diff commands, so
-you do not need to type "hg extdiff -p kdiff3" always. ::
-
- [extdiff]
- # add new command that runs GNU diff(1) in 'context diff' mode
- cdiff = gdiff -Nprc5
- ## or the old way:
- #cmd.cdiff = gdiff
- #opts.cdiff = -Nprc5
-
- # add new command called vdiff, runs kdiff3
- vdiff = kdiff3
-
- # add new command called meld, runs meld (no need to name twice)
- meld =
-
- # add new command called vimdiff, runs gvimdiff with DirDiff plugin
- # (see http://www.vim.org/scripts/script.php?script_id=102) Non
- # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
- # your .vimrc
- vimdiff = gvim -f '+next' '+execute "DirDiff" argv(0) argv(1)'
-
-You can use -I/-X and list of file or directory names like normal "hg
-diff" command. The extdiff extension makes snapshots of only needed
-files, so running the external diff program will actually be pretty
-fast (at least faster than having to compare the entire tree).
-'''
-
-from mercurial.i18n import _
-from mercurial.node import short
-from mercurial import cmdutil, util, commands
-import os, shlex, shutil, tempfile
-
-def snapshot(ui, repo, files, node, tmproot):
- '''snapshot files as of some revision
- if not using snapshot, -I/-X does not work and recursive diff
- in tools like kdiff3 and meld displays too many files.'''
- dirname = os.path.basename(repo.root)
- if dirname == "":
- dirname = "root"
- if node is not None:
- dirname = '%s.%s' % (dirname, short(node))
- base = os.path.join(tmproot, dirname)
- os.mkdir(base)
- if node is not None:
- ui.note(_('making snapshot of %d files from rev %s\n') %
- (len(files), short(node)))
- else:
- ui.note(_('making snapshot of %d files from working directory\n') %
- (len(files)))
- wopener = util.opener(base)
- fns_and_mtime = []
- ctx = repo[node]
- for fn in files:
- wfn = util.pconvert(fn)
- if not wfn in ctx:
- # skipping new file after a merge ?
- continue
- ui.note(' %s\n' % wfn)
- dest = os.path.join(base, wfn)
- fctx = ctx[wfn]
- data = repo.wwritedata(wfn, fctx.data())
- if 'l' in fctx.flags():
- wopener.symlink(data, wfn)
- else:
- wopener(wfn, 'w').write(data)
- if 'x' in fctx.flags():
- util.set_flags(dest, False, True)
- if node is None:
- fns_and_mtime.append((dest, repo.wjoin(fn), os.path.getmtime(dest)))
- return dirname, fns_and_mtime
-
-def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
- '''Do the actuall diff:
-
- - copy to a temp structure if diffing 2 internal revisions
- - copy to a temp structure if diffing working revision with
- another one and more than 1 file is changed
- - just invoke the diff for a single file in the working dir
- '''
-
- revs = opts.get('rev')
- change = opts.get('change')
-
- if revs and change:
- msg = _('cannot specify --rev and --change at the same time')
- raise util.Abort(msg)
- elif change:
- node2 = repo.lookup(change)
- node1 = repo[node2].parents()[0].node()
- else:
- node1, node2 = cmdutil.revpair(repo, revs)
-
- matcher = cmdutil.match(repo, pats, opts)
- modified, added, removed = repo.status(node1, node2, matcher)[:3]
- if not (modified or added or removed):
- return 0
-
- tmproot = tempfile.mkdtemp(prefix='extdiff.')
- dir2root = ''
- try:
- # Always make a copy of node1
- dir1 = snapshot(ui, repo, modified + removed, node1, tmproot)[0]
- changes = len(modified) + len(removed) + len(added)
-
- # If node2 in not the wc or there is >1 change, copy it
- if node2 or changes > 1:
- dir2, fns_and_mtime = snapshot(ui, repo, modified + added, node2, tmproot)
- else:
- # This lets the diff tool open the changed file directly
- dir2 = ''
- dir2root = repo.root
- fns_and_mtime = []
-
- # If only one change, diff the files instead of the directories
- if changes == 1 :
- if len(modified):
- dir1 = os.path.join(dir1, util.localpath(modified[0]))
- dir2 = os.path.join(dir2root, dir2, util.localpath(modified[0]))
- elif len(removed) :
- dir1 = os.path.join(dir1, util.localpath(removed[0]))
- dir2 = os.devnull
- else:
- dir1 = os.devnull
- dir2 = os.path.join(dir2root, dir2, util.localpath(added[0]))
-
- cmdline = ('%s %s %s %s' %
- (util.shellquote(diffcmd), ' '.join(diffopts),
- util.shellquote(dir1), util.shellquote(dir2)))
- ui.debug(_('running %r in %s\n') % (cmdline, tmproot))
- util.system(cmdline, cwd=tmproot)
-
- for copy_fn, working_fn, mtime in fns_and_mtime:
- if os.path.getmtime(copy_fn) != mtime:
- ui.debug(_('file changed while diffing. '
- 'Overwriting: %s (src: %s)\n') % (working_fn, copy_fn))
- util.copyfile(copy_fn, working_fn)
-
- return 1
- finally:
- ui.note(_('cleaning up temp directory\n'))
- shutil.rmtree(tmproot)
-
-def extdiff(ui, repo, *pats, **opts):
- '''use external program to diff repository (or selected files)
-
- Show differences between revisions for the specified files, using
- an external program. The default program used is diff, with
- default options "-Npru".
-
- To select a different program, use the -p/--program option. The
- program will be passed the names of two directories to compare. To
- pass additional options to the program, use -o/--option. These
- will be passed before the names of the directories to compare.
-
- When two revision arguments are given, then changes are shown
- between those revisions. If only one revision is specified then
- that revision is compared to the working directory, and, when no
- revisions are specified, the working directory files are compared
- to its parent.'''
- program = opts['program'] or 'diff'
- if opts['program']:
- option = opts['option']
- else:
- option = opts['option'] or ['-Npru']
- return dodiff(ui, repo, program, option, pats, opts)
-
-cmdtable = {
- "extdiff":
- (extdiff,
- [('p', 'program', '', _('comparison program to run')),
- ('o', 'option', [], _('pass option to comparison program')),
- ('r', 'rev', [], _('revision')),
- ('c', 'change', '', _('change made by revision')),
- ] + commands.walkopts,
- _('hg extdiff [OPT]... [FILE]...')),
- }
-
-def uisetup(ui):
- for cmd, path in ui.configitems('extdiff'):
- if cmd.startswith('cmd.'):
- cmd = cmd[4:]
- if not path: path = cmd
- diffopts = ui.config('extdiff', 'opts.' + cmd, '')
- diffopts = diffopts and [diffopts] or []
- elif cmd.startswith('opts.'):
- continue
- else:
- # command = path opts
- if path:
- diffopts = shlex.split(path)
- path = diffopts.pop(0)
- else:
- path, diffopts = cmd, []
- def save(cmd, path, diffopts):
- '''use closure to save diff command to use'''
- def mydiff(ui, repo, *pats, **opts):
- return dodiff(ui, repo, path, diffopts, pats, opts)
- mydiff.__doc__ = _('''\
-use %(path)s to diff repository (or selected files)
-
- Show differences between revisions for the specified files, using the
- %(path)s program.
-
- When two revision arguments are given, then changes are shown between
- those revisions. If only one revision is specified then that revision is
- compared to the working directory, and, when no revisions are specified,
- the working directory files are compared to its parent.\
-''') % dict(path=util.uirepr(path))
- return mydiff
- cmdtable[cmd] = (save(cmd, path, diffopts),
- cmdtable['extdiff'][1][1:],
- _('hg %s [OPTION]... [FILE]...') % cmd)
diff --git a/sys/lib/python/hgext/fetch.py b/sys/lib/python/hgext/fetch.py
deleted file mode 100644
index 05cd3fcc3..000000000
--- a/sys/lib/python/hgext/fetch.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# fetch.py - pull and merge remote changes
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''pull, update and merge in one command'''
-
-from mercurial.i18n import _
-from mercurial.node import nullid, short
-from mercurial import commands, cmdutil, hg, util, url, error
-from mercurial.lock import release
-
-def fetch(ui, repo, source='default', **opts):
- '''pull changes from a remote repository, merge new changes if needed.
-
- This finds all changes from the repository at the specified path
- or URL and adds them to the local repository.
-
- If the pulled changes add a new branch head, the head is
- automatically merged, and the result of the merge is committed.
- Otherwise, the working directory is updated to include the new
- changes.
-
- When a merge occurs, the newly pulled changes are assumed to be
- "authoritative". The head of the new changes is used as the first
- parent, with local changes as the second. To switch the merge
- order, use --switch-parent.
-
- See 'hg help dates' for a list of formats valid for -d/--date.
- '''
-
- date = opts.get('date')
- if date:
- opts['date'] = util.parsedate(date)
-
- parent, p2 = repo.dirstate.parents()
- branch = repo.dirstate.branch()
- branchnode = repo.branchtags().get(branch)
- if parent != branchnode:
- raise util.Abort(_('working dir not at branch tip '
- '(use "hg update" to check out branch tip)'))
-
- if p2 != nullid:
- raise util.Abort(_('outstanding uncommitted merge'))
-
- wlock = lock = None
- try:
- wlock = repo.wlock()
- lock = repo.lock()
- mod, add, rem, del_ = repo.status()[:4]
-
- if mod or add or rem:
- raise util.Abort(_('outstanding uncommitted changes'))
- if del_:
- raise util.Abort(_('working directory is missing some files'))
- bheads = repo.branchheads(branch)
- bheads = [head for head in bheads if len(repo[head].children()) == 0]
- if len(bheads) > 1:
- raise util.Abort(_('multiple heads in this branch '
- '(use "hg heads ." and "hg merge" to merge)'))
-
- other = hg.repository(cmdutil.remoteui(repo, opts),
- ui.expandpath(source))
- ui.status(_('pulling from %s\n') %
- url.hidepassword(ui.expandpath(source)))
- revs = None
- if opts['rev']:
- try:
- revs = [other.lookup(rev) for rev in opts['rev']]
- except error.CapabilityError:
- err = _("Other repository doesn't support revision lookup, "
- "so a rev cannot be specified.")
- raise util.Abort(err)
-
- # Are there any changes at all?
- modheads = repo.pull(other, heads=revs)
- if modheads == 0:
- return 0
-
- # Is this a simple fast-forward along the current branch?
- newheads = repo.branchheads(branch)
- newheads = [head for head in newheads if len(repo[head].children()) == 0]
- newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
- if len(newheads) == 1:
- if newchildren[0] != parent:
- return hg.clean(repo, newchildren[0])
- else:
- return
-
- # Are there more than one additional branch heads?
- newchildren = [n for n in newchildren if n != parent]
- newparent = parent
- if newchildren:
- newparent = newchildren[0]
- hg.clean(repo, newparent)
- newheads = [n for n in newheads if n != newparent]
- if len(newheads) > 1:
- ui.status(_('not merging with %d other new branch heads '
- '(use "hg heads ." and "hg merge" to merge them)\n') %
- (len(newheads) - 1))
- return
-
- # Otherwise, let's merge.
- err = False
- if newheads:
- # By default, we consider the repository we're pulling
- # *from* as authoritative, so we merge our changes into
- # theirs.
- if opts['switch_parent']:
- firstparent, secondparent = newparent, newheads[0]
- else:
- firstparent, secondparent = newheads[0], newparent
- ui.status(_('updating to %d:%s\n') %
- (repo.changelog.rev(firstparent),
- short(firstparent)))
- hg.clean(repo, firstparent)
- ui.status(_('merging with %d:%s\n') %
- (repo.changelog.rev(secondparent), short(secondparent)))
- err = hg.merge(repo, secondparent, remind=False)
-
- if not err:
- # we don't translate commit messages
- message = (cmdutil.logmessage(opts) or
- ('Automated merge with %s' %
- url.removeauth(other.url())))
- editor = cmdutil.commiteditor
- if opts.get('force_editor') or opts.get('edit'):
- editor = cmdutil.commitforceeditor
- n = repo.commit(message, opts['user'], opts['date'], editor=editor)
- ui.status(_('new changeset %d:%s merges remote changes '
- 'with local\n') % (repo.changelog.rev(n),
- short(n)))
-
- finally:
- release(lock, wlock)
-
-cmdtable = {
- 'fetch':
- (fetch,
- [('r', 'rev', [], _('a specific revision you would like to pull')),
- ('e', 'edit', None, _('edit commit message')),
- ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
- ('', 'switch-parent', None, _('switch parents when merging')),
- ] + commands.commitopts + commands.commitopts2 + commands.remoteopts,
- _('hg fetch [SOURCE]')),
-}
diff --git a/sys/lib/python/hgext/gpg.py b/sys/lib/python/hgext/gpg.py
deleted file mode 100644
index 4a2f07d8e..000000000
--- a/sys/lib/python/hgext/gpg.py
+++ /dev/null
@@ -1,284 +0,0 @@
-# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''commands to sign and verify changesets'''
-
-import os, tempfile, binascii
-from mercurial import util, commands, match
-from mercurial import node as hgnode
-from mercurial.i18n import _
-
-class gpg(object):
- def __init__(self, path, key=None):
- self.path = path
- self.key = (key and " --local-user \"%s\"" % key) or ""
-
- def sign(self, data):
- gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
- return util.filter(data, gpgcmd)
-
- def verify(self, data, sig):
- """ returns of the good and bad signatures"""
- sigfile = datafile = None
- try:
- # create temporary files
- fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
- fp = os.fdopen(fd, 'wb')
- fp.write(sig)
- fp.close()
- fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
- fp = os.fdopen(fd, 'wb')
- fp.write(data)
- fp.close()
- gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
- "\"%s\" \"%s\"" % (self.path, sigfile, datafile))
- ret = util.filter("", gpgcmd)
- finally:
- for f in (sigfile, datafile):
- try:
- if f: os.unlink(f)
- except: pass
- keys = []
- key, fingerprint = None, None
- err = ""
- for l in ret.splitlines():
- # see DETAILS in the gnupg documentation
- # filter the logger output
- if not l.startswith("[GNUPG:]"):
- continue
- l = l[9:]
- if l.startswith("ERRSIG"):
- err = _("error while verifying signature")
- break
- elif l.startswith("VALIDSIG"):
- # fingerprint of the primary key
- fingerprint = l.split()[10]
- elif (l.startswith("GOODSIG") or
- l.startswith("EXPSIG") or
- l.startswith("EXPKEYSIG") or
- l.startswith("BADSIG")):
- if key is not None:
- keys.append(key + [fingerprint])
- key = l.split(" ", 2)
- fingerprint = None
- if err:
- return err, []
- if key is not None:
- keys.append(key + [fingerprint])
- return err, keys
-
-def newgpg(ui, **opts):
- """create a new gpg instance"""
- gpgpath = ui.config("gpg", "cmd", "gpg")
- gpgkey = opts.get('key')
- if not gpgkey:
- gpgkey = ui.config("gpg", "key", None)
- return gpg(gpgpath, gpgkey)
-
-def sigwalk(repo):
- """
- walk over every sigs, yields a couple
- ((node, version, sig), (filename, linenumber))
- """
- def parsefile(fileiter, context):
- ln = 1
- for l in fileiter:
- if not l:
- continue
- yield (l.split(" ", 2), (context, ln))
- ln +=1
-
- # read the heads
- fl = repo.file(".hgsigs")
- for r in reversed(fl.heads()):
- fn = ".hgsigs|%s" % hgnode.short(r)
- for item in parsefile(fl.read(r).splitlines(), fn):
- yield item
- try:
- # read local signatures
- fn = "localsigs"
- for item in parsefile(repo.opener(fn), fn):
- yield item
- except IOError:
- pass
-
-def getkeys(ui, repo, mygpg, sigdata, context):
- """get the keys who signed a data"""
- fn, ln = context
- node, version, sig = sigdata
- prefix = "%s:%d" % (fn, ln)
- node = hgnode.bin(node)
-
- data = node2txt(repo, node, version)
- sig = binascii.a2b_base64(sig)
- err, keys = mygpg.verify(data, sig)
- if err:
- ui.warn("%s:%d %s\n" % (fn, ln , err))
- return None
-
- validkeys = []
- # warn for expired key and/or sigs
- for key in keys:
- if key[0] == "BADSIG":
- ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
- continue
- if key[0] == "EXPSIG":
- ui.write(_("%s Note: Signature has expired"
- " (signed by: \"%s\")\n") % (prefix, key[2]))
- elif key[0] == "EXPKEYSIG":
- ui.write(_("%s Note: This key has expired"
- " (signed by: \"%s\")\n") % (prefix, key[2]))
- validkeys.append((key[1], key[2], key[3]))
- return validkeys
-
-def sigs(ui, repo):
- """list signed changesets"""
- mygpg = newgpg(ui)
- revs = {}
-
- for data, context in sigwalk(repo):
- node, version, sig = data
- fn, ln = context
- try:
- n = repo.lookup(node)
- except KeyError:
- ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
- continue
- r = repo.changelog.rev(n)
- keys = getkeys(ui, repo, mygpg, data, context)
- if not keys:
- continue
- revs.setdefault(r, [])
- revs[r].extend(keys)
- for rev in sorted(revs, reverse=True):
- for k in revs[rev]:
- r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
- ui.write("%-30s %s\n" % (keystr(ui, k), r))
-
-def check(ui, repo, rev):
- """verify all the signatures there may be for a particular revision"""
- mygpg = newgpg(ui)
- rev = repo.lookup(rev)
- hexrev = hgnode.hex(rev)
- keys = []
-
- for data, context in sigwalk(repo):
- node, version, sig = data
- if node == hexrev:
- k = getkeys(ui, repo, mygpg, data, context)
- if k:
- keys.extend(k)
-
- if not keys:
- ui.write(_("No valid signature for %s\n") % hgnode.short(rev))
- return
-
- # print summary
- ui.write("%s is signed by:\n" % hgnode.short(rev))
- for key in keys:
- ui.write(" %s\n" % keystr(ui, key))
-
-def keystr(ui, key):
- """associate a string to a key (username, comment)"""
- keyid, user, fingerprint = key
- comment = ui.config("gpg", fingerprint, None)
- if comment:
- return "%s (%s)" % (user, comment)
- else:
- return user
-
-def sign(ui, repo, *revs, **opts):
- """add a signature for the current or given revision
-
- If no revision is given, the parent of the working directory is used,
- or tip if no revision is checked out.
-
- See 'hg help dates' for a list of formats valid for -d/--date.
- """
-
- mygpg = newgpg(ui, **opts)
- sigver = "0"
- sigmessage = ""
-
- date = opts.get('date')
- if date:
- opts['date'] = util.parsedate(date)
-
- if revs:
- nodes = [repo.lookup(n) for n in revs]
- else:
- nodes = [node for node in repo.dirstate.parents()
- if node != hgnode.nullid]
- if len(nodes) > 1:
- raise util.Abort(_('uncommitted merge - please provide a '
- 'specific revision'))
- if not nodes:
- nodes = [repo.changelog.tip()]
-
- for n in nodes:
- hexnode = hgnode.hex(n)
- ui.write("Signing %d:%s\n" % (repo.changelog.rev(n),
- hgnode.short(n)))
- # build data
- data = node2txt(repo, n, sigver)
- sig = mygpg.sign(data)
- if not sig:
- raise util.Abort(_("Error while signing"))
- sig = binascii.b2a_base64(sig)
- sig = sig.replace("\n", "")
- sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
-
- # write it
- if opts['local']:
- repo.opener("localsigs", "ab").write(sigmessage)
- return
-
- for x in repo.status(unknown=True)[:5]:
- if ".hgsigs" in x and not opts["force"]:
- raise util.Abort(_("working copy of .hgsigs is changed "
- "(please commit .hgsigs manually "
- "or use --force)"))
-
- repo.wfile(".hgsigs", "ab").write(sigmessage)
-
- if '.hgsigs' not in repo.dirstate:
- repo.add([".hgsigs"])
-
- if opts["no_commit"]:
- return
-
- message = opts['message']
- if not message:
- # we don't translate commit messages
- message = "\n".join(["Added signature for changeset %s"
- % hgnode.short(n)
- for n in nodes])
- try:
- m = match.exact(repo.root, '', ['.hgsigs'])
- repo.commit(message, opts['user'], opts['date'], match=m)
- except ValueError, inst:
- raise util.Abort(str(inst))
-
-def node2txt(repo, node, ver):
- """map a manifest into some text"""
- if ver == "0":
- return "%s\n" % hgnode.hex(node)
- else:
- raise util.Abort(_("unknown signature version"))
-
-cmdtable = {
- "sign":
- (sign,
- [('l', 'local', None, _('make the signature local')),
- ('f', 'force', None, _('sign even if the sigfile is modified')),
- ('', 'no-commit', None, _('do not commit the sigfile after signing')),
- ('k', 'key', '', _('the key id to sign with')),
- ('m', 'message', '', _('commit message')),
- ] + commands.commitopts2,
- _('hg sign [OPTION]... [REVISION]...')),
- "sigcheck": (check, [], _('hg sigcheck REVISION')),
- "sigs": (sigs, [], _('hg sigs')),
-}
-
diff --git a/sys/lib/python/hgext/graphlog.py b/sys/lib/python/hgext/graphlog.py
deleted file mode 100644
index d77edf931..000000000
--- a/sys/lib/python/hgext/graphlog.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# ASCII graph log extension for Mercurial
-#
-# Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''command to view revision graphs from a shell
-
-This extension adds a --graph option to the incoming, outgoing and log
-commands. When this options is given, an ASCII representation of the
-revision graph is also shown.
-'''
-
-import os, sys
-from mercurial.cmdutil import revrange, show_changeset
-from mercurial.commands import templateopts
-from mercurial.i18n import _
-from mercurial.node import nullrev
-from mercurial import bundlerepo, changegroup, cmdutil, commands, extensions
-from mercurial import hg, url, util, graphmod
-
-ASCIIDATA = 'ASC'
-
-def asciiformat(ui, repo, revdag, opts, parentrepo=None):
- """formats a changelog DAG walk for ASCII output"""
- if parentrepo is None:
- parentrepo = repo
- showparents = [ctx.node() for ctx in parentrepo[None].parents()]
- displayer = show_changeset(ui, repo, opts, buffered=True)
- for (id, type, ctx, parentids) in revdag:
- if type != graphmod.CHANGESET:
- continue
- displayer.show(ctx)
- lines = displayer.hunk.pop(ctx.rev()).split('\n')[:-1]
- char = ctx.node() in showparents and '@' or 'o'
- yield (id, ASCIIDATA, (char, lines), parentids)
-
-def asciiedges(nodes):
- """adds edge info to changelog DAG walk suitable for ascii()"""
- seen = []
- for node, type, data, parents in nodes:
- if node not in seen:
- seen.append(node)
- nodeidx = seen.index(node)
-
- knownparents = []
- newparents = []
- for parent in parents:
- if parent in seen:
- knownparents.append(parent)
- else:
- newparents.append(parent)
-
- ncols = len(seen)
- nextseen = seen[:]
- nextseen[nodeidx:nodeidx + 1] = newparents
- edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
-
- if len(newparents) > 0:
- edges.append((nodeidx, nodeidx))
- if len(newparents) > 1:
- edges.append((nodeidx, nodeidx + 1))
- nmorecols = len(nextseen) - ncols
- seen = nextseen
- yield (nodeidx, type, data, edges, ncols, nmorecols)
-
-def fix_long_right_edges(edges):
- for (i, (start, end)) in enumerate(edges):
- if end > start:
- edges[i] = (start, end + 1)
-
-def get_nodeline_edges_tail(
- node_index, p_node_index, n_columns, n_columns_diff, p_diff, fix_tail):
- if fix_tail and n_columns_diff == p_diff and n_columns_diff != 0:
- # Still going in the same non-vertical direction.
- if n_columns_diff == -1:
- start = max(node_index + 1, p_node_index)
- tail = ["|", " "] * (start - node_index - 1)
- tail.extend(["/", " "] * (n_columns - start))
- return tail
- else:
- return ["\\", " "] * (n_columns - node_index - 1)
- else:
- return ["|", " "] * (n_columns - node_index - 1)
-
-def draw_edges(edges, nodeline, interline):
- for (start, end) in edges:
- if start == end + 1:
- interline[2 * end + 1] = "/"
- elif start == end - 1:
- interline[2 * start + 1] = "\\"
- elif start == end:
- interline[2 * start] = "|"
- else:
- nodeline[2 * end] = "+"
- if start > end:
- (start, end) = (end, start)
- for i in range(2 * start + 1, 2 * end):
- if nodeline[i] != "+":
- nodeline[i] = "-"
-
-def get_padding_line(ni, n_columns, edges):
- line = []
- line.extend(["|", " "] * ni)
- if (ni, ni - 1) in edges or (ni, ni) in edges:
- # (ni, ni - 1) (ni, ni)
- # | | | | | | | |
- # +---o | | o---+
- # | | c | | c | |
- # | |/ / | |/ /
- # | | | | | |
- c = "|"
- else:
- c = " "
- line.extend([c, " "])
- line.extend(["|", " "] * (n_columns - ni - 1))
- return line
-
-def ascii(ui, dag):
- """prints an ASCII graph of the DAG
-
- dag is a generator that emits tuples with the following elements:
-
- - Column of the current node in the set of ongoing edges.
- - Type indicator of node data == ASCIIDATA.
- - Payload: (char, lines):
- - Character to use as node's symbol.
- - List of lines to display as the node's text.
- - Edges; a list of (col, next_col) indicating the edges between
- the current node and its parents.
- - Number of columns (ongoing edges) in the current revision.
- - The difference between the number of columns (ongoing edges)
- in the next revision and the number of columns (ongoing edges)
- in the current revision. That is: -1 means one column removed;
- 0 means no columns added or removed; 1 means one column added.
- """
- prev_n_columns_diff = 0
- prev_node_index = 0
- for (node_index, type, (node_ch, node_lines), edges, n_columns, n_columns_diff) in dag:
-
- assert -2 < n_columns_diff < 2
- if n_columns_diff == -1:
- # Transform
- #
- # | | | | | |
- # o | | into o---+
- # |X / |/ /
- # | | | |
- fix_long_right_edges(edges)
-
- # add_padding_line says whether to rewrite
- #
- # | | | | | | | |
- # | o---+ into | o---+
- # | / / | | | # <--- padding line
- # o | | | / /
- # o | |
- add_padding_line = (len(node_lines) > 2 and
- n_columns_diff == -1 and
- [x for (x, y) in edges if x + 1 < y])
-
- # fix_nodeline_tail says whether to rewrite
- #
- # | | o | | | | o | |
- # | | |/ / | | |/ /
- # | o | | into | o / / # <--- fixed nodeline tail
- # | |/ / | |/ /
- # o | | o | |
- fix_nodeline_tail = len(node_lines) <= 2 and not add_padding_line
-
- # nodeline is the line containing the node character (typically o)
- nodeline = ["|", " "] * node_index
- nodeline.extend([node_ch, " "])
-
- nodeline.extend(
- get_nodeline_edges_tail(
- node_index, prev_node_index, n_columns, n_columns_diff,
- prev_n_columns_diff, fix_nodeline_tail))
-
- # shift_interline is the line containing the non-vertical
- # edges between this entry and the next
- shift_interline = ["|", " "] * node_index
- if n_columns_diff == -1:
- n_spaces = 1
- edge_ch = "/"
- elif n_columns_diff == 0:
- n_spaces = 2
- edge_ch = "|"
- else:
- n_spaces = 3
- edge_ch = "\\"
- shift_interline.extend(n_spaces * [" "])
- shift_interline.extend([edge_ch, " "] * (n_columns - node_index - 1))
-
- # draw edges from the current node to its parents
- draw_edges(edges, nodeline, shift_interline)
-
- # lines is the list of all graph lines to print
- lines = [nodeline]
- if add_padding_line:
- lines.append(get_padding_line(node_index, n_columns, edges))
- lines.append(shift_interline)
-
- # make sure that there are as many graph lines as there are
- # log strings
- while len(node_lines) < len(lines):
- node_lines.append("")
- if len(lines) < len(node_lines):
- extra_interline = ["|", " "] * (n_columns + n_columns_diff)
- while len(lines) < len(node_lines):
- lines.append(extra_interline)
-
- # print lines
- indentation_level = max(n_columns, n_columns + n_columns_diff)
- for (line, logstr) in zip(lines, node_lines):
- ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
- ui.write(ln.rstrip() + '\n')
-
- # ... and start over
- prev_node_index = node_index
- prev_n_columns_diff = n_columns_diff
-
-def get_revs(repo, rev_opt):
- if rev_opt:
- revs = revrange(repo, rev_opt)
- return (max(revs), min(revs))
- else:
- return (len(repo) - 1, 0)
-
-def check_unsupported_flags(opts):
- for op in ["follow", "follow_first", "date", "copies", "keyword", "remove",
- "only_merges", "user", "only_branch", "prune", "newest_first",
- "no_merges", "include", "exclude"]:
- if op in opts and opts[op]:
- raise util.Abort(_("--graph option is incompatible with --%s") % op)
-
-def graphlog(ui, repo, path=None, **opts):
- """show revision history alongside an ASCII revision graph
-
- Print a revision history alongside a revision graph drawn with
- ASCII characters.
-
- Nodes printed as an @ character are parents of the working
- directory.
- """
-
- check_unsupported_flags(opts)
- limit = cmdutil.loglimit(opts)
- start, stop = get_revs(repo, opts["rev"])
- stop = max(stop, start - limit + 1)
- if start == nullrev:
- return
-
- if path:
- path = util.canonpath(repo.root, os.getcwd(), path)
- if path: # could be reset in canonpath
- revdag = graphmod.filerevs(repo, path, start, stop)
- else:
- revdag = graphmod.revisions(repo, start, stop)
-
- fmtdag = asciiformat(ui, repo, revdag, opts)
- ascii(ui, asciiedges(fmtdag))
-
-def graphrevs(repo, nodes, opts):
- limit = cmdutil.loglimit(opts)
- nodes.reverse()
- if limit < sys.maxint:
- nodes = nodes[:limit]
- return graphmod.nodes(repo, nodes)
-
-def goutgoing(ui, repo, dest=None, **opts):
- """show the outgoing changesets alongside an ASCII revision graph
-
- Print the outgoing changesets alongside a revision graph drawn with
- ASCII characters.
-
- Nodes printed as an @ character are parents of the working
- directory.
- """
-
- check_unsupported_flags(opts)
- dest, revs, checkout = hg.parseurl(
- ui.expandpath(dest or 'default-push', dest or 'default'),
- opts.get('rev'))
- if revs:
- revs = [repo.lookup(rev) for rev in revs]
- other = hg.repository(cmdutil.remoteui(ui, opts), dest)
- ui.status(_('comparing with %s\n') % url.hidepassword(dest))
- o = repo.findoutgoing(other, force=opts.get('force'))
- if not o:
- ui.status(_("no changes found\n"))
- return
-
- o = repo.changelog.nodesbetween(o, revs)[0]
- revdag = graphrevs(repo, o, opts)
- fmtdag = asciiformat(ui, repo, revdag, opts)
- ascii(ui, asciiedges(fmtdag))
-
-def gincoming(ui, repo, source="default", **opts):
- """show the incoming changesets alongside an ASCII revision graph
-
- Print the incoming changesets alongside a revision graph drawn with
- ASCII characters.
-
- Nodes printed as an @ character are parents of the working
- directory.
- """
-
- check_unsupported_flags(opts)
- source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
- other = hg.repository(cmdutil.remoteui(repo, opts), source)
- ui.status(_('comparing with %s\n') % url.hidepassword(source))
- if revs:
- revs = [other.lookup(rev) for rev in revs]
- incoming = repo.findincoming(other, heads=revs, force=opts["force"])
- if not incoming:
- try:
- os.unlink(opts["bundle"])
- except:
- pass
- ui.status(_("no changes found\n"))
- return
-
- cleanup = None
- try:
-
- fname = opts["bundle"]
- if fname or not other.local():
- # create a bundle (uncompressed if other repo is not local)
- if revs is None:
- cg = other.changegroup(incoming, "incoming")
- else:
- cg = other.changegroupsubset(incoming, revs, 'incoming')
- bundletype = other.local() and "HG10BZ" or "HG10UN"
- fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
- # keep written bundle?
- if opts["bundle"]:
- cleanup = None
- if not other.local():
- # use the created uncompressed bundlerepo
- other = bundlerepo.bundlerepository(ui, repo.root, fname)
-
- chlist = other.changelog.nodesbetween(incoming, revs)[0]
- revdag = graphrevs(other, chlist, opts)
- fmtdag = asciiformat(ui, other, revdag, opts, parentrepo=repo)
- ascii(ui, asciiedges(fmtdag))
-
- finally:
- if hasattr(other, 'close'):
- other.close()
- if cleanup:
- os.unlink(cleanup)
-
-def uisetup(ui):
- '''Initialize the extension.'''
- _wrapcmd(ui, 'log', commands.table, graphlog)
- _wrapcmd(ui, 'incoming', commands.table, gincoming)
- _wrapcmd(ui, 'outgoing', commands.table, goutgoing)
-
-def _wrapcmd(ui, cmd, table, wrapfn):
- '''wrap the command'''
- def graph(orig, *args, **kwargs):
- if kwargs['graph']:
- return wrapfn(*args, **kwargs)
- return orig(*args, **kwargs)
- entry = extensions.wrapcommand(table, cmd, graph)
- entry[1].append(('G', 'graph', None, _("show the revision DAG")))
-
-cmdtable = {
- "glog":
- (graphlog,
- [('l', 'limit', '', _('limit number of changes displayed')),
- ('p', 'patch', False, _('show patch')),
- ('r', 'rev', [], _('show the specified revision or range')),
- ] + templateopts,
- _('hg glog [OPTION]... [FILE]')),
-}
diff --git a/sys/lib/python/hgext/hgcia.py b/sys/lib/python/hgext/hgcia.py
deleted file mode 100644
index dfae38919..000000000
--- a/sys/lib/python/hgext/hgcia.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright (C) 2007-8 Brendan Cully <brendan@kublai.com>
-# Published under the GNU GPL
-
-"""hooks for integrating with the CIA.vc notification service
-
-This is meant to be run as a changegroup or incoming hook. To
-configure it, set the following options in your hgrc::
-
- [cia]
- # your registered CIA user name
- user = foo
- # the name of the project in CIA
- project = foo
- # the module (subproject) (optional)
- #module = foo
- # Append a diffstat to the log message (optional)
- #diffstat = False
- # Template to use for log messages (optional)
- #template = {desc}\\n{baseurl}/rev/{node}-- {diffstat}
- # Style to use (optional)
- #style = foo
- # The URL of the CIA notification service (optional)
- # You can use mailto: URLs to send by email, eg
- # mailto:cia@cia.vc
- # Make sure to set email.from if you do this.
- #url = http://cia.vc/
- # print message instead of sending it (optional)
- #test = False
-
- [hooks]
- # one of these:
- changegroup.cia = python:hgcia.hook
- #incoming.cia = python:hgcia.hook
-
- [web]
- # If you want hyperlinks (optional)
- baseurl = http://server/path/to/repo
-"""
-
-from mercurial.i18n import _
-from mercurial.node import *
-from mercurial import cmdutil, patch, templater, util, mail
-import email.Parser
-
-import xmlrpclib
-from xml.sax import saxutils
-
-socket_timeout = 30 # seconds
-try:
- # set a timeout for the socket so you don't have to wait so looooong
- # when cia.vc is having problems. requires python >= 2.3:
- import socket
- socket.setdefaulttimeout(socket_timeout)
-except:
- pass
-
-HGCIA_VERSION = '0.1'
-HGCIA_URL = 'http://hg.kublai.com/mercurial/hgcia'
-
-
-class ciamsg(object):
- """ A CIA message """
- def __init__(self, cia, ctx):
- self.cia = cia
- self.ctx = ctx
- self.url = self.cia.url
-
- def fileelem(self, path, uri, action):
- if uri:
- uri = ' uri=%s' % saxutils.quoteattr(uri)
- return '<file%s action=%s>%s</file>' % (
- uri, saxutils.quoteattr(action), saxutils.escape(path))
-
- def fileelems(self):
- n = self.ctx.node()
- f = self.cia.repo.status(self.ctx.parents()[0].node(), n)
- url = self.url or ''
- elems = []
- for path in f[0]:
- uri = '%s/diff/%s/%s' % (url, short(n), path)
- elems.append(self.fileelem(path, url and uri, 'modify'))
- for path in f[1]:
- # TODO: copy/rename ?
- uri = '%s/file/%s/%s' % (url, short(n), path)
- elems.append(self.fileelem(path, url and uri, 'add'))
- for path in f[2]:
- elems.append(self.fileelem(path, '', 'remove'))
-
- return '\n'.join(elems)
-
- def sourceelem(self, project, module=None, branch=None):
- msg = ['<source>', '<project>%s</project>' % saxutils.escape(project)]
- if module:
- msg.append('<module>%s</module>' % saxutils.escape(module))
- if branch:
- msg.append('<branch>%s</branch>' % saxutils.escape(branch))
- msg.append('</source>')
-
- return '\n'.join(msg)
-
- def diffstat(self):
- class patchbuf(object):
- def __init__(self):
- self.lines = []
- # diffstat is stupid
- self.name = 'cia'
- def write(self, data):
- self.lines.append(data)
- def close(self):
- pass
-
- n = self.ctx.node()
- pbuf = patchbuf()
- patch.export(self.cia.repo, [n], fp=pbuf)
- return patch.diffstat(pbuf.lines) or ''
-
- def logmsg(self):
- diffstat = self.cia.diffstat and self.diffstat() or ''
- self.cia.ui.pushbuffer()
- self.cia.templater.show(self.ctx, changes=self.ctx.changeset(),
- url=self.cia.url, diffstat=diffstat)
- return self.cia.ui.popbuffer()
-
- def xml(self):
- n = short(self.ctx.node())
- src = self.sourceelem(self.cia.project, module=self.cia.module,
- branch=self.ctx.branch())
- # unix timestamp
- dt = self.ctx.date()
- timestamp = dt[0]
-
- author = saxutils.escape(self.ctx.user())
- rev = '%d:%s' % (self.ctx.rev(), n)
- log = saxutils.escape(self.logmsg())
-
- url = self.url and '<url>%s/rev/%s</url>' % (saxutils.escape(self.url),
- n) or ''
-
- msg = """
-<message>
- <generator>
- <name>Mercurial (hgcia)</name>
- <version>%s</version>
- <url>%s</url>
- <user>%s</user>
- </generator>
- %s
- <body>
- <commit>
- <author>%s</author>
- <version>%s</version>
- <log>%s</log>
- %s
- <files>%s</files>
- </commit>
- </body>
- <timestamp>%d</timestamp>
-</message>
-""" % \
- (HGCIA_VERSION, saxutils.escape(HGCIA_URL),
- saxutils.escape(self.cia.user), src, author, rev, log, url,
- self.fileelems(), timestamp)
-
- return msg
-
-
-class hgcia(object):
- """ CIA notification class """
-
- deftemplate = '{desc}'
- dstemplate = '{desc}\n-- \n{diffstat}'
-
- def __init__(self, ui, repo):
- self.ui = ui
- self.repo = repo
-
- self.ciaurl = self.ui.config('cia', 'url', 'http://cia.vc')
- self.user = self.ui.config('cia', 'user')
- self.project = self.ui.config('cia', 'project')
- self.module = self.ui.config('cia', 'module')
- self.diffstat = self.ui.configbool('cia', 'diffstat')
- self.emailfrom = self.ui.config('email', 'from')
- self.dryrun = self.ui.configbool('cia', 'test')
- self.url = self.ui.config('web', 'baseurl')
-
- style = self.ui.config('cia', 'style')
- template = self.ui.config('cia', 'template')
- if not template:
- template = self.diffstat and self.dstemplate or self.deftemplate
- template = templater.parsestring(template, quoted=False)
- t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
- style, False)
- t.use_template(template)
- self.templater = t
-
- def sendrpc(self, msg):
- srv = xmlrpclib.Server(self.ciaurl)
- srv.hub.deliver(msg)
-
- def sendemail(self, address, data):
- p = email.Parser.Parser()
- msg = p.parsestr(data)
- msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
- msg['To'] = address
- msg['From'] = self.emailfrom
- msg['Subject'] = 'DeliverXML'
- msg['Content-type'] = 'text/xml'
- msgtext = msg.as_string()
-
- self.ui.status(_('hgcia: sending update to %s\n') % address)
- mail.sendmail(self.ui, util.email(self.emailfrom),
- [address], msgtext)
-
-
-def hook(ui, repo, hooktype, node=None, url=None, **kwargs):
- """ send CIA notification """
- def sendmsg(cia, ctx):
- msg = ciamsg(cia, ctx).xml()
- if cia.dryrun:
- ui.write(msg)
- elif cia.ciaurl.startswith('mailto:'):
- if not cia.emailfrom:
- raise util.Abort(_('email.from must be defined when '
- 'sending by email'))
- cia.sendemail(cia.ciaurl[7:], msg)
- else:
- cia.sendrpc(msg)
-
- n = bin(node)
- cia = hgcia(ui, repo)
- if not cia.user:
- ui.debug(_('cia: no user specified'))
- return
- if not cia.project:
- ui.debug(_('cia: no project specified'))
- return
- if hooktype == 'changegroup':
- start = repo.changelog.rev(n)
- end = len(repo.changelog)
- for rev in xrange(start, end):
- n = repo.changelog.node(rev)
- ctx = repo.changectx(n)
- sendmsg(cia, ctx)
- else:
- ctx = repo.changectx(n)
- sendmsg(cia, ctx)
diff --git a/sys/lib/python/hgext/hgfactotum.py b/sys/lib/python/hgext/hgfactotum.py
deleted file mode 100644
index cd8d0f44b..000000000
--- a/sys/lib/python/hgext/hgfactotum.py
+++ /dev/null
@@ -1,70 +0,0 @@
-''' factotum support '''
-
-import mercurial.url
-import urllib2
-import factotum
-import base64
-
-class factotumbasic(urllib2.BaseHandler):
- def __init__(self, passmgr=None):
- self.f = factotum.Factotum()
- self.retried = 0
- self.auth = None
- def http_error_401(self, req, fp, code, msg, headers):
- host = urllib2.urlparse.urlparse(req.get_full_url())[1]
- authreq = headers.get('www-authenticate', None)
- if authreq == None: return None
- authreq = authreq.split(' ', 1)
- if authreq[0].lower() != 'basic': return None
- chal = urllib2.parse_keqv_list(urllib2.parse_http_list(authreq[1]))
- realm = chal['realm']
- self.auth = (host, realm)
- self.retried += 1
- if self.retried >= 3:
- self.f.delkey(proto="pass", host=host, realm=realm, role="client")
- self.f.start(proto="pass", host=host, realm=realm, role="client")
- pw = self.f.read().replace(' ', ':', 1)
- val = 'Basic %s' % base64.b64encode(pw).strip()
- if req.headers.get('Authorization', None) == val: return None
- req.add_header('Authorization', val)
- result = self.parent.open(req)
- self.retried = 0
- return result
- def http_error_403(self, req, fp, code, msg, headers):
- if self.auth != None:
- self.f.delkey(proto="pass", host=self.auth[0], realm=self.auth[1], role="client")
- self.auth = None
-
-class factotumdigest(urllib2.BaseHandler):
- auth_header = 'Authorization'
- handler_order = 490
-
- def __init__(self, passmgr=None):
- self.f = factotum.Factotum()
- self.retried = 0
- def http_error_401(self, req, fp, code, msg, headers):
- self.retried += 1
- host = urllib2.urlparse.urlparse(req.get_full_url())[1]
- authreq = headers.get('www-authenticate', None)
- if authreq == None: return None
- authreq = authreq.split(' ', 1)
- if authreq[0].lower() != 'digest': return None
- chal = urllib2.parse_keqv_list(urllib2.parse_http_list(authreq[1]))
- realm = chal['realm']
- nonce = chal['nonce']
- if self.retried >= 6:
- self.f.delkey(proto="httpdigest", realm=realm, host=host)
- self.f.start(proto="httpdigest", role="client", realm=realm, host=host)
- self.f.write(nonce + ' ' + req.get_method() + ' ' + req.get_selector())
- resp = self.f.read()
- user = self.f.attr()["user"]
- self.f.close()
- val = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", response="%s", algorithm=MD5' % (user, realm, nonce, req.get_selector(), resp)
- if req.headers.get('Authorization', None) == val: return None
- req.add_unredirected_header('Authorization', val)
- result = self.parent.open(req)
- self.retried = 0
- return result
-
-urllib2.HTTPBasicAuthHandler = factotumbasic
-mercurial.url.httpdigestauthhandler = factotumdigest
diff --git a/sys/lib/python/hgext/hgk.py b/sys/lib/python/hgext/hgk.py
deleted file mode 100644
index 03441ce00..000000000
--- a/sys/lib/python/hgext/hgk.py
+++ /dev/null
@@ -1,347 +0,0 @@
-# Minimal support for git commands on an hg repository
-#
-# Copyright 2005, 2006 Chris Mason <mason@suse.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''browse the repository in a graphical way
-
-The hgk extension allows browsing the history of a repository in a
-graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
-distributed with Mercurial.)
-
-hgk consists of two parts: a Tcl script that does the displaying and
-querying of information, and an extension to Mercurial named hgk.py,
-which provides hooks for hgk to get information. hgk can be found in
-the contrib directory, and the extension is shipped in the hgext
-repository, and needs to be enabled.
-
-The hg view command will launch the hgk Tcl script. For this command
-to work, hgk must be in your search path. Alternately, you can specify
-the path to hgk in your .hgrc file::
-
- [hgk]
- path=/location/of/hgk
-
-hgk can make use of the extdiff extension to visualize revisions.
-Assuming you had already configured extdiff vdiff command, just add::
-
- [hgk]
- vdiff=vdiff
-
-Revisions context menu will now display additional entries to fire
-vdiff on hovered and selected revisions.
-'''
-
-import os
-from mercurial import commands, util, patch, revlog, cmdutil
-from mercurial.node import nullid, nullrev, short
-from mercurial.i18n import _
-
-def difftree(ui, repo, node1=None, node2=None, *files, **opts):
- """diff trees from two commits"""
- def __difftree(repo, node1, node2, files=[]):
- assert node2 is not None
- mmap = repo[node1].manifest()
- mmap2 = repo[node2].manifest()
- m = cmdutil.match(repo, files)
- modified, added, removed = repo.status(node1, node2, m)[:3]
- empty = short(nullid)
-
- for f in modified:
- # TODO get file permissions
- ui.write(":100664 100664 %s %s M\t%s\t%s\n" %
- (short(mmap[f]), short(mmap2[f]), f, f))
- for f in added:
- ui.write(":000000 100664 %s %s N\t%s\t%s\n" %
- (empty, short(mmap2[f]), f, f))
- for f in removed:
- ui.write(":100664 000000 %s %s D\t%s\t%s\n" %
- (short(mmap[f]), empty, f, f))
- ##
-
- while True:
- if opts['stdin']:
- try:
- line = raw_input().split(' ')
- node1 = line[0]
- if len(line) > 1:
- node2 = line[1]
- else:
- node2 = None
- except EOFError:
- break
- node1 = repo.lookup(node1)
- if node2:
- node2 = repo.lookup(node2)
- else:
- node2 = node1
- node1 = repo.changelog.parents(node1)[0]
- if opts['patch']:
- if opts['pretty']:
- catcommit(ui, repo, node2, "")
- m = cmdutil.match(repo, files)
- chunks = patch.diff(repo, node1, node2, match=m,
- opts=patch.diffopts(ui, {'git': True}))
- for chunk in chunks:
- ui.write(chunk)
- else:
- __difftree(repo, node1, node2, files=files)
- if not opts['stdin']:
- break
-
-def catcommit(ui, repo, n, prefix, ctx=None):
- nlprefix = '\n' + prefix;
- if ctx is None:
- ctx = repo[n]
- ui.write("tree %s\n" % short(ctx.changeset()[0])) # use ctx.node() instead ??
- for p in ctx.parents():
- ui.write("parent %s\n" % p)
-
- date = ctx.date()
- description = ctx.description().replace("\0", "")
- lines = description.splitlines()
- if lines and lines[-1].startswith('committer:'):
- committer = lines[-1].split(': ')[1].rstrip()
- else:
- committer = ctx.user()
-
- ui.write("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))
- ui.write("committer %s %s %s\n" % (committer, int(date[0]), date[1]))
- ui.write("revision %d\n" % ctx.rev())
- ui.write("branch %s\n\n" % ctx.branch())
-
- if prefix != "":
- ui.write("%s%s\n" % (prefix, description.replace('\n', nlprefix).strip()))
- else:
- ui.write(description + "\n")
- if prefix:
- ui.write('\0')
-
-def base(ui, repo, node1, node2):
- """output common ancestor information"""
- node1 = repo.lookup(node1)
- node2 = repo.lookup(node2)
- n = repo.changelog.ancestor(node1, node2)
- ui.write(short(n) + "\n")
-
-def catfile(ui, repo, type=None, r=None, **opts):
- """cat a specific revision"""
- # in stdin mode, every line except the commit is prefixed with two
- # spaces. This way the our caller can find the commit without magic
- # strings
- #
- prefix = ""
- if opts['stdin']:
- try:
- (type, r) = raw_input().split(' ');
- prefix = " "
- except EOFError:
- return
-
- else:
- if not type or not r:
- ui.warn(_("cat-file: type or revision not supplied\n"))
- commands.help_(ui, 'cat-file')
-
- while r:
- if type != "commit":
- ui.warn(_("aborting hg cat-file only understands commits\n"))
- return 1;
- n = repo.lookup(r)
- catcommit(ui, repo, n, prefix)
- if opts['stdin']:
- try:
- (type, r) = raw_input().split(' ');
- except EOFError:
- break
- else:
- break
-
-# git rev-tree is a confusing thing. You can supply a number of
-# commit sha1s on the command line, and it walks the commit history
-# telling you which commits are reachable from the supplied ones via
-# a bitmask based on arg position.
-# you can specify a commit to stop at by starting the sha1 with ^
-def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
- def chlogwalk():
- count = len(repo)
- i = count
- l = [0] * 100
- chunk = 100
- while True:
- if chunk > i:
- chunk = i
- i = 0
- else:
- i -= chunk
-
- for x in xrange(chunk):
- if i + x >= count:
- l[chunk - x:] = [0] * (chunk - x)
- break
- if full != None:
- l[x] = repo[i + x]
- l[x].changeset() # force reading
- else:
- l[x] = 1
- for x in xrange(chunk-1, -1, -1):
- if l[x] != 0:
- yield (i + x, full != None and l[x] or None)
- if i == 0:
- break
-
- # calculate and return the reachability bitmask for sha
- def is_reachable(ar, reachable, sha):
- if len(ar) == 0:
- return 1
- mask = 0
- for i in xrange(len(ar)):
- if sha in reachable[i]:
- mask |= 1 << i
-
- return mask
-
- reachable = []
- stop_sha1 = []
- want_sha1 = []
- count = 0
-
- # figure out which commits they are asking for and which ones they
- # want us to stop on
- for i, arg in enumerate(args):
- if arg.startswith('^'):
- s = repo.lookup(arg[1:])
- stop_sha1.append(s)
- want_sha1.append(s)
- elif arg != 'HEAD':
- want_sha1.append(repo.lookup(arg))
-
- # calculate the graph for the supplied commits
- for i, n in enumerate(want_sha1):
- reachable.append(set());
- visit = [n];
- reachable[i].add(n)
- while visit:
- n = visit.pop(0)
- if n in stop_sha1:
- continue
- for p in repo.changelog.parents(n):
- if p not in reachable[i]:
- reachable[i].add(p)
- visit.append(p)
- if p in stop_sha1:
- continue
-
- # walk the repository looking for commits that are in our
- # reachability graph
- for i, ctx in chlogwalk():
- n = repo.changelog.node(i)
- mask = is_reachable(want_sha1, reachable, n)
- if mask:
- parentstr = ""
- if parents:
- pp = repo.changelog.parents(n)
- if pp[0] != nullid:
- parentstr += " " + short(pp[0])
- if pp[1] != nullid:
- parentstr += " " + short(pp[1])
- if not full:
- ui.write("%s%s\n" % (short(n), parentstr))
- elif full == "commit":
- ui.write("%s%s\n" % (short(n), parentstr))
- catcommit(ui, repo, n, ' ', ctx)
- else:
- (p1, p2) = repo.changelog.parents(n)
- (h, h1, h2) = map(short, (n, p1, p2))
- (i1, i2) = map(repo.changelog.rev, (p1, p2))
-
- date = ctx.date()[0]
- ui.write("%s %s:%s" % (date, h, mask))
- mask = is_reachable(want_sha1, reachable, p1)
- if i1 != nullrev and mask > 0:
- ui.write("%s:%s " % (h1, mask)),
- mask = is_reachable(want_sha1, reachable, p2)
- if i2 != nullrev and mask > 0:
- ui.write("%s:%s " % (h2, mask))
- ui.write("\n")
- if maxnr and count >= maxnr:
- break
- count += 1
-
-def revparse(ui, repo, *revs, **opts):
- """parse given revisions"""
- def revstr(rev):
- if rev == 'HEAD':
- rev = 'tip'
- return revlog.hex(repo.lookup(rev))
-
- for r in revs:
- revrange = r.split(':', 1)
- ui.write('%s\n' % revstr(revrange[0]))
- if len(revrange) == 2:
- ui.write('^%s\n' % revstr(revrange[1]))
-
-# git rev-list tries to order things by date, and has the ability to stop
-# at a given commit without walking the whole repo. TODO add the stop
-# parameter
-def revlist(ui, repo, *revs, **opts):
- """print revisions"""
- if opts['header']:
- full = "commit"
- else:
- full = None
- copy = [x for x in revs]
- revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
-
-def config(ui, repo, **opts):
- """print extension options"""
- def writeopt(name, value):
- ui.write('k=%s\nv=%s\n' % (name, value))
-
- writeopt('vdiff', ui.config('hgk', 'vdiff', ''))
-
-
-def view(ui, repo, *etc, **opts):
- "start interactive history viewer"
- os.chdir(repo.root)
- optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
- cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
- ui.debug(_("running %s\n") % cmd)
- util.system(cmd)
-
-cmdtable = {
- "^view":
- (view,
- [('l', 'limit', '', _('limit number of changes displayed'))],
- _('hg view [-l LIMIT] [REVRANGE]')),
- "debug-diff-tree":
- (difftree,
- [('p', 'patch', None, _('generate patch')),
- ('r', 'recursive', None, _('recursive')),
- ('P', 'pretty', None, _('pretty')),
- ('s', 'stdin', None, _('stdin')),
- ('C', 'copy', None, _('detect copies')),
- ('S', 'search', "", _('search'))],
- _('hg git-diff-tree [OPTION]... NODE1 NODE2 [FILE]...')),
- "debug-cat-file":
- (catfile,
- [('s', 'stdin', None, _('stdin'))],
- _('hg debug-cat-file [OPTION]... TYPE FILE')),
- "debug-config":
- (config, [], _('hg debug-config')),
- "debug-merge-base":
- (base, [], _('hg debug-merge-base REV REV')),
- "debug-rev-parse":
- (revparse,
- [('', 'default', '', _('ignored'))],
- _('hg debug-rev-parse REV')),
- "debug-rev-list":
- (revlist,
- [('H', 'header', None, _('header')),
- ('t', 'topo-order', None, _('topo-order')),
- ('p', 'parents', None, _('parents')),
- ('n', 'max-count', 0, _('max-count'))],
- _('hg debug-rev-list [OPTION]... REV...')),
-}
diff --git a/sys/lib/python/hgext/hgwebfs.py b/sys/lib/python/hgext/hgwebfs.py
deleted file mode 100644
index 9ae26b959..000000000
--- a/sys/lib/python/hgext/hgwebfs.py
+++ /dev/null
@@ -1,105 +0,0 @@
-''' webfs support '''
-
-import mercurial.url
-import re
-import os
-
-class Webconn:
- def __init__(self, mnt, req):
- while True:
- try:
- self.open(mnt, req)
- return
- except IOError, e:
- try:
- errstr = e.strerror
- params = errstr[errstr.index("needkey ")+8:]
- if params.find("!password?") < 0:
- raise e
- if os.spawnl(os.P_WAIT, "/boot/factotum", "getkey", "-g", params) != 0:
- raise e
- except:
- raise e
-
- def open(self, mnt, req):
- if type(req) == str:
- self.url = req
- else:
- self.url = req.get_full_url()
- if self.url[0:5] == 'file:':
- path = self.url[5:]
- while path[0:2] == '//':
- path = path[1:]
- self.dir = '/dev/null'
- self.body = open(path, 'r', 0)
- return
- ctl = open(mnt+'/clone', 'r+', 0)
- try:
- self.dir = mnt+'/'+ctl.readline().rstrip('\n')
- ctl.seek(0)
- ctl.write('url '+self.url)
- m = 'User-Agent: mercurial/proto-1.0\r\n';
- ctl.seek(0)
- ctl.write('headers '+m)
- for h in req.headers:
- ctl.seek(0)
- ctl.write('headers '+h+': '+req.headers[h]+'\r\n')
-
- if req.has_data():
- data = req.get_data()
- post = open(self.dir+'/postbody', 'w', 0);
- try:
- data.seek(0)
- while True:
- buf = data.read(4096)
- if len(buf) == 0:
- break
- post.write(buf)
- finally:
- post.close()
- self.body = open(self.dir+'/body', 'r', 0)
- finally:
- ctl.close()
-
- def read(self, amt=-1):
- return self.body.read(amt);
-
- def close(self):
- self.body.close()
- self.body = None
- self.dir = None
-
- def geturl(self):
- return self.url
-
- def getheader(self, key):
- name = re.sub(r'[^a-z]+', '', key.lower())
- try:
- f = open(self.dir+'/'+name, 'r', 0)
- try:
- hdr = f.read()
- finally:
- f.close()
- return hdr
- except:
- return None
-
-class Webopener:
- def __init__(self):
- self.handlers = []
-
- def add_handler(self, handler):
- return
-
- def open(self, req, data=None):
- return Webconn('/mnt/web', req)
-
- def close(self):
- pass
-
-
-def webopener(ui, authinfo=None):
- return Webopener();
-
-mercurial.url.has_https = 1
-mercurial.url.opener = webopener
diff --git a/sys/lib/python/hgext/highlight/__init__.py b/sys/lib/python/hgext/highlight/__init__.py
deleted file mode 100644
index 65efae3c9..000000000
--- a/sys/lib/python/hgext/highlight/__init__.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# highlight - syntax highlighting in hgweb, based on Pygments
-#
-# Copyright 2008, 2009 Patrick Mezard <pmezard@gmail.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-#
-# The original module was split in an interface and an implementation
-# file to defer pygments loading and speedup extension setup.
-
-"""syntax highlighting for hgweb (requires Pygments)
-
-It depends on the Pygments syntax highlighting library:
-http://pygments.org/
-
-There is a single configuration option::
-
- [web]
- pygments_style = <style>
-
-The default is 'colorful'.
-"""
-
-import highlight
-from mercurial.hgweb import webcommands, webutil, common
-from mercurial import extensions, encoding
-
-def filerevision_highlight(orig, web, tmpl, fctx):
- mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
- # only pygmentize for mimetype containing 'html' so we both match
- # 'text/html' and possibly 'application/xhtml+xml' in the future
- # so that we don't have to touch the extension when the mimetype
- # for a template changes; also hgweb optimizes the case that a
- # raw file is sent using rawfile() and doesn't call us, so we
- # can't clash with the file's content-type here in case we
- # pygmentize a html file
- if 'html' in mt:
- style = web.config('web', 'pygments_style', 'colorful')
- highlight.pygmentize('fileline', fctx, style, tmpl)
- return orig(web, tmpl, fctx)
-
-def annotate_highlight(orig, web, req, tmpl):
- mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
- if 'html' in mt:
- fctx = webutil.filectx(web.repo, req)
- style = web.config('web', 'pygments_style', 'colorful')
- highlight.pygmentize('annotateline', fctx, style, tmpl)
- return orig(web, req, tmpl)
-
-def generate_css(web, req, tmpl):
- pg_style = web.config('web', 'pygments_style', 'colorful')
- fmter = highlight.HtmlFormatter(style = pg_style)
- req.respond(common.HTTP_OK, 'text/css')
- return ['/* pygments_style = %s */\n\n' % pg_style, fmter.get_style_defs('')]
-
-# monkeypatch in the new version
-extensions.wrapfunction(webcommands, '_filerevision', filerevision_highlight)
-extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
-webcommands.highlightcss = generate_css
-webcommands.__all__.append('highlightcss')
diff --git a/sys/lib/python/hgext/highlight/highlight.py b/sys/lib/python/hgext/highlight/highlight.py
deleted file mode 100644
index 0f767234d..000000000
--- a/sys/lib/python/hgext/highlight/highlight.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# highlight.py - highlight extension implementation file
-#
-# Copyright 2007-2009 Adam Hupp <adam@hupp.org> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-#
-# The original module was split in an interface and an implementation
-# file to defer pygments loading and speedup extension setup.
-
-from mercurial import demandimport
-demandimport.ignore.extend(['pkgutil', 'pkg_resources', '__main__',])
-from mercurial import util, encoding
-
-from pygments import highlight
-from pygments.util import ClassNotFound
-from pygments.lexers import guess_lexer, guess_lexer_for_filename, TextLexer
-from pygments.formatters import HtmlFormatter
-
-SYNTAX_CSS = ('\n<link rel="stylesheet" href="{url}highlightcss" '
- 'type="text/css" />')
-
-def pygmentize(field, fctx, style, tmpl):
-
- # append a <link ...> to the syntax highlighting css
- old_header = ''.join(tmpl('header'))
- if SYNTAX_CSS not in old_header:
- new_header = old_header + SYNTAX_CSS
- tmpl.cache['header'] = new_header
-
- text = fctx.data()
- if util.binary(text):
- return
-
- # avoid UnicodeDecodeError in pygments
- text = encoding.tolocal(text)
-
- # To get multi-line strings right, we can't format line-by-line
- try:
- lexer = guess_lexer_for_filename(fctx.path(), text[:1024],
- encoding=encoding.encoding)
- except (ClassNotFound, ValueError):
- try:
- lexer = guess_lexer(text[:1024], encoding=encoding.encoding)
- except (ClassNotFound, ValueError):
- lexer = TextLexer(encoding=encoding.encoding)
-
- formatter = HtmlFormatter(style=style, encoding=encoding.encoding)
-
- colorized = highlight(text, lexer, formatter)
- # strip wrapping div
- colorized = colorized[:colorized.find('\n</pre>')]
- colorized = colorized[colorized.find('<pre>')+5:]
- coloriter = iter(colorized.splitlines())
-
- tmpl.filters['colorize'] = lambda x: coloriter.next()
-
- oldl = tmpl.cache[field]
- newl = oldl.replace('line|escape', 'line|colorize')
- tmpl.cache[field] = newl
diff --git a/sys/lib/python/hgext/inotify/__init__.py b/sys/lib/python/hgext/inotify/__init__.py
deleted file mode 100644
index cc952c2c6..000000000
--- a/sys/lib/python/hgext/inotify/__init__.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# __init__.py - inotify-based status acceleration for Linux
-#
-# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''accelerate status report using Linux's inotify service'''
-
-# todo: socket permissions
-
-from mercurial.i18n import _
-from mercurial import cmdutil, util
-import server
-from weakref import proxy
-from client import client, QueryFailed
-
-def serve(ui, repo, **opts):
- '''start an inotify server for this repository'''
- timeout = opts.get('timeout')
- if timeout:
- timeout = float(timeout) * 1e3
-
- class service(object):
- def init(self):
- try:
- self.master = server.master(ui, repo.dirstate,
- repo.root, timeout)
- except server.AlreadyStartedException, inst:
- raise util.Abort(str(inst))
-
- def run(self):
- try:
- self.master.run()
- finally:
- self.master.shutdown()
-
- service = service()
- logfile = ui.config('inotify', 'log')
- cmdutil.service(opts, initfn=service.init, runfn=service.run,
- logfile=logfile)
-
-def debuginotify(ui, repo, **opts):
- '''debugging information for inotify extension
-
- Prints the list of directories being watched by the inotify server.
- '''
- cli = client(ui, repo)
- response = cli.debugquery()
-
- ui.write(_('directories being watched:\n'))
- for path in response:
- ui.write((' %s/\n') % path)
-
-def reposetup(ui, repo):
- if not hasattr(repo, 'dirstate'):
- return
-
- class inotifydirstate(repo.dirstate.__class__):
-
- # We'll set this to false after an unsuccessful attempt so that
- # next calls of status() within the same instance don't try again
- # to start an inotify server if it won't start.
- _inotifyon = True
-
- def status(self, match, ignored, clean, unknown=True):
- files = match.files()
- if '.' in files:
- files = []
- if self._inotifyon and not ignored:
- cli = client(ui, repo)
- try:
- result = cli.statusquery(files, match, False,
- clean, unknown)
- except QueryFailed, instr:
- ui.debug(str(instr))
- # don't retry within the same hg instance
- inotifydirstate._inotifyon = False
- pass
- else:
- if ui.config('inotify', 'debug'):
- r2 = super(inotifydirstate, self).status(
- match, False, clean, unknown)
- for c,a,b in zip('LMARDUIC', result, r2):
- for f in a:
- if f not in b:
- ui.warn('*** inotify: %s +%s\n' % (c, f))
- for f in b:
- if f not in a:
- ui.warn('*** inotify: %s -%s\n' % (c, f))
- result = r2
- return result
- return super(inotifydirstate, self).status(
- match, ignored, clean, unknown)
-
- repo.dirstate.__class__ = inotifydirstate
-
-cmdtable = {
- 'debuginotify':
- (debuginotify, [], ('hg debuginotify')),
- '^inserve':
- (serve,
- [('d', 'daemon', None, _('run server in background')),
- ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
- ('t', 'idle-timeout', '', _('minutes to sit idle before exiting')),
- ('', 'pid-file', '', _('name of file to write process ID to'))],
- _('hg inserve [OPTION]...')),
- }
diff --git a/sys/lib/python/hgext/inotify/client.py b/sys/lib/python/hgext/inotify/client.py
deleted file mode 100644
index 800d4a3aa..000000000
--- a/sys/lib/python/hgext/inotify/client.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# client.py - inotify status client
-#
-# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
-# Copyright 2009 Nicolas Dumazet <nicdumz@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from mercurial.i18n import _
-import common, server
-import errno, os, socket, struct
-
-class QueryFailed(Exception): pass
-
-def start_server(function):
- """
- Decorator.
- Tries to call function, if it fails, try to (re)start inotify server.
- Raise QueryFailed if something went wrong
- """
- def decorated_function(self, *args):
- result = None
- try:
- return function(self, *args)
- except (OSError, socket.error), err:
- autostart = self.ui.configbool('inotify', 'autostart', True)
-
- if err[0] == errno.ECONNREFUSED:
- self.ui.warn(_('(found dead inotify server socket; '
- 'removing it)\n'))
- os.unlink(os.path.join(self.root, '.hg', 'inotify.sock'))
- if err[0] in (errno.ECONNREFUSED, errno.ENOENT) and autostart:
- self.ui.debug(_('(starting inotify server)\n'))
- try:
- try:
- server.start(self.ui, self.dirstate, self.root)
- except server.AlreadyStartedException, inst:
- # another process may have started its own
- # inotify server while this one was starting.
- self.ui.debug(str(inst))
- except Exception, inst:
- self.ui.warn(_('could not start inotify server: '
- '%s\n') % inst)
- else:
- try:
- return function(self, *args)
- except socket.error, err:
- self.ui.warn(_('could not talk to new inotify '
- 'server: %s\n') % err[-1])
- elif err[0] in (errno.ECONNREFUSED, errno.ENOENT):
- # silently ignore normal errors if autostart is False
- self.ui.debug(_('(inotify server not running)\n'))
- else:
- self.ui.warn(_('failed to contact inotify server: %s\n')
- % err[-1])
-
- self.ui.traceback()
- raise QueryFailed('inotify query failed')
-
- return decorated_function
-
-
-class client(object):
- def __init__(self, ui, repo):
- self.ui = ui
- self.dirstate = repo.dirstate
- self.root = repo.root
- self.sock = socket.socket(socket.AF_UNIX)
-
- def _connect(self):
- sockpath = os.path.join(self.root, '.hg', 'inotify.sock')
- try:
- self.sock.connect(sockpath)
- except socket.error, err:
- if err[0] == "AF_UNIX path too long":
- sockpath = os.readlink(sockpath)
- self.sock.connect(sockpath)
- else:
- raise
-
- def _send(self, type, data):
- """Sends protocol version number, and the data"""
- self.sock.sendall(chr(common.version) + type + data)
-
- self.sock.shutdown(socket.SHUT_WR)
-
- def _receive(self, type):
- """
- Read data, check version number, extract headers,
- and returns a tuple (data descriptor, header)
- Raises QueryFailed on error
- """
- cs = common.recvcs(self.sock)
- try:
- version = ord(cs.read(1))
- except TypeError:
- # empty answer, assume the server crashed
- self.ui.warn(_('received empty answer from inotify server'))
- raise QueryFailed('server crashed')
-
- if version != common.version:
- self.ui.warn(_('(inotify: received response from incompatible '
- 'server version %d)\n') % version)
- raise QueryFailed('incompatible server version')
-
- readtype = cs.read(4)
- if readtype != type:
- self.ui.warn(_('(inotify: received \'%s\' response when expecting'
- ' \'%s\')\n') % (readtype, type))
- raise QueryFailed('wrong response type')
-
- hdrfmt = common.resphdrfmts[type]
- hdrsize = common.resphdrsizes[type]
- try:
- resphdr = struct.unpack(hdrfmt, cs.read(hdrsize))
- except struct.error:
- raise QueryFailed('unable to retrieve query response headers')
-
- return cs, resphdr
-
- def query(self, type, req):
- self._connect()
-
- self._send(type, req)
-
- return self._receive(type)
-
- @start_server
- def statusquery(self, names, match, ignored, clean, unknown=True):
-
- def genquery():
- for n in names:
- yield n
- states = 'almrx!'
- if ignored:
- raise ValueError('this is insanity')
- if clean: states += 'c'
- if unknown: states += '?'
- yield states
-
- req = '\0'.join(genquery())
-
- cs, resphdr = self.query('STAT', req)
-
- def readnames(nbytes):
- if nbytes:
- names = cs.read(nbytes)
- if names:
- return filter(match, names.split('\0'))
- return []
- return map(readnames, resphdr)
-
- @start_server
- def debugquery(self):
- cs, resphdr = self.query('DBUG', '')
-
- nbytes = resphdr[0]
- names = cs.read(nbytes)
- return names.split('\0')
diff --git a/sys/lib/python/hgext/inotify/common.py b/sys/lib/python/hgext/inotify/common.py
deleted file mode 100644
index 2b18b5f12..000000000
--- a/sys/lib/python/hgext/inotify/common.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# server.py - inotify common protocol code
-#
-# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import cStringIO, socket, struct
-
-"""
- Protocol between inotify clients and server:
-
- Client sending query:
- 1) send protocol version number
- 2) send query type (string, 4 letters long)
- 3) send query parameters:
- - For STAT, N+1 \0-separated strings:
- 1) N different names that need checking
- 2) 1 string containing all the status types to match
- - No parameter needed for DBUG
-
- Server sending query answer:
- 1) send protocol version number
- 2) send query type
- 3) send struct.pack'ed headers describing the length of the content:
- e.g. for STAT, receive 8 integers describing the length of the
- 8 \0-separated string lists ( one list for each lmar!?ic status type )
-
-"""
-
-version = 2
-
-resphdrfmts = {
- 'STAT': '>llllllll', # status requests
- 'DBUG': '>l' # debugging queries
-}
-resphdrsizes = dict((k, struct.calcsize(v))
- for k, v in resphdrfmts.iteritems())
-
-def recvcs(sock):
- cs = cStringIO.StringIO()
- s = True
- try:
- while s:
- s = sock.recv(65536)
- cs.write(s)
- finally:
- sock.shutdown(socket.SHUT_RD)
- cs.seek(0)
- return cs
diff --git a/sys/lib/python/hgext/inotify/linux/__init__.py b/sys/lib/python/hgext/inotify/linux/__init__.py
deleted file mode 100644
index 2fae16ab3..000000000
--- a/sys/lib/python/hgext/inotify/linux/__init__.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# __init__.py - low-level interfaces to the Linux inotify subsystem
-
-# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
-
-# This library is free software; you can redistribute it and/or modify
-# it under the terms of version 2.1 of the GNU Lesser General Public
-# License, incorporated herein by reference.
-
-'''Low-level interface to the Linux inotify subsystem.
-
-The inotify subsystem provides an efficient mechanism for file status
-monitoring and change notification.
-
-This package provides the low-level inotify system call interface and
-associated constants and helper functions.
-
-For a higher-level interface that remains highly efficient, use the
-inotify.watcher package.'''
-
-__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
-
-from _inotify import *
-
-procfs_path = '/proc/sys/fs/inotify'
-
-def _read_procfs_value(name):
- def read_value():
- try:
- return int(open(procfs_path + '/' + name).read())
- except OSError:
- return None
-
- read_value.__doc__ = '''Return the value of the %s setting from /proc.
-
- If inotify is not enabled on this system, return None.''' % name
-
- return read_value
-
-max_queued_events = _read_procfs_value('max_queued_events')
-max_user_instances = _read_procfs_value('max_user_instances')
-max_user_watches = _read_procfs_value('max_user_watches')
diff --git a/sys/lib/python/hgext/inotify/linux/_inotify.c b/sys/lib/python/hgext/inotify/linux/_inotify.c
deleted file mode 100644
index 42502aa0c..000000000
--- a/sys/lib/python/hgext/inotify/linux/_inotify.c
+++ /dev/null
@@ -1,608 +0,0 @@
-/*
- * _inotify.c - Python extension interfacing to the Linux inotify subsystem
- *
- * Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of version 2.1 of the GNU Lesser General
- * Public License, incorporated herein by reference.
- */
-
-#include <Python.h>
-#include <alloca.h>
-#include <sys/inotify.h>
-#include <stdint.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-static PyObject *init(PyObject *self, PyObject *args)
-{
- PyObject *ret = NULL;
- int fd = -1;
-
- if (!PyArg_ParseTuple(args, ":init"))
- goto bail;
-
- Py_BEGIN_ALLOW_THREADS
- fd = inotify_init();
- Py_END_ALLOW_THREADS
-
- if (fd == -1) {
- PyErr_SetFromErrno(PyExc_OSError);
- goto bail;
- }
-
- ret = PyInt_FromLong(fd);
- if (ret == NULL)
- goto bail;
-
- goto done;
-
-bail:
- if (fd != -1)
- close(fd);
-
- Py_CLEAR(ret);
-
-done:
- return ret;
-}
-
-PyDoc_STRVAR(
- init_doc,
- "init() -> fd\n"
- "\n"
- "Initialise an inotify instance.\n"
- "Return a file descriptor associated with a new inotify event queue.");
-
-static PyObject *add_watch(PyObject *self, PyObject *args)
-{
- PyObject *ret = NULL;
- uint32_t mask;
- int wd = -1;
- char *path;
- int fd;
-
- if (!PyArg_ParseTuple(args, "isI:add_watch", &fd, &path, &mask))
- goto bail;
-
- Py_BEGIN_ALLOW_THREADS
- wd = inotify_add_watch(fd, path, mask);
- Py_END_ALLOW_THREADS
-
- if (wd == -1) {
- PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
- goto bail;
- }
-
- ret = PyInt_FromLong(wd);
- if (ret == NULL)
- goto bail;
-
- goto done;
-
-bail:
- if (wd != -1)
- inotify_rm_watch(fd, wd);
-
- Py_CLEAR(ret);
-
-done:
- return ret;
-}
-
-PyDoc_STRVAR(
- add_watch_doc,
- "add_watch(fd, path, mask) -> wd\n"
- "\n"
- "Add a watch to an inotify instance, or modify an existing watch.\n"
- "\n"
- " fd: file descriptor returned by init()\n"
- " path: path to watch\n"
- " mask: mask of events to watch for\n"
- "\n"
- "Return a unique numeric watch descriptor for the inotify instance\n"
- "mapped by the file descriptor.");
-
-static PyObject *remove_watch(PyObject *self, PyObject *args)
-{
- PyObject *ret = NULL;
- uint32_t wd;
- int fd;
- int r;
-
- if (!PyArg_ParseTuple(args, "iI:remove_watch", &fd, &wd))
- goto bail;
-
- Py_BEGIN_ALLOW_THREADS
- r = inotify_rm_watch(fd, wd);
- Py_END_ALLOW_THREADS
-
- if (r == -1) {
- PyErr_SetFromErrno(PyExc_OSError);
- goto bail;
- }
-
- Py_INCREF(Py_None);
-
- goto done;
-
-bail:
- Py_CLEAR(ret);
-
-done:
- return ret;
-}
-
-PyDoc_STRVAR(
- remove_watch_doc,
- "remove_watch(fd, wd)\n"
- "\n"
- " fd: file descriptor returned by init()\n"
- " wd: watch descriptor returned by add_watch()\n"
- "\n"
- "Remove a watch associated with the watch descriptor wd from the\n"
- "inotify instance associated with the file descriptor fd.\n"
- "\n"
- "Removing a watch causes an IN_IGNORED event to be generated for this\n"
- "watch descriptor.");
-
-#define bit_name(x) {x, #x}
-
-static struct {
- int bit;
- const char *name;
- PyObject *pyname;
-} bit_names[] = {
- bit_name(IN_ACCESS),
- bit_name(IN_MODIFY),
- bit_name(IN_ATTRIB),
- bit_name(IN_CLOSE_WRITE),
- bit_name(IN_CLOSE_NOWRITE),
- bit_name(IN_OPEN),
- bit_name(IN_MOVED_FROM),
- bit_name(IN_MOVED_TO),
- bit_name(IN_CREATE),
- bit_name(IN_DELETE),
- bit_name(IN_DELETE_SELF),
- bit_name(IN_MOVE_SELF),
- bit_name(IN_UNMOUNT),
- bit_name(IN_Q_OVERFLOW),
- bit_name(IN_IGNORED),
- bit_name(IN_ONLYDIR),
- bit_name(IN_DONT_FOLLOW),
- bit_name(IN_MASK_ADD),
- bit_name(IN_ISDIR),
- bit_name(IN_ONESHOT),
- {0}
-};
-
-static PyObject *decode_mask(int mask)
-{
- PyObject *ret = PyList_New(0);
- int i;
-
- if (ret == NULL)
- goto bail;
-
- for (i = 0; bit_names[i].bit; i++) {
- if (mask & bit_names[i].bit) {
- if (bit_names[i].pyname == NULL) {
- bit_names[i].pyname = PyString_FromString(bit_names[i].name);
- if (bit_names[i].pyname == NULL)
- goto bail;
- }
- Py_INCREF(bit_names[i].pyname);
- if (PyList_Append(ret, bit_names[i].pyname) == -1)
- goto bail;
- }
- }
-
- goto done;
-
-bail:
- Py_CLEAR(ret);
-
-done:
- return ret;
-}
-
-static PyObject *pydecode_mask(PyObject *self, PyObject *args)
-{
- int mask;
-
- if (!PyArg_ParseTuple(args, "i:decode_mask", &mask))
- return NULL;
-
- return decode_mask(mask);
-}
-
-PyDoc_STRVAR(
- decode_mask_doc,
- "decode_mask(mask) -> list_of_strings\n"
- "\n"
- "Decode an inotify mask value into a list of strings that give the\n"
- "name of each bit set in the mask.");
-
-static char doc[] = "Low-level inotify interface wrappers.";
-
-static void define_const(PyObject *dict, const char *name, uint32_t val)
-{
- PyObject *pyval = PyInt_FromLong(val);
- PyObject *pyname = PyString_FromString(name);
-
- if (!pyname || !pyval)
- goto bail;
-
- PyDict_SetItem(dict, pyname, pyval);
-
-bail:
- Py_XDECREF(pyname);
- Py_XDECREF(pyval);
-}
-
-static void define_consts(PyObject *dict)
-{
- define_const(dict, "IN_ACCESS", IN_ACCESS);
- define_const(dict, "IN_MODIFY", IN_MODIFY);
- define_const(dict, "IN_ATTRIB", IN_ATTRIB);
- define_const(dict, "IN_CLOSE_WRITE", IN_CLOSE_WRITE);
- define_const(dict, "IN_CLOSE_NOWRITE", IN_CLOSE_NOWRITE);
- define_const(dict, "IN_OPEN", IN_OPEN);
- define_const(dict, "IN_MOVED_FROM", IN_MOVED_FROM);
- define_const(dict, "IN_MOVED_TO", IN_MOVED_TO);
-
- define_const(dict, "IN_CLOSE", IN_CLOSE);
- define_const(dict, "IN_MOVE", IN_MOVE);
-
- define_const(dict, "IN_CREATE", IN_CREATE);
- define_const(dict, "IN_DELETE", IN_DELETE);
- define_const(dict, "IN_DELETE_SELF", IN_DELETE_SELF);
- define_const(dict, "IN_MOVE_SELF", IN_MOVE_SELF);
- define_const(dict, "IN_UNMOUNT", IN_UNMOUNT);
- define_const(dict, "IN_Q_OVERFLOW", IN_Q_OVERFLOW);
- define_const(dict, "IN_IGNORED", IN_IGNORED);
-
- define_const(dict, "IN_ONLYDIR", IN_ONLYDIR);
- define_const(dict, "IN_DONT_FOLLOW", IN_DONT_FOLLOW);
- define_const(dict, "IN_MASK_ADD", IN_MASK_ADD);
- define_const(dict, "IN_ISDIR", IN_ISDIR);
- define_const(dict, "IN_ONESHOT", IN_ONESHOT);
- define_const(dict, "IN_ALL_EVENTS", IN_ALL_EVENTS);
-}
-
-struct event {
- PyObject_HEAD
- PyObject *wd;
- PyObject *mask;
- PyObject *cookie;
- PyObject *name;
-};
-
-static PyObject *event_wd(PyObject *self, void *x)
-{
- struct event *evt = (struct event *) self;
- Py_INCREF(evt->wd);
- return evt->wd;
-}
-
-static PyObject *event_mask(PyObject *self, void *x)
-{
- struct event *evt = (struct event *) self;
- Py_INCREF(evt->mask);
- return evt->mask;
-}
-
-static PyObject *event_cookie(PyObject *self, void *x)
-{
- struct event *evt = (struct event *) self;
- Py_INCREF(evt->cookie);
- return evt->cookie;
-}
-
-static PyObject *event_name(PyObject *self, void *x)
-{
- struct event *evt = (struct event *) self;
- Py_INCREF(evt->name);
- return evt->name;
-}
-
-static struct PyGetSetDef event_getsets[] = {
- {"wd", event_wd, NULL,
- "watch descriptor"},
- {"mask", event_mask, NULL,
- "event mask"},
- {"cookie", event_cookie, NULL,
- "rename cookie, if rename-related event"},
- {"name", event_name, NULL,
- "file name"},
- {NULL}
-};
-
-PyDoc_STRVAR(
- event_doc,
- "event: Structure describing an inotify event.");
-
-static PyObject *event_new(PyTypeObject *t, PyObject *a, PyObject *k)
-{
- return (*t->tp_alloc)(t, 0);
-}
-
-static void event_dealloc(struct event *evt)
-{
- Py_XDECREF(evt->wd);
- Py_XDECREF(evt->mask);
- Py_XDECREF(evt->cookie);
- Py_XDECREF(evt->name);
-
- (*evt->ob_type->tp_free)(evt);
-}
-
-static PyObject *event_repr(struct event *evt)
-{
- int wd = PyInt_AsLong(evt->wd);
- int cookie = evt->cookie == Py_None ? -1 : PyInt_AsLong(evt->cookie);
- PyObject *ret = NULL, *pymasks = NULL, *pymask = NULL;
- PyObject *join = NULL;
- char *maskstr;
-
- join = PyString_FromString("|");
- if (join == NULL)
- goto bail;
-
- pymasks = decode_mask(PyInt_AsLong(evt->mask));
- if (pymasks == NULL)
- goto bail;
-
- pymask = _PyString_Join(join, pymasks);
- if (pymask == NULL)
- goto bail;
-
- maskstr = PyString_AsString(pymask);
-
- if (evt->name != Py_None) {
- PyObject *pyname = PyString_Repr(evt->name, 1);
- char *name = pyname ? PyString_AsString(pyname) : "???";
-
- if (cookie == -1)
- ret = PyString_FromFormat("event(wd=%d, mask=%s, name=%s)",
- wd, maskstr, name);
- else
- ret = PyString_FromFormat("event(wd=%d, mask=%s, "
- "cookie=0x%x, name=%s)",
- wd, maskstr, cookie, name);
-
- Py_XDECREF(pyname);
- } else {
- if (cookie == -1)
- ret = PyString_FromFormat("event(wd=%d, mask=%s)",
- wd, maskstr);
- else {
- ret = PyString_FromFormat("event(wd=%d, mask=%s, cookie=0x%x)",
- wd, maskstr, cookie);
- }
- }
-
- goto done;
-bail:
- Py_CLEAR(ret);
-
-done:
- Py_XDECREF(pymask);
- Py_XDECREF(pymasks);
- Py_XDECREF(join);
-
- return ret;
-}
-
-static PyTypeObject event_type = {
- PyObject_HEAD_INIT(NULL)
- 0, /*ob_size*/
- "_inotify.event", /*tp_name*/
- sizeof(struct event), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)event_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- (reprfunc)event_repr, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
- event_doc, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- event_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- event_new, /* tp_new */
-};
-
-PyObject *read_events(PyObject *self, PyObject *args)
-{
- PyObject *ctor_args = NULL;
- PyObject *pybufsize = NULL;
- PyObject *ret = NULL;
- int bufsize = 65536;
- char *buf = NULL;
- int nread, pos;
- int fd;
-
- if (!PyArg_ParseTuple(args, "i|O:read", &fd, &pybufsize))
- goto bail;
-
- if (pybufsize && pybufsize != Py_None)
- bufsize = PyInt_AsLong(pybufsize);
-
- ret = PyList_New(0);
- if (ret == NULL)
- goto bail;
-
- if (bufsize <= 0) {
- int r;
-
- Py_BEGIN_ALLOW_THREADS
- r = ioctl(fd, FIONREAD, &bufsize);
- Py_END_ALLOW_THREADS
-
- if (r == -1) {
- PyErr_SetFromErrno(PyExc_OSError);
- goto bail;
- }
- if (bufsize == 0)
- goto done;
- }
- else {
- static long name_max;
- static long name_fd = -1;
- long min;
-
- if (name_fd != fd) {
- name_fd = fd;
- Py_BEGIN_ALLOW_THREADS
- name_max = fpathconf(fd, _PC_NAME_MAX);
- Py_END_ALLOW_THREADS
- }
-
- min = sizeof(struct inotify_event) + name_max + 1;
-
- if (bufsize < min) {
- PyErr_Format(PyExc_ValueError, "bufsize must be at least %d",
- (int) min);
- goto bail;
- }
- }
-
- buf = alloca(bufsize);
-
- Py_BEGIN_ALLOW_THREADS
- nread = read(fd, buf, bufsize);
- Py_END_ALLOW_THREADS
-
- if (nread == -1) {
- PyErr_SetFromErrno(PyExc_OSError);
- goto bail;
- }
-
- ctor_args = PyTuple_New(0);
-
- if (ctor_args == NULL)
- goto bail;
-
- pos = 0;
-
- while (pos < nread) {
- struct inotify_event *in = (struct inotify_event *) (buf + pos);
- struct event *evt;
- PyObject *obj;
-
- obj = PyObject_CallObject((PyObject *) &event_type, ctor_args);
-
- if (obj == NULL)
- goto bail;
-
- evt = (struct event *) obj;
-
- evt->wd = PyInt_FromLong(in->wd);
- evt->mask = PyInt_FromLong(in->mask);
- if (in->mask & IN_MOVE)
- evt->cookie = PyInt_FromLong(in->cookie);
- else {
- Py_INCREF(Py_None);
- evt->cookie = Py_None;
- }
- if (in->len)
- evt->name = PyString_FromString(in->name);
- else {
- Py_INCREF(Py_None);
- evt->name = Py_None;
- }
-
- if (!evt->wd || !evt->mask || !evt->cookie || !evt->name)
- goto mybail;
-
- if (PyList_Append(ret, obj) == -1)
- goto mybail;
-
- pos += sizeof(struct inotify_event) + in->len;
- continue;
-
- mybail:
- Py_CLEAR(evt->wd);
- Py_CLEAR(evt->mask);
- Py_CLEAR(evt->cookie);
- Py_CLEAR(evt->name);
- Py_DECREF(obj);
-
- goto bail;
- }
-
- goto done;
-
-bail:
- Py_CLEAR(ret);
-
-done:
- Py_XDECREF(ctor_args);
-
- return ret;
-}
-
-PyDoc_STRVAR(
- read_doc,
- "read(fd, bufsize[=65536]) -> list_of_events\n"
- "\n"
- "\nRead inotify events from a file descriptor.\n"
- "\n"
- " fd: file descriptor returned by init()\n"
- " bufsize: size of buffer to read into, in bytes\n"
- "\n"
- "Return a list of event objects.\n"
- "\n"
- "If bufsize is > 0, block until events are available to be read.\n"
- "Otherwise, immediately return all events that can be read without\n"
- "blocking.");
-
-
-static PyMethodDef methods[] = {
- {"init", init, METH_VARARGS, init_doc},
- {"add_watch", add_watch, METH_VARARGS, add_watch_doc},
- {"remove_watch", remove_watch, METH_VARARGS, remove_watch_doc},
- {"read", read_events, METH_VARARGS, read_doc},
- {"decode_mask", pydecode_mask, METH_VARARGS, decode_mask_doc},
- {NULL},
-};
-
-void init_inotify(void)
-{
- PyObject *mod, *dict;
-
- if (PyType_Ready(&event_type) == -1)
- return;
-
- mod = Py_InitModule3("_inotify", methods, doc);
-
- dict = PyModule_GetDict(mod);
-
- if (dict)
- define_consts(dict);
-}
diff --git a/sys/lib/python/hgext/inotify/linux/watcher.py b/sys/lib/python/hgext/inotify/linux/watcher.py
deleted file mode 100644
index 5695f8686..000000000
--- a/sys/lib/python/hgext/inotify/linux/watcher.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# watcher.py - high-level interfaces to the Linux inotify subsystem
-
-# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
-
-# This library is free software; you can redistribute it and/or modify
-# it under the terms of version 2.1 of the GNU Lesser General Public
-# License, incorporated herein by reference.
-
-'''High-level interfaces to the Linux inotify subsystem.
-
-The inotify subsystem provides an efficient mechanism for file status
-monitoring and change notification.
-
-The watcher class hides the low-level details of the inotify
-interface, and provides a Pythonic wrapper around it. It generates
-events that provide somewhat more information than raw inotify makes
-available.
-
-The autowatcher class is more useful, as it automatically watches
-newly-created directories on your behalf.'''
-
-__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
-
-import _inotify as inotify
-import array
-import errno
-import fcntl
-import os
-import termios
-
-
-class event(object):
- '''Derived inotify event class.
-
- The following fields are available:
-
- mask: event mask, indicating what kind of event this is
-
- cookie: rename cookie, if a rename-related event
-
- path: path of the directory in which the event occurred
-
- name: name of the directory entry to which the event occurred
- (may be None if the event happened to a watched directory)
-
- fullpath: complete path at which the event occurred
-
- wd: watch descriptor that triggered this event'''
-
- __slots__ = (
- 'cookie',
- 'fullpath',
- 'mask',
- 'name',
- 'path',
- 'raw',
- 'wd',
- )
-
- def __init__(self, raw, path):
- self.path = path
- self.raw = raw
- if raw.name:
- self.fullpath = path + '/' + raw.name
- else:
- self.fullpath = path
-
- self.wd = raw.wd
- self.mask = raw.mask
- self.cookie = raw.cookie
- self.name = raw.name
-
- def __repr__(self):
- r = repr(self.raw)
- return 'event(path=' + repr(self.path) + ', ' + r[r.find('(')+1:]
-
-
-_event_props = {
- 'access': 'File was accessed',
- 'modify': 'File was modified',
- 'attrib': 'Attribute of a directory entry was changed',
- 'close_write': 'File was closed after being written to',
- 'close_nowrite': 'File was closed without being written to',
- 'open': 'File was opened',
- 'moved_from': 'Directory entry was renamed from this name',
- 'moved_to': 'Directory entry was renamed to this name',
- 'create': 'Directory entry was created',
- 'delete': 'Directory entry was deleted',
- 'delete_self': 'The watched directory entry was deleted',
- 'move_self': 'The watched directory entry was renamed',
- 'unmount': 'Directory was unmounted, and can no longer be watched',
- 'q_overflow': 'Kernel dropped events due to queue overflow',
- 'ignored': 'Directory entry is no longer being watched',
- 'isdir': 'Event occurred on a directory',
- }
-
-for k, v in _event_props.iteritems():
- mask = getattr(inotify, 'IN_' + k.upper())
- def getter(self):
- return self.mask & mask
- getter.__name__ = k
- getter.__doc__ = v
- setattr(event, k, property(getter, doc=v))
-
-del _event_props
-
-
-class watcher(object):
- '''Provide a Pythonic interface to the low-level inotify API.
-
- Also adds derived information to each event that is not available
- through the normal inotify API, such as directory name.'''
-
- __slots__ = (
- 'fd',
- '_paths',
- '_wds',
- )
-
- def __init__(self):
- '''Create a new inotify instance.'''
-
- self.fd = inotify.init()
- self._paths = {}
- self._wds = {}
-
- def fileno(self):
- '''Return the file descriptor this watcher uses.
-
- Useful for passing to select and poll.'''
-
- return self.fd
-
- def add(self, path, mask):
- '''Add or modify a watch.
-
- Return the watch descriptor added or modified.'''
-
- path = os.path.normpath(path)
- wd = inotify.add_watch(self.fd, path, mask)
- self._paths[path] = wd, mask
- self._wds[wd] = path, mask
- return wd
-
- def remove(self, wd):
- '''Remove the given watch.'''
-
- inotify.remove_watch(self.fd, wd)
- self._remove(wd)
-
- def _remove(self, wd):
- path_mask = self._wds.pop(wd, None)
- if path_mask is not None:
- self._paths.pop(path_mask[0])
-
- def path(self, path):
- '''Return a (watch descriptor, event mask) pair for the given path.
-
- If the path is not being watched, return None.'''
-
- return self._paths.get(path)
-
- def wd(self, wd):
- '''Return a (path, event mask) pair for the given watch descriptor.
-
- If the watch descriptor is not valid or not associated with
- this watcher, return None.'''
-
- return self._wds.get(wd)
-
- def read(self, bufsize=None):
- '''Read a list of queued inotify events.
-
- If bufsize is zero, only return those events that can be read
- immediately without blocking. Otherwise, block until events are
- available.'''
-
- events = []
- for evt in inotify.read(self.fd, bufsize):
- events.append(event(evt, self._wds[evt.wd][0]))
- if evt.mask & inotify.IN_IGNORED:
- self._remove(evt.wd)
- elif evt.mask & inotify.IN_UNMOUNT:
- self.close()
- return events
-
- def close(self):
- '''Shut down this watcher.
-
- All subsequent method calls are likely to raise exceptions.'''
-
- os.close(self.fd)
- self.fd = None
- self._paths = None
- self._wds = None
-
- def __len__(self):
- '''Return the number of active watches.'''
-
- return len(self._paths)
-
- def __iter__(self):
- '''Yield a (path, watch descriptor, event mask) tuple for each
- entry being watched.'''
-
- for path, (wd, mask) in self._paths.iteritems():
- yield path, wd, mask
-
- def __del__(self):
- if self.fd is not None:
- os.close(self.fd)
-
- ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]
-
- def add_iter(self, path, mask, onerror=None):
- '''Add or modify watches over path and its subdirectories.
-
- Yield each added or modified watch descriptor.
-
- To ensure that this method runs to completion, you must
- iterate over all of its results, even if you do not care what
- they are. For example:
-
- for wd in w.add_iter(path, mask):
- pass
-
- By default, errors are ignored. If optional arg "onerror" is
- specified, it should be a function; it will be called with one
- argument, an OSError instance. It can report the error to
- continue with the walk, or raise the exception to abort the
- walk.'''
-
- # Add the IN_ONLYDIR flag to the event mask, to avoid a possible
- # race when adding a subdirectory. In the time between the
- # event being queued by the kernel and us processing it, the
- # directory may have been deleted, or replaced with a different
- # kind of entry with the same name.
-
- submask = mask | inotify.IN_ONLYDIR
-
- try:
- yield self.add(path, mask)
- except OSError, err:
- if onerror and err.errno not in self.ignored_errors:
- onerror(err)
- for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
- for d in dirs:
- try:
- yield self.add(root + '/' + d, submask)
- except OSError, err:
- if onerror and err.errno not in self.ignored_errors:
- onerror(err)
-
- def add_all(self, path, mask, onerror=None):
- '''Add or modify watches over path and its subdirectories.
-
- Return a list of added or modified watch descriptors.
-
- By default, errors are ignored. If optional arg "onerror" is
- specified, it should be a function; it will be called with one
- argument, an OSError instance. It can report the error to
- continue with the walk, or raise the exception to abort the
- walk.'''
-
- return [w for w in self.add_iter(path, mask, onerror)]
-
-
-class autowatcher(watcher):
- '''watcher class that automatically watches newly created directories.'''
-
- __slots__ = (
- 'addfilter',
- )
-
- def __init__(self, addfilter=None):
- '''Create a new inotify instance.
-
- This instance will automatically watch newly created
- directories.
-
- If the optional addfilter parameter is not None, it must be a
- callable that takes one parameter. It will be called each time
- a directory is about to be automatically watched. If it returns
- True, the directory will be watched if it still exists,
- otherwise, it will beb skipped.'''
-
- super(autowatcher, self).__init__()
- self.addfilter = addfilter
-
- _dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE
-
- def read(self, bufsize=None):
- events = super(autowatcher, self).read(bufsize)
- for evt in events:
- if evt.mask & self._dir_create_mask == self._dir_create_mask:
- if self.addfilter is None or self.addfilter(evt):
- parentmask = self._wds[evt.wd][1]
- # See note about race avoidance via IN_ONLYDIR above.
- mask = parentmask | inotify.IN_ONLYDIR
- try:
- self.add_all(evt.fullpath, mask)
- except OSError, err:
- if err.errno not in self.ignored_errors:
- raise
- return events
-
-
-class threshold(object):
- '''Class that indicates whether a file descriptor has reached a
- threshold of readable bytes available.
-
- This class is not thread-safe.'''
-
- __slots__ = (
- 'fd',
- 'threshold',
- '_iocbuf',
- )
-
- def __init__(self, fd, threshold=1024):
- self.fd = fd
- self.threshold = threshold
- self._iocbuf = array.array('i', [0])
-
- def readable(self):
- '''Return the number of bytes readable on this file descriptor.'''
-
- fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True)
- return self._iocbuf[0]
-
- def __call__(self):
- '''Indicate whether the number of readable bytes has met or
- exceeded the threshold.'''
-
- return self.readable() >= self.threshold
diff --git a/sys/lib/python/hgext/inotify/server.py b/sys/lib/python/hgext/inotify/server.py
deleted file mode 100644
index 75c00d632..000000000
--- a/sys/lib/python/hgext/inotify/server.py
+++ /dev/null
@@ -1,874 +0,0 @@
-# server.py - inotify status server
-#
-# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from mercurial.i18n import _
-from mercurial import osutil, util
-import common
-import errno, os, select, socket, stat, struct, sys, tempfile, time
-
-try:
- import linux as inotify
- from linux import watcher
-except ImportError:
- raise
-
-class AlreadyStartedException(Exception): pass
-
-def join(a, b):
- if a:
- if a[-1] == '/':
- return a + b
- return a + '/' + b
- return b
-
-def split(path):
- c = path.rfind('/')
- if c == -1:
- return '', path
- return path[:c], path[c+1:]
-
-walk_ignored_errors = (errno.ENOENT, errno.ENAMETOOLONG)
-
-def walkrepodirs(dirstate, absroot):
- '''Iterate over all subdirectories of this repo.
- Exclude the .hg directory, any nested repos, and ignored dirs.'''
- def walkit(dirname, top):
- fullpath = join(absroot, dirname)
- try:
- for name, kind in osutil.listdir(fullpath):
- if kind == stat.S_IFDIR:
- if name == '.hg':
- if not top:
- return
- else:
- d = join(dirname, name)
- if dirstate._ignore(d):
- continue
- for subdir in walkit(d, False):
- yield subdir
- except OSError, err:
- if err.errno not in walk_ignored_errors:
- raise
- yield fullpath
-
- return walkit('', True)
-
-def walk(dirstate, absroot, root):
- '''Like os.walk, but only yields regular files.'''
-
- # This function is critical to performance during startup.
-
- def walkit(root, reporoot):
- files, dirs = [], []
-
- try:
- fullpath = join(absroot, root)
- for name, kind in osutil.listdir(fullpath):
- if kind == stat.S_IFDIR:
- if name == '.hg':
- if not reporoot:
- return
- else:
- dirs.append(name)
- path = join(root, name)
- if dirstate._ignore(path):
- continue
- for result in walkit(path, False):
- yield result
- elif kind in (stat.S_IFREG, stat.S_IFLNK):
- files.append(name)
- yield fullpath, dirs, files
-
- except OSError, err:
- if err.errno == errno.ENOTDIR:
- # fullpath was a directory, but has since been replaced
- # by a file.
- yield fullpath, dirs, files
- elif err.errno not in walk_ignored_errors:
- raise
-
- return walkit(root, root == '')
-
-def _explain_watch_limit(ui, dirstate, rootabs):
- path = '/proc/sys/fs/inotify/max_user_watches'
- try:
- limit = int(file(path).read())
- except IOError, err:
- if err.errno != errno.ENOENT:
- raise
- raise util.Abort(_('this system does not seem to '
- 'support inotify'))
- ui.warn(_('*** the current per-user limit on the number '
- 'of inotify watches is %s\n') % limit)
- ui.warn(_('*** this limit is too low to watch every '
- 'directory in this repository\n'))
- ui.warn(_('*** counting directories: '))
- ndirs = len(list(walkrepodirs(dirstate, rootabs)))
- ui.warn(_('found %d\n') % ndirs)
- newlimit = min(limit, 1024)
- while newlimit < ((limit + ndirs) * 1.1):
- newlimit *= 2
- ui.warn(_('*** to raise the limit from %d to %d (run as root):\n') %
- (limit, newlimit))
- ui.warn(_('*** echo %d > %s\n') % (newlimit, path))
- raise util.Abort(_('cannot watch %s until inotify watch limit is raised')
- % rootabs)
-
-class pollable(object):
- """
- Interface to support polling.
- The file descriptor returned by fileno() is registered to a polling
- object.
- Usage:
- Every tick, check if an event has happened since the last tick:
- * If yes, call handle_events
- * If no, call handle_timeout
- """
- poll_events = select.POLLIN
- instances = {}
- poll = select.poll()
-
- def fileno(self):
- raise NotImplementedError
-
- def handle_events(self, events):
- raise NotImplementedError
-
- def handle_timeout(self):
- raise NotImplementedError
-
- def shutdown(self):
- raise NotImplementedError
-
- def register(self, timeout):
- fd = self.fileno()
-
- pollable.poll.register(fd, pollable.poll_events)
- pollable.instances[fd] = self
-
- self.registered = True
- self.timeout = timeout
-
- def unregister(self):
- pollable.poll.unregister(self)
- self.registered = False
-
- @classmethod
- def run(cls):
- while True:
- timeout = None
- timeobj = None
- for obj in cls.instances.itervalues():
- if obj.timeout is not None and (timeout is None or obj.timeout < timeout):
- timeout, timeobj = obj.timeout, obj
- try:
- events = cls.poll.poll(timeout)
- except select.error, err:
- if err[0] == errno.EINTR:
- continue
- raise
- if events:
- by_fd = {}
- for fd, event in events:
- by_fd.setdefault(fd, []).append(event)
-
- for fd, events in by_fd.iteritems():
- cls.instances[fd].handle_pollevents(events)
-
- elif timeobj:
- timeobj.handle_timeout()
-
-def eventaction(code):
- """
- Decorator to help handle events in repowatcher
- """
- def decorator(f):
- def wrapper(self, wpath):
- if code == 'm' and wpath in self.lastevent and \
- self.lastevent[wpath] in 'cm':
- return
- self.lastevent[wpath] = code
- self.timeout = 250
-
- f(self, wpath)
-
- wrapper.func_name = f.func_name
- return wrapper
- return decorator
-
-class directory(object):
- """
- Representing a directory
-
- * path is the relative path from repo root to this directory
- * files is a dict listing the files in this directory
- - keys are file names
- - values are file status
- * dirs is a dict listing the subdirectories
- - key are subdirectories names
- - values are directory objects
- """
- def __init__(self, relpath=''):
- self.path = relpath
- self.files = {}
- self.dirs = {}
-
- def dir(self, relpath):
- """
- Returns the directory contained at the relative path relpath.
- Creates the intermediate directories if necessary.
- """
- if not relpath:
- return self
- l = relpath.split('/')
- ret = self
- while l:
- next = l.pop(0)
- try:
- ret = ret.dirs[next]
- except KeyError:
- d = directory(join(ret.path, next))
- ret.dirs[next] = d
- ret = d
- return ret
-
- def walk(self, states):
- """
- yield (filename, status) pairs for items in the trees
- that have status in states.
- filenames are relative to the repo root
- """
- for file, st in self.files.iteritems():
- if st in states:
- yield join(self.path, file), st
- for dir in self.dirs.itervalues():
- for e in dir.walk(states):
- yield e
-
- def lookup(self, states, path):
- """
- yield root-relative filenames that match path, and whose
- status are in states:
- * if path is a file, yield path
- * if path is a directory, yield directory files
- * if path is not tracked, yield nothing
- """
- if path[-1] == '/':
- path = path[:-1]
-
- paths = path.split('/')
-
- # we need to check separately for last node
- last = paths.pop()
-
- tree = self
- try:
- for dir in paths:
- tree = tree.dirs[dir]
- except KeyError:
- # path is not tracked
- return
-
- try:
- # if path is a directory, walk it
- for file, st in tree.dirs[last].walk(states):
- yield file
- except KeyError:
- try:
- if tree.files[last] in states:
- # path is a file
- yield path
- except KeyError:
- # path is not tracked
- pass
-
-class repowatcher(pollable):
- """
- Watches inotify events
- """
- statuskeys = 'almr!?'
- mask = (
- inotify.IN_ATTRIB |
- inotify.IN_CREATE |
- inotify.IN_DELETE |
- inotify.IN_DELETE_SELF |
- inotify.IN_MODIFY |
- inotify.IN_MOVED_FROM |
- inotify.IN_MOVED_TO |
- inotify.IN_MOVE_SELF |
- inotify.IN_ONLYDIR |
- inotify.IN_UNMOUNT |
- 0)
-
- def __init__(self, ui, dirstate, root):
- self.ui = ui
- self.dirstate = dirstate
-
- self.wprefix = join(root, '')
- self.prefixlen = len(self.wprefix)
- try:
- self.watcher = watcher.watcher()
- except OSError, err:
- raise util.Abort(_('inotify service not available: %s') %
- err.strerror)
- self.threshold = watcher.threshold(self.watcher)
- self.fileno = self.watcher.fileno
-
- self.tree = directory()
- self.statcache = {}
- self.statustrees = dict([(s, directory()) for s in self.statuskeys])
-
- self.last_event = None
-
- self.lastevent = {}
-
- self.register(timeout=None)
-
- self.ds_info = self.dirstate_info()
- self.handle_timeout()
- self.scan()
-
- def event_time(self):
- last = self.last_event
- now = time.time()
- self.last_event = now
-
- if last is None:
- return 'start'
- delta = now - last
- if delta < 5:
- return '+%.3f' % delta
- if delta < 50:
- return '+%.2f' % delta
- return '+%.1f' % delta
-
- def dirstate_info(self):
- try:
- st = os.lstat(self.wprefix + '.hg/dirstate')
- return st.st_mtime, st.st_ino
- except OSError, err:
- if err.errno != errno.ENOENT:
- raise
- return 0, 0
-
- def add_watch(self, path, mask):
- if not path:
- return
- if self.watcher.path(path) is None:
- if self.ui.debugflag:
- self.ui.note(_('watching %r\n') % path[self.prefixlen:])
- try:
- self.watcher.add(path, mask)
- except OSError, err:
- if err.errno in (errno.ENOENT, errno.ENOTDIR):
- return
- if err.errno != errno.ENOSPC:
- raise
- _explain_watch_limit(self.ui, self.dirstate, self.wprefix)
-
- def setup(self):
- self.ui.note(_('watching directories under %r\n') % self.wprefix)
- self.add_watch(self.wprefix + '.hg', inotify.IN_DELETE)
- self.check_dirstate()
-
- def filestatus(self, fn, st):
- try:
- type_, mode, size, time = self.dirstate._map[fn][:4]
- except KeyError:
- type_ = '?'
- if type_ == 'n':
- st_mode, st_size, st_mtime = st
- if size == -1:
- return 'l'
- if size and (size != st_size or (mode ^ st_mode) & 0100):
- return 'm'
- if time != int(st_mtime):
- return 'l'
- return 'n'
- if type_ == '?' and self.dirstate._ignore(fn):
- return 'i'
- return type_
-
- def updatefile(self, wfn, osstat):
- '''
- update the file entry of an existing file.
-
- osstat: (mode, size, time) tuple, as returned by os.lstat(wfn)
- '''
-
- self._updatestatus(wfn, self.filestatus(wfn, osstat))
-
- def deletefile(self, wfn, oldstatus):
- '''
- update the entry of a file which has been deleted.
-
- oldstatus: char in statuskeys, status of the file before deletion
- '''
- if oldstatus == 'r':
- newstatus = 'r'
- elif oldstatus in 'almn':
- newstatus = '!'
- else:
- newstatus = None
-
- self.statcache.pop(wfn, None)
- self._updatestatus(wfn, newstatus)
-
- def _updatestatus(self, wfn, newstatus):
- '''
- Update the stored status of a file.
-
- newstatus: - char in (statuskeys + 'ni'), new status to apply.
- - or None, to stop tracking wfn
- '''
- root, fn = split(wfn)
- d = self.tree.dir(root)
-
- oldstatus = d.files.get(fn)
- # oldstatus can be either:
- # - None : fn is new
- # - a char in statuskeys: fn is a (tracked) file
-
- if self.ui.debugflag and oldstatus != newstatus:
- self.ui.note(_('status: %r %s -> %s\n') %
- (wfn, oldstatus, newstatus))
-
- if oldstatus and oldstatus in self.statuskeys \
- and oldstatus != newstatus:
- del self.statustrees[oldstatus].dir(root).files[fn]
-
- if newstatus in (None, 'i'):
- d.files.pop(fn, None)
- elif oldstatus != newstatus:
- d.files[fn] = newstatus
- if newstatus != 'n':
- self.statustrees[newstatus].dir(root).files[fn] = newstatus
-
-
- def check_deleted(self, key):
- # Files that had been deleted but were present in the dirstate
- # may have vanished from the dirstate; we must clean them up.
- nuke = []
- for wfn, ignore in self.statustrees[key].walk(key):
- if wfn not in self.dirstate:
- nuke.append(wfn)
- for wfn in nuke:
- root, fn = split(wfn)
- del self.statustrees[key].dir(root).files[fn]
- del self.tree.dir(root).files[fn]
-
- def scan(self, topdir=''):
- ds = self.dirstate._map.copy()
- self.add_watch(join(self.wprefix, topdir), self.mask)
- for root, dirs, files in walk(self.dirstate, self.wprefix, topdir):
- for d in dirs:
- self.add_watch(join(root, d), self.mask)
- wroot = root[self.prefixlen:]
- for fn in files:
- wfn = join(wroot, fn)
- self.updatefile(wfn, self.getstat(wfn))
- ds.pop(wfn, None)
- wtopdir = topdir
- if wtopdir and wtopdir[-1] != '/':
- wtopdir += '/'
- for wfn, state in ds.iteritems():
- if not wfn.startswith(wtopdir):
- continue
- try:
- st = self.stat(wfn)
- except OSError:
- status = state[0]
- self.deletefile(wfn, status)
- else:
- self.updatefile(wfn, st)
- self.check_deleted('!')
- self.check_deleted('r')
-
- def check_dirstate(self):
- ds_info = self.dirstate_info()
- if ds_info == self.ds_info:
- return
- self.ds_info = ds_info
- if not self.ui.debugflag:
- self.last_event = None
- self.ui.note(_('%s dirstate reload\n') % self.event_time())
- self.dirstate.invalidate()
- self.handle_timeout()
- self.scan()
- self.ui.note(_('%s end dirstate reload\n') % self.event_time())
-
- def update_hgignore(self):
- # An update of the ignore file can potentially change the
- # states of all unknown and ignored files.
-
- # XXX If the user has other ignore files outside the repo, or
- # changes their list of ignore files at run time, we'll
- # potentially never see changes to them. We could get the
- # client to report to us what ignore data they're using.
- # But it's easier to do nothing than to open that can of
- # worms.
-
- if '_ignore' in self.dirstate.__dict__:
- delattr(self.dirstate, '_ignore')
- self.ui.note(_('rescanning due to .hgignore change\n'))
- self.handle_timeout()
- self.scan()
-
- def getstat(self, wpath):
- try:
- return self.statcache[wpath]
- except KeyError:
- try:
- return self.stat(wpath)
- except OSError, err:
- if err.errno != errno.ENOENT:
- raise
-
- def stat(self, wpath):
- try:
- st = os.lstat(join(self.wprefix, wpath))
- ret = st.st_mode, st.st_size, st.st_mtime
- self.statcache[wpath] = ret
- return ret
- except OSError:
- self.statcache.pop(wpath, None)
- raise
-
- @eventaction('c')
- def created(self, wpath):
- if wpath == '.hgignore':
- self.update_hgignore()
- try:
- st = self.stat(wpath)
- if stat.S_ISREG(st[0]):
- self.updatefile(wpath, st)
- except OSError:
- pass
-
- @eventaction('m')
- def modified(self, wpath):
- if wpath == '.hgignore':
- self.update_hgignore()
- try:
- st = self.stat(wpath)
- if stat.S_ISREG(st[0]):
- if self.dirstate[wpath] in 'lmn':
- self.updatefile(wpath, st)
- except OSError:
- pass
-
- @eventaction('d')
- def deleted(self, wpath):
- if wpath == '.hgignore':
- self.update_hgignore()
- elif wpath.startswith('.hg/'):
- if wpath == '.hg/wlock':
- self.check_dirstate()
- return
-
- self.deletefile(wpath, self.dirstate[wpath])
-
- def process_create(self, wpath, evt):
- if self.ui.debugflag:
- self.ui.note(_('%s event: created %s\n') %
- (self.event_time(), wpath))
-
- if evt.mask & inotify.IN_ISDIR:
- self.scan(wpath)
- else:
- self.created(wpath)
-
- def process_delete(self, wpath, evt):
- if self.ui.debugflag:
- self.ui.note(_('%s event: deleted %s\n') %
- (self.event_time(), wpath))
-
- if evt.mask & inotify.IN_ISDIR:
- tree = self.tree.dir(wpath)
- todelete = [wfn for wfn, ignore in tree.walk('?')]
- for fn in todelete:
- self.deletefile(fn, '?')
- self.scan(wpath)
- else:
- self.deleted(wpath)
-
- def process_modify(self, wpath, evt):
- if self.ui.debugflag:
- self.ui.note(_('%s event: modified %s\n') %
- (self.event_time(), wpath))
-
- if not (evt.mask & inotify.IN_ISDIR):
- self.modified(wpath)
-
- def process_unmount(self, evt):
- self.ui.warn(_('filesystem containing %s was unmounted\n') %
- evt.fullpath)
- sys.exit(0)
-
- def handle_pollevents(self, events):
- if self.ui.debugflag:
- self.ui.note(_('%s readable: %d bytes\n') %
- (self.event_time(), self.threshold.readable()))
- if not self.threshold():
- if self.registered:
- if self.ui.debugflag:
- self.ui.note(_('%s below threshold - unhooking\n') %
- (self.event_time()))
- self.unregister()
- self.timeout = 250
- else:
- self.read_events()
-
- def read_events(self, bufsize=None):
- events = self.watcher.read(bufsize)
- if self.ui.debugflag:
- self.ui.note(_('%s reading %d events\n') %
- (self.event_time(), len(events)))
- for evt in events:
- assert evt.fullpath.startswith(self.wprefix)
- wpath = evt.fullpath[self.prefixlen:]
-
- # paths have been normalized, wpath never ends with a '/'
-
- if wpath.startswith('.hg/') and evt.mask & inotify.IN_ISDIR:
- # ignore subdirectories of .hg/ (merge, patches...)
- continue
-
- if evt.mask & inotify.IN_UNMOUNT:
- self.process_unmount(wpath, evt)
- elif evt.mask & (inotify.IN_MODIFY | inotify.IN_ATTRIB):
- self.process_modify(wpath, evt)
- elif evt.mask & (inotify.IN_DELETE | inotify.IN_DELETE_SELF |
- inotify.IN_MOVED_FROM):
- self.process_delete(wpath, evt)
- elif evt.mask & (inotify.IN_CREATE | inotify.IN_MOVED_TO):
- self.process_create(wpath, evt)
-
- self.lastevent.clear()
-
- def handle_timeout(self):
- if not self.registered:
- if self.ui.debugflag:
- self.ui.note(_('%s hooking back up with %d bytes readable\n') %
- (self.event_time(), self.threshold.readable()))
- self.read_events(0)
- self.register(timeout=None)
-
- self.timeout = None
-
- def shutdown(self):
- self.watcher.close()
-
- def debug(self):
- """
- Returns a sorted list of relatives paths currently watched,
- for debugging purposes.
- """
- return sorted(tuple[0][self.prefixlen:] for tuple in self.watcher)
-
-class server(pollable):
- """
- Listens for client queries on unix socket inotify.sock
- """
- def __init__(self, ui, root, repowatcher, timeout):
- self.ui = ui
- self.repowatcher = repowatcher
- self.sock = socket.socket(socket.AF_UNIX)
- self.sockpath = join(root, '.hg/inotify.sock')
- self.realsockpath = None
- try:
- self.sock.bind(self.sockpath)
- except socket.error, err:
- if err[0] == errno.EADDRINUSE:
- raise AlreadyStartedException(_('could not start server: %s')
- % err[1])
- if err[0] == "AF_UNIX path too long":
- tempdir = tempfile.mkdtemp(prefix="hg-inotify-")
- self.realsockpath = os.path.join(tempdir, "inotify.sock")
- try:
- self.sock.bind(self.realsockpath)
- os.symlink(self.realsockpath, self.sockpath)
- except (OSError, socket.error), inst:
- try:
- os.unlink(self.realsockpath)
- except:
- pass
- os.rmdir(tempdir)
- if inst.errno == errno.EEXIST:
- raise AlreadyStartedException(_('could not start server: %s')
- % inst.strerror)
- raise
- else:
- raise
- self.sock.listen(5)
- self.fileno = self.sock.fileno
- self.register(timeout=timeout)
-
- def handle_timeout(self):
- pass
-
- def answer_stat_query(self, cs):
- names = cs.read().split('\0')
-
- states = names.pop()
-
- self.ui.note(_('answering query for %r\n') % states)
-
- if self.repowatcher.timeout:
- # We got a query while a rescan is pending. Make sure we
- # rescan before responding, or we could give back a wrong
- # answer.
- self.repowatcher.handle_timeout()
-
- if not names:
- def genresult(states, tree):
- for fn, state in tree.walk(states):
- yield fn
- else:
- def genresult(states, tree):
- for fn in names:
- for f in tree.lookup(states, fn):
- yield f
-
- return ['\0'.join(r) for r in [
- genresult('l', self.repowatcher.statustrees['l']),
- genresult('m', self.repowatcher.statustrees['m']),
- genresult('a', self.repowatcher.statustrees['a']),
- genresult('r', self.repowatcher.statustrees['r']),
- genresult('!', self.repowatcher.statustrees['!']),
- '?' in states
- and genresult('?', self.repowatcher.statustrees['?'])
- or [],
- [],
- 'c' in states and genresult('n', self.repowatcher.tree) or [],
- ]]
-
- def answer_dbug_query(self):
- return ['\0'.join(self.repowatcher.debug())]
-
- def handle_pollevents(self, events):
- for e in events:
- self.handle_pollevent()
-
- def handle_pollevent(self):
- sock, addr = self.sock.accept()
-
- cs = common.recvcs(sock)
- version = ord(cs.read(1))
-
- if version != common.version:
- self.ui.warn(_('received query from incompatible client '
- 'version %d\n') % version)
- try:
- # try to send back our version to the client
- # this way, the client too is informed of the mismatch
- sock.sendall(chr(common.version))
- except:
- pass
- return
-
- type = cs.read(4)
-
- if type == 'STAT':
- results = self.answer_stat_query(cs)
- elif type == 'DBUG':
- results = self.answer_dbug_query()
- else:
- self.ui.warn(_('unrecognized query type: %s\n') % type)
- return
-
- try:
- try:
- v = chr(common.version)
-
- sock.sendall(v + type + struct.pack(common.resphdrfmts[type],
- *map(len, results)))
- sock.sendall(''.join(results))
- finally:
- sock.shutdown(socket.SHUT_WR)
- except socket.error, err:
- if err[0] != errno.EPIPE:
- raise
-
- def shutdown(self):
- self.sock.close()
- try:
- os.unlink(self.sockpath)
- if self.realsockpath:
- os.unlink(self.realsockpath)
- os.rmdir(os.path.dirname(self.realsockpath))
- except OSError, err:
- if err.errno != errno.ENOENT:
- raise
-
-class master(object):
- def __init__(self, ui, dirstate, root, timeout=None):
- self.ui = ui
- self.repowatcher = repowatcher(ui, dirstate, root)
- self.server = server(ui, root, self.repowatcher, timeout)
-
- def shutdown(self):
- for obj in pollable.instances.itervalues():
- obj.shutdown()
-
- def run(self):
- self.repowatcher.setup()
- self.ui.note(_('finished setup\n'))
- if os.getenv('TIME_STARTUP'):
- sys.exit(0)
- pollable.run()
-
-def start(ui, dirstate, root):
- def closefds(ignore):
- # (from python bug #1177468)
- # close all inherited file descriptors
- # Python 2.4.1 and later use /dev/urandom to seed the random module's RNG
- # a file descriptor is kept internally as os._urandomfd (created on demand
- # the first time os.urandom() is called), and should not be closed
- try:
- os.urandom(4)
- urandom_fd = getattr(os, '_urandomfd', None)
- except AttributeError:
- urandom_fd = None
- ignore.append(urandom_fd)
- for fd in range(3, 256):
- if fd in ignore:
- continue
- try:
- os.close(fd)
- except OSError:
- pass
-
- m = master(ui, dirstate, root)
- sys.stdout.flush()
- sys.stderr.flush()
-
- pid = os.fork()
- if pid:
- return pid
-
- closefds(pollable.instances.keys())
- os.setsid()
-
- fd = os.open('/dev/null', os.O_RDONLY)
- os.dup2(fd, 0)
- if fd > 0:
- os.close(fd)
-
- fd = os.open(ui.config('inotify', 'log', '/dev/null'),
- os.O_RDWR | os.O_CREAT | os.O_TRUNC)
- os.dup2(fd, 1)
- os.dup2(fd, 2)
- if fd > 2:
- os.close(fd)
-
- try:
- m.run()
- finally:
- m.shutdown()
- os._exit(0)
diff --git a/sys/lib/python/hgext/interhg.py b/sys/lib/python/hgext/interhg.py
deleted file mode 100644
index 3660c4081..000000000
--- a/sys/lib/python/hgext/interhg.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# interhg.py - interhg
-#
-# Copyright 2007 OHASHI Hideya <ohachige@gmail.com>
-#
-# Contributor(s):
-# Edward Lee <edward.lee@engineering.uiuc.edu>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''expand expressions into changelog and summaries
-
-This extension allows the use of a special syntax in summaries, which
-will be automatically expanded into links or any other arbitrary
-expression, much like InterWiki does.
-
-A few example patterns (link to bug tracking, etc.) that may be used
-in your hgrc::
-
- [interhg]
- issues = s!issue(\\d+)!<a href="http://bts/issue\\1">issue\\1</a>!
- bugzilla = s!((?:bug|b=|(?=#?\\d{4,}))(?:\\s*#?)(\\d+))!<a..=\\2">\\1</a>!i
- boldify = s!(^|\\s)#(\\d+)\\b! <b>#\\2</b>!
-'''
-
-import re
-from mercurial.hgweb import hgweb_mod
-from mercurial import templatefilters, extensions
-from mercurial.i18n import _
-
-orig_escape = templatefilters.filters["escape"]
-
-interhg_table = []
-
-def interhg_escape(x):
- escstr = orig_escape(x)
- for regexp, format in interhg_table:
- escstr = regexp.sub(format, escstr)
- return escstr
-
-templatefilters.filters["escape"] = interhg_escape
-
-def interhg_refresh(orig, self):
- interhg_table[:] = []
- for key, pattern in self.repo.ui.configitems('interhg'):
- # grab the delimiter from the character after the "s"
- unesc = pattern[1]
- delim = re.escape(unesc)
-
- # identify portions of the pattern, taking care to avoid escaped
- # delimiters. the replace format and flags are optional, but delimiters
- # are required.
- match = re.match(r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
- % (delim, delim, delim), pattern)
- if not match:
- self.repo.ui.warn(_("interhg: invalid pattern for %s: %s\n")
- % (key, pattern))
- continue
-
- # we need to unescape the delimiter for regexp and format
- delim_re = re.compile(r'(?<!\\)\\%s' % delim)
- regexp = delim_re.sub(unesc, match.group(1))
- format = delim_re.sub(unesc, match.group(2))
-
- # the pattern allows for 6 regexp flags, so set them if necessary
- flagin = match.group(3)
- flags = 0
- if flagin:
- for flag in flagin.upper():
- flags |= re.__dict__[flag]
-
- try:
- regexp = re.compile(regexp, flags)
- interhg_table.append((regexp, format))
- except re.error:
- self.repo.ui.warn(_("interhg: invalid regexp for %s: %s\n")
- % (key, regexp))
- return orig(self)
-
-extensions.wrapfunction(hgweb_mod.hgweb, 'refresh', interhg_refresh)
diff --git a/sys/lib/python/hgext/keyword.py b/sys/lib/python/hgext/keyword.py
deleted file mode 100644
index b331389cf..000000000
--- a/sys/lib/python/hgext/keyword.py
+++ /dev/null
@@ -1,555 +0,0 @@
-# keyword.py - $Keyword$ expansion for Mercurial
-#
-# Copyright 2007-2009 Christian Ebert <blacktrash@gmx.net>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-#
-# $Id$
-#
-# Keyword expansion hack against the grain of a DSCM
-#
-# There are many good reasons why this is not needed in a distributed
-# SCM, still it may be useful in very small projects based on single
-# files (like LaTeX packages), that are mostly addressed to an
-# audience not running a version control system.
-#
-# For in-depth discussion refer to
-# <http://mercurial.selenic.com/wiki/KeywordPlan>.
-#
-# Keyword expansion is based on Mercurial's changeset template mappings.
-#
-# Binary files are not touched.
-#
-# Files to act upon/ignore are specified in the [keyword] section.
-# Customized keyword template mappings in the [keywordmaps] section.
-#
-# Run "hg help keyword" and "hg kwdemo" to get info on configuration.
-
-'''expand keywords in tracked files
-
-This extension expands RCS/CVS-like or self-customized $Keywords$ in
-tracked text files selected by your configuration.
-
-Keywords are only expanded in local repositories and not stored in the
-change history. The mechanism can be regarded as a convenience for the
-current user or for archive distribution.
-
-Configuration is done in the [keyword] and [keywordmaps] sections of
-hgrc files.
-
-Example::
-
- [keyword]
- # expand keywords in every python file except those matching "x*"
- **.py =
- x* = ignore
-
-NOTE: the more specific you are in your filename patterns the less you
-lose speed in huge repositories.
-
-For [keywordmaps] template mapping and expansion demonstration and
-control run "hg kwdemo". See "hg help templates" for a list of
-available templates and filters.
-
-An additional date template filter {date|utcdate} is provided. It
-returns a date like "2006/09/18 15:13:13".
-
-The default template mappings (view with "hg kwdemo -d") can be
-replaced with customized keywords and templates. Again, run "hg
-kwdemo" to control the results of your config changes.
-
-Before changing/disabling active keywords, run "hg kwshrink" to avoid
-the risk of inadvertently storing expanded keywords in the change
-history.
-
-To force expansion after enabling it, or a configuration change, run
-"hg kwexpand".
-
-Also, when committing with the record extension or using mq's qrecord,
-be aware that keywords cannot be updated. Again, run "hg kwexpand" on
-the files in question to update keyword expansions after all changes
-have been checked in.
-
-Expansions spanning more than one line and incremental expansions,
-like CVS' $Log$, are not supported. A keyword template map "Log =
-{desc}" expands to the first line of the changeset description.
-'''
-
-from mercurial import commands, cmdutil, dispatch, filelog, revlog, extensions
-from mercurial import patch, localrepo, templater, templatefilters, util, match
-from mercurial.hgweb import webcommands
-from mercurial.lock import release
-from mercurial.node import nullid
-from mercurial.i18n import _
-import re, shutil, tempfile
-
-commands.optionalrepo += ' kwdemo'
-
-# hg commands that do not act on keywords
-nokwcommands = ('add addremove annotate bundle copy export grep incoming init'
- ' log outgoing push rename rollback tip verify'
- ' convert email glog')
-
-# hg commands that trigger expansion only when writing to working dir,
-# not when reading filelog, and unexpand when reading from working dir
-restricted = 'merge record resolve qfold qimport qnew qpush qrefresh qrecord'
-
-# provide cvs-like UTC date filter
-utcdate = lambda x: util.datestr(x, '%Y/%m/%d %H:%M:%S')
-
-# make keyword tools accessible
-kwtools = {'templater': None, 'hgcmd': '', 'inc': [], 'exc': ['.hg*']}
-
-
-class kwtemplater(object):
- '''
- Sets up keyword templates, corresponding keyword regex, and
- provides keyword substitution functions.
- '''
- templates = {
- 'Revision': '{node|short}',
- 'Author': '{author|user}',
- 'Date': '{date|utcdate}',
- 'RCSFile': '{file|basename},v',
- 'Source': '{root}/{file},v',
- 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
- 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
- }
-
- def __init__(self, ui, repo):
- self.ui = ui
- self.repo = repo
- self.match = match.match(repo.root, '', [],
- kwtools['inc'], kwtools['exc'])
- self.restrict = kwtools['hgcmd'] in restricted.split()
-
- kwmaps = self.ui.configitems('keywordmaps')
- if kwmaps: # override default templates
- self.templates = dict((k, templater.parsestring(v, False))
- for k, v in kwmaps)
- escaped = map(re.escape, self.templates.keys())
- kwpat = r'\$(%s)(: [^$\n\r]*? )??\$' % '|'.join(escaped)
- self.re_kw = re.compile(kwpat)
-
- templatefilters.filters['utcdate'] = utcdate
- self.ct = cmdutil.changeset_templater(self.ui, self.repo,
- False, None, '', False)
-
- def substitute(self, data, path, ctx, subfunc):
- '''Replaces keywords in data with expanded template.'''
- def kwsub(mobj):
- kw = mobj.group(1)
- self.ct.use_template(self.templates[kw])
- self.ui.pushbuffer()
- self.ct.show(ctx, root=self.repo.root, file=path)
- ekw = templatefilters.firstline(self.ui.popbuffer())
- return '$%s: %s $' % (kw, ekw)
- return subfunc(kwsub, data)
-
- def expand(self, path, node, data):
- '''Returns data with keywords expanded.'''
- if not self.restrict and self.match(path) and not util.binary(data):
- ctx = self.repo.filectx(path, fileid=node).changectx()
- return self.substitute(data, path, ctx, self.re_kw.sub)
- return data
-
- def iskwfile(self, path, flagfunc):
- '''Returns true if path matches [keyword] pattern
- and is not a symbolic link.
- Caveat: localrepository._link fails on Windows.'''
- return self.match(path) and not 'l' in flagfunc(path)
-
- def overwrite(self, node, expand, files):
- '''Overwrites selected files expanding/shrinking keywords.'''
- ctx = self.repo[node]
- mf = ctx.manifest()
- if node is not None: # commit
- files = [f for f in ctx.files() if f in mf]
- notify = self.ui.debug
- else: # kwexpand/kwshrink
- notify = self.ui.note
- candidates = [f for f in files if self.iskwfile(f, ctx.flags)]
- if candidates:
- self.restrict = True # do not expand when reading
- msg = (expand and _('overwriting %s expanding keywords\n')
- or _('overwriting %s shrinking keywords\n'))
- for f in candidates:
- fp = self.repo.file(f)
- data = fp.read(mf[f])
- if util.binary(data):
- continue
- if expand:
- if node is None:
- ctx = self.repo.filectx(f, fileid=mf[f]).changectx()
- data, found = self.substitute(data, f, ctx,
- self.re_kw.subn)
- else:
- found = self.re_kw.search(data)
- if found:
- notify(msg % f)
- self.repo.wwrite(f, data, mf.flags(f))
- if node is None:
- self.repo.dirstate.normal(f)
- self.restrict = False
-
- def shrinktext(self, text):
- '''Unconditionally removes all keyword substitutions from text.'''
- return self.re_kw.sub(r'$\1$', text)
-
- def shrink(self, fname, text):
- '''Returns text with all keyword substitutions removed.'''
- if self.match(fname) and not util.binary(text):
- return self.shrinktext(text)
- return text
-
- def shrinklines(self, fname, lines):
- '''Returns lines with keyword substitutions removed.'''
- if self.match(fname):
- text = ''.join(lines)
- if not util.binary(text):
- return self.shrinktext(text).splitlines(True)
- return lines
-
- def wread(self, fname, data):
- '''If in restricted mode returns data read from wdir with
- keyword substitutions removed.'''
- return self.restrict and self.shrink(fname, data) or data
-
-class kwfilelog(filelog.filelog):
- '''
- Subclass of filelog to hook into its read, add, cmp methods.
- Keywords are "stored" unexpanded, and processed on reading.
- '''
- def __init__(self, opener, kwt, path):
- super(kwfilelog, self).__init__(opener, path)
- self.kwt = kwt
- self.path = path
-
- def read(self, node):
- '''Expands keywords when reading filelog.'''
- data = super(kwfilelog, self).read(node)
- return self.kwt.expand(self.path, node, data)
-
- def add(self, text, meta, tr, link, p1=None, p2=None):
- '''Removes keyword substitutions when adding to filelog.'''
- text = self.kwt.shrink(self.path, text)
- return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
-
- def cmp(self, node, text):
- '''Removes keyword substitutions for comparison.'''
- text = self.kwt.shrink(self.path, text)
- if self.renamed(node):
- t2 = super(kwfilelog, self).read(node)
- return t2 != text
- return revlog.revlog.cmp(self, node, text)
-
-def _status(ui, repo, kwt, unknown, *pats, **opts):
- '''Bails out if [keyword] configuration is not active.
- Returns status of working directory.'''
- if kwt:
- match = cmdutil.match(repo, pats, opts)
- return repo.status(match=match, unknown=unknown, clean=True)
- if ui.configitems('keyword'):
- raise util.Abort(_('[keyword] patterns cannot match'))
- raise util.Abort(_('no [keyword] patterns configured'))
-
-def _kwfwrite(ui, repo, expand, *pats, **opts):
- '''Selects files and passes them to kwtemplater.overwrite.'''
- if repo.dirstate.parents()[1] != nullid:
- raise util.Abort(_('outstanding uncommitted merge'))
- kwt = kwtools['templater']
- status = _status(ui, repo, kwt, False, *pats, **opts)
- modified, added, removed, deleted = status[:4]
- if modified or added or removed or deleted:
- raise util.Abort(_('outstanding uncommitted changes'))
- wlock = lock = None
- try:
- wlock = repo.wlock()
- lock = repo.lock()
- kwt.overwrite(None, expand, status[6])
- finally:
- release(lock, wlock)
-
-def demo(ui, repo, *args, **opts):
- '''print [keywordmaps] configuration and an expansion example
-
- Show current, custom, or default keyword template maps and their
- expansions.
-
- Extend the current configuration by specifying maps as arguments
- and using -f/--rcfile to source an external hgrc file.
-
- Use -d/--default to disable current configuration.
-
- See "hg help templates" for information on templates and filters.
- '''
- def demoitems(section, items):
- ui.write('[%s]\n' % section)
- for k, v in items:
- ui.write('%s = %s\n' % (k, v))
-
- msg = 'hg keyword config and expansion example'
- fn = 'demo.txt'
- branchname = 'demobranch'
- tmpdir = tempfile.mkdtemp('', 'kwdemo.')
- ui.note(_('creating temporary repository at %s\n') % tmpdir)
- repo = localrepo.localrepository(ui, tmpdir, True)
- ui.setconfig('keyword', fn, '')
-
- uikwmaps = ui.configitems('keywordmaps')
- if args or opts.get('rcfile'):
- ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
- if uikwmaps:
- ui.status(_('\textending current template maps\n'))
- if opts.get('default') or not uikwmaps:
- ui.status(_('\toverriding default template maps\n'))
- if opts.get('rcfile'):
- ui.readconfig(opts.get('rcfile'))
- if args:
- # simulate hgrc parsing
- rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
- fp = repo.opener('hgrc', 'w')
- fp.writelines(rcmaps)
- fp.close()
- ui.readconfig(repo.join('hgrc'))
- kwmaps = dict(ui.configitems('keywordmaps'))
- elif opts.get('default'):
- ui.status(_('\n\tconfiguration using default keyword template maps\n'))
- kwmaps = kwtemplater.templates
- if uikwmaps:
- ui.status(_('\tdisabling current template maps\n'))
- for k, v in kwmaps.iteritems():
- ui.setconfig('keywordmaps', k, v)
- else:
- ui.status(_('\n\tconfiguration using current keyword template maps\n'))
- kwmaps = dict(uikwmaps) or kwtemplater.templates
-
- uisetup(ui)
- reposetup(ui, repo)
- for k, v in ui.configitems('extensions'):
- if k.endswith('keyword'):
- extension = '%s = %s' % (k, v)
- break
- ui.write('[extensions]\n%s\n' % extension)
- demoitems('keyword', ui.configitems('keyword'))
- demoitems('keywordmaps', kwmaps.iteritems())
- keywords = '$' + '$\n$'.join(kwmaps.keys()) + '$\n'
- repo.wopener(fn, 'w').write(keywords)
- repo.add([fn])
- path = repo.wjoin(fn)
- ui.note(_('\nkeywords written to %s:\n') % path)
- ui.note(keywords)
- ui.note('\nhg -R "%s" branch "%s"\n' % (tmpdir, branchname))
- # silence branch command if not verbose
- quiet = ui.quiet
- ui.quiet = not ui.verbose
- commands.branch(ui, repo, branchname)
- ui.quiet = quiet
- for name, cmd in ui.configitems('hooks'):
- if name.split('.', 1)[0].find('commit') > -1:
- repo.ui.setconfig('hooks', name, '')
- ui.note(_('unhooked all commit hooks\n'))
- ui.note('hg -R "%s" ci -m "%s"\n' % (tmpdir, msg))
- repo.commit(text=msg)
- ui.status(_('\n\tkeywords expanded\n'))
- ui.write(repo.wread(fn))
- ui.debug(_('\nremoving temporary repository %s\n') % tmpdir)
- shutil.rmtree(tmpdir, ignore_errors=True)
-
-def expand(ui, repo, *pats, **opts):
- '''expand keywords in the working directory
-
- Run after (re)enabling keyword expansion.
-
- kwexpand refuses to run if given files contain local changes.
- '''
- # 3rd argument sets expansion to True
- _kwfwrite(ui, repo, True, *pats, **opts)
-
-def files(ui, repo, *pats, **opts):
- '''show files configured for keyword expansion
-
- List which files in the working directory are matched by the
- [keyword] configuration patterns.
-
- Useful to prevent inadvertent keyword expansion and to speed up
- execution by including only files that are actual candidates for
- expansion.
-
- See "hg help keyword" on how to construct patterns both for
- inclusion and exclusion of files.
-
- Use -u/--untracked to list untracked files as well.
-
- With -a/--all and -v/--verbose the codes used to show the status
- of files are::
-
- K = keyword expansion candidate
- k = keyword expansion candidate (untracked)
- I = ignored
- i = ignored (untracked)
- '''
- kwt = kwtools['templater']
- status = _status(ui, repo, kwt, opts.get('untracked'), *pats, **opts)
- modified, added, removed, deleted, unknown, ignored, clean = status
- files = sorted(modified + added + clean)
- wctx = repo[None]
- kwfiles = [f for f in files if kwt.iskwfile(f, wctx.flags)]
- kwuntracked = [f for f in unknown if kwt.iskwfile(f, wctx.flags)]
- cwd = pats and repo.getcwd() or ''
- kwfstats = (not opts.get('ignore') and
- (('K', kwfiles), ('k', kwuntracked),) or ())
- if opts.get('all') or opts.get('ignore'):
- kwfstats += (('I', [f for f in files if f not in kwfiles]),
- ('i', [f for f in unknown if f not in kwuntracked]),)
- for char, filenames in kwfstats:
- fmt = (opts.get('all') or ui.verbose) and '%s %%s\n' % char or '%s\n'
- for f in filenames:
- ui.write(fmt % repo.pathto(f, cwd))
-
-def shrink(ui, repo, *pats, **opts):
- '''revert expanded keywords in the working directory
-
- Run before changing/disabling active keywords or if you experience
- problems with "hg import" or "hg merge".
-
- kwshrink refuses to run if given files contain local changes.
- '''
- # 3rd argument sets expansion to False
- _kwfwrite(ui, repo, False, *pats, **opts)
-
-
-def uisetup(ui):
- '''Collects [keyword] config in kwtools.
- Monkeypatches dispatch._parse if needed.'''
-
- for pat, opt in ui.configitems('keyword'):
- if opt != 'ignore':
- kwtools['inc'].append(pat)
- else:
- kwtools['exc'].append(pat)
-
- if kwtools['inc']:
- def kwdispatch_parse(orig, ui, args):
- '''Monkeypatch dispatch._parse to obtain running hg command.'''
- cmd, func, args, options, cmdoptions = orig(ui, args)
- kwtools['hgcmd'] = cmd
- return cmd, func, args, options, cmdoptions
-
- extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
-
-def reposetup(ui, repo):
- '''Sets up repo as kwrepo for keyword substitution.
- Overrides file method to return kwfilelog instead of filelog
- if file matches user configuration.
- Wraps commit to overwrite configured files with updated
- keyword substitutions.
- Monkeypatches patch and webcommands.'''
-
- try:
- if (not repo.local() or not kwtools['inc']
- or kwtools['hgcmd'] in nokwcommands.split()
- or '.hg' in util.splitpath(repo.root)
- or repo._url.startswith('bundle:')):
- return
- except AttributeError:
- pass
-
- kwtools['templater'] = kwt = kwtemplater(ui, repo)
-
- class kwrepo(repo.__class__):
- def file(self, f):
- if f[0] == '/':
- f = f[1:]
- return kwfilelog(self.sopener, kwt, f)
-
- def wread(self, filename):
- data = super(kwrepo, self).wread(filename)
- return kwt.wread(filename, data)
-
- def commit(self, *args, **opts):
- # use custom commitctx for user commands
- # other extensions can still wrap repo.commitctx directly
- self.commitctx = self.kwcommitctx
- try:
- return super(kwrepo, self).commit(*args, **opts)
- finally:
- del self.commitctx
-
- def kwcommitctx(self, ctx, error=False):
- wlock = lock = None
- try:
- wlock = self.wlock()
- lock = self.lock()
- # store and postpone commit hooks
- commithooks = {}
- for name, cmd in ui.configitems('hooks'):
- if name.split('.', 1)[0] == 'commit':
- commithooks[name] = cmd
- ui.setconfig('hooks', name, None)
- if commithooks:
- # store parents for commit hooks
- p1, p2 = ctx.p1(), ctx.p2()
- xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
-
- n = super(kwrepo, self).commitctx(ctx, error)
-
- kwt.overwrite(n, True, None)
- if commithooks:
- for name, cmd in commithooks.iteritems():
- ui.setconfig('hooks', name, cmd)
- self.hook('commit', node=n, parent1=xp1, parent2=xp2)
- return n
- finally:
- release(lock, wlock)
-
- # monkeypatches
- def kwpatchfile_init(orig, self, ui, fname, opener,
- missing=False, eol=None):
- '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
- rejects or conflicts due to expanded keywords in working dir.'''
- orig(self, ui, fname, opener, missing, eol)
- # shrink keywords read from working dir
- self.lines = kwt.shrinklines(self.fname, self.lines)
-
- def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
- opts=None):
- '''Monkeypatch patch.diff to avoid expansion except when
- comparing against working dir.'''
- if node2 is not None:
- kwt.match = util.never
- elif node1 is not None and node1 != repo['.'].node():
- kwt.restrict = True
- return orig(repo, node1, node2, match, changes, opts)
-
- def kwweb_skip(orig, web, req, tmpl):
- '''Wraps webcommands.x turning off keyword expansion.'''
- kwt.match = util.never
- return orig(web, req, tmpl)
-
- repo.__class__ = kwrepo
-
- extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
- extensions.wrapfunction(patch, 'diff', kw_diff)
- for c in 'annotate changeset rev filediff diff'.split():
- extensions.wrapfunction(webcommands, c, kwweb_skip)
-
-cmdtable = {
- 'kwdemo':
- (demo,
- [('d', 'default', None, _('show default keyword template maps')),
- ('f', 'rcfile', '', _('read maps from rcfile'))],
- _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...')),
- 'kwexpand': (expand, commands.walkopts,
- _('hg kwexpand [OPTION]... [FILE]...')),
- 'kwfiles':
- (files,
- [('a', 'all', None, _('show keyword status flags of all files')),
- ('i', 'ignore', None, _('show files excluded from expansion')),
- ('u', 'untracked', None, _('additionally show untracked files')),
- ] + commands.walkopts,
- _('hg kwfiles [OPTION]... [FILE]...')),
- 'kwshrink': (shrink, commands.walkopts,
- _('hg kwshrink [OPTION]... [FILE]...')),
-}
diff --git a/sys/lib/python/hgext/mq.py b/sys/lib/python/hgext/mq.py
deleted file mode 100644
index a2be932c4..000000000
--- a/sys/lib/python/hgext/mq.py
+++ /dev/null
@@ -1,2653 +0,0 @@
-# mq.py - patch queues for mercurial
-#
-# Copyright 2005, 2006 Chris Mason <mason@suse.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''manage a stack of patches
-
-This extension lets you work with a stack of patches in a Mercurial
-repository. It manages two stacks of patches - all known patches, and
-applied patches (subset of known patches).
-
-Known patches are represented as patch files in the .hg/patches
-directory. Applied patches are both patch files and changesets.
-
-Common tasks (use "hg help command" for more details)::
-
- prepare repository to work with patches qinit
- create new patch qnew
- import existing patch qimport
-
- print patch series qseries
- print applied patches qapplied
- print name of top applied patch qtop
-
- add known patch to applied stack qpush
- remove patch from applied stack qpop
- refresh contents of top applied patch qrefresh
-'''
-
-from mercurial.i18n import _
-from mercurial.node import bin, hex, short, nullid, nullrev
-from mercurial.lock import release
-from mercurial import commands, cmdutil, hg, patch, util
-from mercurial import repair, extensions, url, error
-import os, sys, re, errno
-
-commands.norepo += " qclone"
-
-# Patch names looks like unix-file names.
-# They must be joinable with queue directory and result in the patch path.
-normname = util.normpath
-
-class statusentry(object):
- def __init__(self, rev, name=None):
- if not name:
- fields = rev.split(':', 1)
- if len(fields) == 2:
- self.rev, self.name = fields
- else:
- self.rev, self.name = None, None
- else:
- self.rev, self.name = rev, name
-
- def __str__(self):
- return self.rev + ':' + self.name
-
-class patchheader(object):
- def __init__(self, pf):
- def eatdiff(lines):
- while lines:
- l = lines[-1]
- if (l.startswith("diff -") or
- l.startswith("Index:") or
- l.startswith("===========")):
- del lines[-1]
- else:
- break
- def eatempty(lines):
- while lines:
- l = lines[-1]
- if re.match('\s*$', l):
- del lines[-1]
- else:
- break
-
- message = []
- comments = []
- user = None
- date = None
- format = None
- subject = None
- diffstart = 0
-
- for line in file(pf):
- line = line.rstrip()
- if line.startswith('diff --git'):
- diffstart = 2
- break
- if diffstart:
- if line.startswith('+++ '):
- diffstart = 2
- break
- if line.startswith("--- "):
- diffstart = 1
- continue
- elif format == "hgpatch":
- # parse values when importing the result of an hg export
- if line.startswith("# User "):
- user = line[7:]
- elif line.startswith("# Date "):
- date = line[7:]
- elif not line.startswith("# ") and line:
- message.append(line)
- format = None
- elif line == '# HG changeset patch':
- message = []
- format = "hgpatch"
- elif (format != "tagdone" and (line.startswith("Subject: ") or
- line.startswith("subject: "))):
- subject = line[9:]
- format = "tag"
- elif (format != "tagdone" and (line.startswith("From: ") or
- line.startswith("from: "))):
- user = line[6:]
- format = "tag"
- elif format == "tag" and line == "":
- # when looking for tags (subject: from: etc) they
- # end once you find a blank line in the source
- format = "tagdone"
- elif message or line:
- message.append(line)
- comments.append(line)
-
- eatdiff(message)
- eatdiff(comments)
- eatempty(message)
- eatempty(comments)
-
- # make sure message isn't empty
- if format and format.startswith("tag") and subject:
- message.insert(0, "")
- message.insert(0, subject)
-
- self.message = message
- self.comments = comments
- self.user = user
- self.date = date
- self.haspatch = diffstart > 1
-
- def setuser(self, user):
- if not self.updateheader(['From: ', '# User '], user):
- try:
- patchheaderat = self.comments.index('# HG changeset patch')
- self.comments.insert(patchheaderat + 1, '# User ' + user)
- except ValueError:
- if self._hasheader(['Date: ']):
- self.comments = ['From: ' + user] + self.comments
- else:
- tmp = ['# HG changeset patch', '# User ' + user, '']
- self.comments = tmp + self.comments
- self.user = user
-
- def setdate(self, date):
- if not self.updateheader(['Date: ', '# Date '], date):
- try:
- patchheaderat = self.comments.index('# HG changeset patch')
- self.comments.insert(patchheaderat + 1, '# Date ' + date)
- except ValueError:
- if self._hasheader(['From: ']):
- self.comments = ['Date: ' + date] + self.comments
- else:
- tmp = ['# HG changeset patch', '# Date ' + date, '']
- self.comments = tmp + self.comments
- self.date = date
-
- def setmessage(self, message):
- if self.comments:
- self._delmsg()
- self.message = [message]
- self.comments += self.message
-
- def updateheader(self, prefixes, new):
- '''Update all references to a field in the patch header.
- Return whether the field is present.'''
- res = False
- for prefix in prefixes:
- for i in xrange(len(self.comments)):
- if self.comments[i].startswith(prefix):
- self.comments[i] = prefix + new
- res = True
- break
- return res
-
- def _hasheader(self, prefixes):
- '''Check if a header starts with any of the given prefixes.'''
- for prefix in prefixes:
- for comment in self.comments:
- if comment.startswith(prefix):
- return True
- return False
-
- def __str__(self):
- if not self.comments:
- return ''
- return '\n'.join(self.comments) + '\n\n'
-
- def _delmsg(self):
- '''Remove existing message, keeping the rest of the comments fields.
- If comments contains 'subject: ', message will prepend
- the field and a blank line.'''
- if self.message:
- subj = 'subject: ' + self.message[0].lower()
- for i in xrange(len(self.comments)):
- if subj == self.comments[i].lower():
- del self.comments[i]
- self.message = self.message[2:]
- break
- ci = 0
- for mi in self.message:
- while mi != self.comments[ci]:
- ci += 1
- del self.comments[ci]
-
-class queue(object):
- def __init__(self, ui, path, patchdir=None):
- self.basepath = path
- self.path = patchdir or os.path.join(path, "patches")
- self.opener = util.opener(self.path)
- self.ui = ui
- self.applied_dirty = 0
- self.series_dirty = 0
- self.series_path = "series"
- self.status_path = "status"
- self.guards_path = "guards"
- self.active_guards = None
- self.guards_dirty = False
- self._diffopts = None
-
- @util.propertycache
- def applied(self):
- if os.path.exists(self.join(self.status_path)):
- lines = self.opener(self.status_path).read().splitlines()
- return [statusentry(l) for l in lines]
- return []
-
- @util.propertycache
- def full_series(self):
- if os.path.exists(self.join(self.series_path)):
- return self.opener(self.series_path).read().splitlines()
- return []
-
- @util.propertycache
- def series(self):
- self.parse_series()
- return self.series
-
- @util.propertycache
- def series_guards(self):
- self.parse_series()
- return self.series_guards
-
- def invalidate(self):
- for a in 'applied full_series series series_guards'.split():
- if a in self.__dict__:
- delattr(self, a)
- self.applied_dirty = 0
- self.series_dirty = 0
- self.guards_dirty = False
- self.active_guards = None
-
- def diffopts(self):
- if self._diffopts is None:
- self._diffopts = patch.diffopts(self.ui)
- return self._diffopts
-
- def join(self, *p):
- return os.path.join(self.path, *p)
-
- def find_series(self, patch):
- pre = re.compile("(\s*)([^#]+)")
- index = 0
- for l in self.full_series:
- m = pre.match(l)
- if m:
- s = m.group(2)
- s = s.rstrip()
- if s == patch:
- return index
- index += 1
- return None
-
- guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
-
- def parse_series(self):
- self.series = []
- self.series_guards = []
- for l in self.full_series:
- h = l.find('#')
- if h == -1:
- patch = l
- comment = ''
- elif h == 0:
- continue
- else:
- patch = l[:h]
- comment = l[h:]
- patch = patch.strip()
- if patch:
- if patch in self.series:
- raise util.Abort(_('%s appears more than once in %s') %
- (patch, self.join(self.series_path)))
- self.series.append(patch)
- self.series_guards.append(self.guard_re.findall(comment))
-
- def check_guard(self, guard):
- if not guard:
- return _('guard cannot be an empty string')
- bad_chars = '# \t\r\n\f'
- first = guard[0]
- if first in '-+':
- return (_('guard %r starts with invalid character: %r') %
- (guard, first))
- for c in bad_chars:
- if c in guard:
- return _('invalid character in guard %r: %r') % (guard, c)
-
- def set_active(self, guards):
- for guard in guards:
- bad = self.check_guard(guard)
- if bad:
- raise util.Abort(bad)
- guards = sorted(set(guards))
- self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
- self.active_guards = guards
- self.guards_dirty = True
-
- def active(self):
- if self.active_guards is None:
- self.active_guards = []
- try:
- guards = self.opener(self.guards_path).read().split()
- except IOError, err:
- if err.errno != errno.ENOENT: raise
- guards = []
- for i, guard in enumerate(guards):
- bad = self.check_guard(guard)
- if bad:
- self.ui.warn('%s:%d: %s\n' %
- (self.join(self.guards_path), i + 1, bad))
- else:
- self.active_guards.append(guard)
- return self.active_guards
-
- def set_guards(self, idx, guards):
- for g in guards:
- if len(g) < 2:
- raise util.Abort(_('guard %r too short') % g)
- if g[0] not in '-+':
- raise util.Abort(_('guard %r starts with invalid char') % g)
- bad = self.check_guard(g[1:])
- if bad:
- raise util.Abort(bad)
- drop = self.guard_re.sub('', self.full_series[idx])
- self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
- self.parse_series()
- self.series_dirty = True
-
- def pushable(self, idx):
- if isinstance(idx, str):
- idx = self.series.index(idx)
- patchguards = self.series_guards[idx]
- if not patchguards:
- return True, None
- guards = self.active()
- exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
- if exactneg:
- return False, exactneg[0]
- pos = [g for g in patchguards if g[0] == '+']
- exactpos = [g for g in pos if g[1:] in guards]
- if pos:
- if exactpos:
- return True, exactpos[0]
- return False, pos
- return True, ''
-
- def explain_pushable(self, idx, all_patches=False):
- write = all_patches and self.ui.write or self.ui.warn
- if all_patches or self.ui.verbose:
- if isinstance(idx, str):
- idx = self.series.index(idx)
- pushable, why = self.pushable(idx)
- if all_patches and pushable:
- if why is None:
- write(_('allowing %s - no guards in effect\n') %
- self.series[idx])
- else:
- if not why:
- write(_('allowing %s - no matching negative guards\n') %
- self.series[idx])
- else:
- write(_('allowing %s - guarded by %r\n') %
- (self.series[idx], why))
- if not pushable:
- if why:
- write(_('skipping %s - guarded by %r\n') %
- (self.series[idx], why))
- else:
- write(_('skipping %s - no matching guards\n') %
- self.series[idx])
-
- def save_dirty(self):
- def write_list(items, path):
- fp = self.opener(path, 'w')
- for i in items:
- fp.write("%s\n" % i)
- fp.close()
- if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
- if self.series_dirty: write_list(self.full_series, self.series_path)
- if self.guards_dirty: write_list(self.active_guards, self.guards_path)
-
- def removeundo(self, repo):
- undo = repo.sjoin('undo')
- if not os.path.exists(undo):
- return
- try:
- os.unlink(undo)
- except OSError, inst:
- self.ui.warn(_('error removing undo: %s\n') % str(inst))
-
- def printdiff(self, repo, node1, node2=None, files=None,
- fp=None, changes=None, opts={}):
- m = cmdutil.match(repo, files, opts)
- chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
- write = fp is None and repo.ui.write or fp.write
- for chunk in chunks:
- write(chunk)
-
- def mergeone(self, repo, mergeq, head, patch, rev):
- # first try just applying the patch
- (err, n) = self.apply(repo, [ patch ], update_status=False,
- strict=True, merge=rev)
-
- if err == 0:
- return (err, n)
-
- if n is None:
- raise util.Abort(_("apply failed for patch %s") % patch)
-
- self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
-
- # apply failed, strip away that rev and merge.
- hg.clean(repo, head)
- self.strip(repo, n, update=False, backup='strip')
-
- ctx = repo[rev]
- ret = hg.merge(repo, rev)
- if ret:
- raise util.Abort(_("update returned %d") % ret)
- n = repo.commit(ctx.description(), ctx.user(), force=True)
- if n is None:
- raise util.Abort(_("repo commit failed"))
- try:
- ph = patchheader(mergeq.join(patch))
- except:
- raise util.Abort(_("unable to read %s") % patch)
-
- patchf = self.opener(patch, "w")
- comments = str(ph)
- if comments:
- patchf.write(comments)
- self.printdiff(repo, head, n, fp=patchf)
- patchf.close()
- self.removeundo(repo)
- return (0, n)
-
- def qparents(self, repo, rev=None):
- if rev is None:
- (p1, p2) = repo.dirstate.parents()
- if p2 == nullid:
- return p1
- if len(self.applied) == 0:
- return None
- return bin(self.applied[-1].rev)
- pp = repo.changelog.parents(rev)
- if pp[1] != nullid:
- arevs = [ x.rev for x in self.applied ]
- p0 = hex(pp[0])
- p1 = hex(pp[1])
- if p0 in arevs:
- return pp[0]
- if p1 in arevs:
- return pp[1]
- return pp[0]
-
- def mergepatch(self, repo, mergeq, series):
- if len(self.applied) == 0:
- # each of the patches merged in will have two parents. This
- # can confuse the qrefresh, qdiff, and strip code because it
- # needs to know which parent is actually in the patch queue.
- # so, we insert a merge marker with only one parent. This way
- # the first patch in the queue is never a merge patch
- #
- pname = ".hg.patches.merge.marker"
- n = repo.commit('[mq]: merge marker', force=True)
- self.removeundo(repo)
- self.applied.append(statusentry(hex(n), pname))
- self.applied_dirty = 1
-
- head = self.qparents(repo)
-
- for patch in series:
- patch = mergeq.lookup(patch, strict=True)
- if not patch:
- self.ui.warn(_("patch %s does not exist\n") % patch)
- return (1, None)
- pushable, reason = self.pushable(patch)
- if not pushable:
- self.explain_pushable(patch, all_patches=True)
- continue
- info = mergeq.isapplied(patch)
- if not info:
- self.ui.warn(_("patch %s is not applied\n") % patch)
- return (1, None)
- rev = bin(info[1])
- (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
- if head:
- self.applied.append(statusentry(hex(head), patch))
- self.applied_dirty = 1
- if err:
- return (err, head)
- self.save_dirty()
- return (0, head)
-
- def patch(self, repo, patchfile):
- '''Apply patchfile to the working directory.
- patchfile: name of patch file'''
- files = {}
- try:
- fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
- files=files, eolmode=None)
- except Exception, inst:
- self.ui.note(str(inst) + '\n')
- if not self.ui.verbose:
- self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
- return (False, files, False)
-
- return (True, files, fuzz)
-
- def apply(self, repo, series, list=False, update_status=True,
- strict=False, patchdir=None, merge=None, all_files={}):
- wlock = lock = tr = None
- try:
- wlock = repo.wlock()
- lock = repo.lock()
- tr = repo.transaction()
- try:
- ret = self._apply(repo, series, list, update_status,
- strict, patchdir, merge, all_files=all_files)
- tr.close()
- self.save_dirty()
- return ret
- except:
- try:
- tr.abort()
- finally:
- repo.invalidate()
- repo.dirstate.invalidate()
- raise
- finally:
- del tr
- release(lock, wlock)
- self.removeundo(repo)
-
- def _apply(self, repo, series, list=False, update_status=True,
- strict=False, patchdir=None, merge=None, all_files={}):
- '''returns (error, hash)
- error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
- # TODO unify with commands.py
- if not patchdir:
- patchdir = self.path
- err = 0
- n = None
- for patchname in series:
- pushable, reason = self.pushable(patchname)
- if not pushable:
- self.explain_pushable(patchname, all_patches=True)
- continue
- self.ui.status(_("applying %s\n") % patchname)
- pf = os.path.join(patchdir, patchname)
-
- try:
- ph = patchheader(self.join(patchname))
- except:
- self.ui.warn(_("unable to read %s\n") % patchname)
- err = 1
- break
-
- message = ph.message
- if not message:
- message = _("imported patch %s\n") % patchname
- else:
- if list:
- message.append(_("\nimported patch %s") % patchname)
- message = '\n'.join(message)
-
- if ph.haspatch:
- (patcherr, files, fuzz) = self.patch(repo, pf)
- all_files.update(files)
- patcherr = not patcherr
- else:
- self.ui.warn(_("patch %s is empty\n") % patchname)
- patcherr, files, fuzz = 0, [], 0
-
- if merge and files:
- # Mark as removed/merged and update dirstate parent info
- removed = []
- merged = []
- for f in files:
- if os.path.exists(repo.wjoin(f)):
- merged.append(f)
- else:
- removed.append(f)
- for f in removed:
- repo.dirstate.remove(f)
- for f in merged:
- repo.dirstate.merge(f)
- p1, p2 = repo.dirstate.parents()
- repo.dirstate.setparents(p1, merge)
-
- files = patch.updatedir(self.ui, repo, files)
- match = cmdutil.matchfiles(repo, files or [])
- n = repo.commit(message, ph.user, ph.date, match=match, force=True)
-
- if n is None:
- raise util.Abort(_("repo commit failed"))
-
- if update_status:
- self.applied.append(statusentry(hex(n), patchname))
-
- if patcherr:
- self.ui.warn(_("patch failed, rejects left in working dir\n"))
- err = 2
- break
-
- if fuzz and strict:
- self.ui.warn(_("fuzz found when applying patch, stopping\n"))
- err = 3
- break
- return (err, n)
-
- def _cleanup(self, patches, numrevs, keep=False):
- if not keep:
- r = self.qrepo()
- if r:
- r.remove(patches, True)
- else:
- for p in patches:
- os.unlink(self.join(p))
-
- if numrevs:
- del self.applied[:numrevs]
- self.applied_dirty = 1
-
- for i in sorted([self.find_series(p) for p in patches], reverse=True):
- del self.full_series[i]
- self.parse_series()
- self.series_dirty = 1
-
- def _revpatches(self, repo, revs):
- firstrev = repo[self.applied[0].rev].rev()
- patches = []
- for i, rev in enumerate(revs):
-
- if rev < firstrev:
- raise util.Abort(_('revision %d is not managed') % rev)
-
- ctx = repo[rev]
- base = bin(self.applied[i].rev)
- if ctx.node() != base:
- msg = _('cannot delete revision %d above applied patches')
- raise util.Abort(msg % rev)
-
- patch = self.applied[i].name
- for fmt in ('[mq]: %s', 'imported patch %s'):
- if ctx.description() == fmt % patch:
- msg = _('patch %s finalized without changeset message\n')
- repo.ui.status(msg % patch)
- break
-
- patches.append(patch)
- return patches
-
- def finish(self, repo, revs):
- patches = self._revpatches(repo, sorted(revs))
- self._cleanup(patches, len(patches))
-
- def delete(self, repo, patches, opts):
- if not patches and not opts.get('rev'):
- raise util.Abort(_('qdelete requires at least one revision or '
- 'patch name'))
-
- realpatches = []
- for patch in patches:
- patch = self.lookup(patch, strict=True)
- info = self.isapplied(patch)
- if info:
- raise util.Abort(_("cannot delete applied patch %s") % patch)
- if patch not in self.series:
- raise util.Abort(_("patch %s not in series file") % patch)
- realpatches.append(patch)
-
- numrevs = 0
- if opts.get('rev'):
- if not self.applied:
- raise util.Abort(_('no patches applied'))
- revs = cmdutil.revrange(repo, opts['rev'])
- if len(revs) > 1 and revs[0] > revs[1]:
- revs.reverse()
- revpatches = self._revpatches(repo, revs)
- realpatches += revpatches
- numrevs = len(revpatches)
-
- self._cleanup(realpatches, numrevs, opts.get('keep'))
-
- def check_toppatch(self, repo):
- if len(self.applied) > 0:
- top = bin(self.applied[-1].rev)
- pp = repo.dirstate.parents()
- if top not in pp:
- raise util.Abort(_("working directory revision is not qtip"))
- return top
- return None
- def check_localchanges(self, repo, force=False, refresh=True):
- m, a, r, d = repo.status()[:4]
- if m or a or r or d:
- if not force:
- if refresh:
- raise util.Abort(_("local changes found, refresh first"))
- else:
- raise util.Abort(_("local changes found"))
- return m, a, r, d
-
- _reserved = ('series', 'status', 'guards')
- def check_reserved_name(self, name):
- if (name in self._reserved or name.startswith('.hg')
- or name.startswith('.mq')):
- raise util.Abort(_('"%s" cannot be used as the name of a patch')
- % name)
-
- def new(self, repo, patchfn, *pats, **opts):
- """options:
- msg: a string or a no-argument function returning a string
- """
- msg = opts.get('msg')
- force = opts.get('force')
- user = opts.get('user')
- date = opts.get('date')
- if date:
- date = util.parsedate(date)
- self.check_reserved_name(patchfn)
- if os.path.exists(self.join(patchfn)):
- raise util.Abort(_('patch "%s" already exists') % patchfn)
- if opts.get('include') or opts.get('exclude') or pats:
- match = cmdutil.match(repo, pats, opts)
- # detect missing files in pats
- def badfn(f, msg):
- raise util.Abort('%s: %s' % (f, msg))
- match.bad = badfn
- m, a, r, d = repo.status(match=match)[:4]
- else:
- m, a, r, d = self.check_localchanges(repo, force)
- match = cmdutil.matchfiles(repo, m + a + r)
- commitfiles = m + a + r
- self.check_toppatch(repo)
- insert = self.full_series_end()
- wlock = repo.wlock()
- try:
- # if patch file write fails, abort early
- p = self.opener(patchfn, "w")
- try:
- if date:
- p.write("# HG changeset patch\n")
- if user:
- p.write("# User " + user + "\n")
- p.write("# Date %d %d\n\n" % date)
- elif user:
- p.write("From: " + user + "\n\n")
-
- if hasattr(msg, '__call__'):
- msg = msg()
- commitmsg = msg and msg or ("[mq]: %s" % patchfn)
- n = repo.commit(commitmsg, user, date, match=match, force=True)
- if n is None:
- raise util.Abort(_("repo commit failed"))
- try:
- self.full_series[insert:insert] = [patchfn]
- self.applied.append(statusentry(hex(n), patchfn))
- self.parse_series()
- self.series_dirty = 1
- self.applied_dirty = 1
- if msg:
- msg = msg + "\n\n"
- p.write(msg)
- if commitfiles:
- diffopts = self.diffopts()
- if opts.get('git'): diffopts.git = True
- parent = self.qparents(repo, n)
- chunks = patch.diff(repo, node1=parent, node2=n,
- match=match, opts=diffopts)
- for chunk in chunks:
- p.write(chunk)
- p.close()
- wlock.release()
- wlock = None
- r = self.qrepo()
- if r: r.add([patchfn])
- except:
- repo.rollback()
- raise
- except Exception:
- patchpath = self.join(patchfn)
- try:
- os.unlink(patchpath)
- except:
- self.ui.warn(_('error unlinking %s\n') % patchpath)
- raise
- self.removeundo(repo)
- finally:
- release(wlock)
-
- def strip(self, repo, rev, update=True, backup="all", force=None):
- wlock = lock = None
- try:
- wlock = repo.wlock()
- lock = repo.lock()
-
- if update:
- self.check_localchanges(repo, force=force, refresh=False)
- urev = self.qparents(repo, rev)
- hg.clean(repo, urev)
- repo.dirstate.write()
-
- self.removeundo(repo)
- repair.strip(self.ui, repo, rev, backup)
- # strip may have unbundled a set of backed up revisions after
- # the actual strip
- self.removeundo(repo)
- finally:
- release(lock, wlock)
-
- def isapplied(self, patch):
- """returns (index, rev, patch)"""
- for i, a in enumerate(self.applied):
- if a.name == patch:
- return (i, a.rev, a.name)
- return None
-
- # if the exact patch name does not exist, we try a few
- # variations. If strict is passed, we try only #1
- #
- # 1) a number to indicate an offset in the series file
- # 2) a unique substring of the patch name was given
- # 3) patchname[-+]num to indicate an offset in the series file
- def lookup(self, patch, strict=False):
- patch = patch and str(patch)
-
- def partial_name(s):
- if s in self.series:
- return s
- matches = [x for x in self.series if s in x]
- if len(matches) > 1:
- self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
- for m in matches:
- self.ui.warn(' %s\n' % m)
- return None
- if matches:
- return matches[0]
- if len(self.series) > 0 and len(self.applied) > 0:
- if s == 'qtip':
- return self.series[self.series_end(True)-1]
- if s == 'qbase':
- return self.series[0]
- return None
-
- if patch is None:
- return None
- if patch in self.series:
- return patch
-
- if not os.path.isfile(self.join(patch)):
- try:
- sno = int(patch)
- except(ValueError, OverflowError):
- pass
- else:
- if -len(self.series) <= sno < len(self.series):
- return self.series[sno]
-
- if not strict:
- res = partial_name(patch)
- if res:
- return res
- minus = patch.rfind('-')
- if minus >= 0:
- res = partial_name(patch[:minus])
- if res:
- i = self.series.index(res)
- try:
- off = int(patch[minus+1:] or 1)
- except(ValueError, OverflowError):
- pass
- else:
- if i - off >= 0:
- return self.series[i - off]
- plus = patch.rfind('+')
- if plus >= 0:
- res = partial_name(patch[:plus])
- if res:
- i = self.series.index(res)
- try:
- off = int(patch[plus+1:] or 1)
- except(ValueError, OverflowError):
- pass
- else:
- if i + off < len(self.series):
- return self.series[i + off]
- raise util.Abort(_("patch %s not in series") % patch)
-
- def push(self, repo, patch=None, force=False, list=False,
- mergeq=None, all=False):
- wlock = repo.wlock()
- try:
- if repo.dirstate.parents()[0] not in repo.heads():
- self.ui.status(_("(working directory not at a head)\n"))
-
- if not self.series:
- self.ui.warn(_('no patches in series\n'))
- return 0
-
- patch = self.lookup(patch)
- # Suppose our series file is: A B C and the current 'top'
- # patch is B. qpush C should be performed (moving forward)
- # qpush B is a NOP (no change) qpush A is an error (can't
- # go backwards with qpush)
- if patch:
- info = self.isapplied(patch)
- if info:
- if info[0] < len(self.applied) - 1:
- raise util.Abort(
- _("cannot push to a previous patch: %s") % patch)
- self.ui.warn(
- _('qpush: %s is already at the top\n') % patch)
- return
- pushable, reason = self.pushable(patch)
- if not pushable:
- if reason:
- reason = _('guarded by %r') % reason
- else:
- reason = _('no matching guards')
- self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
- return 1
- elif all:
- patch = self.series[-1]
- if self.isapplied(patch):
- self.ui.warn(_('all patches are currently applied\n'))
- return 0
-
- # Following the above example, starting at 'top' of B:
- # qpush should be performed (pushes C), but a subsequent
- # qpush without an argument is an error (nothing to
- # apply). This allows a loop of "...while hg qpush..." to
- # work as it detects an error when done
- start = self.series_end()
- if start == len(self.series):
- self.ui.warn(_('patch series already fully applied\n'))
- return 1
- if not force:
- self.check_localchanges(repo)
-
- self.applied_dirty = 1
- if start > 0:
- self.check_toppatch(repo)
- if not patch:
- patch = self.series[start]
- end = start + 1
- else:
- end = self.series.index(patch, start) + 1
-
- s = self.series[start:end]
- all_files = {}
- try:
- if mergeq:
- ret = self.mergepatch(repo, mergeq, s)
- else:
- ret = self.apply(repo, s, list, all_files=all_files)
- except:
- self.ui.warn(_('cleaning up working directory...'))
- node = repo.dirstate.parents()[0]
- hg.revert(repo, node, None)
- unknown = repo.status(unknown=True)[4]
- # only remove unknown files that we know we touched or
- # created while patching
- for f in unknown:
- if f in all_files:
- util.unlink(repo.wjoin(f))
- self.ui.warn(_('done\n'))
- raise
-
- top = self.applied[-1].name
- if ret[0] and ret[0] > 1:
- msg = _("errors during apply, please fix and refresh %s\n")
- self.ui.write(msg % top)
- else:
- self.ui.write(_("now at: %s\n") % top)
- return ret[0]
-
- finally:
- wlock.release()
-
- def pop(self, repo, patch=None, force=False, update=True, all=False):
- def getfile(f, rev, flags):
- t = repo.file(f).read(rev)
- repo.wwrite(f, t, flags)
-
- wlock = repo.wlock()
- try:
- if patch:
- # index, rev, patch
- info = self.isapplied(patch)
- if not info:
- patch = self.lookup(patch)
- info = self.isapplied(patch)
- if not info:
- raise util.Abort(_("patch %s is not applied") % patch)
-
- if len(self.applied) == 0:
- # Allow qpop -a to work repeatedly,
- # but not qpop without an argument
- self.ui.warn(_("no patches applied\n"))
- return not all
-
- if all:
- start = 0
- elif patch:
- start = info[0] + 1
- else:
- start = len(self.applied) - 1
-
- if start >= len(self.applied):
- self.ui.warn(_("qpop: %s is already at the top\n") % patch)
- return
-
- if not update:
- parents = repo.dirstate.parents()
- rr = [ bin(x.rev) for x in self.applied ]
- for p in parents:
- if p in rr:
- self.ui.warn(_("qpop: forcing dirstate update\n"))
- update = True
- else:
- parents = [p.hex() for p in repo[None].parents()]
- needupdate = False
- for entry in self.applied[start:]:
- if entry.rev in parents:
- needupdate = True
- break
- update = needupdate
-
- if not force and update:
- self.check_localchanges(repo)
-
- self.applied_dirty = 1
- end = len(self.applied)
- rev = bin(self.applied[start].rev)
- if update:
- top = self.check_toppatch(repo)
-
- try:
- heads = repo.changelog.heads(rev)
- except error.LookupError:
- node = short(rev)
- raise util.Abort(_('trying to pop unknown node %s') % node)
-
- if heads != [bin(self.applied[-1].rev)]:
- raise util.Abort(_("popping would remove a revision not "
- "managed by this patch queue"))
-
- # we know there are no local changes, so we can make a simplified
- # form of hg.update.
- if update:
- qp = self.qparents(repo, rev)
- changes = repo.changelog.read(qp)
- mmap = repo.manifest.read(changes[0])
- m, a, r, d = repo.status(qp, top)[:4]
- if d:
- raise util.Abort(_("deletions found between repo revs"))
- for f in m:
- getfile(f, mmap[f], mmap.flags(f))
- for f in r:
- getfile(f, mmap[f], mmap.flags(f))
- for f in m + r:
- repo.dirstate.normal(f)
- for f in a:
- try:
- os.unlink(repo.wjoin(f))
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- try: os.removedirs(os.path.dirname(repo.wjoin(f)))
- except: pass
- repo.dirstate.forget(f)
- repo.dirstate.setparents(qp, nullid)
- for patch in reversed(self.applied[start:end]):
- self.ui.status(_("popping %s\n") % patch.name)
- del self.applied[start:end]
- self.strip(repo, rev, update=False, backup='strip')
- if len(self.applied):
- self.ui.write(_("now at: %s\n") % self.applied[-1].name)
- else:
- self.ui.write(_("patch queue now empty\n"))
- finally:
- wlock.release()
-
- def diff(self, repo, pats, opts):
- top = self.check_toppatch(repo)
- if not top:
- self.ui.write(_("no patches applied\n"))
- return
- qp = self.qparents(repo, top)
- self._diffopts = patch.diffopts(self.ui, opts)
- self.printdiff(repo, qp, files=pats, opts=opts)
-
- def refresh(self, repo, pats=None, **opts):
- if len(self.applied) == 0:
- self.ui.write(_("no patches applied\n"))
- return 1
- msg = opts.get('msg', '').rstrip()
- newuser = opts.get('user')
- newdate = opts.get('date')
- if newdate:
- newdate = '%d %d' % util.parsedate(newdate)
- wlock = repo.wlock()
- try:
- self.check_toppatch(repo)
- (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
- top = bin(top)
- if repo.changelog.heads(top) != [top]:
- raise util.Abort(_("cannot refresh a revision with children"))
- cparents = repo.changelog.parents(top)
- patchparent = self.qparents(repo, top)
- ph = patchheader(self.join(patchfn))
-
- patchf = self.opener(patchfn, 'r')
-
- # if the patch was a git patch, refresh it as a git patch
- for line in patchf:
- if line.startswith('diff --git'):
- self.diffopts().git = True
- break
-
- if msg:
- ph.setmessage(msg)
- if newuser:
- ph.setuser(newuser)
- if newdate:
- ph.setdate(newdate)
-
- # only commit new patch when write is complete
- patchf = self.opener(patchfn, 'w', atomictemp=True)
-
- patchf.seek(0)
- patchf.truncate()
-
- comments = str(ph)
- if comments:
- patchf.write(comments)
-
- if opts.get('git'):
- self.diffopts().git = True
- tip = repo.changelog.tip()
- if top == tip:
- # if the top of our patch queue is also the tip, there is an
- # optimization here. We update the dirstate in place and strip
- # off the tip commit. Then just commit the current directory
- # tree. We can also send repo.commit the list of files
- # changed to speed up the diff
- #
- # in short mode, we only diff the files included in the
- # patch already plus specified files
- #
- # this should really read:
- # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
- # but we do it backwards to take advantage of manifest/chlog
- # caching against the next repo.status call
- #
- mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
- changes = repo.changelog.read(tip)
- man = repo.manifest.read(changes[0])
- aaa = aa[:]
- matchfn = cmdutil.match(repo, pats, opts)
- if opts.get('short'):
- # if amending a patch, we start with existing
- # files plus specified files - unfiltered
- match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
- # filter with inc/exl options
- matchfn = cmdutil.match(repo, opts=opts)
- else:
- match = cmdutil.matchall(repo)
- m, a, r, d = repo.status(match=match)[:4]
-
- # we might end up with files that were added between
- # tip and the dirstate parent, but then changed in the
- # local dirstate. in this case, we want them to only
- # show up in the added section
- for x in m:
- if x not in aa:
- mm.append(x)
- # we might end up with files added by the local dirstate that
- # were deleted by the patch. In this case, they should only
- # show up in the changed section.
- for x in a:
- if x in dd:
- del dd[dd.index(x)]
- mm.append(x)
- else:
- aa.append(x)
- # make sure any files deleted in the local dirstate
- # are not in the add or change column of the patch
- forget = []
- for x in d + r:
- if x in aa:
- del aa[aa.index(x)]
- forget.append(x)
- continue
- elif x in mm:
- del mm[mm.index(x)]
- dd.append(x)
-
- m = list(set(mm))
- r = list(set(dd))
- a = list(set(aa))
- c = [filter(matchfn, l) for l in (m, a, r)]
- match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
- chunks = patch.diff(repo, patchparent, match=match,
- changes=c, opts=self.diffopts())
- for chunk in chunks:
- patchf.write(chunk)
-
- try:
- if self.diffopts().git:
- copies = {}
- for dst in a:
- src = repo.dirstate.copied(dst)
- # during qfold, the source file for copies may
- # be removed. Treat this as a simple add.
- if src is not None and src in repo.dirstate:
- copies.setdefault(src, []).append(dst)
- repo.dirstate.add(dst)
- # remember the copies between patchparent and tip
- for dst in aaa:
- f = repo.file(dst)
- src = f.renamed(man[dst])
- if src:
- copies.setdefault(src[0], []).extend(copies.get(dst, []))
- if dst in a:
- copies[src[0]].append(dst)
- # we can't copy a file created by the patch itself
- if dst in copies:
- del copies[dst]
- for src, dsts in copies.iteritems():
- for dst in dsts:
- repo.dirstate.copy(src, dst)
- else:
- for dst in a:
- repo.dirstate.add(dst)
- # Drop useless copy information
- for f in list(repo.dirstate.copies()):
- repo.dirstate.copy(None, f)
- for f in r:
- repo.dirstate.remove(f)
- # if the patch excludes a modified file, mark that
- # file with mtime=0 so status can see it.
- mm = []
- for i in xrange(len(m)-1, -1, -1):
- if not matchfn(m[i]):
- mm.append(m[i])
- del m[i]
- for f in m:
- repo.dirstate.normal(f)
- for f in mm:
- repo.dirstate.normallookup(f)
- for f in forget:
- repo.dirstate.forget(f)
-
- if not msg:
- if not ph.message:
- message = "[mq]: %s\n" % patchfn
- else:
- message = "\n".join(ph.message)
- else:
- message = msg
-
- user = ph.user or changes[1]
-
- # assumes strip can roll itself back if interrupted
- repo.dirstate.setparents(*cparents)
- self.applied.pop()
- self.applied_dirty = 1
- self.strip(repo, top, update=False,
- backup='strip')
- except:
- repo.dirstate.invalidate()
- raise
-
- try:
- # might be nice to attempt to roll back strip after this
- patchf.rename()
- n = repo.commit(message, user, ph.date, match=match,
- force=True)
- self.applied.append(statusentry(hex(n), patchfn))
- except:
- ctx = repo[cparents[0]]
- repo.dirstate.rebuild(ctx.node(), ctx.manifest())
- self.save_dirty()
- self.ui.warn(_('refresh interrupted while patch was popped! '
- '(revert --all, qpush to recover)\n'))
- raise
- else:
- self.printdiff(repo, patchparent, fp=patchf)
- patchf.rename()
- added = repo.status()[1]
- for a in added:
- f = repo.wjoin(a)
- try:
- os.unlink(f)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- try: os.removedirs(os.path.dirname(f))
- except: pass
- # forget the file copies in the dirstate
- # push should readd the files later on
- repo.dirstate.forget(a)
- self.pop(repo, force=True)
- self.push(repo, force=True)
- finally:
- wlock.release()
- self.removeundo(repo)
-
- def init(self, repo, create=False):
- if not create and os.path.isdir(self.path):
- raise util.Abort(_("patch queue directory already exists"))
- try:
- os.mkdir(self.path)
- except OSError, inst:
- if inst.errno != errno.EEXIST or not create:
- raise
- if create:
- return self.qrepo(create=True)
-
- def unapplied(self, repo, patch=None):
- if patch and patch not in self.series:
- raise util.Abort(_("patch %s is not in series file") % patch)
- if not patch:
- start = self.series_end()
- else:
- start = self.series.index(patch) + 1
- unapplied = []
- for i in xrange(start, len(self.series)):
- pushable, reason = self.pushable(i)
- if pushable:
- unapplied.append((i, self.series[i]))
- self.explain_pushable(i)
- return unapplied
-
- def qseries(self, repo, missing=None, start=0, length=None, status=None,
- summary=False):
- def displayname(pfx, patchname):
- if summary:
- ph = patchheader(self.join(patchname))
- msg = ph.message
- msg = msg and ': ' + msg[0] or ': '
- else:
- msg = ''
- msg = "%s%s%s" % (pfx, patchname, msg)
- if self.ui.interactive():
- msg = util.ellipsis(msg, util.termwidth())
- self.ui.write(msg + '\n')
-
- applied = set([p.name for p in self.applied])
- if length is None:
- length = len(self.series) - start
- if not missing:
- if self.ui.verbose:
- idxwidth = len(str(start+length - 1))
- for i in xrange(start, start+length):
- patch = self.series[i]
- if patch in applied:
- stat = 'A'
- elif self.pushable(i)[0]:
- stat = 'U'
- else:
- stat = 'G'
- pfx = ''
- if self.ui.verbose:
- pfx = '%*d %s ' % (idxwidth, i, stat)
- elif status and status != stat:
- continue
- displayname(pfx, patch)
- else:
- msng_list = []
- for root, dirs, files in os.walk(self.path):
- d = root[len(self.path) + 1:]
- for f in files:
- fl = os.path.join(d, f)
- if (fl not in self.series and
- fl not in (self.status_path, self.series_path,
- self.guards_path)
- and not fl.startswith('.')):
- msng_list.append(fl)
- for x in sorted(msng_list):
- pfx = self.ui.verbose and ('D ') or ''
- displayname(pfx, x)
-
- def issaveline(self, l):
- if l.name == '.hg.patches.save.line':
- return True
-
- def qrepo(self, create=False):
- if create or os.path.isdir(self.join(".hg")):
- return hg.repository(self.ui, path=self.path, create=create)
-
- def restore(self, repo, rev, delete=None, qupdate=None):
- c = repo.changelog.read(rev)
- desc = c[4].strip()
- lines = desc.splitlines()
- i = 0
- datastart = None
- series = []
- applied = []
- qpp = None
- for i, line in enumerate(lines):
- if line == 'Patch Data:':
- datastart = i + 1
- elif line.startswith('Dirstate:'):
- l = line.rstrip()
- l = l[10:].split(' ')
- qpp = [ bin(x) for x in l ]
- elif datastart != None:
- l = line.rstrip()
- se = statusentry(l)
- file_ = se.name
- if se.rev:
- applied.append(se)
- else:
- series.append(file_)
- if datastart is None:
- self.ui.warn(_("No saved patch data found\n"))
- return 1
- self.ui.warn(_("restoring status: %s\n") % lines[0])
- self.full_series = series
- self.applied = applied
- self.parse_series()
- self.series_dirty = 1
- self.applied_dirty = 1
- heads = repo.changelog.heads()
- if delete:
- if rev not in heads:
- self.ui.warn(_("save entry has children, leaving it alone\n"))
- else:
- self.ui.warn(_("removing save entry %s\n") % short(rev))
- pp = repo.dirstate.parents()
- if rev in pp:
- update = True
- else:
- update = False
- self.strip(repo, rev, update=update, backup='strip')
- if qpp:
- self.ui.warn(_("saved queue repository parents: %s %s\n") %
- (short(qpp[0]), short(qpp[1])))
- if qupdate:
- self.ui.status(_("queue directory updating\n"))
- r = self.qrepo()
- if not r:
- self.ui.warn(_("Unable to load queue repository\n"))
- return 1
- hg.clean(r, qpp[0])
-
- def save(self, repo, msg=None):
- if len(self.applied) == 0:
- self.ui.warn(_("save: no patches applied, exiting\n"))
- return 1
- if self.issaveline(self.applied[-1]):
- self.ui.warn(_("status is already saved\n"))
- return 1
-
- ar = [ ':' + x for x in self.full_series ]
- if not msg:
- msg = _("hg patches saved state")
- else:
- msg = "hg patches: " + msg.rstrip('\r\n')
- r = self.qrepo()
- if r:
- pp = r.dirstate.parents()
- msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
- msg += "\n\nPatch Data:\n"
- text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
- "\n".join(ar) + '\n' or "")
- n = repo.commit(text, force=True)
- if not n:
- self.ui.warn(_("repo commit failed\n"))
- return 1
- self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
- self.applied_dirty = 1
- self.removeundo(repo)
-
- def full_series_end(self):
- if len(self.applied) > 0:
- p = self.applied[-1].name
- end = self.find_series(p)
- if end is None:
- return len(self.full_series)
- return end + 1
- return 0
-
- def series_end(self, all_patches=False):
- """If all_patches is False, return the index of the next pushable patch
- in the series, or the series length. If all_patches is True, return the
- index of the first patch past the last applied one.
- """
- end = 0
- def next(start):
- if all_patches:
- return start
- i = start
- while i < len(self.series):
- p, reason = self.pushable(i)
- if p:
- break
- self.explain_pushable(i)
- i += 1
- return i
- if len(self.applied) > 0:
- p = self.applied[-1].name
- try:
- end = self.series.index(p)
- except ValueError:
- return 0
- return next(end + 1)
- return next(end)
-
- def appliedname(self, index):
- pname = self.applied[index].name
- if not self.ui.verbose:
- p = pname
- else:
- p = str(self.series.index(pname)) + " " + pname
- return p
-
- def qimport(self, repo, files, patchname=None, rev=None, existing=None,
- force=None, git=False):
- def checkseries(patchname):
- if patchname in self.series:
- raise util.Abort(_('patch %s is already in the series file')
- % patchname)
- def checkfile(patchname):
- if not force and os.path.exists(self.join(patchname)):
- raise util.Abort(_('patch "%s" already exists')
- % patchname)
-
- if rev:
- if files:
- raise util.Abort(_('option "-r" not valid when importing '
- 'files'))
- rev = cmdutil.revrange(repo, rev)
- rev.sort(reverse=True)
- if (len(files) > 1 or len(rev) > 1) and patchname:
- raise util.Abort(_('option "-n" not valid when importing multiple '
- 'patches'))
- i = 0
- added = []
- if rev:
- # If mq patches are applied, we can only import revisions
- # that form a linear path to qbase.
- # Otherwise, they should form a linear path to a head.
- heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
- if len(heads) > 1:
- raise util.Abort(_('revision %d is the root of more than one '
- 'branch') % rev[-1])
- if self.applied:
- base = hex(repo.changelog.node(rev[0]))
- if base in [n.rev for n in self.applied]:
- raise util.Abort(_('revision %d is already managed')
- % rev[0])
- if heads != [bin(self.applied[-1].rev)]:
- raise util.Abort(_('revision %d is not the parent of '
- 'the queue') % rev[0])
- base = repo.changelog.rev(bin(self.applied[0].rev))
- lastparent = repo.changelog.parentrevs(base)[0]
- else:
- if heads != [repo.changelog.node(rev[0])]:
- raise util.Abort(_('revision %d has unmanaged children')
- % rev[0])
- lastparent = None
-
- if git:
- self.diffopts().git = True
-
- for r in rev:
- p1, p2 = repo.changelog.parentrevs(r)
- n = repo.changelog.node(r)
- if p2 != nullrev:
- raise util.Abort(_('cannot import merge revision %d') % r)
- if lastparent and lastparent != r:
- raise util.Abort(_('revision %d is not the parent of %d')
- % (r, lastparent))
- lastparent = p1
-
- if not patchname:
- patchname = normname('%d.diff' % r)
- self.check_reserved_name(patchname)
- checkseries(patchname)
- checkfile(patchname)
- self.full_series.insert(0, patchname)
-
- patchf = self.opener(patchname, "w")
- patch.export(repo, [n], fp=patchf, opts=self.diffopts())
- patchf.close()
-
- se = statusentry(hex(n), patchname)
- self.applied.insert(0, se)
-
- added.append(patchname)
- patchname = None
- self.parse_series()
- self.applied_dirty = 1
-
- for filename in files:
- if existing:
- if filename == '-':
- raise util.Abort(_('-e is incompatible with import from -'))
- if not patchname:
- patchname = normname(filename)
- self.check_reserved_name(patchname)
- if not os.path.isfile(self.join(patchname)):
- raise util.Abort(_("patch %s does not exist") % patchname)
- else:
- try:
- if filename == '-':
- if not patchname:
- raise util.Abort(_('need --name to import a patch from -'))
- text = sys.stdin.read()
- else:
- text = url.open(self.ui, filename).read()
- except (OSError, IOError):
- raise util.Abort(_("unable to read %s") % filename)
- if not patchname:
- patchname = normname(os.path.basename(filename))
- self.check_reserved_name(patchname)
- checkfile(patchname)
- patchf = self.opener(patchname, "w")
- patchf.write(text)
- if not force:
- checkseries(patchname)
- if patchname not in self.series:
- index = self.full_series_end() + i
- self.full_series[index:index] = [patchname]
- self.parse_series()
- self.ui.warn(_("adding %s to series file\n") % patchname)
- i += 1
- added.append(patchname)
- patchname = None
- self.series_dirty = 1
- qrepo = self.qrepo()
- if qrepo:
- qrepo.add(added)
-
-def delete(ui, repo, *patches, **opts):
- """remove patches from queue
-
- The patches must not be applied, and at least one patch is required. With
- -k/--keep, the patch files are preserved in the patch directory.
-
- To stop managing a patch and move it into permanent history,
- use the qfinish command."""
- q = repo.mq
- q.delete(repo, patches, opts)
- q.save_dirty()
- return 0
-
-def applied(ui, repo, patch=None, **opts):
- """print the patches already applied"""
- q = repo.mq
- if patch:
- if patch not in q.series:
- raise util.Abort(_("patch %s is not in series file") % patch)
- end = q.series.index(patch) + 1
- else:
- end = q.series_end(True)
- return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
-
-def unapplied(ui, repo, patch=None, **opts):
- """print the patches not yet applied"""
- q = repo.mq
- if patch:
- if patch not in q.series:
- raise util.Abort(_("patch %s is not in series file") % patch)
- start = q.series.index(patch) + 1
- else:
- start = q.series_end(True)
- q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
-
-def qimport(ui, repo, *filename, **opts):
- """import a patch
-
- The patch is inserted into the series after the last applied
- patch. If no patches have been applied, qimport prepends the patch
- to the series.
-
- The patch will have the same name as its source file unless you
- give it a new one with -n/--name.
-
- You can register an existing patch inside the patch directory with
- the -e/--existing flag.
-
- With -f/--force, an existing patch of the same name will be
- overwritten.
-
- An existing changeset may be placed under mq control with -r/--rev
- (e.g. qimport --rev tip -n patch will place tip under mq control).
- With -g/--git, patches imported with --rev will use the git diff
- format. See the diffs help topic for information on why this is
- important for preserving rename/copy information and permission
- changes.
-
- To import a patch from standard input, pass - as the patch file.
- When importing from standard input, a patch name must be specified
- using the --name flag.
- """
- q = repo.mq
- q.qimport(repo, filename, patchname=opts['name'],
- existing=opts['existing'], force=opts['force'], rev=opts['rev'],
- git=opts['git'])
- q.save_dirty()
-
- if opts.get('push') and not opts.get('rev'):
- return q.push(repo, None)
- return 0
-
-def init(ui, repo, **opts):
- """init a new queue repository
-
- The queue repository is unversioned by default. If
- -c/--create-repo is specified, qinit will create a separate nested
- repository for patches (qinit -c may also be run later to convert
- an unversioned patch repository into a versioned one). You can use
- qcommit to commit changes to this queue repository."""
- q = repo.mq
- r = q.init(repo, create=opts['create_repo'])
- q.save_dirty()
- if r:
- if not os.path.exists(r.wjoin('.hgignore')):
- fp = r.wopener('.hgignore', 'w')
- fp.write('^\\.hg\n')
- fp.write('^\\.mq\n')
- fp.write('syntax: glob\n')
- fp.write('status\n')
- fp.write('guards\n')
- fp.close()
- if not os.path.exists(r.wjoin('series')):
- r.wopener('series', 'w').close()
- r.add(['.hgignore', 'series'])
- commands.add(ui, r)
- return 0
-
-def clone(ui, source, dest=None, **opts):
- '''clone main and patch repository at same time
-
- If source is local, destination will have no patches applied. If
- source is remote, this command can not check if patches are
- applied in source, so cannot guarantee that patches are not
- applied in destination. If you clone remote repository, be sure
- before that it has no patches applied.
-
- Source patch repository is looked for in <src>/.hg/patches by
- default. Use -p <url> to change.
-
- The patch directory must be a nested Mercurial repository, as
- would be created by qinit -c.
- '''
- def patchdir(repo):
- url = repo.url()
- if url.endswith('/'):
- url = url[:-1]
- return url + '/.hg/patches'
- if dest is None:
- dest = hg.defaultdest(source)
- sr = hg.repository(cmdutil.remoteui(ui, opts), ui.expandpath(source))
- if opts['patches']:
- patchespath = ui.expandpath(opts['patches'])
- else:
- patchespath = patchdir(sr)
- try:
- hg.repository(ui, patchespath)
- except error.RepoError:
- raise util.Abort(_('versioned patch repository not found'
- ' (see qinit -c)'))
- qbase, destrev = None, None
- if sr.local():
- if sr.mq.applied:
- qbase = bin(sr.mq.applied[0].rev)
- if not hg.islocal(dest):
- heads = set(sr.heads())
- destrev = list(heads.difference(sr.heads(qbase)))
- destrev.append(sr.changelog.parents(qbase)[0])
- elif sr.capable('lookup'):
- try:
- qbase = sr.lookup('qbase')
- except error.RepoError:
- pass
- ui.note(_('cloning main repository\n'))
- sr, dr = hg.clone(ui, sr.url(), dest,
- pull=opts['pull'],
- rev=destrev,
- update=False,
- stream=opts['uncompressed'])
- ui.note(_('cloning patch repository\n'))
- hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
- pull=opts['pull'], update=not opts['noupdate'],
- stream=opts['uncompressed'])
- if dr.local():
- if qbase:
- ui.note(_('stripping applied patches from destination '
- 'repository\n'))
- dr.mq.strip(dr, qbase, update=False, backup=None)
- if not opts['noupdate']:
- ui.note(_('updating destination repository\n'))
- hg.update(dr, dr.changelog.tip())
-
-def commit(ui, repo, *pats, **opts):
- """commit changes in the queue repository"""
- q = repo.mq
- r = q.qrepo()
- if not r: raise util.Abort('no queue repository')
- commands.commit(r.ui, r, *pats, **opts)
-
-def series(ui, repo, **opts):
- """print the entire series file"""
- repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
- return 0
-
-def top(ui, repo, **opts):
- """print the name of the current patch"""
- q = repo.mq
- t = q.applied and q.series_end(True) or 0
- if t:
- return q.qseries(repo, start=t-1, length=1, status='A',
- summary=opts.get('summary'))
- else:
- ui.write(_("no patches applied\n"))
- return 1
-
-def next(ui, repo, **opts):
- """print the name of the next patch"""
- q = repo.mq
- end = q.series_end()
- if end == len(q.series):
- ui.write(_("all patches applied\n"))
- return 1
- return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
-
-def prev(ui, repo, **opts):
- """print the name of the previous patch"""
- q = repo.mq
- l = len(q.applied)
- if l == 1:
- ui.write(_("only one patch applied\n"))
- return 1
- if not l:
- ui.write(_("no patches applied\n"))
- return 1
- return q.qseries(repo, start=l-2, length=1, status='A',
- summary=opts.get('summary'))
-
-def setupheaderopts(ui, opts):
- def do(opt, val):
- if not opts[opt] and opts['current' + opt]:
- opts[opt] = val
- do('user', ui.username())
- do('date', "%d %d" % util.makedate())
-
-def new(ui, repo, patch, *args, **opts):
- """create a new patch
-
- qnew creates a new patch on top of the currently-applied patch (if
- any). It will refuse to run if there are any outstanding changes
- unless -f/--force is specified, in which case the patch will be
- initialized with them. You may also use -I/--include,
- -X/--exclude, and/or a list of files after the patch name to add
- only changes to matching files to the new patch, leaving the rest
- as uncommitted modifications.
-
- -u/--user and -d/--date can be used to set the (given) user and
- date, respectively. -U/--currentuser and -D/--currentdate set user
- to current user and date to current date.
-
- -e/--edit, -m/--message or -l/--logfile set the patch header as
- well as the commit message. If none is specified, the header is
- empty and the commit message is '[mq]: PATCH'.
-
- Use the -g/--git option to keep the patch in the git extended diff
- format. Read the diffs help topic for more information on why this
- is important for preserving permission changes and copy/rename
- information.
- """
- msg = cmdutil.logmessage(opts)
- def getmsg(): return ui.edit(msg, ui.username())
- q = repo.mq
- opts['msg'] = msg
- if opts.get('edit'):
- opts['msg'] = getmsg
- else:
- opts['msg'] = msg
- setupheaderopts(ui, opts)
- q.new(repo, patch, *args, **opts)
- q.save_dirty()
- return 0
-
-def refresh(ui, repo, *pats, **opts):
- """update the current patch
-
- If any file patterns are provided, the refreshed patch will
- contain only the modifications that match those patterns; the
- remaining modifications will remain in the working directory.
-
- If -s/--short is specified, files currently included in the patch
- will be refreshed just like matched files and remain in the patch.
-
- hg add/remove/copy/rename work as usual, though you might want to
- use git-style patches (-g/--git or [diff] git=1) to track copies
- and renames. See the diffs help topic for more information on the
- git diff format.
- """
- q = repo.mq
- message = cmdutil.logmessage(opts)
- if opts['edit']:
- if not q.applied:
- ui.write(_("no patches applied\n"))
- return 1
- if message:
- raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
- patch = q.applied[-1].name
- ph = patchheader(q.join(patch))
- message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
- setupheaderopts(ui, opts)
- ret = q.refresh(repo, pats, msg=message, **opts)
- q.save_dirty()
- return ret
-
-def diff(ui, repo, *pats, **opts):
- """diff of the current patch and subsequent modifications
-
- Shows a diff which includes the current patch as well as any
- changes which have been made in the working directory since the
- last refresh (thus showing what the current patch would become
- after a qrefresh).
-
- Use 'hg diff' if you only want to see the changes made since the
- last qrefresh, or 'hg export qtip' if you want to see changes made
- by the current patch without including changes made since the
- qrefresh.
- """
- repo.mq.diff(repo, pats, opts)
- return 0
-
-def fold(ui, repo, *files, **opts):
- """fold the named patches into the current patch
-
- Patches must not yet be applied. Each patch will be successively
- applied to the current patch in the order given. If all the
- patches apply successfully, the current patch will be refreshed
- with the new cumulative patch, and the folded patches will be
- deleted. With -k/--keep, the folded patch files will not be
- removed afterwards.
-
- The header for each folded patch will be concatenated with the
- current patch header, separated by a line of '* * *'."""
-
- q = repo.mq
-
- if not files:
- raise util.Abort(_('qfold requires at least one patch name'))
- if not q.check_toppatch(repo):
- raise util.Abort(_('No patches applied'))
- q.check_localchanges(repo)
-
- message = cmdutil.logmessage(opts)
- if opts['edit']:
- if message:
- raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
-
- parent = q.lookup('qtip')
- patches = []
- messages = []
- for f in files:
- p = q.lookup(f)
- if p in patches or p == parent:
- ui.warn(_('Skipping already folded patch %s') % p)
- if q.isapplied(p):
- raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
- patches.append(p)
-
- for p in patches:
- if not message:
- ph = patchheader(q.join(p))
- if ph.message:
- messages.append(ph.message)
- pf = q.join(p)
- (patchsuccess, files, fuzz) = q.patch(repo, pf)
- if not patchsuccess:
- raise util.Abort(_('Error folding patch %s') % p)
- patch.updatedir(ui, repo, files)
-
- if not message:
- ph = patchheader(q.join(parent))
- message, user = ph.message, ph.user
- for msg in messages:
- message.append('* * *')
- message.extend(msg)
- message = '\n'.join(message)
-
- if opts['edit']:
- message = ui.edit(message, user or ui.username())
-
- q.refresh(repo, msg=message)
- q.delete(repo, patches, opts)
- q.save_dirty()
-
-def goto(ui, repo, patch, **opts):
- '''push or pop patches until named patch is at top of stack'''
- q = repo.mq
- patch = q.lookup(patch)
- if q.isapplied(patch):
- ret = q.pop(repo, patch, force=opts['force'])
- else:
- ret = q.push(repo, patch, force=opts['force'])
- q.save_dirty()
- return ret
-
-def guard(ui, repo, *args, **opts):
- '''set or print guards for a patch
-
- Guards control whether a patch can be pushed. A patch with no
- guards is always pushed. A patch with a positive guard ("+foo") is
- pushed only if the qselect command has activated it. A patch with
- a negative guard ("-foo") is never pushed if the qselect command
- has activated it.
-
- With no arguments, print the currently active guards.
- With arguments, set guards for the named patch.
- NOTE: Specifying negative guards now requires '--'.
-
- To set guards on another patch:
- hg qguard -- other.patch +2.6.17 -stable
- '''
- def status(idx):
- guards = q.series_guards[idx] or ['unguarded']
- ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
- q = repo.mq
- patch = None
- args = list(args)
- if opts['list']:
- if args or opts['none']:
- raise util.Abort(_('cannot mix -l/--list with options or arguments'))
- for i in xrange(len(q.series)):
- status(i)
- return
- if not args or args[0][0:1] in '-+':
- if not q.applied:
- raise util.Abort(_('no patches applied'))
- patch = q.applied[-1].name
- if patch is None and args[0][0:1] not in '-+':
- patch = args.pop(0)
- if patch is None:
- raise util.Abort(_('no patch to work with'))
- if args or opts['none']:
- idx = q.find_series(patch)
- if idx is None:
- raise util.Abort(_('no patch named %s') % patch)
- q.set_guards(idx, args)
- q.save_dirty()
- else:
- status(q.series.index(q.lookup(patch)))
-
-def header(ui, repo, patch=None):
- """print the header of the topmost or specified patch"""
- q = repo.mq
-
- if patch:
- patch = q.lookup(patch)
- else:
- if not q.applied:
- ui.write('no patches applied\n')
- return 1
- patch = q.lookup('qtip')
- ph = patchheader(repo.mq.join(patch))
-
- ui.write('\n'.join(ph.message) + '\n')
-
-def lastsavename(path):
- (directory, base) = os.path.split(path)
- names = os.listdir(directory)
- namere = re.compile("%s.([0-9]+)" % base)
- maxindex = None
- maxname = None
- for f in names:
- m = namere.match(f)
- if m:
- index = int(m.group(1))
- if maxindex is None or index > maxindex:
- maxindex = index
- maxname = f
- if maxname:
- return (os.path.join(directory, maxname), maxindex)
- return (None, None)
-
-def savename(path):
- (last, index) = lastsavename(path)
- if last is None:
- index = 0
- newpath = path + ".%d" % (index + 1)
- return newpath
-
-def push(ui, repo, patch=None, **opts):
- """push the next patch onto the stack
-
- When -f/--force is applied, all local changes in patched files
- will be lost.
- """
- q = repo.mq
- mergeq = None
-
- if opts['merge']:
- if opts['name']:
- newpath = repo.join(opts['name'])
- else:
- newpath, i = lastsavename(q.path)
- if not newpath:
- ui.warn(_("no saved queues found, please use -n\n"))
- return 1
- mergeq = queue(ui, repo.join(""), newpath)
- ui.warn(_("merging with queue at: %s\n") % mergeq.path)
- ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
- mergeq=mergeq, all=opts.get('all'))
- return ret
-
-def pop(ui, repo, patch=None, **opts):
- """pop the current patch off the stack
-
- By default, pops off the top of the patch stack. If given a patch
- name, keeps popping off patches until the named patch is at the
- top of the stack.
- """
- localupdate = True
- if opts['name']:
- q = queue(ui, repo.join(""), repo.join(opts['name']))
- ui.warn(_('using patch queue: %s\n') % q.path)
- localupdate = False
- else:
- q = repo.mq
- ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
- all=opts['all'])
- q.save_dirty()
- return ret
-
-def rename(ui, repo, patch, name=None, **opts):
- """rename a patch
-
- With one argument, renames the current patch to PATCH1.
- With two arguments, renames PATCH1 to PATCH2."""
-
- q = repo.mq
-
- if not name:
- name = patch
- patch = None
-
- if patch:
- patch = q.lookup(patch)
- else:
- if not q.applied:
- ui.write(_('no patches applied\n'))
- return
- patch = q.lookup('qtip')
- absdest = q.join(name)
- if os.path.isdir(absdest):
- name = normname(os.path.join(name, os.path.basename(patch)))
- absdest = q.join(name)
- if os.path.exists(absdest):
- raise util.Abort(_('%s already exists') % absdest)
-
- if name in q.series:
- raise util.Abort(_('A patch named %s already exists in the series file') % name)
-
- if ui.verbose:
- ui.write('renaming %s to %s\n' % (patch, name))
- i = q.find_series(patch)
- guards = q.guard_re.findall(q.full_series[i])
- q.full_series[i] = name + ''.join([' #' + g for g in guards])
- q.parse_series()
- q.series_dirty = 1
-
- info = q.isapplied(patch)
- if info:
- q.applied[info[0]] = statusentry(info[1], name)
- q.applied_dirty = 1
-
- util.rename(q.join(patch), absdest)
- r = q.qrepo()
- if r:
- wlock = r.wlock()
- try:
- if r.dirstate[patch] == 'a':
- r.dirstate.forget(patch)
- r.dirstate.add(name)
- else:
- if r.dirstate[name] == 'r':
- r.undelete([name])
- r.copy(patch, name)
- r.remove([patch], False)
- finally:
- wlock.release()
-
- q.save_dirty()
-
-def restore(ui, repo, rev, **opts):
- """restore the queue state saved by a revision"""
- rev = repo.lookup(rev)
- q = repo.mq
- q.restore(repo, rev, delete=opts['delete'],
- qupdate=opts['update'])
- q.save_dirty()
- return 0
-
-def save(ui, repo, **opts):
- """save current queue state"""
- q = repo.mq
- message = cmdutil.logmessage(opts)
- ret = q.save(repo, msg=message)
- if ret:
- return ret
- q.save_dirty()
- if opts['copy']:
- path = q.path
- if opts['name']:
- newpath = os.path.join(q.basepath, opts['name'])
- if os.path.exists(newpath):
- if not os.path.isdir(newpath):
- raise util.Abort(_('destination %s exists and is not '
- 'a directory') % newpath)
- if not opts['force']:
- raise util.Abort(_('destination %s exists, '
- 'use -f to force') % newpath)
- else:
- newpath = savename(path)
- ui.warn(_("copy %s to %s\n") % (path, newpath))
- util.copyfiles(path, newpath)
- if opts['empty']:
- try:
- os.unlink(q.join(q.status_path))
- except:
- pass
- return 0
-
-def strip(ui, repo, rev, **opts):
- """strip a revision and all its descendants from the repository
-
- If one of the working directory's parent revisions is stripped, the
- working directory will be updated to the parent of the stripped
- revision.
- """
- backup = 'all'
- if opts['backup']:
- backup = 'strip'
- elif opts['nobackup']:
- backup = 'none'
-
- rev = repo.lookup(rev)
- p = repo.dirstate.parents()
- cl = repo.changelog
- update = True
- if p[0] == nullid:
- update = False
- elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
- update = False
- elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
- update = False
-
- repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
- return 0
-
-def select(ui, repo, *args, **opts):
- '''set or print guarded patches to push
-
- Use the qguard command to set or print guards on patch, then use
- qselect to tell mq which guards to use. A patch will be pushed if
- it has no guards or any positive guards match the currently
- selected guard, but will not be pushed if any negative guards
- match the current guard. For example:
-
- qguard foo.patch -stable (negative guard)
- qguard bar.patch +stable (positive guard)
- qselect stable
-
- This activates the "stable" guard. mq will skip foo.patch (because
- it has a negative match) but push bar.patch (because it has a
- positive match).
-
- With no arguments, prints the currently active guards.
- With one argument, sets the active guard.
-
- Use -n/--none to deactivate guards (no other arguments needed).
- When no guards are active, patches with positive guards are
- skipped and patches with negative guards are pushed.
-
- qselect can change the guards on applied patches. It does not pop
- guarded patches by default. Use --pop to pop back to the last
- applied patch that is not guarded. Use --reapply (which implies
- --pop) to push back to the current patch afterwards, but skip
- guarded patches.
-
- Use -s/--series to print a list of all guards in the series file
- (no other arguments needed). Use -v for more information.'''
-
- q = repo.mq
- guards = q.active()
- if args or opts['none']:
- old_unapplied = q.unapplied(repo)
- old_guarded = [i for i in xrange(len(q.applied)) if
- not q.pushable(i)[0]]
- q.set_active(args)
- q.save_dirty()
- if not args:
- ui.status(_('guards deactivated\n'))
- if not opts['pop'] and not opts['reapply']:
- unapplied = q.unapplied(repo)
- guarded = [i for i in xrange(len(q.applied))
- if not q.pushable(i)[0]]
- if len(unapplied) != len(old_unapplied):
- ui.status(_('number of unguarded, unapplied patches has '
- 'changed from %d to %d\n') %
- (len(old_unapplied), len(unapplied)))
- if len(guarded) != len(old_guarded):
- ui.status(_('number of guarded, applied patches has changed '
- 'from %d to %d\n') %
- (len(old_guarded), len(guarded)))
- elif opts['series']:
- guards = {}
- noguards = 0
- for gs in q.series_guards:
- if not gs:
- noguards += 1
- for g in gs:
- guards.setdefault(g, 0)
- guards[g] += 1
- if ui.verbose:
- guards['NONE'] = noguards
- guards = guards.items()
- guards.sort(key=lambda x: x[0][1:])
- if guards:
- ui.note(_('guards in series file:\n'))
- for guard, count in guards:
- ui.note('%2d ' % count)
- ui.write(guard, '\n')
- else:
- ui.note(_('no guards in series file\n'))
- else:
- if guards:
- ui.note(_('active guards:\n'))
- for g in guards:
- ui.write(g, '\n')
- else:
- ui.write(_('no active guards\n'))
- reapply = opts['reapply'] and q.applied and q.appliedname(-1)
- popped = False
- if opts['pop'] or opts['reapply']:
- for i in xrange(len(q.applied)):
- pushable, reason = q.pushable(i)
- if not pushable:
- ui.status(_('popping guarded patches\n'))
- popped = True
- if i == 0:
- q.pop(repo, all=True)
- else:
- q.pop(repo, i-1)
- break
- if popped:
- try:
- if reapply:
- ui.status(_('reapplying unguarded patches\n'))
- q.push(repo, reapply)
- finally:
- q.save_dirty()
-
-def finish(ui, repo, *revrange, **opts):
- """move applied patches into repository history
-
- Finishes the specified revisions (corresponding to applied
- patches) by moving them out of mq control into regular repository
- history.
-
- Accepts a revision range or the -a/--applied option. If --applied
- is specified, all applied mq revisions are removed from mq
- control. Otherwise, the given revisions must be at the base of the
- stack of applied patches.
-
- This can be especially useful if your changes have been applied to
- an upstream repository, or if you are about to push your changes
- to upstream.
- """
- if not opts['applied'] and not revrange:
- raise util.Abort(_('no revisions specified'))
- elif opts['applied']:
- revrange = ('qbase:qtip',) + revrange
-
- q = repo.mq
- if not q.applied:
- ui.status(_('no patches applied\n'))
- return 0
-
- revs = cmdutil.revrange(repo, revrange)
- q.finish(repo, revs)
- q.save_dirty()
- return 0
-
-def reposetup(ui, repo):
- class mqrepo(repo.__class__):
- @util.propertycache
- def mq(self):
- return queue(self.ui, self.join(""))
-
- def abort_if_wdir_patched(self, errmsg, force=False):
- if self.mq.applied and not force:
- parent = hex(self.dirstate.parents()[0])
- if parent in [s.rev for s in self.mq.applied]:
- raise util.Abort(errmsg)
-
- def commit(self, text="", user=None, date=None, match=None,
- force=False, editor=False, extra={}):
- self.abort_if_wdir_patched(
- _('cannot commit over an applied mq patch'),
- force)
-
- return super(mqrepo, self).commit(text, user, date, match, force,
- editor, extra)
-
- def push(self, remote, force=False, revs=None):
- if self.mq.applied and not force and not revs:
- raise util.Abort(_('source has mq patches applied'))
- return super(mqrepo, self).push(remote, force, revs)
-
- def _findtags(self):
- '''augment tags from base class with patch tags'''
- result = super(mqrepo, self)._findtags()
-
- q = self.mq
- if not q.applied:
- return result
-
- mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
-
- if mqtags[-1][0] not in self.changelog.nodemap:
- self.ui.warn(_('mq status file refers to unknown node %s\n')
- % short(mqtags[-1][0]))
- return result
-
- mqtags.append((mqtags[-1][0], 'qtip'))
- mqtags.append((mqtags[0][0], 'qbase'))
- mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
- tags = result[0]
- for patch in mqtags:
- if patch[1] in tags:
- self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
- % patch[1])
- else:
- tags[patch[1]] = patch[0]
-
- return result
-
- def _branchtags(self, partial, lrev):
- q = self.mq
- if not q.applied:
- return super(mqrepo, self)._branchtags(partial, lrev)
-
- cl = self.changelog
- qbasenode = bin(q.applied[0].rev)
- if qbasenode not in cl.nodemap:
- self.ui.warn(_('mq status file refers to unknown node %s\n')
- % short(qbasenode))
- return super(mqrepo, self)._branchtags(partial, lrev)
-
- qbase = cl.rev(qbasenode)
- start = lrev + 1
- if start < qbase:
- # update the cache (excluding the patches) and save it
- self._updatebranchcache(partial, lrev+1, qbase)
- self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
- start = qbase
- # if start = qbase, the cache is as updated as it should be.
- # if start > qbase, the cache includes (part of) the patches.
- # we might as well use it, but we won't save it.
-
- # update the cache up to the tip
- self._updatebranchcache(partial, start, len(cl))
-
- return partial
-
- if repo.local():
- repo.__class__ = mqrepo
-
-def mqimport(orig, ui, repo, *args, **kwargs):
- if hasattr(repo, 'abort_if_wdir_patched'):
- repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
- kwargs.get('force'))
- return orig(ui, repo, *args, **kwargs)
-
-def uisetup(ui):
- extensions.wrapcommand(commands.table, 'import', mqimport)
-
-seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
-
-cmdtable = {
- "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
- "qclone":
- (clone,
- [('', 'pull', None, _('use pull protocol to copy metadata')),
- ('U', 'noupdate', None, _('do not update the new working directories')),
- ('', 'uncompressed', None,
- _('use uncompressed transfer (fast over LAN)')),
- ('p', 'patches', '', _('location of source patch repository')),
- ] + commands.remoteopts,
- _('hg qclone [OPTION]... SOURCE [DEST]')),
- "qcommit|qci":
- (commit,
- commands.table["^commit|ci"][1],
- _('hg qcommit [OPTION]... [FILE]...')),
- "^qdiff":
- (diff,
- commands.diffopts + commands.diffopts2 + commands.walkopts,
- _('hg qdiff [OPTION]... [FILE]...')),
- "qdelete|qremove|qrm":
- (delete,
- [('k', 'keep', None, _('keep patch file')),
- ('r', 'rev', [], _('stop managing a revision (DEPRECATED)'))],
- _('hg qdelete [-k] [-r REV]... [PATCH]...')),
- 'qfold':
- (fold,
- [('e', 'edit', None, _('edit patch header')),
- ('k', 'keep', None, _('keep folded patch files')),
- ] + commands.commitopts,
- _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
- 'qgoto':
- (goto,
- [('f', 'force', None, _('overwrite any local changes'))],
- _('hg qgoto [OPTION]... PATCH')),
- 'qguard':
- (guard,
- [('l', 'list', None, _('list all patches and guards')),
- ('n', 'none', None, _('drop all guards'))],
- _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
- 'qheader': (header, [], _('hg qheader [PATCH]')),
- "^qimport":
- (qimport,
- [('e', 'existing', None, _('import file in patch directory')),
- ('n', 'name', '', _('name of patch file')),
- ('f', 'force', None, _('overwrite existing files')),
- ('r', 'rev', [], _('place existing revisions under mq control')),
- ('g', 'git', None, _('use git extended diff format')),
- ('P', 'push', None, _('qpush after importing'))],
- _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...')),
- "^qinit":
- (init,
- [('c', 'create-repo', None, _('create queue repository'))],
- _('hg qinit [-c]')),
- "qnew":
- (new,
- [('e', 'edit', None, _('edit commit message')),
- ('f', 'force', None, _('import uncommitted changes into patch')),
- ('g', 'git', None, _('use git extended diff format')),
- ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
- ('u', 'user', '', _('add "From: <given user>" to patch')),
- ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
- ('d', 'date', '', _('add "Date: <given date>" to patch'))
- ] + commands.walkopts + commands.commitopts,
- _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
- "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
- "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
- "^qpop":
- (pop,
- [('a', 'all', None, _('pop all patches')),
- ('n', 'name', '', _('queue name to pop')),
- ('f', 'force', None, _('forget any local changes'))],
- _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
- "^qpush":
- (push,
- [('f', 'force', None, _('apply if the patch has rejects')),
- ('l', 'list', None, _('list patch name in commit text')),
- ('a', 'all', None, _('apply all patches')),
- ('m', 'merge', None, _('merge from another queue')),
- ('n', 'name', '', _('merge queue name'))],
- _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
- "^qrefresh":
- (refresh,
- [('e', 'edit', None, _('edit commit message')),
- ('g', 'git', None, _('use git extended diff format')),
- ('s', 'short', None, _('refresh only files already in the patch and specified files')),
- ('U', 'currentuser', None, _('add/update author field in patch with current user')),
- ('u', 'user', '', _('add/update author field in patch with given user')),
- ('D', 'currentdate', None, _('add/update date field in patch with current date')),
- ('d', 'date', '', _('add/update date field in patch with given date'))
- ] + commands.walkopts + commands.commitopts,
- _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
- 'qrename|qmv':
- (rename, [], _('hg qrename PATCH1 [PATCH2]')),
- "qrestore":
- (restore,
- [('d', 'delete', None, _('delete save entry')),
- ('u', 'update', None, _('update queue working directory'))],
- _('hg qrestore [-d] [-u] REV')),
- "qsave":
- (save,
- [('c', 'copy', None, _('copy patch directory')),
- ('n', 'name', '', _('copy directory name')),
- ('e', 'empty', None, _('clear queue status file')),
- ('f', 'force', None, _('force copy'))] + commands.commitopts,
- _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
- "qselect":
- (select,
- [('n', 'none', None, _('disable all guards')),
- ('s', 'series', None, _('list all guards in series file')),
- ('', 'pop', None, _('pop to before first guarded applied patch')),
- ('', 'reapply', None, _('pop, then reapply patches'))],
- _('hg qselect [OPTION]... [GUARD]...')),
- "qseries":
- (series,
- [('m', 'missing', None, _('print patches not in series')),
- ] + seriesopts,
- _('hg qseries [-ms]')),
- "^strip":
- (strip,
- [('f', 'force', None, _('force removal with local changes')),
- ('b', 'backup', None, _('bundle unrelated changesets')),
- ('n', 'nobackup', None, _('no backups'))],
- _('hg strip [-f] [-b] [-n] REV')),
- "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
- "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
- "qfinish":
- (finish,
- [('a', 'applied', None, _('finish all applied changesets'))],
- _('hg qfinish [-a] [REV]...')),
-}
diff --git a/sys/lib/python/hgext/notify.py b/sys/lib/python/hgext/notify.py
deleted file mode 100644
index 4cd27dc05..000000000
--- a/sys/lib/python/hgext/notify.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# notify.py - email notifications for mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''hooks for sending email notifications at commit/push time
-
-Subscriptions can be managed through a hgrc file. Default mode is to
-print messages to stdout, for testing and configuring.
-
-To use, configure the notify extension and enable it in hgrc like
-this::
-
- [extensions]
- hgext.notify =
-
- [hooks]
- # one email for each incoming changeset
- incoming.notify = python:hgext.notify.hook
- # batch emails when many changesets incoming at one time
- changegroup.notify = python:hgext.notify.hook
-
- [notify]
- # config items go here
-
-Required configuration items::
-
- config = /path/to/file # file containing subscriptions
-
-Optional configuration items::
-
- test = True # print messages to stdout for testing
- strip = 3 # number of slashes to strip for url paths
- domain = example.com # domain to use if committer missing domain
- style = ... # style file to use when formatting email
- template = ... # template to use when formatting email
- incoming = ... # template to use when run as incoming hook
- changegroup = ... # template when run as changegroup hook
- maxdiff = 300 # max lines of diffs to include (0=none, -1=all)
- maxsubject = 67 # truncate subject line longer than this
- diffstat = True # add a diffstat before the diff content
- sources = serve # notify if source of incoming changes in this list
- # (serve == ssh or http, push, pull, bundle)
- [email]
- from = user@host.com # email address to send as if none given
- [web]
- baseurl = http://hgserver/... # root of hg web site for browsing commits
-
-The notify config file has same format as a regular hgrc file. It has
-two sections so you can express subscriptions in whatever way is
-handier for you.
-
-::
-
- [usersubs]
- # key is subscriber email, value is ","-separated list of glob patterns
- user@host = pattern
-
- [reposubs]
- # key is glob pattern, value is ","-separated list of subscriber emails
- pattern = user@host
-
-Glob patterns are matched against path to repository root.
-
-If you like, you can put notify config file in repository that users
-can push changes to, they can manage their own subscriptions.
-'''
-
-from mercurial.i18n import _
-from mercurial import patch, cmdutil, templater, util, mail
-import email.Parser, email.Errors, fnmatch, socket, time
-
-# template for single changeset can include email headers.
-single_template = '''
-Subject: changeset in {webroot}: {desc|firstline|strip}
-From: {author}
-
-changeset {node|short} in {root}
-details: {baseurl}{webroot}?cmd=changeset;node={node|short}
-description:
-\t{desc|tabindent|strip}
-'''.lstrip()
-
-# template for multiple changesets should not contain email headers,
-# because only first set of headers will be used and result will look
-# strange.
-multiple_template = '''
-changeset {node|short} in {root}
-details: {baseurl}{webroot}?cmd=changeset;node={node|short}
-summary: {desc|firstline}
-'''
-
-deftemplates = {
- 'changegroup': multiple_template,
-}
-
-class notifier(object):
- '''email notification class.'''
-
- def __init__(self, ui, repo, hooktype):
- self.ui = ui
- cfg = self.ui.config('notify', 'config')
- if cfg:
- self.ui.readconfig(cfg, sections=['usersubs', 'reposubs'])
- self.repo = repo
- self.stripcount = int(self.ui.config('notify', 'strip', 0))
- self.root = self.strip(self.repo.root)
- self.domain = self.ui.config('notify', 'domain')
- self.test = self.ui.configbool('notify', 'test', True)
- self.charsets = mail._charsets(self.ui)
- self.subs = self.subscribers()
-
- mapfile = self.ui.config('notify', 'style')
- template = (self.ui.config('notify', hooktype) or
- self.ui.config('notify', 'template'))
- self.t = cmdutil.changeset_templater(self.ui, self.repo,
- False, None, mapfile, False)
- if not mapfile and not template:
- template = deftemplates.get(hooktype) or single_template
- if template:
- template = templater.parsestring(template, quoted=False)
- self.t.use_template(template)
-
- def strip(self, path):
- '''strip leading slashes from local path, turn into web-safe path.'''
-
- path = util.pconvert(path)
- count = self.stripcount
- while count > 0:
- c = path.find('/')
- if c == -1:
- break
- path = path[c+1:]
- count -= 1
- return path
-
- def fixmail(self, addr):
- '''try to clean up email addresses.'''
-
- addr = util.email(addr.strip())
- if self.domain:
- a = addr.find('@localhost')
- if a != -1:
- addr = addr[:a]
- if '@' not in addr:
- return addr + '@' + self.domain
- return addr
-
- def subscribers(self):
- '''return list of email addresses of subscribers to this repo.'''
- subs = set()
- for user, pats in self.ui.configitems('usersubs'):
- for pat in pats.split(','):
- if fnmatch.fnmatch(self.repo.root, pat.strip()):
- subs.add(self.fixmail(user))
- for pat, users in self.ui.configitems('reposubs'):
- if fnmatch.fnmatch(self.repo.root, pat):
- for user in users.split(','):
- subs.add(self.fixmail(user))
- return [mail.addressencode(self.ui, s, self.charsets, self.test)
- for s in sorted(subs)]
-
- def url(self, path=None):
- return self.ui.config('web', 'baseurl') + (path or self.root)
-
- def node(self, ctx):
- '''format one changeset.'''
- self.t.show(ctx, changes=ctx.changeset(),
- baseurl=self.ui.config('web', 'baseurl'),
- root=self.repo.root, webroot=self.root)
-
- def skipsource(self, source):
- '''true if incoming changes from this source should be skipped.'''
- ok_sources = self.ui.config('notify', 'sources', 'serve').split()
- return source not in ok_sources
-
- def send(self, ctx, count, data):
- '''send message.'''
-
- p = email.Parser.Parser()
- try:
- msg = p.parsestr(data)
- except email.Errors.MessageParseError, inst:
- raise util.Abort(inst)
-
- # store sender and subject
- sender, subject = msg['From'], msg['Subject']
- del msg['From'], msg['Subject']
-
- if not msg.is_multipart():
- # create fresh mime message from scratch
- # (multipart templates must take care of this themselves)
- headers = msg.items()
- payload = msg.get_payload()
- # for notification prefer readability over data precision
- msg = mail.mimeencode(self.ui, payload, self.charsets, self.test)
- # reinstate custom headers
- for k, v in headers:
- msg[k] = v
-
- msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
-
- # try to make subject line exist and be useful
- if not subject:
- if count > 1:
- subject = _('%s: %d new changesets') % (self.root, count)
- else:
- s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
- subject = '%s: %s' % (self.root, s)
- maxsubject = int(self.ui.config('notify', 'maxsubject', 67))
- if maxsubject and len(subject) > maxsubject:
- subject = subject[:maxsubject-3] + '...'
- msg['Subject'] = mail.headencode(self.ui, subject,
- self.charsets, self.test)
-
- # try to make message have proper sender
- if not sender:
- sender = self.ui.config('email', 'from') or self.ui.username()
- if '@' not in sender or '@localhost' in sender:
- sender = self.fixmail(sender)
- msg['From'] = mail.addressencode(self.ui, sender,
- self.charsets, self.test)
-
- msg['X-Hg-Notification'] = 'changeset %s' % ctx
- if not msg['Message-Id']:
- msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
- (ctx, int(time.time()),
- hash(self.repo.root), socket.getfqdn()))
- msg['To'] = ', '.join(self.subs)
-
- msgtext = msg.as_string()
- if self.test:
- self.ui.write(msgtext)
- if not msgtext.endswith('\n'):
- self.ui.write('\n')
- else:
- self.ui.status(_('notify: sending %d subscribers %d changes\n') %
- (len(self.subs), count))
- mail.sendmail(self.ui, util.email(msg['From']),
- self.subs, msgtext)
-
- def diff(self, ctx, ref=None):
-
- maxdiff = int(self.ui.config('notify', 'maxdiff', 300))
- prev = ctx.parents()[0].node()
- ref = ref and ref.node() or ctx.node()
- chunks = patch.diff(self.repo, prev, ref, opts=patch.diffopts(self.ui))
- difflines = ''.join(chunks).splitlines()
-
- if self.ui.configbool('notify', 'diffstat', True):
- s = patch.diffstat(difflines)
- # s may be nil, don't include the header if it is
- if s:
- self.ui.write('\ndiffstat:\n\n%s' % s)
-
- if maxdiff == 0:
- return
- elif maxdiff > 0 and len(difflines) > maxdiff:
- msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
- self.ui.write(msg % (len(difflines), maxdiff))
- difflines = difflines[:maxdiff]
- elif difflines:
- self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
-
- self.ui.write("\n".join(difflines))
-
-def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
- '''send email notifications to interested subscribers.
-
- if used as changegroup hook, send one email for all changesets in
- changegroup. else send one email per changeset.'''
-
- n = notifier(ui, repo, hooktype)
- ctx = repo[node]
-
- if not n.subs:
- ui.debug(_('notify: no subscribers to repository %s\n') % n.root)
- return
- if n.skipsource(source):
- ui.debug(_('notify: changes have source "%s" - skipping\n') % source)
- return
-
- ui.pushbuffer()
- if hooktype == 'changegroup':
- start, end = ctx.rev(), len(repo)
- count = end - start
- for rev in xrange(start, end):
- n.node(repo[rev])
- n.diff(ctx, repo['tip'])
- else:
- count = 1
- n.node(ctx)
- n.diff(ctx)
-
- data = ui.popbuffer()
- n.send(ctx, count, data)
diff --git a/sys/lib/python/hgext/pager.py b/sys/lib/python/hgext/pager.py
deleted file mode 100644
index 1d973c485..000000000
--- a/sys/lib/python/hgext/pager.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# pager.py - display output using a pager
-#
-# Copyright 2008 David Soria Parra <dsp@php.net>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-#
-# To load the extension, add it to your .hgrc file:
-#
-# [extension]
-# hgext.pager =
-#
-# Run "hg help pager" to get info on configuration.
-
-'''browse command output with an external pager
-
-To set the pager that should be used, set the application variable::
-
- [pager]
- pager = LESS='FSRX' less
-
-If no pager is set, the pager extensions uses the environment variable
-$PAGER. If neither pager.pager, nor $PAGER is set, no pager is used.
-
-If you notice "BROKEN PIPE" error messages, you can disable them by
-setting::
-
- [pager]
- quiet = True
-
-You can disable the pager for certain commands by adding them to the
-pager.ignore list::
-
- [pager]
- ignore = version, help, update
-
-You can also enable the pager only for certain commands using
-pager.attend::
-
- [pager]
- attend = log
-
-If pager.attend is present, pager.ignore will be ignored.
-
-To ignore global commands like "hg version" or "hg help", you have to
-specify them in the global .hgrc
-'''
-
-import sys, os, signal
-from mercurial import dispatch, util, extensions
-
-def uisetup(ui):
- def pagecmd(orig, ui, options, cmd, cmdfunc):
- p = ui.config("pager", "pager", os.environ.get("PAGER"))
- if p and sys.stdout.isatty() and '--debugger' not in sys.argv:
- attend = ui.configlist('pager', 'attend')
- if (cmd in attend or
- (cmd not in ui.configlist('pager', 'ignore') and not attend)):
- sys.stderr = sys.stdout = util.popen(p, "wb")
- if ui.configbool('pager', 'quiet'):
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
- return orig(ui, options, cmd, cmdfunc)
-
- extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
diff --git a/sys/lib/python/hgext/parentrevspec.py b/sys/lib/python/hgext/parentrevspec.py
deleted file mode 100644
index 6d6b2eb6c..000000000
--- a/sys/lib/python/hgext/parentrevspec.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Mercurial extension to make it easy to refer to the parent of a revision
-#
-# Copyright (C) 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''interpret suffixes to refer to ancestor revisions
-
-This extension allows you to use git-style suffixes to refer to the
-ancestors of a specific revision.
-
-For example, if you can refer to a revision as "foo", then::
-
- foo^N = Nth parent of foo
- foo^0 = foo
- foo^1 = first parent of foo
- foo^2 = second parent of foo
- foo^ = foo^1
-
- foo~N = Nth first grandparent of foo
- foo~0 = foo
- foo~1 = foo^1 = foo^ = first parent of foo
- foo~2 = foo^1^1 = foo^^ = first parent of first parent of foo
-'''
-from mercurial import error
-
-def reposetup(ui, repo):
- if not repo.local():
- return
-
- class parentrevspecrepo(repo.__class__):
- def lookup(self, key):
- try:
- _super = super(parentrevspecrepo, self)
- return _super.lookup(key)
- except error.RepoError:
- pass
-
- circ = key.find('^')
- tilde = key.find('~')
- if circ < 0 and tilde < 0:
- raise
- elif circ >= 0 and tilde >= 0:
- end = min(circ, tilde)
- else:
- end = max(circ, tilde)
-
- cl = self.changelog
- base = key[:end]
- try:
- node = _super.lookup(base)
- except error.RepoError:
- # eek - reraise the first error
- return _super.lookup(key)
-
- rev = cl.rev(node)
- suffix = key[end:]
- i = 0
- while i < len(suffix):
- # foo^N => Nth parent of foo
- # foo^0 == foo
- # foo^1 == foo^ == 1st parent of foo
- # foo^2 == 2nd parent of foo
- if suffix[i] == '^':
- j = i + 1
- p = cl.parentrevs(rev)
- if j < len(suffix) and suffix[j].isdigit():
- j += 1
- n = int(suffix[i+1:j])
- if n > 2 or n == 2 and p[1] == -1:
- raise
- else:
- n = 1
- if n:
- rev = p[n - 1]
- i = j
- # foo~N => Nth first grandparent of foo
- # foo~0 = foo
- # foo~1 = foo^1 == foo^ == 1st parent of foo
- # foo~2 = foo^1^1 == foo^^ == 1st parent of 1st parent of foo
- elif suffix[i] == '~':
- j = i + 1
- while j < len(suffix) and suffix[j].isdigit():
- j += 1
- if j == i + 1:
- raise
- n = int(suffix[i+1:j])
- for k in xrange(n):
- rev = cl.parentrevs(rev)[0]
- i = j
- else:
- raise
- return cl.node(rev)
-
- repo.__class__ = parentrevspecrepo
diff --git a/sys/lib/python/hgext/patchbomb.py b/sys/lib/python/hgext/patchbomb.py
deleted file mode 100644
index 8ad33384b..000000000
--- a/sys/lib/python/hgext/patchbomb.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# patchbomb.py - sending Mercurial changesets as patch emails
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''command to send changesets as (a series of) patch emails
-
-The series is started off with a "[PATCH 0 of N]" introduction, which
-describes the series as a whole.
-
-Each patch email has a Subject line of "[PATCH M of N] ...", using the
-first line of the changeset description as the subject text. The
-message contains two or three body parts:
-
-- The changeset description.
-- [Optional] The result of running diffstat on the patch.
-- The patch itself, as generated by "hg export".
-
-Each message refers to the first in the series using the In-Reply-To
-and References headers, so they will show up as a sequence in threaded
-mail and news readers, and in mail archives.
-
-With the -d/--diffstat option, you will be prompted for each changeset
-with a diffstat summary and the changeset summary, so you can be sure
-you are sending the right changes.
-
-To configure other defaults, add a section like this to your hgrc
-file::
-
- [email]
- from = My Name <my@email>
- to = recipient1, recipient2, ...
- cc = cc1, cc2, ...
- bcc = bcc1, bcc2, ...
-
-Then you can use the "hg email" command to mail a series of changesets
-as a patchbomb.
-
-To avoid sending patches prematurely, it is a good idea to first run
-the "email" command with the "-n" option (test only). You will be
-prompted for an email recipient address, a subject and an introductory
-message describing the patches of your patchbomb. Then when all is
-done, patchbomb messages are displayed. If the PAGER environment
-variable is set, your pager will be fired up once for each patchbomb
-message, so you can verify everything is alright.
-
-The -m/--mbox option is also very useful. Instead of previewing each
-patchbomb message in a pager or sending the messages directly, it will
-create a UNIX mailbox file with the patch emails. This mailbox file
-can be previewed with any mail user agent which supports UNIX mbox
-files, e.g. with mutt::
-
- % mutt -R -f mbox
-
-When you are previewing the patchbomb messages, you can use ``formail``
-(a utility that is commonly installed as part of the procmail
-package), to send each message out::
-
- % formail -s sendmail -bm -t < mbox
-
-That should be all. Now your patchbomb is on its way out.
-
-You can also either configure the method option in the email section
-to be a sendmail compatible mailer or fill out the [smtp] section so
-that the patchbomb extension can automatically send patchbombs
-directly from the commandline. See the [email] and [smtp] sections in
-hgrc(5) for details.
-'''
-
-import os, errno, socket, tempfile, cStringIO, time
-import email.MIMEMultipart, email.MIMEBase
-import email.Utils, email.Encoders, email.Generator
-from mercurial import cmdutil, commands, hg, mail, patch, util
-from mercurial.i18n import _
-from mercurial.node import bin
-
-def prompt(ui, prompt, default=None, rest=': ', empty_ok=False):
- if not ui.interactive():
- return default
- if default:
- prompt += ' [%s]' % default
- prompt += rest
- while True:
- r = ui.prompt(prompt, default=default)
- if r:
- return r
- if default is not None:
- return default
- if empty_ok:
- return r
- ui.warn(_('Please enter a valid value.\n'))
-
-def cdiffstat(ui, summary, patchlines):
- s = patch.diffstat(patchlines)
- if summary:
- ui.write(summary, '\n')
- ui.write(s, '\n')
- ans = prompt(ui, _('does the diffstat above look okay? '), 'y')
- if not ans.lower().startswith('y'):
- raise util.Abort(_('diffstat rejected'))
- return s
-
-def makepatch(ui, repo, patch, opts, _charsets, idx, total, patchname=None):
-
- desc = []
- node = None
- body = ''
-
- for line in patch:
- if line.startswith('#'):
- if line.startswith('# Node ID'):
- node = line.split()[-1]
- continue
- if line.startswith('diff -r') or line.startswith('diff --git'):
- break
- desc.append(line)
-
- if not patchname and not node:
- raise ValueError
-
- if opts.get('attach'):
- body = ('\n'.join(desc[1:]).strip() or
- 'Patch subject is complete summary.')
- body += '\n\n\n'
-
- if opts.get('plain'):
- while patch and patch[0].startswith('# '):
- patch.pop(0)
- if patch:
- patch.pop(0)
- while patch and not patch[0].strip():
- patch.pop(0)
-
- if opts.get('diffstat'):
- body += cdiffstat(ui, '\n'.join(desc), patch) + '\n\n'
-
- if opts.get('attach') or opts.get('inline'):
- msg = email.MIMEMultipart.MIMEMultipart()
- if body:
- msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
- p = mail.mimetextpatch('\n'.join(patch), 'x-patch', opts.get('test'))
- binnode = bin(node)
- # if node is mq patch, it will have the patch file's name as a tag
- if not patchname:
- patchtags = [t for t in repo.nodetags(binnode)
- if t.endswith('.patch') or t.endswith('.diff')]
- if patchtags:
- patchname = patchtags[0]
- elif total > 1:
- patchname = cmdutil.make_filename(repo, '%b-%n.patch',
- binnode, seqno=idx, total=total)
- else:
- patchname = cmdutil.make_filename(repo, '%b.patch', binnode)
- disposition = 'inline'
- if opts.get('attach'):
- disposition = 'attachment'
- p['Content-Disposition'] = disposition + '; filename=' + patchname
- msg.attach(p)
- else:
- body += '\n'.join(patch)
- msg = mail.mimetextpatch(body, display=opts.get('test'))
-
- flag = ' '.join(opts.get('flag'))
- if flag:
- flag = ' ' + flag
-
- subj = desc[0].strip().rstrip('. ')
- if total == 1 and not opts.get('intro'):
- subj = '[PATCH%s] %s' % (flag, opts.get('subject') or subj)
- else:
- tlen = len(str(total))
- subj = '[PATCH %0*d of %d%s] %s' % (tlen, idx, total, flag, subj)
- msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
- msg['X-Mercurial-Node'] = node
- return msg, subj
-
-def patchbomb(ui, repo, *revs, **opts):
- '''send changesets by email
-
- By default, diffs are sent in the format generated by hg export,
- one per message. The series starts with a "[PATCH 0 of N]"
- introduction, which describes the series as a whole.
-
- Each patch email has a Subject line of "[PATCH M of N] ...", using
- the first line of the changeset description as the subject text.
- The message contains two or three parts. First, the changeset
- description. Next, (optionally) if the diffstat program is
- installed and -d/--diffstat is used, the result of running
- diffstat on the patch. Finally, the patch itself, as generated by
- "hg export".
-
- By default the patch is included as text in the email body for
- easy reviewing. Using the -a/--attach option will instead create
- an attachment for the patch. With -i/--inline an inline attachment
- will be created.
-
- With -o/--outgoing, emails will be generated for patches not found
- in the destination repository (or only those which are ancestors
- of the specified revisions if any are provided)
-
- With -b/--bundle, changesets are selected as for --outgoing, but a
- single email containing a binary Mercurial bundle as an attachment
- will be sent.
-
- Examples::
-
- hg email -r 3000 # send patch 3000 only
- hg email -r 3000 -r 3001 # send patches 3000 and 3001
- hg email -r 3000:3005 # send patches 3000 through 3005
- hg email 3000 # send patch 3000 (deprecated)
-
- hg email -o # send all patches not in default
- hg email -o DEST # send all patches not in DEST
- hg email -o -r 3000 # send all ancestors of 3000 not in default
- hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
-
- hg email -b # send bundle of all patches not in default
- hg email -b DEST # send bundle of all patches not in DEST
- hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
- hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
-
- Before using this command, you will need to enable email in your
- hgrc. See the [email] section in hgrc(5) for details.
- '''
-
- _charsets = mail._charsets(ui)
-
- def outgoing(dest, revs):
- '''Return the revisions present locally but not in dest'''
- dest = ui.expandpath(dest or 'default-push', dest or 'default')
- revs = [repo.lookup(rev) for rev in revs]
- other = hg.repository(cmdutil.remoteui(repo, opts), dest)
- ui.status(_('comparing with %s\n') % dest)
- o = repo.findoutgoing(other)
- if not o:
- ui.status(_("no changes found\n"))
- return []
- o = repo.changelog.nodesbetween(o, revs or None)[0]
- return [str(repo.changelog.rev(r)) for r in o]
-
- def getpatches(revs):
- for r in cmdutil.revrange(repo, revs):
- output = cStringIO.StringIO()
- patch.export(repo, [r], fp=output,
- opts=patch.diffopts(ui, opts))
- yield output.getvalue().split('\n')
-
- def getbundle(dest):
- tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
- tmpfn = os.path.join(tmpdir, 'bundle')
- try:
- commands.bundle(ui, repo, tmpfn, dest, **opts)
- return open(tmpfn, 'rb').read()
- finally:
- try:
- os.unlink(tmpfn)
- except:
- pass
- os.rmdir(tmpdir)
-
- if not (opts.get('test') or opts.get('mbox')):
- # really sending
- mail.validateconfig(ui)
-
- if not (revs or opts.get('rev')
- or opts.get('outgoing') or opts.get('bundle')
- or opts.get('patches')):
- raise util.Abort(_('specify at least one changeset with -r or -o'))
-
- if opts.get('outgoing') and opts.get('bundle'):
- raise util.Abort(_("--outgoing mode always on with --bundle;"
- " do not re-specify --outgoing"))
-
- if opts.get('outgoing') or opts.get('bundle'):
- if len(revs) > 1:
- raise util.Abort(_("too many destinations"))
- dest = revs and revs[0] or None
- revs = []
-
- if opts.get('rev'):
- if revs:
- raise util.Abort(_('use only one form to specify the revision'))
- revs = opts.get('rev')
-
- if opts.get('outgoing'):
- revs = outgoing(dest, opts.get('rev'))
- if opts.get('bundle'):
- opts['revs'] = revs
-
- # start
- if opts.get('date'):
- start_time = util.parsedate(opts.get('date'))
- else:
- start_time = util.makedate()
-
- def genmsgid(id):
- return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
-
- def getdescription(body, sender):
- if opts.get('desc'):
- body = open(opts.get('desc')).read()
- else:
- ui.write(_('\nWrite the introductory message for the '
- 'patch series.\n\n'))
- body = ui.edit(body, sender)
- return body
-
- def getpatchmsgs(patches, patchnames=None):
- jumbo = []
- msgs = []
-
- ui.write(_('This patch series consists of %d patches.\n\n')
- % len(patches))
-
- name = None
- for i, p in enumerate(patches):
- jumbo.extend(p)
- if patchnames:
- name = patchnames[i]
- msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
- len(patches), name)
- msgs.append(msg)
-
- if len(patches) > 1 or opts.get('intro'):
- tlen = len(str(len(patches)))
-
- flag = ' '.join(opts.get('flag'))
- if flag:
- subj = '[PATCH %0*d of %d %s] ' % (tlen, 0, len(patches), flag)
- else:
- subj = '[PATCH %0*d of %d] ' % (tlen, 0, len(patches))
- subj += opts.get('subject') or prompt(ui, 'Subject:', rest=subj,
- default='None')
-
- body = ''
- if opts.get('diffstat'):
- d = cdiffstat(ui, _('Final summary:\n'), jumbo)
- if d:
- body = '\n' + d
-
- body = getdescription(body, sender)
- msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
- msg['Subject'] = mail.headencode(ui, subj, _charsets,
- opts.get('test'))
-
- msgs.insert(0, (msg, subj))
- return msgs
-
- def getbundlemsgs(bundle):
- subj = (opts.get('subject')
- or prompt(ui, 'Subject:', 'A bundle for your repository'))
-
- body = getdescription('', sender)
- msg = email.MIMEMultipart.MIMEMultipart()
- if body:
- msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
- datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
- datapart.set_payload(bundle)
- bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
- datapart.add_header('Content-Disposition', 'attachment',
- filename=bundlename)
- email.Encoders.encode_base64(datapart)
- msg.attach(datapart)
- msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
- return [(msg, subj)]
-
- sender = (opts.get('from') or ui.config('email', 'from') or
- ui.config('patchbomb', 'from') or
- prompt(ui, 'From', ui.username()))
-
- # internal option used by pbranches
- patches = opts.get('patches')
- if patches:
- msgs = getpatchmsgs(patches, opts.get('patchnames'))
- elif opts.get('bundle'):
- msgs = getbundlemsgs(getbundle(dest))
- else:
- msgs = getpatchmsgs(list(getpatches(revs)))
-
- def getaddrs(opt, prpt, default = None):
- addrs = opts.get(opt) or (ui.config('email', opt) or
- ui.config('patchbomb', opt) or
- prompt(ui, prpt, default)).split(',')
- return [mail.addressencode(ui, a.strip(), _charsets, opts.get('test'))
- for a in addrs if a.strip()]
-
- to = getaddrs('to', 'To')
- cc = getaddrs('cc', 'Cc', '')
-
- bcc = opts.get('bcc') or (ui.config('email', 'bcc') or
- ui.config('patchbomb', 'bcc') or '').split(',')
- bcc = [mail.addressencode(ui, a.strip(), _charsets, opts.get('test'))
- for a in bcc if a.strip()]
-
- ui.write('\n')
-
- parent = opts.get('in_reply_to') or None
- # angle brackets may be omitted, they're not semantically part of the msg-id
- if parent is not None:
- if not parent.startswith('<'):
- parent = '<' + parent
- if not parent.endswith('>'):
- parent += '>'
-
- first = True
-
- sender_addr = email.Utils.parseaddr(sender)[1]
- sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
- sendmail = None
- for m, subj in msgs:
- try:
- m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
- except TypeError:
- m['Message-Id'] = genmsgid('patchbomb')
- if parent:
- m['In-Reply-To'] = parent
- m['References'] = parent
- if first:
- parent = m['Message-Id']
- first = False
-
- m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
- m['Date'] = email.Utils.formatdate(start_time[0], localtime=True)
-
- start_time = (start_time[0] + 1, start_time[1])
- m['From'] = sender
- m['To'] = ', '.join(to)
- if cc:
- m['Cc'] = ', '.join(cc)
- if bcc:
- m['Bcc'] = ', '.join(bcc)
- if opts.get('test'):
- ui.status(_('Displaying '), subj, ' ...\n')
- ui.flush()
- if 'PAGER' in os.environ:
- fp = util.popen(os.environ['PAGER'], 'w')
- else:
- fp = ui
- generator = email.Generator.Generator(fp, mangle_from_=False)
- try:
- generator.flatten(m, 0)
- fp.write('\n')
- except IOError, inst:
- if inst.errno != errno.EPIPE:
- raise
- if fp is not ui:
- fp.close()
- elif opts.get('mbox'):
- ui.status(_('Writing '), subj, ' ...\n')
- fp = open(opts.get('mbox'), 'In-Reply-To' in m and 'ab+' or 'wb+')
- generator = email.Generator.Generator(fp, mangle_from_=True)
- date = time.ctime(start_time[0])
- fp.write('From %s %s\n' % (sender_addr, date))
- generator.flatten(m, 0)
- fp.write('\n\n')
- fp.close()
- else:
- if not sendmail:
- sendmail = mail.connect(ui)
- ui.status(_('Sending '), subj, ' ...\n')
- # Exim does not remove the Bcc field
- del m['Bcc']
- fp = cStringIO.StringIO()
- generator = email.Generator.Generator(fp, mangle_from_=False)
- generator.flatten(m, 0)
- sendmail(sender, to + bcc + cc, fp.getvalue())
-
-emailopts = [
- ('a', 'attach', None, _('send patches as attachments')),
- ('i', 'inline', None, _('send patches as inline attachments')),
- ('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
- ('c', 'cc', [], _('email addresses of copy recipients')),
- ('d', 'diffstat', None, _('add diffstat output to messages')),
- ('', 'date', '', _('use the given date as the sending date')),
- ('', 'desc', '', _('use the given file as the series description')),
- ('f', 'from', '', _('email address of sender')),
- ('n', 'test', None, _('print messages that would be sent')),
- ('m', 'mbox', '',
- _('write messages to mbox file instead of sending them')),
- ('s', 'subject', '',
- _('subject of first message (intro or single patch)')),
- ('', 'in-reply-to', '',
- _('message identifier to reply to')),
- ('', 'flag', [], _('flags to add in subject prefixes')),
- ('t', 'to', [], _('email addresses of recipients')),
- ]
-
-
-cmdtable = {
- "email":
- (patchbomb,
- [('g', 'git', None, _('use git extended diff format')),
- ('', 'plain', None, _('omit hg patch header')),
- ('o', 'outgoing', None,
- _('send changes not found in the target repository')),
- ('b', 'bundle', None,
- _('send changes not in target as a binary bundle')),
- ('', 'bundlename', 'bundle',
- _('name of the bundle attachment file')),
- ('r', 'rev', [], _('a revision to send')),
- ('', 'force', None,
- _('run even when remote repository is unrelated '
- '(with -b/--bundle)')),
- ('', 'base', [],
- _('a base changeset to specify instead of a destination '
- '(with -b/--bundle)')),
- ('', 'intro', None,
- _('send an introduction email for a single patch')),
- ] + emailopts + commands.remoteopts,
- _('hg email [OPTION]... [DEST]...'))
-}
diff --git a/sys/lib/python/hgext/purge.py b/sys/lib/python/hgext/purge.py
deleted file mode 100644
index 3946ad0f5..000000000
--- a/sys/lib/python/hgext/purge.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (C) 2006 - Marco Barisione <marco@barisione.org>
-#
-# This is a small extension for Mercurial (http://mercurial.selenic.com/)
-# that removes files not known to mercurial
-#
-# This program was inspired by the "cvspurge" script contained in CVS
-# utilities (http://www.red-bean.com/cvsutils/).
-#
-# For help on the usage of "hg purge" use:
-# hg help purge
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-'''command to delete untracked files from the working directory'''
-
-from mercurial import util, commands, cmdutil
-from mercurial.i18n import _
-import os, stat
-
-def purge(ui, repo, *dirs, **opts):
- '''removes files not tracked by Mercurial
-
- Delete files not known to Mercurial. This is useful to test local
- and uncommitted changes in an otherwise-clean source tree.
-
- This means that purge will delete:
-
- - Unknown files: files marked with "?" by "hg status"
- - Empty directories: in fact Mercurial ignores directories unless
- they contain files under source control management
-
- But it will leave untouched:
-
- - Modified and unmodified tracked files
- - Ignored files (unless --all is specified)
- - New files added to the repository (with "hg add")
-
- If directories are given on the command line, only files in these
- directories are considered.
-
- Be careful with purge, as you could irreversibly delete some files
- you forgot to add to the repository. If you only want to print the
- list of files that this program would delete, use the --print
- option.
- '''
- act = not opts['print']
- eol = '\n'
- if opts['print0']:
- eol = '\0'
- act = False # --print0 implies --print
-
- def remove(remove_func, name):
- if act:
- try:
- remove_func(repo.wjoin(name))
- except OSError:
- m = _('%s cannot be removed') % name
- if opts['abort_on_err']:
- raise util.Abort(m)
- ui.warn(_('warning: %s\n') % m)
- else:
- ui.write('%s%s' % (name, eol))
-
- def removefile(path):
- try:
- os.remove(path)
- except OSError:
- # read-only files cannot be unlinked under Windows
- s = os.stat(path)
- if (s.st_mode & stat.S_IWRITE) != 0:
- raise
- os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
- os.remove(path)
-
- directories = []
- match = cmdutil.match(repo, dirs, opts)
- match.dir = directories.append
- status = repo.status(match=match, ignored=opts['all'], unknown=True)
-
- for f in sorted(status[4] + status[5]):
- ui.note(_('Removing file %s\n') % f)
- remove(removefile, f)
-
- for f in sorted(directories, reverse=True):
- if match(f) and not os.listdir(repo.wjoin(f)):
- ui.note(_('Removing directory %s\n') % f)
- remove(os.rmdir, f)
-
-cmdtable = {
- 'purge|clean':
- (purge,
- [('a', 'abort-on-err', None, _('abort if an error occurs')),
- ('', 'all', None, _('purge ignored files too')),
- ('p', 'print', None, _('print filenames instead of deleting them')),
- ('0', 'print0', None, _('end filenames with NUL, for use with xargs'
- ' (implies -p/--print)')),
- ] + commands.walkopts,
- _('hg purge [OPTION]... [DIR]...'))
-}
diff --git a/sys/lib/python/hgext/rebase.py b/sys/lib/python/hgext/rebase.py
deleted file mode 100644
index a1d030087..000000000
--- a/sys/lib/python/hgext/rebase.py
+++ /dev/null
@@ -1,471 +0,0 @@
-# rebase.py - rebasing feature for mercurial
-#
-# Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''command to move sets of revisions to a different ancestor
-
-This extension lets you rebase changesets in an existing Mercurial
-repository.
-
-For more information:
-http://mercurial.selenic.com/wiki/RebaseExtension
-'''
-
-from mercurial import util, repair, merge, cmdutil, commands, error
-from mercurial import extensions, ancestor, copies, patch
-from mercurial.commands import templateopts
-from mercurial.node import nullrev
-from mercurial.lock import release
-from mercurial.i18n import _
-import os, errno
-
-def rebasemerge(repo, rev, first=False):
- 'return the correct ancestor'
- oldancestor = ancestor.ancestor
-
- def newancestor(a, b, pfunc):
- ancestor.ancestor = oldancestor
- if b == rev:
- return repo[rev].parents()[0].rev()
- return ancestor.ancestor(a, b, pfunc)
-
- if not first:
- ancestor.ancestor = newancestor
- else:
- repo.ui.debug(_("first revision, do not change ancestor\n"))
- stats = merge.update(repo, rev, True, True, False)
- return stats
-
-def rebase(ui, repo, **opts):
- """move changeset (and descendants) to a different branch
-
- Rebase uses repeated merging to graft changesets from one part of
- history onto another. This can be useful for linearizing local
- changes relative to a master development tree.
-
- If a rebase is interrupted to manually resolve a merge, it can be
- continued with --continue/-c or aborted with --abort/-a.
- """
- originalwd = target = None
- external = nullrev
- state = {}
- skipped = set()
-
- lock = wlock = None
- try:
- lock = repo.lock()
- wlock = repo.wlock()
-
- # Validate input and define rebasing points
- destf = opts.get('dest', None)
- srcf = opts.get('source', None)
- basef = opts.get('base', None)
- contf = opts.get('continue')
- abortf = opts.get('abort')
- collapsef = opts.get('collapse', False)
- extrafn = opts.get('extrafn')
- keepf = opts.get('keep', False)
- keepbranchesf = opts.get('keepbranches', False)
-
- if contf or abortf:
- if contf and abortf:
- raise error.ParseError('rebase',
- _('cannot use both abort and continue'))
- if collapsef:
- raise error.ParseError(
- 'rebase', _('cannot use collapse with continue or abort'))
-
- if srcf or basef or destf:
- raise error.ParseError('rebase',
- _('abort and continue do not allow specifying revisions'))
-
- (originalwd, target, state, collapsef, keepf,
- keepbranchesf, external) = restorestatus(repo)
- if abortf:
- abort(repo, originalwd, target, state)
- return
- else:
- if srcf and basef:
- raise error.ParseError('rebase', _('cannot specify both a '
- 'revision and a base'))
- cmdutil.bail_if_changed(repo)
- result = buildstate(repo, destf, srcf, basef, collapsef)
- if result:
- originalwd, target, state, external = result
- else: # Empty state built, nothing to rebase
- ui.status(_('nothing to rebase\n'))
- return
-
- if keepbranchesf:
- if extrafn:
- raise error.ParseError(
- 'rebase', _('cannot use both keepbranches and extrafn'))
- def extrafn(ctx, extra):
- extra['branch'] = ctx.branch()
-
- # Rebase
- targetancestors = list(repo.changelog.ancestors(target))
- targetancestors.append(target)
-
- for rev in sorted(state):
- if state[rev] == -1:
- storestatus(repo, originalwd, target, state, collapsef, keepf,
- keepbranchesf, external)
- rebasenode(repo, rev, target, state, skipped, targetancestors,
- collapsef, extrafn)
- ui.note(_('rebase merging completed\n'))
-
- if collapsef:
- p1, p2 = defineparents(repo, min(state), target,
- state, targetancestors)
- concludenode(repo, rev, p1, external, state, collapsef,
- last=True, skipped=skipped, extrafn=extrafn)
-
- if 'qtip' in repo.tags():
- updatemq(repo, state, skipped, **opts)
-
- if not keepf:
- # Remove no more useful revisions
- if set(repo.changelog.descendants(min(state))) - set(state):
- ui.warn(_("warning: new changesets detected on source branch, "
- "not stripping\n"))
- else:
- repair.strip(ui, repo, repo[min(state)].node(), "strip")
-
- clearstatus(repo)
- ui.status(_("rebase completed\n"))
- if os.path.exists(repo.sjoin('undo')):
- util.unlink(repo.sjoin('undo'))
- if skipped:
- ui.note(_("%d revisions have been skipped\n") % len(skipped))
- finally:
- release(lock, wlock)
-
-def concludenode(repo, rev, p1, p2, state, collapse, last=False, skipped=None,
- extrafn=None):
- """Skip commit if collapsing has been required and rev is not the last
- revision, commit otherwise
- """
- repo.ui.debug(_(" set parents\n"))
- if collapse and not last:
- repo.dirstate.setparents(repo[p1].node())
- return None
-
- repo.dirstate.setparents(repo[p1].node(), repo[p2].node())
-
- if skipped is None:
- skipped = set()
-
- # Commit, record the old nodeid
- newrev = nullrev
- try:
- if last:
- # we don't translate commit messages
- commitmsg = 'Collapsed revision'
- for rebased in state:
- if rebased not in skipped:
- commitmsg += '\n* %s' % repo[rebased].description()
- commitmsg = repo.ui.edit(commitmsg, repo.ui.username())
- else:
- commitmsg = repo[rev].description()
- # Commit might fail if unresolved files exist
- extra = {'rebase_source': repo[rev].hex()}
- if extrafn:
- extrafn(repo[rev], extra)
- newrev = repo.commit(text=commitmsg, user=repo[rev].user(),
- date=repo[rev].date(), extra=extra)
- repo.dirstate.setbranch(repo[newrev].branch())
- return newrev
- except util.Abort:
- # Invalidate the previous setparents
- repo.dirstate.invalidate()
- raise
-
-def rebasenode(repo, rev, target, state, skipped, targetancestors, collapse,
- extrafn):
- 'Rebase a single revision'
- repo.ui.debug(_("rebasing %d:%s\n") % (rev, repo[rev]))
-
- p1, p2 = defineparents(repo, rev, target, state, targetancestors)
-
- repo.ui.debug(_(" future parents are %d and %d\n") % (repo[p1].rev(),
- repo[p2].rev()))
-
- # Merge phase
- if len(repo.parents()) != 2:
- # Update to target and merge it with local
- if repo['.'].rev() != repo[p1].rev():
- repo.ui.debug(_(" update to %d:%s\n") % (repo[p1].rev(), repo[p1]))
- merge.update(repo, p1, False, True, False)
- else:
- repo.ui.debug(_(" already in target\n"))
- repo.dirstate.write()
- repo.ui.debug(_(" merge against %d:%s\n") % (repo[rev].rev(), repo[rev]))
- first = repo[rev].rev() == repo[min(state)].rev()
- stats = rebasemerge(repo, rev, first)
-
- if stats[3] > 0:
- raise util.Abort(_('fix unresolved conflicts with hg resolve then '
- 'run hg rebase --continue'))
- else: # we have an interrupted rebase
- repo.ui.debug(_('resuming interrupted rebase\n'))
-
- # Keep track of renamed files in the revision that is going to be rebased
- # Here we simulate the copies and renames in the source changeset
- cop, diver = copies.copies(repo, repo[rev], repo[target], repo[p2], True)
- m1 = repo[rev].manifest()
- m2 = repo[target].manifest()
- for k, v in cop.iteritems():
- if k in m1:
- if v in m1 or v in m2:
- repo.dirstate.copy(v, k)
- if v in m2 and v not in m1:
- repo.dirstate.remove(v)
-
- newrev = concludenode(repo, rev, p1, p2, state, collapse,
- extrafn=extrafn)
-
- # Update the state
- if newrev is not None:
- state[rev] = repo[newrev].rev()
- else:
- if not collapse:
- repo.ui.note(_('no changes, revision %d skipped\n') % rev)
- repo.ui.debug(_('next revision set to %s\n') % p1)
- skipped.add(rev)
- state[rev] = p1
-
-def defineparents(repo, rev, target, state, targetancestors):
- 'Return the new parent relationship of the revision that will be rebased'
- parents = repo[rev].parents()
- p1 = p2 = nullrev
-
- P1n = parents[0].rev()
- if P1n in targetancestors:
- p1 = target
- elif P1n in state:
- p1 = state[P1n]
- else: # P1n external
- p1 = target
- p2 = P1n
-
- if len(parents) == 2 and parents[1].rev() not in targetancestors:
- P2n = parents[1].rev()
- # interesting second parent
- if P2n in state:
- if p1 == target: # P1n in targetancestors or external
- p1 = state[P2n]
- else:
- p2 = state[P2n]
- else: # P2n external
- if p2 != nullrev: # P1n external too => rev is a merged revision
- raise util.Abort(_('cannot use revision %d as base, result '
- 'would have 3 parents') % rev)
- p2 = P2n
- return p1, p2
-
-def isagitpatch(repo, patchname):
- 'Return true if the given patch is in git format'
- mqpatch = os.path.join(repo.mq.path, patchname)
- for line in patch.linereader(file(mqpatch, 'rb')):
- if line.startswith('diff --git'):
- return True
- return False
-
-def updatemq(repo, state, skipped, **opts):
- 'Update rebased mq patches - finalize and then import them'
- mqrebase = {}
- for p in repo.mq.applied:
- if repo[p.rev].rev() in state:
- repo.ui.debug(_('revision %d is an mq patch (%s), finalize it.\n') %
- (repo[p.rev].rev(), p.name))
- mqrebase[repo[p.rev].rev()] = (p.name, isagitpatch(repo, p.name))
-
- if mqrebase:
- repo.mq.finish(repo, mqrebase.keys())
-
- # We must start import from the newest revision
- for rev in sorted(mqrebase, reverse=True):
- if rev not in skipped:
- repo.ui.debug(_('import mq patch %d (%s)\n')
- % (state[rev], mqrebase[rev][0]))
- repo.mq.qimport(repo, (), patchname=mqrebase[rev][0],
- git=mqrebase[rev][1],rev=[str(state[rev])])
- repo.mq.save_dirty()
-
-def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches,
- external):
- 'Store the current status to allow recovery'
- f = repo.opener("rebasestate", "w")
- f.write(repo[originalwd].hex() + '\n')
- f.write(repo[target].hex() + '\n')
- f.write(repo[external].hex() + '\n')
- f.write('%d\n' % int(collapse))
- f.write('%d\n' % int(keep))
- f.write('%d\n' % int(keepbranches))
- for d, v in state.iteritems():
- oldrev = repo[d].hex()
- newrev = repo[v].hex()
- f.write("%s:%s\n" % (oldrev, newrev))
- f.close()
- repo.ui.debug(_('rebase status stored\n'))
-
-def clearstatus(repo):
- 'Remove the status files'
- if os.path.exists(repo.join("rebasestate")):
- util.unlink(repo.join("rebasestate"))
-
-def restorestatus(repo):
- 'Restore a previously stored status'
- try:
- target = None
- collapse = False
- external = nullrev
- state = {}
- f = repo.opener("rebasestate")
- for i, l in enumerate(f.read().splitlines()):
- if i == 0:
- originalwd = repo[l].rev()
- elif i == 1:
- target = repo[l].rev()
- elif i == 2:
- external = repo[l].rev()
- elif i == 3:
- collapse = bool(int(l))
- elif i == 4:
- keep = bool(int(l))
- elif i == 5:
- keepbranches = bool(int(l))
- else:
- oldrev, newrev = l.split(':')
- state[repo[oldrev].rev()] = repo[newrev].rev()
- repo.ui.debug(_('rebase status resumed\n'))
- return originalwd, target, state, collapse, keep, keepbranches, external
- except IOError, err:
- if err.errno != errno.ENOENT:
- raise
- raise util.Abort(_('no rebase in progress'))
-
-def abort(repo, originalwd, target, state):
- 'Restore the repository to its original state'
- if set(repo.changelog.descendants(target)) - set(state.values()):
- repo.ui.warn(_("warning: new changesets detected on target branch, "
- "not stripping\n"))
- else:
- # Strip from the first rebased revision
- merge.update(repo, repo[originalwd].rev(), False, True, False)
- rebased = filter(lambda x: x > -1, state.values())
- if rebased:
- strippoint = min(rebased)
- repair.strip(repo.ui, repo, repo[strippoint].node(), "strip")
- clearstatus(repo)
- repo.ui.status(_('rebase aborted\n'))
-
-def buildstate(repo, dest, src, base, collapse):
- 'Define which revisions are going to be rebased and where'
- targetancestors = set()
-
- if not dest:
- # Destination defaults to the latest revision in the current branch
- branch = repo[None].branch()
- dest = repo[branch].rev()
- else:
- if 'qtip' in repo.tags() and (repo[dest].hex() in
- [s.rev for s in repo.mq.applied]):
- raise util.Abort(_('cannot rebase onto an applied mq patch'))
- dest = repo[dest].rev()
-
- if src:
- commonbase = repo[src].ancestor(repo[dest])
- if commonbase == repo[src]:
- raise util.Abort(_('cannot rebase an ancestor'))
- if commonbase == repo[dest]:
- raise util.Abort(_('cannot rebase a descendant'))
- source = repo[src].rev()
- else:
- if base:
- cwd = repo[base].rev()
- else:
- cwd = repo['.'].rev()
-
- if cwd == dest:
- repo.ui.debug(_('already working on current\n'))
- return None
-
- targetancestors = set(repo.changelog.ancestors(dest))
- if cwd in targetancestors:
- repo.ui.debug(_('already working on the current branch\n'))
- return None
-
- cwdancestors = set(repo.changelog.ancestors(cwd))
- cwdancestors.add(cwd)
- rebasingbranch = cwdancestors - targetancestors
- source = min(rebasingbranch)
-
- repo.ui.debug(_('rebase onto %d starting from %d\n') % (dest, source))
- state = dict.fromkeys(repo.changelog.descendants(source), nullrev)
- external = nullrev
- if collapse:
- if not targetancestors:
- targetancestors = set(repo.changelog.ancestors(dest))
- for rev in state:
- # Check externals and fail if there are more than one
- for p in repo[rev].parents():
- if (p.rev() not in state and p.rev() != source
- and p.rev() not in targetancestors):
- if external != nullrev:
- raise util.Abort(_('unable to collapse, there is more '
- 'than one external parent'))
- external = p.rev()
-
- state[source] = nullrev
- return repo['.'].rev(), repo[dest].rev(), state, external
-
-def pullrebase(orig, ui, repo, *args, **opts):
- 'Call rebase after pull if the latter has been invoked with --rebase'
- if opts.get('rebase'):
- if opts.get('update'):
- del opts['update']
- ui.debug(_('--update and --rebase are not compatible, ignoring '
- 'the update flag\n'))
-
- cmdutil.bail_if_changed(repo)
- revsprepull = len(repo)
- orig(ui, repo, *args, **opts)
- revspostpull = len(repo)
- if revspostpull > revsprepull:
- rebase(ui, repo, **opts)
- branch = repo[None].branch()
- dest = repo[branch].rev()
- if dest != repo['.'].rev():
- # there was nothing to rebase we force an update
- merge.update(repo, dest, False, False, False)
- else:
- orig(ui, repo, *args, **opts)
-
-def uisetup(ui):
- 'Replace pull with a decorator to provide --rebase option'
- entry = extensions.wrapcommand(commands.table, 'pull', pullrebase)
- entry[1].append(('', 'rebase', None,
- _("rebase working directory to branch head"))
-)
-
-cmdtable = {
-"rebase":
- (rebase,
- [
- ('s', 'source', '', _('rebase from a given revision')),
- ('b', 'base', '', _('rebase from the base of a given revision')),
- ('d', 'dest', '', _('rebase onto a given revision')),
- ('', 'collapse', False, _('collapse the rebased revisions')),
- ('', 'keep', False, _('keep original revisions')),
- ('', 'keepbranches', False, _('keep original branches')),
- ('c', 'continue', False, _('continue an interrupted rebase')),
- ('a', 'abort', False, _('abort an interrupted rebase')),] +
- templateopts,
- _('hg rebase [-s REV | -b REV] [-d REV] [--collapse] [--keep] '
- '[--keepbranches] | [-c] | [-a]')),
-}
diff --git a/sys/lib/python/hgext/record.py b/sys/lib/python/hgext/record.py
deleted file mode 100644
index 71a4f13c7..000000000
--- a/sys/lib/python/hgext/record.py
+++ /dev/null
@@ -1,551 +0,0 @@
-# record.py
-#
-# Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''commands to interactively select changes for commit/qrefresh'''
-
-from mercurial.i18n import gettext, _
-from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
-from mercurial import util
-import copy, cStringIO, errno, operator, os, re, tempfile
-
-lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
-
-def scanpatch(fp):
- """like patch.iterhunks, but yield different events
-
- - ('file', [header_lines + fromfile + tofile])
- - ('context', [context_lines])
- - ('hunk', [hunk_lines])
- - ('range', (-start,len, +start,len, diffp))
- """
- lr = patch.linereader(fp)
-
- def scanwhile(first, p):
- """scan lr while predicate holds"""
- lines = [first]
- while True:
- line = lr.readline()
- if not line:
- break
- if p(line):
- lines.append(line)
- else:
- lr.push(line)
- break
- return lines
-
- while True:
- line = lr.readline()
- if not line:
- break
- if line.startswith('diff --git a/'):
- def notheader(line):
- s = line.split(None, 1)
- return not s or s[0] not in ('---', 'diff')
- header = scanwhile(line, notheader)
- fromfile = lr.readline()
- if fromfile.startswith('---'):
- tofile = lr.readline()
- header += [fromfile, tofile]
- else:
- lr.push(fromfile)
- yield 'file', header
- elif line[0] == ' ':
- yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
- elif line[0] in '-+':
- yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
- else:
- m = lines_re.match(line)
- if m:
- yield 'range', m.groups()
- else:
- raise patch.PatchError('unknown patch content: %r' % line)
-
-class header(object):
- """patch header
-
- XXX shoudn't we move this to mercurial/patch.py ?
- """
- diff_re = re.compile('diff --git a/(.*) b/(.*)$')
- allhunks_re = re.compile('(?:index|new file|deleted file) ')
- pretty_re = re.compile('(?:new file|deleted file) ')
- special_re = re.compile('(?:index|new|deleted|copy|rename) ')
-
- def __init__(self, header):
- self.header = header
- self.hunks = []
-
- def binary(self):
- for h in self.header:
- if h.startswith('index '):
- return True
-
- def pretty(self, fp):
- for h in self.header:
- if h.startswith('index '):
- fp.write(_('this modifies a binary file (all or nothing)\n'))
- break
- if self.pretty_re.match(h):
- fp.write(h)
- if self.binary():
- fp.write(_('this is a binary file\n'))
- break
- if h.startswith('---'):
- fp.write(_('%d hunks, %d lines changed\n') %
- (len(self.hunks),
- sum([h.added + h.removed for h in self.hunks])))
- break
- fp.write(h)
-
- def write(self, fp):
- fp.write(''.join(self.header))
-
- def allhunks(self):
- for h in self.header:
- if self.allhunks_re.match(h):
- return True
-
- def files(self):
- fromfile, tofile = self.diff_re.match(self.header[0]).groups()
- if fromfile == tofile:
- return [fromfile]
- return [fromfile, tofile]
-
- def filename(self):
- return self.files()[-1]
-
- def __repr__(self):
- return '<header %s>' % (' '.join(map(repr, self.files())))
-
- def special(self):
- for h in self.header:
- if self.special_re.match(h):
- return True
-
-def countchanges(hunk):
- """hunk -> (n+,n-)"""
- add = len([h for h in hunk if h[0] == '+'])
- rem = len([h for h in hunk if h[0] == '-'])
- return add, rem
-
-class hunk(object):
- """patch hunk
-
- XXX shouldn't we merge this with patch.hunk ?
- """
- maxcontext = 3
-
- def __init__(self, header, fromline, toline, proc, before, hunk, after):
- def trimcontext(number, lines):
- delta = len(lines) - self.maxcontext
- if False and delta > 0:
- return number + delta, lines[:self.maxcontext]
- return number, lines
-
- self.header = header
- self.fromline, self.before = trimcontext(fromline, before)
- self.toline, self.after = trimcontext(toline, after)
- self.proc = proc
- self.hunk = hunk
- self.added, self.removed = countchanges(self.hunk)
-
- def write(self, fp):
- delta = len(self.before) + len(self.after)
- if self.after and self.after[-1] == '\\ No newline at end of file\n':
- delta -= 1
- fromlen = delta + self.removed
- tolen = delta + self.added
- fp.write('@@ -%d,%d +%d,%d @@%s\n' %
- (self.fromline, fromlen, self.toline, tolen,
- self.proc and (' ' + self.proc)))
- fp.write(''.join(self.before + self.hunk + self.after))
-
- pretty = write
-
- def filename(self):
- return self.header.filename()
-
- def __repr__(self):
- return '<hunk %r@%d>' % (self.filename(), self.fromline)
-
-def parsepatch(fp):
- """patch -> [] of hunks """
- class parser(object):
- """patch parsing state machine"""
- def __init__(self):
- self.fromline = 0
- self.toline = 0
- self.proc = ''
- self.header = None
- self.context = []
- self.before = []
- self.hunk = []
- self.stream = []
-
- def addrange(self, (fromstart, fromend, tostart, toend, proc)):
- self.fromline = int(fromstart)
- self.toline = int(tostart)
- self.proc = proc
-
- def addcontext(self, context):
- if self.hunk:
- h = hunk(self.header, self.fromline, self.toline, self.proc,
- self.before, self.hunk, context)
- self.header.hunks.append(h)
- self.stream.append(h)
- self.fromline += len(self.before) + h.removed
- self.toline += len(self.before) + h.added
- self.before = []
- self.hunk = []
- self.proc = ''
- self.context = context
-
- def addhunk(self, hunk):
- if self.context:
- self.before = self.context
- self.context = []
- self.hunk = hunk
-
- def newfile(self, hdr):
- self.addcontext([])
- h = header(hdr)
- self.stream.append(h)
- self.header = h
-
- def finished(self):
- self.addcontext([])
- return self.stream
-
- transitions = {
- 'file': {'context': addcontext,
- 'file': newfile,
- 'hunk': addhunk,
- 'range': addrange},
- 'context': {'file': newfile,
- 'hunk': addhunk,
- 'range': addrange},
- 'hunk': {'context': addcontext,
- 'file': newfile,
- 'range': addrange},
- 'range': {'context': addcontext,
- 'hunk': addhunk},
- }
-
- p = parser()
-
- state = 'context'
- for newstate, data in scanpatch(fp):
- try:
- p.transitions[state][newstate](p, data)
- except KeyError:
- raise patch.PatchError('unhandled transition: %s -> %s' %
- (state, newstate))
- state = newstate
- return p.finished()
-
-def filterpatch(ui, chunks):
- """Interactively filter patch chunks into applied-only chunks"""
- chunks = list(chunks)
- chunks.reverse()
- seen = set()
- def consumefile():
- """fetch next portion from chunks until a 'header' is seen
- NB: header == new-file mark
- """
- consumed = []
- while chunks:
- if isinstance(chunks[-1], header):
- break
- else:
- consumed.append(chunks.pop())
- return consumed
-
- resp_all = [None] # this two are changed from inside prompt,
- resp_file = [None] # so can't be usual variables
- applied = {} # 'filename' -> [] of chunks
- def prompt(query):
- """prompt query, and process base inputs
-
- - y/n for the rest of file
- - y/n for the rest
- - ? (help)
- - q (quit)
-
- else, input is returned to the caller.
- """
- if resp_all[0] is not None:
- return resp_all[0]
- if resp_file[0] is not None:
- return resp_file[0]
- while True:
- resps = _('[Ynsfdaq?]')
- choices = (_('&Yes, record this change'),
- _('&No, skip this change'),
- _('&Skip remaining changes to this file'),
- _('Record remaining changes to this &file'),
- _('&Done, skip remaining changes and files'),
- _('Record &all changes to all remaining files'),
- _('&Quit, recording no changes'),
- _('&?'))
- r = ui.promptchoice("%s %s " % (query, resps), choices)
- if r == 7: # ?
- doc = gettext(record.__doc__)
- c = doc.find(_('y - record this change'))
- for l in doc[c:].splitlines():
- if l: ui.write(l.strip(), '\n')
- continue
- elif r == 0: # yes
- ret = 'y'
- elif r == 1: # no
- ret = 'n'
- elif r == 2: # Skip
- ret = resp_file[0] = 'n'
- elif r == 3: # file (Record remaining)
- ret = resp_file[0] = 'y'
- elif r == 4: # done, skip remaining
- ret = resp_all[0] = 'n'
- elif r == 5: # all
- ret = resp_all[0] = 'y'
- elif r == 6: # quit
- raise util.Abort(_('user quit'))
- return ret
- pos, total = 0, len(chunks) - 1
- while chunks:
- chunk = chunks.pop()
- if isinstance(chunk, header):
- # new-file mark
- resp_file = [None]
- fixoffset = 0
- hdr = ''.join(chunk.header)
- if hdr in seen:
- consumefile()
- continue
- seen.add(hdr)
- if resp_all[0] is None:
- chunk.pretty(ui)
- r = prompt(_('examine changes to %s?') %
- _(' and ').join(map(repr, chunk.files())))
- if r == _('y'):
- applied[chunk.filename()] = [chunk]
- if chunk.allhunks():
- applied[chunk.filename()] += consumefile()
- else:
- consumefile()
- else:
- # new hunk
- if resp_file[0] is None and resp_all[0] is None:
- chunk.pretty(ui)
- r = total == 1 and prompt(_('record this change to %r?') %
- chunk.filename()) \
- or prompt(_('record change %d/%d to %r?') %
- (pos, total, chunk.filename()))
- if r == _('y'):
- if fixoffset:
- chunk = copy.copy(chunk)
- chunk.toline += fixoffset
- applied[chunk.filename()].append(chunk)
- else:
- fixoffset += chunk.removed - chunk.added
- pos = pos + 1
- return reduce(operator.add, [h for h in applied.itervalues()
- if h[0].special() or len(h) > 1], [])
-
-def record(ui, repo, *pats, **opts):
- '''interactively select changes to commit
-
- If a list of files is omitted, all changes reported by "hg status"
- will be candidates for recording.
-
- See 'hg help dates' for a list of formats valid for -d/--date.
-
- You will be prompted for whether to record changes to each
- modified file, and for files with multiple changes, for each
- change to use. For each query, the following responses are
- possible::
-
- y - record this change
- n - skip this change
-
- s - skip remaining changes to this file
- f - record remaining changes to this file
-
- d - done, skip remaining changes and files
- a - record all changes to all remaining files
- q - quit, recording no changes
-
- ? - display help'''
-
- def record_committer(ui, repo, pats, opts):
- commands.commit(ui, repo, *pats, **opts)
-
- dorecord(ui, repo, record_committer, *pats, **opts)
-
-
-def qrecord(ui, repo, patch, *pats, **opts):
- '''interactively record a new patch
-
- See 'hg help qnew' & 'hg help record' for more information and
- usage.
- '''
-
- try:
- mq = extensions.find('mq')
- except KeyError:
- raise util.Abort(_("'mq' extension not loaded"))
-
- def qrecord_committer(ui, repo, pats, opts):
- mq.new(ui, repo, patch, *pats, **opts)
-
- opts = opts.copy()
- opts['force'] = True # always 'qnew -f'
- dorecord(ui, repo, qrecord_committer, *pats, **opts)
-
-
-def dorecord(ui, repo, committer, *pats, **opts):
- if not ui.interactive():
- raise util.Abort(_('running non-interactively, use commit instead'))
-
- def recordfunc(ui, repo, message, match, opts):
- """This is generic record driver.
-
- Its job is to interactively filter local changes, and accordingly
- prepare working dir into a state, where the job can be delegated to
- non-interactive commit command such as 'commit' or 'qrefresh'.
-
- After the actual job is done by non-interactive command, working dir
- state is restored to original.
-
- In the end we'll record intresting changes, and everything else will be
- left in place, so the user can continue his work.
- """
-
- changes = repo.status(match=match)[:3]
- diffopts = mdiff.diffopts(git=True, nodates=True)
- chunks = patch.diff(repo, changes=changes, opts=diffopts)
- fp = cStringIO.StringIO()
- fp.write(''.join(chunks))
- fp.seek(0)
-
- # 1. filter patch, so we have intending-to apply subset of it
- chunks = filterpatch(ui, parsepatch(fp))
- del fp
-
- contenders = set()
- for h in chunks:
- try: contenders.update(set(h.files()))
- except AttributeError: pass
-
- changed = changes[0] + changes[1] + changes[2]
- newfiles = [f for f in changed if f in contenders]
- if not newfiles:
- ui.status(_('no changes to record\n'))
- return 0
-
- modified = set(changes[0])
-
- # 2. backup changed files, so we can restore them in the end
- backups = {}
- backupdir = repo.join('record-backups')
- try:
- os.mkdir(backupdir)
- except OSError, err:
- if err.errno != errno.EEXIST:
- raise
- try:
- # backup continues
- for f in newfiles:
- if f not in modified:
- continue
- fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
- dir=backupdir)
- os.close(fd)
- ui.debug(_('backup %r as %r\n') % (f, tmpname))
- util.copyfile(repo.wjoin(f), tmpname)
- backups[f] = tmpname
-
- fp = cStringIO.StringIO()
- for c in chunks:
- if c.filename() in backups:
- c.write(fp)
- dopatch = fp.tell()
- fp.seek(0)
-
- # 3a. apply filtered patch to clean repo (clean)
- if backups:
- hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
-
- # 3b. (apply)
- if dopatch:
- try:
- ui.debug(_('applying patch\n'))
- ui.debug(fp.getvalue())
- pfiles = {}
- patch.internalpatch(fp, ui, 1, repo.root, files=pfiles,
- eolmode=None)
- patch.updatedir(ui, repo, pfiles)
- except patch.PatchError, err:
- s = str(err)
- if s:
- raise util.Abort(s)
- else:
- raise util.Abort(_('patch failed to apply'))
- del fp
-
- # 4. We prepared working directory according to filtered patch.
- # Now is the time to delegate the job to commit/qrefresh or the like!
-
- # it is important to first chdir to repo root -- we'll call a
- # highlevel command with list of pathnames relative to repo root
- cwd = os.getcwd()
- os.chdir(repo.root)
- try:
- committer(ui, repo, newfiles, opts)
- finally:
- os.chdir(cwd)
-
- return 0
- finally:
- # 5. finally restore backed-up files
- try:
- for realname, tmpname in backups.iteritems():
- ui.debug(_('restoring %r to %r\n') % (tmpname, realname))
- util.copyfile(tmpname, repo.wjoin(realname))
- os.unlink(tmpname)
- os.rmdir(backupdir)
- except OSError:
- pass
- return cmdutil.commit(ui, repo, recordfunc, pats, opts)
-
-cmdtable = {
- "record":
- (record,
-
- # add commit options
- commands.table['^commit|ci'][1],
-
- _('hg record [OPTION]... [FILE]...')),
-}
-
-
-def extsetup():
- try:
- mq = extensions.find('mq')
- except KeyError:
- return
-
- qcmdtable = {
- "qrecord":
- (qrecord,
-
- # add qnew options, except '--force'
- [opt for opt in mq.cmdtable['qnew'][1] if opt[1] != 'force'],
-
- _('hg qrecord [OPTION]... PATCH [FILE]...')),
- }
-
- cmdtable.update(qcmdtable)
-
diff --git a/sys/lib/python/hgext/share.py b/sys/lib/python/hgext/share.py
deleted file mode 100644
index e714ce0aa..000000000
--- a/sys/lib/python/hgext/share.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''share a common history between several working directories'''
-
-from mercurial.i18n import _
-from mercurial import hg, commands
-
-def share(ui, source, dest=None, noupdate=False):
- """create a new shared repository (experimental)
-
- Initialize a new repository and working directory that shares its
- history with another repository.
-
- NOTE: actions that change history such as rollback or moving the
- source may confuse sharers.
- """
-
- return hg.share(ui, source, dest, not noupdate)
-
-cmdtable = {
- "share":
- (share,
- [('U', 'noupdate', None, _('do not create a working copy'))],
- _('[-U] SOURCE [DEST]')),
-}
-
-commands.norepo += " share"
diff --git a/sys/lib/python/hgext/transplant.py b/sys/lib/python/hgext/transplant.py
deleted file mode 100644
index 1d26c7efd..000000000
--- a/sys/lib/python/hgext/transplant.py
+++ /dev/null
@@ -1,606 +0,0 @@
-# Patch transplanting extension for Mercurial
-#
-# Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''command to transplant changesets from another branch
-
-This extension allows you to transplant patches from another branch.
-
-Transplanted patches are recorded in .hg/transplant/transplants, as a
-map from a changeset hash to its hash in the source repository.
-'''
-
-from mercurial.i18n import _
-import os, tempfile
-from mercurial import bundlerepo, changegroup, cmdutil, hg, merge, match
-from mercurial import patch, revlog, util, error
-
-class transplantentry(object):
- def __init__(self, lnode, rnode):
- self.lnode = lnode
- self.rnode = rnode
-
-class transplants(object):
- def __init__(self, path=None, transplantfile=None, opener=None):
- self.path = path
- self.transplantfile = transplantfile
- self.opener = opener
-
- if not opener:
- self.opener = util.opener(self.path)
- self.transplants = []
- self.dirty = False
- self.read()
-
- def read(self):
- abspath = os.path.join(self.path, self.transplantfile)
- if self.transplantfile and os.path.exists(abspath):
- for line in self.opener(self.transplantfile).read().splitlines():
- lnode, rnode = map(revlog.bin, line.split(':'))
- self.transplants.append(transplantentry(lnode, rnode))
-
- def write(self):
- if self.dirty and self.transplantfile:
- if not os.path.isdir(self.path):
- os.mkdir(self.path)
- fp = self.opener(self.transplantfile, 'w')
- for c in self.transplants:
- l, r = map(revlog.hex, (c.lnode, c.rnode))
- fp.write(l + ':' + r + '\n')
- fp.close()
- self.dirty = False
-
- def get(self, rnode):
- return [t for t in self.transplants if t.rnode == rnode]
-
- def set(self, lnode, rnode):
- self.transplants.append(transplantentry(lnode, rnode))
- self.dirty = True
-
- def remove(self, transplant):
- del self.transplants[self.transplants.index(transplant)]
- self.dirty = True
-
-class transplanter(object):
- def __init__(self, ui, repo):
- self.ui = ui
- self.path = repo.join('transplant')
- self.opener = util.opener(self.path)
- self.transplants = transplants(self.path, 'transplants',
- opener=self.opener)
-
- def applied(self, repo, node, parent):
- '''returns True if a node is already an ancestor of parent
- or has already been transplanted'''
- if hasnode(repo, node):
- if node in repo.changelog.reachable(parent, stop=node):
- return True
- for t in self.transplants.get(node):
- # it might have been stripped
- if not hasnode(repo, t.lnode):
- self.transplants.remove(t)
- return False
- if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
- return True
- return False
-
- def apply(self, repo, source, revmap, merges, opts={}):
- '''apply the revisions in revmap one by one in revision order'''
- revs = sorted(revmap)
- p1, p2 = repo.dirstate.parents()
- pulls = []
- diffopts = patch.diffopts(self.ui, opts)
- diffopts.git = True
-
- lock = wlock = None
- try:
- wlock = repo.wlock()
- lock = repo.lock()
- for rev in revs:
- node = revmap[rev]
- revstr = '%s:%s' % (rev, revlog.short(node))
-
- if self.applied(repo, node, p1):
- self.ui.warn(_('skipping already applied revision %s\n') %
- revstr)
- continue
-
- parents = source.changelog.parents(node)
- if not opts.get('filter'):
- # If the changeset parent is the same as the
- # wdir's parent, just pull it.
- if parents[0] == p1:
- pulls.append(node)
- p1 = node
- continue
- if pulls:
- if source != repo:
- repo.pull(source, heads=pulls)
- merge.update(repo, pulls[-1], False, False, None)
- p1, p2 = repo.dirstate.parents()
- pulls = []
-
- domerge = False
- if node in merges:
- # pulling all the merge revs at once would mean we
- # couldn't transplant after the latest even if
- # transplants before them fail.
- domerge = True
- if not hasnode(repo, node):
- repo.pull(source, heads=[node])
-
- if parents[1] != revlog.nullid:
- self.ui.note(_('skipping merge changeset %s:%s\n')
- % (rev, revlog.short(node)))
- patchfile = None
- else:
- fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
- fp = os.fdopen(fd, 'w')
- gen = patch.diff(source, parents[0], node, opts=diffopts)
- for chunk in gen:
- fp.write(chunk)
- fp.close()
-
- del revmap[rev]
- if patchfile or domerge:
- try:
- n = self.applyone(repo, node,
- source.changelog.read(node),
- patchfile, merge=domerge,
- log=opts.get('log'),
- filter=opts.get('filter'))
- if n and domerge:
- self.ui.status(_('%s merged at %s\n') % (revstr,
- revlog.short(n)))
- elif n:
- self.ui.status(_('%s transplanted to %s\n')
- % (revlog.short(node),
- revlog.short(n)))
- finally:
- if patchfile:
- os.unlink(patchfile)
- if pulls:
- repo.pull(source, heads=pulls)
- merge.update(repo, pulls[-1], False, False, None)
- finally:
- self.saveseries(revmap, merges)
- self.transplants.write()
- lock.release()
- wlock.release()
-
- def filter(self, filter, changelog, patchfile):
- '''arbitrarily rewrite changeset before applying it'''
-
- self.ui.status(_('filtering %s\n') % patchfile)
- user, date, msg = (changelog[1], changelog[2], changelog[4])
-
- fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
- fp = os.fdopen(fd, 'w')
- fp.write("# HG changeset patch\n")
- fp.write("# User %s\n" % user)
- fp.write("# Date %d %d\n" % date)
- fp.write(changelog[4])
- fp.close()
-
- try:
- util.system('%s %s %s' % (filter, util.shellquote(headerfile),
- util.shellquote(patchfile)),
- environ={'HGUSER': changelog[1]},
- onerr=util.Abort, errprefix=_('filter failed'))
- user, date, msg = self.parselog(file(headerfile))[1:4]
- finally:
- os.unlink(headerfile)
-
- return (user, date, msg)
-
- def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
- filter=None):
- '''apply the patch in patchfile to the repository as a transplant'''
- (manifest, user, (time, timezone), files, message) = cl[:5]
- date = "%d %d" % (time, timezone)
- extra = {'transplant_source': node}
- if filter:
- (user, date, message) = self.filter(filter, cl, patchfile)
-
- if log:
- # we don't translate messages inserted into commits
- message += '\n(transplanted from %s)' % revlog.hex(node)
-
- self.ui.status(_('applying %s\n') % revlog.short(node))
- self.ui.note('%s %s\n%s\n' % (user, date, message))
-
- if not patchfile and not merge:
- raise util.Abort(_('can only omit patchfile if merging'))
- if patchfile:
- try:
- files = {}
- try:
- patch.patch(patchfile, self.ui, cwd=repo.root,
- files=files, eolmode=None)
- if not files:
- self.ui.warn(_('%s: empty changeset')
- % revlog.hex(node))
- return None
- finally:
- files = patch.updatedir(self.ui, repo, files)
- except Exception, inst:
- if filter:
- os.unlink(patchfile)
- seriespath = os.path.join(self.path, 'series')
- if os.path.exists(seriespath):
- os.unlink(seriespath)
- p1 = repo.dirstate.parents()[0]
- p2 = node
- self.log(user, date, message, p1, p2, merge=merge)
- self.ui.write(str(inst) + '\n')
- raise util.Abort(_('Fix up the merge and run '
- 'hg transplant --continue'))
- else:
- files = None
- if merge:
- p1, p2 = repo.dirstate.parents()
- repo.dirstate.setparents(p1, node)
- m = match.always(repo.root, '')
- else:
- m = match.exact(repo.root, '', files)
-
- n = repo.commit(message, user, date, extra=extra, match=m)
- if not merge:
- self.transplants.set(n, node)
-
- return n
-
- def resume(self, repo, source, opts=None):
- '''recover last transaction and apply remaining changesets'''
- if os.path.exists(os.path.join(self.path, 'journal')):
- n, node = self.recover(repo)
- self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
- revlog.short(n)))
- seriespath = os.path.join(self.path, 'series')
- if not os.path.exists(seriespath):
- self.transplants.write()
- return
- nodes, merges = self.readseries()
- revmap = {}
- for n in nodes:
- revmap[source.changelog.rev(n)] = n
- os.unlink(seriespath)
-
- self.apply(repo, source, revmap, merges, opts)
-
- def recover(self, repo):
- '''commit working directory using journal metadata'''
- node, user, date, message, parents = self.readlog()
- merge = len(parents) == 2
-
- if not user or not date or not message or not parents[0]:
- raise util.Abort(_('transplant log file is corrupt'))
-
- extra = {'transplant_source': node}
- wlock = repo.wlock()
- try:
- p1, p2 = repo.dirstate.parents()
- if p1 != parents[0]:
- raise util.Abort(
- _('working dir not at transplant parent %s') %
- revlog.hex(parents[0]))
- if merge:
- repo.dirstate.setparents(p1, parents[1])
- n = repo.commit(message, user, date, extra=extra)
- if not n:
- raise util.Abort(_('commit failed'))
- if not merge:
- self.transplants.set(n, node)
- self.unlog()
-
- return n, node
- finally:
- wlock.release()
-
- def readseries(self):
- nodes = []
- merges = []
- cur = nodes
- for line in self.opener('series').read().splitlines():
- if line.startswith('# Merges'):
- cur = merges
- continue
- cur.append(revlog.bin(line))
-
- return (nodes, merges)
-
- def saveseries(self, revmap, merges):
- if not revmap:
- return
-
- if not os.path.isdir(self.path):
- os.mkdir(self.path)
- series = self.opener('series', 'w')
- for rev in sorted(revmap):
- series.write(revlog.hex(revmap[rev]) + '\n')
- if merges:
- series.write('# Merges\n')
- for m in merges:
- series.write(revlog.hex(m) + '\n')
- series.close()
-
- def parselog(self, fp):
- parents = []
- message = []
- node = revlog.nullid
- inmsg = False
- for line in fp.read().splitlines():
- if inmsg:
- message.append(line)
- elif line.startswith('# User '):
- user = line[7:]
- elif line.startswith('# Date '):
- date = line[7:]
- elif line.startswith('# Node ID '):
- node = revlog.bin(line[10:])
- elif line.startswith('# Parent '):
- parents.append(revlog.bin(line[9:]))
- elif not line.startswith('#'):
- inmsg = True
- message.append(line)
- return (node, user, date, '\n'.join(message), parents)
-
- def log(self, user, date, message, p1, p2, merge=False):
- '''journal changelog metadata for later recover'''
-
- if not os.path.isdir(self.path):
- os.mkdir(self.path)
- fp = self.opener('journal', 'w')
- fp.write('# User %s\n' % user)
- fp.write('# Date %s\n' % date)
- fp.write('# Node ID %s\n' % revlog.hex(p2))
- fp.write('# Parent ' + revlog.hex(p1) + '\n')
- if merge:
- fp.write('# Parent ' + revlog.hex(p2) + '\n')
- fp.write(message.rstrip() + '\n')
- fp.close()
-
- def readlog(self):
- return self.parselog(self.opener('journal'))
-
- def unlog(self):
- '''remove changelog journal'''
- absdst = os.path.join(self.path, 'journal')
- if os.path.exists(absdst):
- os.unlink(absdst)
-
- def transplantfilter(self, repo, source, root):
- def matchfn(node):
- if self.applied(repo, node, root):
- return False
- if source.changelog.parents(node)[1] != revlog.nullid:
- return False
- extra = source.changelog.read(node)[5]
- cnode = extra.get('transplant_source')
- if cnode and self.applied(repo, cnode, root):
- return False
- return True
-
- return matchfn
-
-def hasnode(repo, node):
- try:
- return repo.changelog.rev(node) != None
- except error.RevlogError:
- return False
-
-def browserevs(ui, repo, nodes, opts):
- '''interactively transplant changesets'''
- def browsehelp(ui):
- ui.write('y: transplant this changeset\n'
- 'n: skip this changeset\n'
- 'm: merge at this changeset\n'
- 'p: show patch\n'
- 'c: commit selected changesets\n'
- 'q: cancel transplant\n'
- '?: show this help\n')
-
- displayer = cmdutil.show_changeset(ui, repo, opts)
- transplants = []
- merges = []
- for node in nodes:
- displayer.show(repo[node])
- action = None
- while not action:
- action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
- if action == '?':
- browsehelp(ui)
- action = None
- elif action == 'p':
- parent = repo.changelog.parents(node)[0]
- for chunk in patch.diff(repo, parent, node):
- ui.write(chunk)
- action = None
- elif action not in ('y', 'n', 'm', 'c', 'q'):
- ui.write('no such option\n')
- action = None
- if action == 'y':
- transplants.append(node)
- elif action == 'm':
- merges.append(node)
- elif action == 'c':
- break
- elif action == 'q':
- transplants = ()
- merges = ()
- break
- return (transplants, merges)
-
-def transplant(ui, repo, *revs, **opts):
- '''transplant changesets from another branch
-
- Selected changesets will be applied on top of the current working
- directory with the log of the original changeset. If --log is
- specified, log messages will have a comment appended of the form::
-
- (transplanted from CHANGESETHASH)
-
- You can rewrite the changelog message with the --filter option.
- Its argument will be invoked with the current changelog message as
- $1 and the patch as $2.
-
- If --source/-s is specified, selects changesets from the named
- repository. If --branch/-b is specified, selects changesets from
- the branch holding the named revision, up to that revision. If
- --all/-a is specified, all changesets on the branch will be
- transplanted, otherwise you will be prompted to select the
- changesets you want.
-
- hg transplant --branch REVISION --all will rebase the selected
- branch (up to the named revision) onto your current working
- directory.
-
- You can optionally mark selected transplanted changesets as merge
- changesets. You will not be prompted to transplant any ancestors
- of a merged transplant, and you can merge descendants of them
- normally instead of transplanting them.
-
- If no merges or revisions are provided, hg transplant will start
- an interactive changeset browser.
-
- If a changeset application fails, you can fix the merge by hand
- and then resume where you left off by calling hg transplant
- --continue/-c.
- '''
- def getremotechanges(repo, url):
- sourcerepo = ui.expandpath(url)
- source = hg.repository(ui, sourcerepo)
- common, incoming, rheads = repo.findcommonincoming(source, force=True)
- if not incoming:
- return (source, None, None)
-
- bundle = None
- if not source.local():
- if source.capable('changegroupsubset'):
- cg = source.changegroupsubset(incoming, rheads, 'incoming')
- else:
- cg = source.changegroup(incoming, 'incoming')
- bundle = changegroup.writebundle(cg, None, 'HG10UN')
- source = bundlerepo.bundlerepository(ui, repo.root, bundle)
-
- return (source, incoming, bundle)
-
- def incwalk(repo, incoming, branches, match=util.always):
- if not branches:
- branches=None
- for node in repo.changelog.nodesbetween(incoming, branches)[0]:
- if match(node):
- yield node
-
- def transplantwalk(repo, root, branches, match=util.always):
- if not branches:
- branches = repo.heads()
- ancestors = []
- for branch in branches:
- ancestors.append(repo.changelog.ancestor(root, branch))
- for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
- if match(node):
- yield node
-
- def checkopts(opts, revs):
- if opts.get('continue'):
- if filter(lambda opt: opts.get(opt), ('branch', 'all', 'merge')):
- raise util.Abort(_('--continue is incompatible with '
- 'branch, all or merge'))
- return
- if not (opts.get('source') or revs or
- opts.get('merge') or opts.get('branch')):
- raise util.Abort(_('no source URL, branch tag or revision '
- 'list provided'))
- if opts.get('all'):
- if not opts.get('branch'):
- raise util.Abort(_('--all requires a branch revision'))
- if revs:
- raise util.Abort(_('--all is incompatible with a '
- 'revision list'))
-
- checkopts(opts, revs)
-
- if not opts.get('log'):
- opts['log'] = ui.config('transplant', 'log')
- if not opts.get('filter'):
- opts['filter'] = ui.config('transplant', 'filter')
-
- tp = transplanter(ui, repo)
-
- p1, p2 = repo.dirstate.parents()
- if len(repo) > 0 and p1 == revlog.nullid:
- raise util.Abort(_('no revision checked out'))
- if not opts.get('continue'):
- if p2 != revlog.nullid:
- raise util.Abort(_('outstanding uncommitted merges'))
- m, a, r, d = repo.status()[:4]
- if m or a or r or d:
- raise util.Abort(_('outstanding local changes'))
-
- bundle = None
- source = opts.get('source')
- if source:
- (source, incoming, bundle) = getremotechanges(repo, source)
- else:
- source = repo
-
- try:
- if opts.get('continue'):
- tp.resume(repo, source, opts)
- return
-
- tf=tp.transplantfilter(repo, source, p1)
- if opts.get('prune'):
- prune = [source.lookup(r)
- for r in cmdutil.revrange(source, opts.get('prune'))]
- matchfn = lambda x: tf(x) and x not in prune
- else:
- matchfn = tf
- branches = map(source.lookup, opts.get('branch', ()))
- merges = map(source.lookup, opts.get('merge', ()))
- revmap = {}
- if revs:
- for r in cmdutil.revrange(source, revs):
- revmap[int(r)] = source.lookup(r)
- elif opts.get('all') or not merges:
- if source != repo:
- alltransplants = incwalk(source, incoming, branches,
- match=matchfn)
- else:
- alltransplants = transplantwalk(source, p1, branches,
- match=matchfn)
- if opts.get('all'):
- revs = alltransplants
- else:
- revs, newmerges = browserevs(ui, source, alltransplants, opts)
- merges.extend(newmerges)
- for r in revs:
- revmap[source.changelog.rev(r)] = r
- for r in merges:
- revmap[source.changelog.rev(r)] = r
-
- tp.apply(repo, source, revmap, merges, opts)
- finally:
- if bundle:
- source.close()
- os.unlink(bundle)
-
-cmdtable = {
- "transplant":
- (transplant,
- [('s', 'source', '', _('pull patches from REPOSITORY')),
- ('b', 'branch', [], _('pull patches from branch BRANCH')),
- ('a', 'all', None, _('pull all changesets up to BRANCH')),
- ('p', 'prune', [], _('skip over REV')),
- ('m', 'merge', [], _('merge at REV')),
- ('', 'log', None, _('append transplant info to log message')),
- ('c', 'continue', None, _('continue last transplant session '
- 'after repair')),
- ('', 'filter', '', _('filter changesets through FILTER'))],
- _('hg transplant [-s REPOSITORY] [-b BRANCH [-a]] [-p REV] '
- '[-m REV] [REV]...'))
-}
diff --git a/sys/lib/python/hgext/win32mbcs.py b/sys/lib/python/hgext/win32mbcs.py
deleted file mode 100644
index a707f053e..000000000
--- a/sys/lib/python/hgext/win32mbcs.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# win32mbcs.py -- MBCS filename support for Mercurial
-#
-# Copyright (c) 2008 Shun-ichi Goto <shunichi.goto@gmail.com>
-#
-# Version: 0.2
-# Author: Shun-ichi Goto <shunichi.goto@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-#
-
-'''allow the use of MBCS paths with problematic encodings
-
-Some MBCS encodings are not good for some path operations (i.e.
-splitting path, case conversion, etc.) with its encoded bytes. We call
-such a encoding (i.e. shift_jis and big5) as "problematic encoding".
-This extension can be used to fix the issue with those encodings by
-wrapping some functions to convert to Unicode string before path
-operation.
-
-This extension is useful for:
-
-- Japanese Windows users using shift_jis encoding.
-- Chinese Windows users using big5 encoding.
-- All users who use a repository with one of problematic encodings on
- case-insensitive file system.
-
-This extension is not needed for:
-
-- Any user who use only ASCII chars in path.
-- Any user who do not use any of problematic encodings.
-
-Note that there are some limitations on using this extension:
-
-- You should use single encoding in one repository.
-- You should set same encoding for the repository by locale or
- HGENCODING.
-
-Path encoding conversion are done between Unicode and
-encoding.encoding which is decided by Mercurial from current locale
-setting or HGENCODING.
-'''
-
-import os, sys
-from mercurial.i18n import _
-from mercurial import util, encoding
-
-def decode(arg):
- if isinstance(arg, str):
- uarg = arg.decode(encoding.encoding)
- if arg == uarg.encode(encoding.encoding):
- return uarg
- raise UnicodeError("Not local encoding")
- elif isinstance(arg, tuple):
- return tuple(map(decode, arg))
- elif isinstance(arg, list):
- return map(decode, arg)
- elif isinstance(arg, dict):
- for k, v in arg.items():
- arg[k] = decode(v)
- return arg
-
-def encode(arg):
- if isinstance(arg, unicode):
- return arg.encode(encoding.encoding)
- elif isinstance(arg, tuple):
- return tuple(map(encode, arg))
- elif isinstance(arg, list):
- return map(encode, arg)
- elif isinstance(arg, dict):
- for k, v in arg.items():
- arg[k] = encode(v)
- return arg
-
-def appendsep(s):
- # ensure the path ends with os.sep, appending it if necessary.
- try:
- us = decode(s)
- except UnicodeError:
- us = s
- if us and us[-1] not in ':/\\':
- s += os.sep
- return s
-
-def wrapper(func, args, kwds):
- # check argument is unicode, then call original
- for arg in args:
- if isinstance(arg, unicode):
- return func(*args, **kwds)
-
- try:
- # convert arguments to unicode, call func, then convert back
- return encode(func(*decode(args), **decode(kwds)))
- except UnicodeError:
- raise util.Abort(_("[win32mbcs] filename conversion failed with"
- " %s encoding\n") % (encoding.encoding))
-
-def wrapperforlistdir(func, args, kwds):
- # Ensure 'path' argument ends with os.sep to avoids
- # misinterpreting last 0x5c of MBCS 2nd byte as path separator.
- if args:
- args = list(args)
- args[0] = appendsep(args[0])
- if kwds.has_key('path'):
- kwds['path'] = appendsep(kwds['path'])
- return func(*args, **kwds)
-
-def wrapname(name, wrapper):
- module, name = name.rsplit('.', 1)
- module = sys.modules[module]
- func = getattr(module, name)
- def f(*args, **kwds):
- return wrapper(func, args, kwds)
- try:
- f.__name__ = func.__name__ # fail with python23
- except Exception:
- pass
- setattr(module, name, f)
-
-# List of functions to be wrapped.
-# NOTE: os.path.dirname() and os.path.basename() are safe because
-# they use result of os.path.split()
-funcs = '''os.path.join os.path.split os.path.splitext
- os.path.splitunc os.path.normpath os.path.normcase os.makedirs
- mercurial.util.endswithsep mercurial.util.splitpath mercurial.util.checkcase
- mercurial.util.fspath mercurial.windows.pconvert'''
-
-# codec and alias names of sjis and big5 to be faked.
-problematic_encodings = '''big5 big5-tw csbig5 big5hkscs big5-hkscs
- hkscs cp932 932 ms932 mskanji ms-kanji shift_jis csshiftjis shiftjis
- sjis s_jis shift_jis_2004 shiftjis2004 sjis_2004 sjis2004
- shift_jisx0213 shiftjisx0213 sjisx0213 s_jisx0213 950 cp950 ms950 '''
-
-def reposetup(ui, repo):
- # TODO: decide use of config section for this extension
- if not os.path.supports_unicode_filenames:
- ui.warn(_("[win32mbcs] cannot activate on this platform.\n"))
- return
-
- # fake is only for relevant environment.
- if encoding.encoding.lower() in problematic_encodings.split():
- for f in funcs.split():
- wrapname(f, wrapper)
- wrapname("mercurial.osutil.listdir", wrapperforlistdir)
- ui.debug(_("[win32mbcs] activated with encoding: %s\n")
- % encoding.encoding)
-
diff --git a/sys/lib/python/hgext/win32text.py b/sys/lib/python/hgext/win32text.py
deleted file mode 100644
index 2c64f1356..000000000
--- a/sys/lib/python/hgext/win32text.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# win32text.py - LF <-> CRLF/CR translation utilities for Windows/Mac users
-#
-# Copyright 2005, 2007-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''perform automatic newline conversion
-
-To perform automatic newline conversion, use::
-
- [extensions]
- hgext.win32text =
- [encode]
- ** = cleverencode:
- # or ** = macencode:
-
- [decode]
- ** = cleverdecode:
- # or ** = macdecode:
-
-If not doing conversion, to make sure you do not commit CRLF/CR by accident::
-
- [hooks]
- pretxncommit.crlf = python:hgext.win32text.forbidcrlf
- # or pretxncommit.cr = python:hgext.win32text.forbidcr
-
-To do the same check on a server to prevent CRLF/CR from being
-pushed or pulled::
-
- [hooks]
- pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
- # or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
-'''
-
-from mercurial.i18n import _
-from mercurial.node import short
-from mercurial import util
-import re
-
-# regexp for single LF without CR preceding.
-re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
-
-newlinestr = {'\r\n': 'CRLF', '\r': 'CR'}
-filterstr = {'\r\n': 'clever', '\r': 'mac'}
-
-def checknewline(s, newline, ui=None, repo=None, filename=None):
- # warn if already has 'newline' in repository.
- # it might cause unexpected eol conversion.
- # see issue 302:
- # http://mercurial.selenic.com/bts/issue302
- if newline in s and ui and filename and repo:
- ui.warn(_('WARNING: %s already has %s line endings\n'
- 'and does not need EOL conversion by the win32text plugin.\n'
- 'Before your next commit, please reconsider your '
- 'encode/decode settings in \nMercurial.ini or %s.\n') %
- (filename, newlinestr[newline], repo.join('hgrc')))
-
-def dumbdecode(s, cmd, **kwargs):
- checknewline(s, '\r\n', **kwargs)
- # replace single LF to CRLF
- return re_single_lf.sub('\\1\r\n', s)
-
-def dumbencode(s, cmd):
- return s.replace('\r\n', '\n')
-
-def macdumbdecode(s, cmd, **kwargs):
- checknewline(s, '\r', **kwargs)
- return s.replace('\n', '\r')
-
-def macdumbencode(s, cmd):
- return s.replace('\r', '\n')
-
-def cleverdecode(s, cmd, **kwargs):
- if not util.binary(s):
- return dumbdecode(s, cmd, **kwargs)
- return s
-
-def cleverencode(s, cmd):
- if not util.binary(s):
- return dumbencode(s, cmd)
- return s
-
-def macdecode(s, cmd, **kwargs):
- if not util.binary(s):
- return macdumbdecode(s, cmd, **kwargs)
- return s
-
-def macencode(s, cmd):
- if not util.binary(s):
- return macdumbencode(s, cmd)
- return s
-
-_filters = {
- 'dumbdecode:': dumbdecode,
- 'dumbencode:': dumbencode,
- 'cleverdecode:': cleverdecode,
- 'cleverencode:': cleverencode,
- 'macdumbdecode:': macdumbdecode,
- 'macdumbencode:': macdumbencode,
- 'macdecode:': macdecode,
- 'macencode:': macencode,
- }
-
-def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
- halt = False
- seen = set()
- # we try to walk changesets in reverse order from newest to
- # oldest, so that if we see a file multiple times, we take the
- # newest version as canonical. this prevents us from blocking a
- # changegroup that contains an unacceptable commit followed later
- # by a commit that fixes the problem.
- tip = repo['tip']
- for rev in xrange(len(repo)-1, repo[node].rev()-1, -1):
- c = repo[rev]
- for f in c.files():
- if f in seen or f not in tip or f not in c:
- continue
- seen.add(f)
- data = c[f].data()
- if not util.binary(data) and newline in data:
- if not halt:
- ui.warn(_('Attempt to commit or push text file(s) '
- 'using %s line endings\n') %
- newlinestr[newline])
- ui.warn(_('in %s: %s\n') % (short(c.node()), f))
- halt = True
- if halt and hooktype == 'pretxnchangegroup':
- crlf = newlinestr[newline].lower()
- filter = filterstr[newline]
- ui.warn(_('\nTo prevent this mistake in your local repository,\n'
- 'add to Mercurial.ini or .hg/hgrc:\n'
- '\n'
- '[hooks]\n'
- 'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
- '\n'
- 'and also consider adding:\n'
- '\n'
- '[extensions]\n'
- 'hgext.win32text =\n'
- '[encode]\n'
- '** = %sencode:\n'
- '[decode]\n'
- '** = %sdecode:\n') % (crlf, crlf, filter, filter))
- return halt
-
-def forbidcrlf(ui, repo, hooktype, node, **kwargs):
- return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs)
-
-def forbidcr(ui, repo, hooktype, node, **kwargs):
- return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs)
-
-def reposetup(ui, repo):
- if not repo.local():
- return
- for name, fn in _filters.iteritems():
- repo.adddatafilter(name, fn)
-
diff --git a/sys/lib/python/hgext/zeroconf/Zeroconf.py b/sys/lib/python/hgext/zeroconf/Zeroconf.py
deleted file mode 100644
index 33a345923..000000000
--- a/sys/lib/python/hgext/zeroconf/Zeroconf.py
+++ /dev/null
@@ -1,1573 +0,0 @@
-""" Multicast DNS Service Discovery for Python, v0.12
- Copyright (C) 2003, Paul Scott-Murphy
-
- This module provides a framework for the use of DNS Service Discovery
- using IP multicast. It has been tested against the JRendezvous
- implementation from <a href="http://strangeberry.com">StrangeBerry</a>,
- and against the mDNSResponder from Mac OS X 10.3.8.
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-"""
-
-"""0.12 update - allow selection of binding interface
- typo fix - Thanks A. M. Kuchlingi
- removed all use of word 'Rendezvous' - this is an API change"""
-
-"""0.11 update - correction to comments for addListener method
- support for new record types seen from OS X
- - IPv6 address
- - hostinfo
- ignore unknown DNS record types
- fixes to name decoding
- works alongside other processes using port 5353 (e.g. on Mac OS X)
- tested against Mac OS X 10.3.2's mDNSResponder
- corrections to removal of list entries for service browser"""
-
-"""0.10 update - Jonathon Paisley contributed these corrections:
- always multicast replies, even when query is unicast
- correct a pointer encoding problem
- can now write records in any order
- traceback shown on failure
- better TXT record parsing
- server is now separate from name
- can cancel a service browser
-
- modified some unit tests to accommodate these changes"""
-
-"""0.09 update - remove all records on service unregistration
- fix DOS security problem with readName"""
-
-"""0.08 update - changed licensing to LGPL"""
-
-"""0.07 update - faster shutdown on engine
- pointer encoding of outgoing names
- ServiceBrowser now works
- new unit tests"""
-
-"""0.06 update - small improvements with unit tests
- added defined exception types
- new style objects
- fixed hostname/interface problem
- fixed socket timeout problem
- fixed addServiceListener() typo bug
- using select() for socket reads
- tested on Debian unstable with Python 2.2.2"""
-
-"""0.05 update - ensure case insensitivty on domain names
- support for unicast DNS queries"""
-
-"""0.04 update - added some unit tests
- added __ne__ adjuncts where required
- ensure names end in '.local.'
- timeout on receiving socket for clean shutdown"""
-
-__author__ = "Paul Scott-Murphy"
-__email__ = "paul at scott dash murphy dot com"
-__version__ = "0.12"
-
-import string
-import time
-import struct
-import socket
-import threading
-import select
-import traceback
-
-__all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"]
-
-# hook for threads
-
-globals()['_GLOBAL_DONE'] = 0
-
-# Some timing constants
-
-_UNREGISTER_TIME = 125
-_CHECK_TIME = 175
-_REGISTER_TIME = 225
-_LISTENER_TIME = 200
-_BROWSER_TIME = 500
-
-# Some DNS constants
-
-_MDNS_ADDR = '224.0.0.251'
-_MDNS_PORT = 5353;
-_DNS_PORT = 53;
-_DNS_TTL = 60 * 60; # one hour default TTL
-
-_MAX_MSG_TYPICAL = 1460 # unused
-_MAX_MSG_ABSOLUTE = 8972
-
-_FLAGS_QR_MASK = 0x8000 # query response mask
-_FLAGS_QR_QUERY = 0x0000 # query
-_FLAGS_QR_RESPONSE = 0x8000 # response
-
-_FLAGS_AA = 0x0400 # Authorative answer
-_FLAGS_TC = 0x0200 # Truncated
-_FLAGS_RD = 0x0100 # Recursion desired
-_FLAGS_RA = 0x8000 # Recursion available
-
-_FLAGS_Z = 0x0040 # Zero
-_FLAGS_AD = 0x0020 # Authentic data
-_FLAGS_CD = 0x0010 # Checking disabled
-
-_CLASS_IN = 1
-_CLASS_CS = 2
-_CLASS_CH = 3
-_CLASS_HS = 4
-_CLASS_NONE = 254
-_CLASS_ANY = 255
-_CLASS_MASK = 0x7FFF
-_CLASS_UNIQUE = 0x8000
-
-_TYPE_A = 1
-_TYPE_NS = 2
-_TYPE_MD = 3
-_TYPE_MF = 4
-_TYPE_CNAME = 5
-_TYPE_SOA = 6
-_TYPE_MB = 7
-_TYPE_MG = 8
-_TYPE_MR = 9
-_TYPE_NULL = 10
-_TYPE_WKS = 11
-_TYPE_PTR = 12
-_TYPE_HINFO = 13
-_TYPE_MINFO = 14
-_TYPE_MX = 15
-_TYPE_TXT = 16
-_TYPE_AAAA = 28
-_TYPE_SRV = 33
-_TYPE_ANY = 255
-
-# Mapping constants to names
-
-_CLASSES = { _CLASS_IN : "in",
- _CLASS_CS : "cs",
- _CLASS_CH : "ch",
- _CLASS_HS : "hs",
- _CLASS_NONE : "none",
- _CLASS_ANY : "any" }
-
-_TYPES = { _TYPE_A : "a",
- _TYPE_NS : "ns",
- _TYPE_MD : "md",
- _TYPE_MF : "mf",
- _TYPE_CNAME : "cname",
- _TYPE_SOA : "soa",
- _TYPE_MB : "mb",
- _TYPE_MG : "mg",
- _TYPE_MR : "mr",
- _TYPE_NULL : "null",
- _TYPE_WKS : "wks",
- _TYPE_PTR : "ptr",
- _TYPE_HINFO : "hinfo",
- _TYPE_MINFO : "minfo",
- _TYPE_MX : "mx",
- _TYPE_TXT : "txt",
- _TYPE_AAAA : "quada",
- _TYPE_SRV : "srv",
- _TYPE_ANY : "any" }
-
-# utility functions
-
-def currentTimeMillis():
- """Current system time in milliseconds"""
- return time.time() * 1000
-
-# Exceptions
-
-class NonLocalNameException(Exception):
- pass
-
-class NonUniqueNameException(Exception):
- pass
-
-class NamePartTooLongException(Exception):
- pass
-
-class AbstractMethodException(Exception):
- pass
-
-class BadTypeInNameException(Exception):
- pass
-
-# implementation classes
-
-class DNSEntry(object):
- """A DNS entry"""
-
- def __init__(self, name, type, clazz):
- self.key = string.lower(name)
- self.name = name
- self.type = type
- self.clazz = clazz & _CLASS_MASK
- self.unique = (clazz & _CLASS_UNIQUE) != 0
-
- def __eq__(self, other):
- """Equality test on name, type, and class"""
- if isinstance(other, DNSEntry):
- return self.name == other.name and self.type == other.type and self.clazz == other.clazz
- return 0
-
- def __ne__(self, other):
- """Non-equality test"""
- return not self.__eq__(other)
-
- def getClazz(self, clazz):
- """Class accessor"""
- try:
- return _CLASSES[clazz]
- except:
- return "?(%s)" % (clazz)
-
- def getType(self, type):
- """Type accessor"""
- try:
- return _TYPES[type]
- except:
- return "?(%s)" % (type)
-
- def toString(self, hdr, other):
- """String representation with additional information"""
- result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz))
- if self.unique:
- result += "-unique,"
- else:
- result += ","
- result += self.name
- if other is not None:
- result += ",%s]" % (other)
- else:
- result += "]"
- return result
-
-class DNSQuestion(DNSEntry):
- """A DNS question entry"""
-
- def __init__(self, name, type, clazz):
- if not name.endswith(".local."):
- raise NonLocalNameException(name)
- DNSEntry.__init__(self, name, type, clazz)
-
- def answeredBy(self, rec):
- """Returns true if the question is answered by the record"""
- return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name
-
- def __repr__(self):
- """String representation"""
- return DNSEntry.toString(self, "question", None)
-
-
-class DNSRecord(DNSEntry):
- """A DNS record - like a DNS entry, but has a TTL"""
-
- def __init__(self, name, type, clazz, ttl):
- DNSEntry.__init__(self, name, type, clazz)
- self.ttl = ttl
- self.created = currentTimeMillis()
-
- def __eq__(self, other):
- """Tests equality as per DNSRecord"""
- if isinstance(other, DNSRecord):
- return DNSEntry.__eq__(self, other)
- return 0
-
- def suppressedBy(self, msg):
- """Returns true if any answer in a message can suffice for the
- information held in this record."""
- for record in msg.answers:
- if self.suppressedByAnswer(record):
- return 1
- return 0
-
- def suppressedByAnswer(self, other):
- """Returns true if another record has same name, type and class,
- and if its TTL is at least half of this record's."""
- if self == other and other.ttl > (self.ttl / 2):
- return 1
- return 0
-
- def getExpirationTime(self, percent):
- """Returns the time at which this record will have expired
- by a certain percentage."""
- return self.created + (percent * self.ttl * 10)
-
- def getRemainingTTL(self, now):
- """Returns the remaining TTL in seconds."""
- return max(0, (self.getExpirationTime(100) - now) / 1000)
-
- def isExpired(self, now):
- """Returns true if this record has expired."""
- return self.getExpirationTime(100) <= now
-
- def isStale(self, now):
- """Returns true if this record is at least half way expired."""
- return self.getExpirationTime(50) <= now
-
- def resetTTL(self, other):
- """Sets this record's TTL and created time to that of
- another record."""
- self.created = other.created
- self.ttl = other.ttl
-
- def write(self, out):
- """Abstract method"""
- raise AbstractMethodException
-
- def toString(self, other):
- """String representation with addtional information"""
- arg = "%s/%s,%s" % (self.ttl, self.getRemainingTTL(currentTimeMillis()), other)
- return DNSEntry.toString(self, "record", arg)
-
-class DNSAddress(DNSRecord):
- """A DNS address record"""
-
- def __init__(self, name, type, clazz, ttl, address):
- DNSRecord.__init__(self, name, type, clazz, ttl)
- self.address = address
-
- def write(self, out):
- """Used in constructing an outgoing packet"""
- out.writeString(self.address, len(self.address))
-
- def __eq__(self, other):
- """Tests equality on address"""
- if isinstance(other, DNSAddress):
- return self.address == other.address
- return 0
-
- def __repr__(self):
- """String representation"""
- try:
- return socket.inet_ntoa(self.address)
- except:
- return self.address
-
-class DNSHinfo(DNSRecord):
- """A DNS host information record"""
-
- def __init__(self, name, type, clazz, ttl, cpu, os):
- DNSRecord.__init__(self, name, type, clazz, ttl)
- self.cpu = cpu
- self.os = os
-
- def write(self, out):
- """Used in constructing an outgoing packet"""
- out.writeString(self.cpu, len(self.cpu))
- out.writeString(self.os, len(self.os))
-
- def __eq__(self, other):
- """Tests equality on cpu and os"""
- if isinstance(other, DNSHinfo):
- return self.cpu == other.cpu and self.os == other.os
- return 0
-
- def __repr__(self):
- """String representation"""
- return self.cpu + " " + self.os
-
-class DNSPointer(DNSRecord):
- """A DNS pointer record"""
-
- def __init__(self, name, type, clazz, ttl, alias):
- DNSRecord.__init__(self, name, type, clazz, ttl)
- self.alias = alias
-
- def write(self, out):
- """Used in constructing an outgoing packet"""
- out.writeName(self.alias)
-
- def __eq__(self, other):
- """Tests equality on alias"""
- if isinstance(other, DNSPointer):
- return self.alias == other.alias
- return 0
-
- def __repr__(self):
- """String representation"""
- return self.toString(self.alias)
-
-class DNSText(DNSRecord):
- """A DNS text record"""
-
- def __init__(self, name, type, clazz, ttl, text):
- DNSRecord.__init__(self, name, type, clazz, ttl)
- self.text = text
-
- def write(self, out):
- """Used in constructing an outgoing packet"""
- out.writeString(self.text, len(self.text))
-
- def __eq__(self, other):
- """Tests equality on text"""
- if isinstance(other, DNSText):
- return self.text == other.text
- return 0
-
- def __repr__(self):
- """String representation"""
- if len(self.text) > 10:
- return self.toString(self.text[:7] + "...")
- else:
- return self.toString(self.text)
-
-class DNSService(DNSRecord):
- """A DNS service record"""
-
- def __init__(self, name, type, clazz, ttl, priority, weight, port, server):
- DNSRecord.__init__(self, name, type, clazz, ttl)
- self.priority = priority
- self.weight = weight
- self.port = port
- self.server = server
-
- def write(self, out):
- """Used in constructing an outgoing packet"""
- out.writeShort(self.priority)
- out.writeShort(self.weight)
- out.writeShort(self.port)
- out.writeName(self.server)
-
- def __eq__(self, other):
- """Tests equality on priority, weight, port and server"""
- if isinstance(other, DNSService):
- return self.priority == other.priority and self.weight == other.weight and self.port == other.port and self.server == other.server
- return 0
-
- def __repr__(self):
- """String representation"""
- return self.toString("%s:%s" % (self.server, self.port))
-
-class DNSIncoming(object):
- """Object representation of an incoming DNS packet"""
-
- def __init__(self, data):
- """Constructor from string holding bytes of packet"""
- self.offset = 0
- self.data = data
- self.questions = []
- self.answers = []
- self.numQuestions = 0
- self.numAnswers = 0
- self.numAuthorities = 0
- self.numAdditionals = 0
-
- self.readHeader()
- self.readQuestions()
- self.readOthers()
-
- def readHeader(self):
- """Reads header portion of packet"""
- format = '!HHHHHH'
- length = struct.calcsize(format)
- info = struct.unpack(format, self.data[self.offset:self.offset+length])
- self.offset += length
-
- self.id = info[0]
- self.flags = info[1]
- self.numQuestions = info[2]
- self.numAnswers = info[3]
- self.numAuthorities = info[4]
- self.numAdditionals = info[5]
-
- def readQuestions(self):
- """Reads questions section of packet"""
- format = '!HH'
- length = struct.calcsize(format)
- for i in range(0, self.numQuestions):
- name = self.readName()
- info = struct.unpack(format, self.data[self.offset:self.offset+length])
- self.offset += length
-
- try:
- question = DNSQuestion(name, info[0], info[1])
- self.questions.append(question)
- except NonLocalNameException:
- pass
-
- def readInt(self):
- """Reads an integer from the packet"""
- format = '!I'
- length = struct.calcsize(format)
- info = struct.unpack(format, self.data[self.offset:self.offset+length])
- self.offset += length
- return info[0]
-
- def readCharacterString(self):
- """Reads a character string from the packet"""
- length = ord(self.data[self.offset])
- self.offset += 1
- return self.readString(length)
-
- def readString(self, len):
- """Reads a string of a given length from the packet"""
- format = '!' + str(len) + 's'
- length = struct.calcsize(format)
- info = struct.unpack(format, self.data[self.offset:self.offset+length])
- self.offset += length
- return info[0]
-
- def readUnsignedShort(self):
- """Reads an unsigned short from the packet"""
- format = '!H'
- length = struct.calcsize(format)
- info = struct.unpack(format, self.data[self.offset:self.offset+length])
- self.offset += length
- return info[0]
-
- def readOthers(self):
- """Reads the answers, authorities and additionals section of the packet"""
- format = '!HHiH'
- length = struct.calcsize(format)
- n = self.numAnswers + self.numAuthorities + self.numAdditionals
- for i in range(0, n):
- domain = self.readName()
- info = struct.unpack(format, self.data[self.offset:self.offset+length])
- self.offset += length
-
- rec = None
- if info[0] == _TYPE_A:
- rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(4))
- elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
- rec = DNSPointer(domain, info[0], info[1], info[2], self.readName())
- elif info[0] == _TYPE_TXT:
- rec = DNSText(domain, info[0], info[1], info[2], self.readString(info[3]))
- elif info[0] == _TYPE_SRV:
- rec = DNSService(domain, info[0], info[1], info[2], self.readUnsignedShort(), self.readUnsignedShort(), self.readUnsignedShort(), self.readName())
- elif info[0] == _TYPE_HINFO:
- rec = DNSHinfo(domain, info[0], info[1], info[2], self.readCharacterString(), self.readCharacterString())
- elif info[0] == _TYPE_AAAA:
- rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(16))
- else:
- # Try to ignore types we don't know about
- # this may mean the rest of the name is
- # unable to be parsed, and may show errors
- # so this is left for debugging. New types
- # encountered need to be parsed properly.
- #
- #print "UNKNOWN TYPE = " + str(info[0])
- #raise BadTypeInNameException
- pass
-
- if rec is not None:
- self.answers.append(rec)
-
- def isQuery(self):
- """Returns true if this is a query"""
- return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY
-
- def isResponse(self):
- """Returns true if this is a response"""
- return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE
-
- def readUTF(self, offset, len):
- """Reads a UTF-8 string of a given length from the packet"""
- result = self.data[offset:offset+len].decode('utf-8')
- return result
-
- def readName(self):
- """Reads a domain name from the packet"""
- result = ''
- off = self.offset
- next = -1
- first = off
-
- while 1:
- len = ord(self.data[off])
- off += 1
- if len == 0:
- break
- t = len & 0xC0
- if t == 0x00:
- result = ''.join((result, self.readUTF(off, len) + '.'))
- off += len
- elif t == 0xC0:
- if next < 0:
- next = off + 1
- off = ((len & 0x3F) << 8) | ord(self.data[off])
- if off >= first:
- raise "Bad domain name (circular) at " + str(off)
- first = off
- else:
- raise "Bad domain name at " + str(off)
-
- if next >= 0:
- self.offset = next
- else:
- self.offset = off
-
- return result
-
-
-class DNSOutgoing(object):
- """Object representation of an outgoing packet"""
-
- def __init__(self, flags, multicast = 1):
- self.finished = 0
- self.id = 0
- self.multicast = multicast
- self.flags = flags
- self.names = {}
- self.data = []
- self.size = 12
-
- self.questions = []
- self.answers = []
- self.authorities = []
- self.additionals = []
-
- def addQuestion(self, record):
- """Adds a question"""
- self.questions.append(record)
-
- def addAnswer(self, inp, record):
- """Adds an answer"""
- if not record.suppressedBy(inp):
- self.addAnswerAtTime(record, 0)
-
- def addAnswerAtTime(self, record, now):
- """Adds an answer if if does not expire by a certain time"""
- if record is not None:
- if now == 0 or not record.isExpired(now):
- self.answers.append((record, now))
-
- def addAuthorativeAnswer(self, record):
- """Adds an authoritative answer"""
- self.authorities.append(record)
-
- def addAdditionalAnswer(self, record):
- """Adds an additional answer"""
- self.additionals.append(record)
-
- def writeByte(self, value):
- """Writes a single byte to the packet"""
- format = '!c'
- self.data.append(struct.pack(format, chr(value)))
- self.size += 1
-
- def insertShort(self, index, value):
- """Inserts an unsigned short in a certain position in the packet"""
- format = '!H'
- self.data.insert(index, struct.pack(format, value))
- self.size += 2
-
- def writeShort(self, value):
- """Writes an unsigned short to the packet"""
- format = '!H'
- self.data.append(struct.pack(format, value))
- self.size += 2
-
- def writeInt(self, value):
- """Writes an unsigned integer to the packet"""
- format = '!I'
- self.data.append(struct.pack(format, int(value)))
- self.size += 4
-
- def writeString(self, value, length):
- """Writes a string to the packet"""
- format = '!' + str(length) + 's'
- self.data.append(struct.pack(format, value))
- self.size += length
-
- def writeUTF(self, s):
- """Writes a UTF-8 string of a given length to the packet"""
- utfstr = s.encode('utf-8')
- length = len(utfstr)
- if length > 64:
- raise NamePartTooLongException
- self.writeByte(length)
- self.writeString(utfstr, length)
-
- def writeName(self, name):
- """Writes a domain name to the packet"""
-
- try:
- # Find existing instance of this name in packet
- #
- index = self.names[name]
- except KeyError:
- # No record of this name already, so write it
- # out as normal, recording the location of the name
- # for future pointers to it.
- #
- self.names[name] = self.size
- parts = name.split('.')
- if parts[-1] == '':
- parts = parts[:-1]
- for part in parts:
- self.writeUTF(part)
- self.writeByte(0)
- return
-
- # An index was found, so write a pointer to it
- #
- self.writeByte((index >> 8) | 0xC0)
- self.writeByte(index)
-
- def writeQuestion(self, question):
- """Writes a question to the packet"""
- self.writeName(question.name)
- self.writeShort(question.type)
- self.writeShort(question.clazz)
-
- def writeRecord(self, record, now):
- """Writes a record (answer, authoritative answer, additional) to
- the packet"""
- self.writeName(record.name)
- self.writeShort(record.type)
- if record.unique and self.multicast:
- self.writeShort(record.clazz | _CLASS_UNIQUE)
- else:
- self.writeShort(record.clazz)
- if now == 0:
- self.writeInt(record.ttl)
- else:
- self.writeInt(record.getRemainingTTL(now))
- index = len(self.data)
- # Adjust size for the short we will write before this record
- #
- self.size += 2
- record.write(self)
- self.size -= 2
-
- length = len(''.join(self.data[index:]))
- self.insertShort(index, length) # Here is the short we adjusted for
-
- def packet(self):
- """Returns a string containing the packet's bytes
-
- No further parts should be added to the packet once this
- is done."""
- if not self.finished:
- self.finished = 1
- for question in self.questions:
- self.writeQuestion(question)
- for answer, time in self.answers:
- self.writeRecord(answer, time)
- for authority in self.authorities:
- self.writeRecord(authority, 0)
- for additional in self.additionals:
- self.writeRecord(additional, 0)
-
- self.insertShort(0, len(self.additionals))
- self.insertShort(0, len(self.authorities))
- self.insertShort(0, len(self.answers))
- self.insertShort(0, len(self.questions))
- self.insertShort(0, self.flags)
- if self.multicast:
- self.insertShort(0, 0)
- else:
- self.insertShort(0, self.id)
- return ''.join(self.data)
-
-
-class DNSCache(object):
- """A cache of DNS entries"""
-
- def __init__(self):
- self.cache = {}
-
- def add(self, entry):
- """Adds an entry"""
- try:
- list = self.cache[entry.key]
- except:
- list = self.cache[entry.key] = []
- list.append(entry)
-
- def remove(self, entry):
- """Removes an entry"""
- try:
- list = self.cache[entry.key]
- list.remove(entry)
- except:
- pass
-
- def get(self, entry):
- """Gets an entry by key. Will return None if there is no
- matching entry."""
- try:
- list = self.cache[entry.key]
- return list[list.index(entry)]
- except:
- return None
-
- def getByDetails(self, name, type, clazz):
- """Gets an entry by details. Will return None if there is
- no matching entry."""
- entry = DNSEntry(name, type, clazz)
- return self.get(entry)
-
- def entriesWithName(self, name):
- """Returns a list of entries whose key matches the name."""
- try:
- return self.cache[name]
- except:
- return []
-
- def entries(self):
- """Returns a list of all entries"""
- def add(x, y): return x+y
- try:
- return reduce(add, self.cache.values())
- except:
- return []
-
-
-class Engine(threading.Thread):
- """An engine wraps read access to sockets, allowing objects that
- need to receive data from sockets to be called back when the
- sockets are ready.
-
- A reader needs a handle_read() method, which is called when the socket
- it is interested in is ready for reading.
-
- Writers are not implemented here, because we only send short
- packets.
- """
-
- def __init__(self, zeroconf):
- threading.Thread.__init__(self)
- self.zeroconf = zeroconf
- self.readers = {} # maps socket to reader
- self.timeout = 5
- self.condition = threading.Condition()
- self.start()
-
- def run(self):
- while not globals()['_GLOBAL_DONE']:
- rs = self.getReaders()
- if len(rs) == 0:
- # No sockets to manage, but we wait for the timeout
- # or addition of a socket
- #
- self.condition.acquire()
- self.condition.wait(self.timeout)
- self.condition.release()
- else:
- try:
- rr, wr, er = select.select(rs, [], [], self.timeout)
- for socket in rr:
- try:
- self.readers[socket].handle_read()
- except:
- traceback.print_exc()
- except:
- pass
-
- def getReaders(self):
- self.condition.acquire()
- result = self.readers.keys()
- self.condition.release()
- return result
-
- def addReader(self, reader, socket):
- self.condition.acquire()
- self.readers[socket] = reader
- self.condition.notify()
- self.condition.release()
-
- def delReader(self, socket):
- self.condition.acquire()
- del(self.readers[socket])
- self.condition.notify()
- self.condition.release()
-
- def notify(self):
- self.condition.acquire()
- self.condition.notify()
- self.condition.release()
-
-class Listener(object):
- """A Listener is used by this module to listen on the multicast
- group to which DNS messages are sent, allowing the implementation
- to cache information as it arrives.
-
- It requires registration with an Engine object in order to have
- the read() method called when a socket is availble for reading."""
-
- def __init__(self, zeroconf):
- self.zeroconf = zeroconf
- self.zeroconf.engine.addReader(self, self.zeroconf.socket)
-
- def handle_read(self):
- data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE)
- self.data = data
- msg = DNSIncoming(data)
- if msg.isQuery():
- # Always multicast responses
- #
- if port == _MDNS_PORT:
- self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
- # If it's not a multicast query, reply via unicast
- # and multicast
- #
- elif port == _DNS_PORT:
- self.zeroconf.handleQuery(msg, addr, port)
- self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT)
- else:
- self.zeroconf.handleResponse(msg)
-
-
-class Reaper(threading.Thread):
- """A Reaper is used by this module to remove cache entries that
- have expired."""
-
- def __init__(self, zeroconf):
- threading.Thread.__init__(self)
- self.zeroconf = zeroconf
- self.start()
-
- def run(self):
- while 1:
- self.zeroconf.wait(10 * 1000)
- if globals()['_GLOBAL_DONE']:
- return
- now = currentTimeMillis()
- for record in self.zeroconf.cache.entries():
- if record.isExpired(now):
- self.zeroconf.updateRecord(now, record)
- self.zeroconf.cache.remove(record)
-
-
-class ServiceBrowser(threading.Thread):
- """Used to browse for a service of a specific type.
-
- The listener object will have its addService() and
- removeService() methods called when this browser
- discovers changes in the services availability."""
-
- def __init__(self, zeroconf, type, listener):
- """Creates a browser for a specific type"""
- threading.Thread.__init__(self)
- self.zeroconf = zeroconf
- self.type = type
- self.listener = listener
- self.services = {}
- self.nextTime = currentTimeMillis()
- self.delay = _BROWSER_TIME
- self.list = []
-
- self.done = 0
-
- self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
- self.start()
-
- def updateRecord(self, zeroconf, now, record):
- """Callback invoked by Zeroconf when new information arrives.
-
- Updates information required by browser in the Zeroconf cache."""
- if record.type == _TYPE_PTR and record.name == self.type:
- expired = record.isExpired(now)
- try:
- oldrecord = self.services[record.alias.lower()]
- if not expired:
- oldrecord.resetTTL(record)
- else:
- del(self.services[record.alias.lower()])
- callback = lambda x: self.listener.removeService(x, self.type, record.alias)
- self.list.append(callback)
- return
- except:
- if not expired:
- self.services[record.alias.lower()] = record
- callback = lambda x: self.listener.addService(x, self.type, record.alias)
- self.list.append(callback)
-
- expires = record.getExpirationTime(75)
- if expires < self.nextTime:
- self.nextTime = expires
-
- def cancel(self):
- self.done = 1
- self.zeroconf.notifyAll()
-
- def run(self):
- while 1:
- event = None
- now = currentTimeMillis()
- if len(self.list) == 0 and self.nextTime > now:
- self.zeroconf.wait(self.nextTime - now)
- if globals()['_GLOBAL_DONE'] or self.done:
- return
- now = currentTimeMillis()
-
- if self.nextTime <= now:
- out = DNSOutgoing(_FLAGS_QR_QUERY)
- out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN))
- for record in self.services.values():
- if not record.isExpired(now):
- out.addAnswerAtTime(record, now)
- self.zeroconf.send(out)
- self.nextTime = now + self.delay
- self.delay = min(20 * 1000, self.delay * 2)
-
- if len(self.list) > 0:
- event = self.list.pop(0)
-
- if event is not None:
- event(self.zeroconf)
-
-
-class ServiceInfo(object):
- """Service information"""
-
- def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None):
- """Create a service description.
-
- type: fully qualified service type name
- name: fully qualified service name
- address: IP address as unsigned short, network byte order
- port: port that the service runs on
- weight: weight of the service
- priority: priority of the service
- properties: dictionary of properties (or a string holding the bytes for the text field)
- server: fully qualified name for service host (defaults to name)"""
-
- if not name.endswith(type):
- raise BadTypeInNameException
- self.type = type
- self.name = name
- self.address = address
- self.port = port
- self.weight = weight
- self.priority = priority
- if server:
- self.server = server
- else:
- self.server = name
- self.setProperties(properties)
-
- def setProperties(self, properties):
- """Sets properties and text of this info from a dictionary"""
- if isinstance(properties, dict):
- self.properties = properties
- list = []
- result = ''
- for key in properties:
- value = properties[key]
- if value is None:
- suffix = ''.encode('utf-8')
- elif isinstance(value, str):
- suffix = value.encode('utf-8')
- elif isinstance(value, int):
- if value:
- suffix = 'true'
- else:
- suffix = 'false'
- else:
- suffix = ''.encode('utf-8')
- list.append('='.join((key, suffix)))
- for item in list:
- result = ''.join((result, struct.pack('!c', chr(len(item))), item))
- self.text = result
- else:
- self.text = properties
-
- def setText(self, text):
- """Sets properties and text given a text field"""
- self.text = text
- try:
- result = {}
- end = len(text)
- index = 0
- strs = []
- while index < end:
- length = ord(text[index])
- index += 1
- strs.append(text[index:index+length])
- index += length
-
- for s in strs:
- eindex = s.find('=')
- if eindex == -1:
- # No equals sign at all
- key = s
- value = 0
- else:
- key = s[:eindex]
- value = s[eindex+1:]
- if value == 'true':
- value = 1
- elif value == 'false' or not value:
- value = 0
-
- # Only update non-existent properties
- if key and result.get(key) == None:
- result[key] = value
-
- self.properties = result
- except:
- traceback.print_exc()
- self.properties = None
-
- def getType(self):
- """Type accessor"""
- return self.type
-
- def getName(self):
- """Name accessor"""
- if self.type is not None and self.name.endswith("." + self.type):
- return self.name[:len(self.name) - len(self.type) - 1]
- return self.name
-
- def getAddress(self):
- """Address accessor"""
- return self.address
-
- def getPort(self):
- """Port accessor"""
- return self.port
-
- def getPriority(self):
- """Pirority accessor"""
- return self.priority
-
- def getWeight(self):
- """Weight accessor"""
- return self.weight
-
- def getProperties(self):
- """Properties accessor"""
- return self.properties
-
- def getText(self):
- """Text accessor"""
- return self.text
-
- def getServer(self):
- """Server accessor"""
- return self.server
-
- def updateRecord(self, zeroconf, now, record):
- """Updates service information from a DNS record"""
- if record is not None and not record.isExpired(now):
- if record.type == _TYPE_A:
- #if record.name == self.name:
- if record.name == self.server:
- self.address = record.address
- elif record.type == _TYPE_SRV:
- if record.name == self.name:
- self.server = record.server
- self.port = record.port
- self.weight = record.weight
- self.priority = record.priority
- #self.address = None
- self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN))
- elif record.type == _TYPE_TXT:
- if record.name == self.name:
- self.setText(record.text)
-
- def request(self, zeroconf, timeout):
- """Returns true if the service could be discovered on the
- network, and updates this object with details discovered.
- """
- now = currentTimeMillis()
- delay = _LISTENER_TIME
- next = now + delay
- last = now + timeout
- result = 0
- try:
- zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
- while self.server is None or self.address is None or self.text is None:
- if last <= now:
- return 0
- if next <= now:
- out = DNSOutgoing(_FLAGS_QR_QUERY)
- out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
- out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now)
- out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
- out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now)
- if self.server is not None:
- out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
- out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now)
- zeroconf.send(out)
- next = now + delay
- delay = delay * 2
-
- zeroconf.wait(min(next, last) - now)
- now = currentTimeMillis()
- result = 1
- finally:
- zeroconf.removeListener(self)
-
- return result
-
- def __eq__(self, other):
- """Tests equality of service name"""
- if isinstance(other, ServiceInfo):
- return other.name == self.name
- return 0
-
- def __ne__(self, other):
- """Non-equality test"""
- return not self.__eq__(other)
-
- def __repr__(self):
- """String representation"""
- result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port)
- if self.text is None:
- result += "None"
- else:
- if len(self.text) < 20:
- result += self.text
- else:
- result += self.text[:17] + "..."
- result += "]"
- return result
-
-
-class Zeroconf(object):
- """Implementation of Zeroconf Multicast DNS Service Discovery
-
- Supports registration, unregistration, queries and browsing.
- """
- def __init__(self, bindaddress=None):
- """Creates an instance of the Zeroconf class, establishing
- multicast communications, listening and reaping threads."""
- globals()['_GLOBAL_DONE'] = 0
- if bindaddress is None:
- self.intf = socket.gethostbyname(socket.gethostname())
- else:
- self.intf = bindaddress
- self.group = ('', _MDNS_PORT)
- self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- try:
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- except:
- # SO_REUSEADDR should be equivalent to SO_REUSEPORT for
- # multicast UDP sockets (p 731, "TCP/IP Illustrated,
- # Volume 2"), but some BSD-derived systems require
- # SO_REUSEPORT to be specified explicity. Also, not all
- # versions of Python have SO_REUSEPORT available. So
- # if you're on a BSD-based system, and haven't upgraded
- # to Python 2.3 yet, you may find this library doesn't
- # work as expected.
- #
- pass
- self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255)
- self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1)
- try:
- self.socket.bind(self.group)
- except:
- # Some versions of linux raise an exception even though
- # the SO_REUSE* options have been set, so ignore it
- #
- pass
- #self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.intf) + socket.inet_aton('0.0.0.0'))
- self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
-
- self.listeners = []
- self.browsers = []
- self.services = {}
- self.servicetypes = {}
-
- self.cache = DNSCache()
-
- self.condition = threading.Condition()
-
- self.engine = Engine(self)
- self.listener = Listener(self)
- self.reaper = Reaper(self)
-
- def isLoopback(self):
- return self.intf.startswith("127.0.0.1")
-
- def isLinklocal(self):
- return self.intf.startswith("169.254.")
-
- def wait(self, timeout):
- """Calling thread waits for a given number of milliseconds or
- until notified."""
- self.condition.acquire()
- self.condition.wait(timeout/1000)
- self.condition.release()
-
- def notifyAll(self):
- """Notifies all waiting threads"""
- self.condition.acquire()
- self.condition.notifyAll()
- self.condition.release()
-
- def getServiceInfo(self, type, name, timeout=3000):
- """Returns network's service information for a particular
- name and type, or None if no service matches by the timeout,
- which defaults to 3 seconds."""
- info = ServiceInfo(type, name)
- if info.request(self, timeout):
- return info
- return None
-
- def addServiceListener(self, type, listener):
- """Adds a listener for a particular service type. This object
- will then have its updateRecord method called when information
- arrives for that type."""
- self.removeServiceListener(listener)
- self.browsers.append(ServiceBrowser(self, type, listener))
-
- def removeServiceListener(self, listener):
- """Removes a listener from the set that is currently listening."""
- for browser in self.browsers:
- if browser.listener == listener:
- browser.cancel()
- del(browser)
-
- def registerService(self, info, ttl=_DNS_TTL):
- """Registers service information to the network with a default TTL
- of 60 seconds. Zeroconf will then respond to requests for
- information for that service. The name of the service may be
- changed if needed to make it unique on the network."""
- self.checkService(info)
- self.services[info.name.lower()] = info
- if self.servicetypes.has_key(info.type):
- self.servicetypes[info.type]+=1
- else:
- self.servicetypes[info.type]=1
- now = currentTimeMillis()
- nextTime = now
- i = 0
- while i < 3:
- if now < nextTime:
- self.wait(nextTime - now)
- now = currentTimeMillis()
- continue
- out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
- out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0)
- out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0)
- out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0)
- if info.address:
- out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, ttl, info.address), 0)
- self.send(out)
- i += 1
- nextTime += _REGISTER_TIME
-
- def unregisterService(self, info):
- """Unregister a service."""
- try:
- del(self.services[info.name.lower()])
- if self.servicetypes[info.type]>1:
- self.servicetypes[info.type]-=1
- else:
- del self.servicetypes[info.type]
- except:
- pass
- now = currentTimeMillis()
- nextTime = now
- i = 0
- while i < 3:
- if now < nextTime:
- self.wait(nextTime - now)
- now = currentTimeMillis()
- continue
- out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
- out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
- out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.name), 0)
- out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
- if info.address:
- out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
- self.send(out)
- i += 1
- nextTime += _UNREGISTER_TIME
-
- def unregisterAllServices(self):
- """Unregister all registered services."""
- if len(self.services) > 0:
- now = currentTimeMillis()
- nextTime = now
- i = 0
- while i < 3:
- if now < nextTime:
- self.wait(nextTime - now)
- now = currentTimeMillis()
- continue
- out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
- for info in self.services.values():
- out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
- out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0)
- out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
- if info.address:
- out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0)
- self.send(out)
- i += 1
- nextTime += _UNREGISTER_TIME
-
- def checkService(self, info):
- """Checks the network for a unique service name, modifying the
- ServiceInfo passed in if it is not unique."""
- now = currentTimeMillis()
- nextTime = now
- i = 0
- while i < 3:
- for record in self.cache.entriesWithName(info.type):
- if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name:
- if (info.name.find('.') < 0):
- info.name = info.name + ".[" + info.address + ":" + info.port + "]." + info.type
- self.checkService(info)
- return
- raise NonUniqueNameException
- if now < nextTime:
- self.wait(nextTime - now)
- now = currentTimeMillis()
- continue
- out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
- self.debug = out
- out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
- out.addAuthorativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name))
- self.send(out)
- i += 1
- nextTime += _CHECK_TIME
-
- def addListener(self, listener, question):
- """Adds a listener for a given question. The listener will have
- its updateRecord method called when information is available to
- answer the question."""
- now = currentTimeMillis()
- self.listeners.append(listener)
- if question is not None:
- for record in self.cache.entriesWithName(question.name):
- if question.answeredBy(record) and not record.isExpired(now):
- listener.updateRecord(self, now, record)
- self.notifyAll()
-
- def removeListener(self, listener):
- """Removes a listener."""
- try:
- self.listeners.remove(listener)
- self.notifyAll()
- except:
- pass
-
- def updateRecord(self, now, rec):
- """Used to notify listeners of new information that has updated
- a record."""
- for listener in self.listeners:
- listener.updateRecord(self, now, rec)
- self.notifyAll()
-
- def handleResponse(self, msg):
- """Deal with incoming response packets. All answers
- are held in the cache, and listeners are notified."""
- now = currentTimeMillis()
- for record in msg.answers:
- expired = record.isExpired(now)
- if record in self.cache.entries():
- if expired:
- self.cache.remove(record)
- else:
- entry = self.cache.get(record)
- if entry is not None:
- entry.resetTTL(record)
- record = entry
- else:
- self.cache.add(record)
-
- self.updateRecord(now, record)
-
- def handleQuery(self, msg, addr, port):
- """Deal with incoming query packets. Provides a response if
- possible."""
- out = None
-
- # Support unicast client responses
- #
- if port != _MDNS_PORT:
- out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
- for question in msg.questions:
- out.addQuestion(question)
-
- for question in msg.questions:
- if question.type == _TYPE_PTR:
- if question.name == "_services._dns-sd._udp.local.":
- for stype in self.servicetypes.keys():
- if out is None:
- out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
- out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype))
- for service in self.services.values():
- if question.name == service.type:
- if out is None:
- out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
- out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name))
- else:
- try:
- if out is None:
- out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
-
- # Answer A record queries for any service addresses we know
- if question.type == _TYPE_A or question.type == _TYPE_ANY:
- for service in self.services.values():
- if service.server == question.name.lower():
- out.addAnswer(msg, DNSAddress(question.name, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
-
- service = self.services.get(question.name.lower(), None)
- if not service: continue
-
- if question.type == _TYPE_SRV or question.type == _TYPE_ANY:
- out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server))
- if question.type == _TYPE_TXT or question.type == _TYPE_ANY:
- out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text))
- if question.type == _TYPE_SRV:
- out.addAdditionalAnswer(DNSAddress(service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address))
- except:
- traceback.print_exc()
-
- if out is not None and out.answers:
- out.id = msg.id
- self.send(out, addr, port)
-
- def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT):
- """Sends an outgoing packet."""
- # This is a quick test to see if we can parse the packets we generate
- #temp = DNSIncoming(out.packet())
- try:
- self.socket.sendto(out.packet(), 0, (addr, port))
- except:
- # Ignore this, it may be a temporary loss of network connection
- pass
-
- def close(self):
- """Ends the background threads, and prevent this instance from
- servicing further queries."""
- if globals()['_GLOBAL_DONE'] == 0:
- globals()['_GLOBAL_DONE'] = 1
- self.notifyAll()
- self.engine.notify()
- self.unregisterAllServices()
- self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'))
- self.socket.close()
-
-# Test a few module features, including service registration, service
-# query (for Zoe), and service unregistration.
-
-if __name__ == '__main__':
- print "Multicast DNS Service Discovery for Python, version", __version__
- r = Zeroconf()
- print "1. Testing registration of a service..."
- desc = {'version':'0.10','a':'test value', 'b':'another value'}
- info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc)
- print " Registering service..."
- r.registerService(info)
- print " Registration done."
- print "2. Testing query of service information..."
- print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))
- print " Query done."
- print "3. Testing query of own service..."
- print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local."))
- print " Query done."
- print "4. Testing unregister of service information..."
- r.unregisterService(info)
- print " Unregister done."
- r.close()
diff --git a/sys/lib/python/hgext/zeroconf/__init__.py b/sys/lib/python/hgext/zeroconf/__init__.py
deleted file mode 100644
index a57bbf593..000000000
--- a/sys/lib/python/hgext/zeroconf/__init__.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# zeroconf.py - zeroconf support for Mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''discover and advertise repositories on the local network
-
-Zeroconf enabled repositories will be announced in a network without
-the need to configure a server or a service. They can be discovered
-without knowing their actual IP address.
-
-To allow other people to discover your repository using run "hg serve"
-in your repository::
-
- $ cd test
- $ hg serve
-
-You can discover zeroconf enabled repositories by running "hg paths"::
-
- $ hg paths
- zc-test = http://example.com:8000/test
-'''
-
-import Zeroconf, socket, time, os
-from mercurial import ui
-from mercurial import extensions
-from mercurial.hgweb import hgweb_mod
-from mercurial.hgweb import hgwebdir_mod
-
-# publish
-
-server = None
-localip = None
-
-def getip():
- # finds external-facing interface without sending any packets (Linux)
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- s.connect(('1.0.0.1', 0))
- ip = s.getsockname()[0]
- return ip
- except:
- pass
-
- # Generic method, sometimes gives useless results
- try:
- dumbip = socket.gethostbyaddr(socket.gethostname())[2][0]
- if not dumbip.startswith('127.') and ':' not in dumbip:
- return dumbip
- except socket.gaierror:
- dumbip = '127.0.0.1'
-
- # works elsewhere, but actually sends a packet
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- s.connect(('1.0.0.1', 1))
- ip = s.getsockname()[0]
- return ip
- except:
- pass
-
- return dumbip
-
-def publish(name, desc, path, port):
- global server, localip
- if not server:
- ip = getip()
- if ip.startswith('127.'):
- # if we have no internet connection, this can happen.
- return
- localip = socket.inet_aton(ip)
- server = Zeroconf.Zeroconf(ip)
-
- hostname = socket.gethostname().split('.')[0]
- host = hostname + ".local"
- name = "%s-%s" % (hostname, name)
-
- # advertise to browsers
- svc = Zeroconf.ServiceInfo('_http._tcp.local.',
- name + '._http._tcp.local.',
- server = host,
- port = port,
- properties = {'description': desc,
- 'path': "/" + path},
- address = localip, weight = 0, priority = 0)
- server.registerService(svc)
-
- # advertise to Mercurial clients
- svc = Zeroconf.ServiceInfo('_hg._tcp.local.',
- name + '._hg._tcp.local.',
- server = host,
- port = port,
- properties = {'description': desc,
- 'path': "/" + path},
- address = localip, weight = 0, priority = 0)
- server.registerService(svc)
-
-class hgwebzc(hgweb_mod.hgweb):
- def __init__(self, repo, name=None):
- super(hgwebzc, self).__init__(repo, name)
- name = self.reponame or os.path.basename(repo.root)
- desc = self.repo.ui.config("web", "description", name)
- publish(name, desc, name, int(repo.ui.config("web", "port", 8000)))
-
-class hgwebdirzc(hgwebdir_mod.hgwebdir):
- def run(self):
- for r, p in self.repos:
- u = self.ui.copy()
- u.readconfig(os.path.join(p, '.hg', 'hgrc'))
- n = os.path.basename(r)
- publish(n, "hgweb", p, int(u.config("web", "port", 8000)))
- return super(hgwebdirzc, self).run()
-
-# listen
-
-class listener(object):
- def __init__(self):
- self.found = {}
- def removeService(self, server, type, name):
- if repr(name) in self.found:
- del self.found[repr(name)]
- def addService(self, server, type, name):
- self.found[repr(name)] = server.getServiceInfo(type, name)
-
-def getzcpaths():
- ip = getip()
- if ip.startswith('127.'):
- return
- server = Zeroconf.Zeroconf(ip)
- l = listener()
- Zeroconf.ServiceBrowser(server, "_hg._tcp.local.", l)
- time.sleep(1)
- server.close()
- for v in l.found.values():
- n = v.name[:v.name.index('.')]
- n.replace(" ", "-")
- u = "http://%s:%s%s" % (socket.inet_ntoa(v.address), v.port,
- v.properties.get("path", "/"))
- yield "zc-" + n, u
-
-def config(orig, self, section, key, default=None, untrusted=False):
- if section == "paths" and key.startswith("zc-"):
- for n, p in getzcpaths():
- if n == key:
- return p
- return orig(self, section, key, default, untrusted)
-
-def configitems(orig, self, section, untrusted=False):
- r = orig(self, section, untrusted)
- if section == "paths":
- r += getzcpaths()
- return r
-
-extensions.wrapfunction(ui.ui, 'config', config)
-extensions.wrapfunction(ui.ui, 'configitems', configitems)
-hgweb_mod.hgweb = hgwebzc
-hgwebdir_mod.hgwebdir = hgwebdirzc
diff --git a/sys/lib/python/hmac.py b/sys/lib/python/hmac.py
deleted file mode 100644
index 41d6c6cbd..000000000
--- a/sys/lib/python/hmac.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""HMAC (Keyed-Hashing for Message Authentication) Python module.
-
-Implements the HMAC algorithm as described by RFC 2104.
-"""
-
-def _strxor(s1, s2):
- """Utility method. XOR the two strings s1 and s2 (must have same length).
- """
- return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
-
-# The size of the digests returned by HMAC depends on the underlying
-# hashing module used.
-digest_size = None
-
-# A unique object passed by HMAC.copy() to the HMAC constructor, in order
-# that the latter return very quickly. HMAC("") in contrast is quite
-# expensive.
-_secret_backdoor_key = []
-
-class HMAC:
- """RFC2104 HMAC class.
-
- This supports the API for Cryptographic Hash Functions (PEP 247).
- """
-
- def __init__(self, key, msg = None, digestmod = None):
- """Create a new HMAC object.
-
- key: key for the keyed hash object.
- msg: Initial input for the hash, if provided.
- digestmod: A module supporting PEP 247. *OR*
- A hashlib constructor returning a new hash object.
- Defaults to hashlib.md5.
- """
-
- if key is _secret_backdoor_key: # cheap
- return
-
- if digestmod is None:
- import hashlib
- digestmod = hashlib.md5
-
- if callable(digestmod):
- self.digest_cons = digestmod
- else:
- self.digest_cons = lambda d='': digestmod.new(d)
-
- self.outer = self.digest_cons()
- self.inner = self.digest_cons()
- self.digest_size = self.inner.digest_size
-
- blocksize = 64
- ipad = "\x36" * blocksize
- opad = "\x5C" * blocksize
-
- if len(key) > blocksize:
- key = self.digest_cons(key).digest()
-
- key = key + chr(0) * (blocksize - len(key))
- self.outer.update(_strxor(key, opad))
- self.inner.update(_strxor(key, ipad))
- if msg is not None:
- self.update(msg)
-
-## def clear(self):
-## raise NotImplementedError, "clear() method not available in HMAC."
-
- def update(self, msg):
- """Update this hashing object with the string msg.
- """
- self.inner.update(msg)
-
- def copy(self):
- """Return a separate copy of this hashing object.
-
- An update to this copy won't affect the original object.
- """
- other = HMAC(_secret_backdoor_key)
- other.digest_cons = self.digest_cons
- other.digest_size = self.digest_size
- other.inner = self.inner.copy()
- other.outer = self.outer.copy()
- return other
-
- def digest(self):
- """Return the hash value of this hashing object.
-
- This returns a string containing 8-bit data. The object is
- not altered in any way by this function; you can continue
- updating the object after calling this function.
- """
- h = self.outer.copy()
- h.update(self.inner.digest())
- return h.digest()
-
- def hexdigest(self):
- """Like digest(), but returns a string of hexadecimal digits instead.
- """
- return "".join([hex(ord(x))[2:].zfill(2)
- for x in tuple(self.digest())])
-
-def new(key, msg = None, digestmod = None):
- """Create a new hashing object and return it.
-
- key: The starting key for the hash.
- msg: if available, will immediately be hashed into the object's starting
- state.
-
- You can now feed arbitrary strings into the object using its update()
- method, and can ask for the hash value at any time by calling its digest()
- method.
- """
- return HMAC(key, msg, digestmod)
diff --git a/sys/lib/python/hotshot/__init__.py b/sys/lib/python/hotshot/__init__.py
deleted file mode 100644
index b9f7866e4..000000000
--- a/sys/lib/python/hotshot/__init__.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""High-perfomance logging profiler, mostly written in C."""
-
-import _hotshot
-
-from _hotshot import ProfilerError
-
-
-class Profile:
- def __init__(self, logfn, lineevents=0, linetimings=1):
- self.lineevents = lineevents and 1 or 0
- self.linetimings = (linetimings and lineevents) and 1 or 0
- self._prof = p = _hotshot.profiler(
- logfn, self.lineevents, self.linetimings)
-
- # Attempt to avoid confusing results caused by the presence of
- # Python wrappers around these functions, but only if we can
- # be sure the methods have not been overridden or extended.
- if self.__class__ is Profile:
- self.close = p.close
- self.start = p.start
- self.stop = p.stop
- self.addinfo = p.addinfo
-
- def close(self):
- """Close the logfile and terminate the profiler."""
- self._prof.close()
-
- def fileno(self):
- """Return the file descriptor of the profiler's log file."""
- return self._prof.fileno()
-
- def start(self):
- """Start the profiler."""
- self._prof.start()
-
- def stop(self):
- """Stop the profiler."""
- self._prof.stop()
-
- def addinfo(self, key, value):
- """Add an arbitrary labelled value to the profile log."""
- self._prof.addinfo(key, value)
-
- # These methods offer the same interface as the profile.Profile class,
- # but delegate most of the work to the C implementation underneath.
-
- def run(self, cmd):
- """Profile an exec-compatible string in the script
- environment.
-
- The globals from the __main__ module are used as both the
- globals and locals for the script.
- """
- import __main__
- dict = __main__.__dict__
- return self.runctx(cmd, dict, dict)
-
- def runctx(self, cmd, globals, locals):
- """Evaluate an exec-compatible string in a specific
- environment.
-
- The string is compiled before profiling begins.
- """
- code = compile(cmd, "<string>", "exec")
- self._prof.runcode(code, globals, locals)
- return self
-
- def runcall(self, func, *args, **kw):
- """Profile a single call of a callable.
-
- Additional positional and keyword arguments may be passed
- along; the result of the call is returned, and exceptions are
- allowed to propogate cleanly, while ensuring that profiling is
- disabled on the way out.
- """
- return self._prof.runcall(func, args, kw)
diff --git a/sys/lib/python/hotshot/log.py b/sys/lib/python/hotshot/log.py
deleted file mode 100644
index 7d6d91d44..000000000
--- a/sys/lib/python/hotshot/log.py
+++ /dev/null
@@ -1,192 +0,0 @@
-import _hotshot
-import os.path
-import parser
-import symbol
-import sys
-
-from _hotshot import \
- WHAT_ENTER, \
- WHAT_EXIT, \
- WHAT_LINENO, \
- WHAT_DEFINE_FILE, \
- WHAT_DEFINE_FUNC, \
- WHAT_ADD_INFO
-
-
-__all__ = ["LogReader", "ENTER", "EXIT", "LINE"]
-
-
-ENTER = WHAT_ENTER
-EXIT = WHAT_EXIT
-LINE = WHAT_LINENO
-
-
-class LogReader:
- def __init__(self, logfn):
- # fileno -> filename
- self._filemap = {}
- # (fileno, lineno) -> filename, funcname
- self._funcmap = {}
-
- self._reader = _hotshot.logreader(logfn)
- self._nextitem = self._reader.next
- self._info = self._reader.info
- if self._info.has_key('current-directory'):
- self.cwd = self._info['current-directory']
- else:
- self.cwd = None
-
- # This mirrors the call stack of the profiled code as the log
- # is read back in. It contains tuples of the form:
- #
- # (file name, line number of function def, function name)
- #
- self._stack = []
- self._append = self._stack.append
- self._pop = self._stack.pop
-
- def close(self):
- self._reader.close()
-
- def fileno(self):
- """Return the file descriptor of the log reader's log file."""
- return self._reader.fileno()
-
- def addinfo(self, key, value):
- """This method is called for each additional ADD_INFO record.
-
- This can be overridden by applications that want to receive
- these events. The default implementation does not need to be
- called by alternate implementations.
-
- The initial set of ADD_INFO records do not pass through this
- mechanism; this is only needed to receive notification when
- new values are added. Subclasses can inspect self._info after
- calling LogReader.__init__().
- """
- pass
-
- def get_filename(self, fileno):
- try:
- return self._filemap[fileno]
- except KeyError:
- raise ValueError, "unknown fileno"
-
- def get_filenames(self):
- return self._filemap.values()
-
- def get_fileno(self, filename):
- filename = os.path.normcase(os.path.normpath(filename))
- for fileno, name in self._filemap.items():
- if name == filename:
- return fileno
- raise ValueError, "unknown filename"
-
- def get_funcname(self, fileno, lineno):
- try:
- return self._funcmap[(fileno, lineno)]
- except KeyError:
- raise ValueError, "unknown function location"
-
- # Iteration support:
- # This adds an optional (& ignored) parameter to next() so that the
- # same bound method can be used as the __getitem__() method -- this
- # avoids using an additional method call which kills the performance.
-
- def next(self, index=0):
- while 1:
- # This call may raise StopIteration:
- what, tdelta, fileno, lineno = self._nextitem()
-
- # handle the most common cases first
-
- if what == WHAT_ENTER:
- filename, funcname = self._decode_location(fileno, lineno)
- t = (filename, lineno, funcname)
- self._append(t)
- return what, t, tdelta
-
- if what == WHAT_EXIT:
- return what, self._pop(), tdelta
-
- if what == WHAT_LINENO:
- filename, firstlineno, funcname = self._stack[-1]
- return what, (filename, lineno, funcname), tdelta
-
- if what == WHAT_DEFINE_FILE:
- filename = os.path.normcase(os.path.normpath(tdelta))
- self._filemap[fileno] = filename
- elif what == WHAT_DEFINE_FUNC:
- filename = self._filemap[fileno]
- self._funcmap[(fileno, lineno)] = (filename, tdelta)
- elif what == WHAT_ADD_INFO:
- # value already loaded into self.info; call the
- # overridable addinfo() handler so higher-level code
- # can pick up the new value
- if tdelta == 'current-directory':
- self.cwd = lineno
- self.addinfo(tdelta, lineno)
- else:
- raise ValueError, "unknown event type"
-
- def __iter__(self):
- return self
-
- #
- # helpers
- #
-
- def _decode_location(self, fileno, lineno):
- try:
- return self._funcmap[(fileno, lineno)]
- except KeyError:
- #
- # This should only be needed when the log file does not
- # contain all the DEFINE_FUNC records needed to allow the
- # function name to be retrieved from the log file.
- #
- if self._loadfile(fileno):
- filename = funcname = None
- try:
- filename, funcname = self._funcmap[(fileno, lineno)]
- except KeyError:
- filename = self._filemap.get(fileno)
- funcname = None
- self._funcmap[(fileno, lineno)] = (filename, funcname)
- return filename, funcname
-
- def _loadfile(self, fileno):
- try:
- filename = self._filemap[fileno]
- except KeyError:
- print "Could not identify fileId", fileno
- return 1
- if filename is None:
- return 1
- absname = os.path.normcase(os.path.join(self.cwd, filename))
-
- try:
- fp = open(absname)
- except IOError:
- return
- st = parser.suite(fp.read())
- fp.close()
-
- # Scan the tree looking for def and lambda nodes, filling in
- # self._funcmap with all the available information.
- funcdef = symbol.funcdef
- lambdef = symbol.lambdef
-
- stack = [st.totuple(1)]
-
- while stack:
- tree = stack.pop()
- try:
- sym = tree[0]
- except (IndexError, TypeError):
- continue
- if sym == funcdef:
- self._funcmap[(fileno, tree[2][2])] = filename, tree[2][1]
- elif sym == lambdef:
- self._funcmap[(fileno, tree[1][2])] = filename, "<lambda>"
- stack.extend(list(tree[1:]))
diff --git a/sys/lib/python/hotshot/stats.py b/sys/lib/python/hotshot/stats.py
deleted file mode 100644
index 7ff2277a1..000000000
--- a/sys/lib/python/hotshot/stats.py
+++ /dev/null
@@ -1,93 +0,0 @@
-"""Statistics analyzer for HotShot."""
-
-import profile
-import pstats
-
-import hotshot.log
-
-from hotshot.log import ENTER, EXIT
-
-
-def load(filename):
- return StatsLoader(filename).load()
-
-
-class StatsLoader:
- def __init__(self, logfn):
- self._logfn = logfn
- self._code = {}
- self._stack = []
- self.pop_frame = self._stack.pop
-
- def load(self):
- # The timer selected by the profiler should never be used, so make
- # sure it doesn't work:
- p = Profile()
- p.get_time = _brokentimer
- log = hotshot.log.LogReader(self._logfn)
- taccum = 0
- for event in log:
- what, (filename, lineno, funcname), tdelta = event
- if tdelta > 0:
- taccum += tdelta
-
- # We multiply taccum to convert from the microseconds we
- # have to the seconds that the profile/pstats module work
- # with; this allows the numbers to have some basis in
- # reality (ignoring calibration issues for now).
-
- if what == ENTER:
- frame = self.new_frame(filename, lineno, funcname)
- p.trace_dispatch_call(frame, taccum * .000001)
- taccum = 0
-
- elif what == EXIT:
- frame = self.pop_frame()
- p.trace_dispatch_return(frame, taccum * .000001)
- taccum = 0
-
- # no further work for line events
-
- assert not self._stack
- return pstats.Stats(p)
-
- def new_frame(self, *args):
- # args must be filename, firstlineno, funcname
- # our code objects are cached since we don't need to create
- # new ones every time
- try:
- code = self._code[args]
- except KeyError:
- code = FakeCode(*args)
- self._code[args] = code
- # frame objects are create fresh, since the back pointer will
- # vary considerably
- if self._stack:
- back = self._stack[-1]
- else:
- back = None
- frame = FakeFrame(code, back)
- self._stack.append(frame)
- return frame
-
-
-class Profile(profile.Profile):
- def simulate_cmd_complete(self):
- pass
-
-
-class FakeCode:
- def __init__(self, filename, firstlineno, funcname):
- self.co_filename = filename
- self.co_firstlineno = firstlineno
- self.co_name = self.__name__ = funcname
-
-
-class FakeFrame:
- def __init__(self, code, back):
- self.f_back = back
- self.f_code = code
-
-
-def _brokentimer():
- raise RuntimeError, "this timer should not be called"
diff --git a/sys/lib/python/hotshot/stones.py b/sys/lib/python/hotshot/stones.py
deleted file mode 100644
index cd4c51d48..000000000
--- a/sys/lib/python/hotshot/stones.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import errno
-import hotshot
-import hotshot.stats
-import os
-import sys
-import test.pystone
-
-def main(logfile):
- p = hotshot.Profile(logfile)
- benchtime, stones = p.runcall(test.pystone.pystones)
- p.close()
-
- print "Pystone(%s) time for %d passes = %g" % \
- (test.pystone.__version__, test.pystone.LOOPS, benchtime)
- print "This machine benchmarks at %g pystones/second" % stones
-
- stats = hotshot.stats.load(logfile)
- stats.strip_dirs()
- stats.sort_stats('time', 'calls')
- try:
- stats.print_stats(20)
- except IOError, e:
- if e.errno != errno.EPIPE:
- raise
-
-if __name__ == '__main__':
- if sys.argv[1:]:
- main(sys.argv[1])
- else:
- import tempfile
- main(tempfile.NamedTemporaryFile().name)
diff --git a/sys/lib/python/htmlentitydefs.py b/sys/lib/python/htmlentitydefs.py
deleted file mode 100644
index 3dd14a79f..000000000
--- a/sys/lib/python/htmlentitydefs.py
+++ /dev/null
@@ -1,273 +0,0 @@
-"""HTML character entity references."""
-
-# maps the HTML entity name to the Unicode codepoint
-name2codepoint = {
- 'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
- 'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
- 'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
- 'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
- 'Alpha': 0x0391, # greek capital letter alpha, U+0391
- 'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
- 'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
- 'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
- 'Beta': 0x0392, # greek capital letter beta, U+0392
- 'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
- 'Chi': 0x03a7, # greek capital letter chi, U+03A7
- 'Dagger': 0x2021, # double dagger, U+2021 ISOpub
- 'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3
- 'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
- 'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
- 'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
- 'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
- 'Epsilon': 0x0395, # greek capital letter epsilon, U+0395
- 'Eta': 0x0397, # greek capital letter eta, U+0397
- 'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
- 'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3
- 'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
- 'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
- 'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
- 'Iota': 0x0399, # greek capital letter iota, U+0399
- 'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
- 'Kappa': 0x039a, # greek capital letter kappa, U+039A
- 'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3
- 'Mu': 0x039c, # greek capital letter mu, U+039C
- 'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
- 'Nu': 0x039d, # greek capital letter nu, U+039D
- 'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2
- 'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
- 'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
- 'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
- 'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
- 'Omicron': 0x039f, # greek capital letter omicron, U+039F
- 'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
- 'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
- 'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
- 'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
- 'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
- 'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech
- 'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
- 'Rho': 0x03a1, # greek capital letter rho, U+03A1
- 'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2
- 'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
- 'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1
- 'Tau': 0x03a4, # greek capital letter tau, U+03A4
- 'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3
- 'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1
- 'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
- 'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
- 'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
- 'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
- 'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3
- 'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
- 'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
- 'Zeta': 0x0396, # greek capital letter zeta, U+0396
- 'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
- 'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
- 'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
- 'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
- 'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
- 'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
- 'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
- 'amp': 0x0026, # ampersand, U+0026 ISOnum
- 'and': 0x2227, # logical and = wedge, U+2227 ISOtech
- 'ang': 0x2220, # angle, U+2220 ISOamso
- 'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
- 'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
- 'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
- 'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
- 'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW
- 'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3
- 'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
- 'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub
- 'cap': 0x2229, # intersection = cap, U+2229 ISOtech
- 'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
- 'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
- 'cent': 0x00a2, # cent sign, U+00A2 ISOnum
- 'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3
- 'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
- 'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub
- 'cong': 0x2245, # approximately equal to, U+2245 ISOtech
- 'copy': 0x00a9, # copyright sign, U+00A9 ISOnum
- 'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
- 'cup': 0x222a, # union = cup, U+222A ISOtech
- 'curren': 0x00a4, # currency sign, U+00A4 ISOnum
- 'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa
- 'dagger': 0x2020, # dagger, U+2020 ISOpub
- 'darr': 0x2193, # downwards arrow, U+2193 ISOnum
- 'deg': 0x00b0, # degree sign, U+00B0 ISOnum
- 'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3
- 'diams': 0x2666, # black diamond suit, U+2666 ISOpub
- 'divide': 0x00f7, # division sign, U+00F7 ISOnum
- 'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
- 'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
- 'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
- 'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso
- 'emsp': 0x2003, # em space, U+2003 ISOpub
- 'ensp': 0x2002, # en space, U+2002 ISOpub
- 'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
- 'equiv': 0x2261, # identical to, U+2261 ISOtech
- 'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3
- 'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1
- 'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
- 'euro': 0x20ac, # euro sign, U+20AC NEW
- 'exist': 0x2203, # there exists, U+2203 ISOtech
- 'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
- 'forall': 0x2200, # for all, U+2200 ISOtech
- 'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
- 'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
- 'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
- 'frasl': 0x2044, # fraction slash, U+2044 NEW
- 'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
- 'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech
- 'gt': 0x003e, # greater-than sign, U+003E ISOnum
- 'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa
- 'harr': 0x2194, # left right arrow, U+2194 ISOamsa
- 'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub
- 'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
- 'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1
- 'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
- 'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum
- 'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1
- 'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
- 'infin': 0x221e, # infinity, U+221E ISOtech
- 'int': 0x222b, # integral, U+222B ISOtech
- 'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3
- 'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
- 'isin': 0x2208, # element of, U+2208 ISOtech
- 'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
- 'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3
- 'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech
- 'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3
- 'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
- 'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
- 'larr': 0x2190, # leftwards arrow, U+2190 ISOnum
- 'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
- 'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum
- 'le': 0x2264, # less-than or equal to, U+2264 ISOtech
- 'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc
- 'lowast': 0x2217, # asterisk operator, U+2217 ISOtech
- 'loz': 0x25ca, # lozenge, U+25CA ISOpub
- 'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070
- 'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
- 'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum
- 'lt': 0x003c, # less-than sign, U+003C ISOnum
- 'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
- 'mdash': 0x2014, # em dash, U+2014 ISOpub
- 'micro': 0x00b5, # micro sign, U+00B5 ISOnum
- 'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
- 'minus': 0x2212, # minus sign, U+2212 ISOtech
- 'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3
- 'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech
- 'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
- 'ndash': 0x2013, # en dash, U+2013 ISOpub
- 'ne': 0x2260, # not equal to, U+2260 ISOtech
- 'ni': 0x220b, # contains as member, U+220B ISOtech
- 'not': 0x00ac, # not sign, U+00AC ISOnum
- 'notin': 0x2209, # not an element of, U+2209 ISOtech
- 'nsub': 0x2284, # not a subset of, U+2284 ISOamsn
- 'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
- 'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3
- 'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
- 'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
- 'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2
- 'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
- 'oline': 0x203e, # overline = spacing overscore, U+203E NEW
- 'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3
- 'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW
- 'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb
- 'or': 0x2228, # logical or = vee, U+2228 ISOtech
- 'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum
- 'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum
- 'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
- 'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
- 'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb
- 'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
- 'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
- 'part': 0x2202, # partial differential, U+2202 ISOtech
- 'permil': 0x2030, # per mille sign, U+2030 ISOtech
- 'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
- 'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3
- 'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3
- 'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3
- 'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
- 'pound': 0x00a3, # pound sign, U+00A3 ISOnum
- 'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech
- 'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb
- 'prop': 0x221d, # proportional to, U+221D ISOtech
- 'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3
- 'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum
- 'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech
- 'radic': 0x221a, # square root = radical sign, U+221A ISOtech
- 'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
- 'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
- 'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum
- 'rceil': 0x2309, # right ceiling, U+2309 ISOamsc
- 'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum
- 'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
- 'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
- 'rfloor': 0x230b, # right floor, U+230B ISOamsc
- 'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3
- 'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070
- 'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
- 'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum
- 'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW
- 'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2
- 'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb
- 'sect': 0x00a7, # section sign, U+00A7 ISOnum
- 'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
- 'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
- 'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
- 'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
- 'spades': 0x2660, # black spade suit, U+2660 ISOpub
- 'sub': 0x2282, # subset of, U+2282 ISOtech
- 'sube': 0x2286, # subset of or equal to, U+2286 ISOtech
- 'sum': 0x2211, # n-ary sumation, U+2211 ISOamsb
- 'sup': 0x2283, # superset of, U+2283 ISOtech
- 'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
- 'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
- 'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
- 'supe': 0x2287, # superset of or equal to, U+2287 ISOtech
- 'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
- 'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3
- 'there4': 0x2234, # therefore, U+2234 ISOtech
- 'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3
- 'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
- 'thinsp': 0x2009, # thin space, U+2009 ISOpub
- 'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1
- 'tilde': 0x02dc, # small tilde, U+02DC ISOdia
- 'times': 0x00d7, # multiplication sign, U+00D7 ISOnum
- 'trade': 0x2122, # trade mark sign, U+2122 ISOnum
- 'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa
- 'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1
- 'uarr': 0x2191, # upwards arrow, U+2191 ISOnum
- 'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
- 'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
- 'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
- 'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
- 'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
- 'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
- 'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
- 'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3
- 'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1
- 'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
- 'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
- 'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
- 'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070
- 'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070
-}
-
-# maps the Unicode codepoint to the HTML entity name
-codepoint2name = {}
-
-# maps the HTML entity name to the character
-# (or a character reference if the character is outside the Latin-1 range)
-entitydefs = {}
-
-for (name, codepoint) in name2codepoint.iteritems():
- codepoint2name[codepoint] = name
- if codepoint <= 0xff:
- entitydefs[name] = chr(codepoint)
- else:
- entitydefs[name] = '&#%d;' % codepoint
-
-del name, codepoint
diff --git a/sys/lib/python/htmllib.py b/sys/lib/python/htmllib.py
deleted file mode 100644
index 24a2e2f3c..000000000
--- a/sys/lib/python/htmllib.py
+++ /dev/null
@@ -1,486 +0,0 @@
-"""HTML 2.0 parser.
-
-See the HTML 2.0 specification:
-http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_toc.html
-"""
-
-import sgmllib
-
-from formatter import AS_IS
-
-__all__ = ["HTMLParser", "HTMLParseError"]
-
-
-class HTMLParseError(sgmllib.SGMLParseError):
- """Error raised when an HTML document can't be parsed."""
-
-
-class HTMLParser(sgmllib.SGMLParser):
- """This is the basic HTML parser class.
-
- It supports all entity names required by the XHTML 1.0 Recommendation.
- It also defines handlers for all HTML 2.0 and many HTML 3.0 and 3.2
- elements.
-
- """
-
- from htmlentitydefs import entitydefs
-
- def __init__(self, formatter, verbose=0):
- """Creates an instance of the HTMLParser class.
-
- The formatter parameter is the formatter instance associated with
- the parser.
-
- """
- sgmllib.SGMLParser.__init__(self, verbose)
- self.formatter = formatter
-
- def error(self, message):
- raise HTMLParseError(message)
-
- def reset(self):
- sgmllib.SGMLParser.reset(self)
- self.savedata = None
- self.isindex = 0
- self.title = None
- self.base = None
- self.anchor = None
- self.anchorlist = []
- self.nofill = 0
- self.list_stack = []
-
- # ------ Methods used internally; some may be overridden
-
- # --- Formatter interface, taking care of 'savedata' mode;
- # shouldn't need to be overridden
-
- def handle_data(self, data):
- if self.savedata is not None:
- self.savedata = self.savedata + data
- else:
- if self.nofill:
- self.formatter.add_literal_data(data)
- else:
- self.formatter.add_flowing_data(data)
-
- # --- Hooks to save data; shouldn't need to be overridden
-
- def save_bgn(self):
- """Begins saving character data in a buffer instead of sending it
- to the formatter object.
-
- Retrieve the stored data via the save_end() method. Use of the
- save_bgn() / save_end() pair may not be nested.
-
- """
- self.savedata = ''
-
- def save_end(self):
- """Ends buffering character data and returns all data saved since
- the preceding call to the save_bgn() method.
-
- If the nofill flag is false, whitespace is collapsed to single
- spaces. A call to this method without a preceding call to the
- save_bgn() method will raise a TypeError exception.
-
- """
- data = self.savedata
- self.savedata = None
- if not self.nofill:
- data = ' '.join(data.split())
- return data
-
- # --- Hooks for anchors; should probably be overridden
-
- def anchor_bgn(self, href, name, type):
- """This method is called at the start of an anchor region.
-
- The arguments correspond to the attributes of the <A> tag with
- the same names. The default implementation maintains a list of
- hyperlinks (defined by the HREF attribute for <A> tags) within
- the document. The list of hyperlinks is available as the data
- attribute anchorlist.
-
- """
- self.anchor = href
- if self.anchor:
- self.anchorlist.append(href)
-
- def anchor_end(self):
- """This method is called at the end of an anchor region.
-
- The default implementation adds a textual footnote marker using an
- index into the list of hyperlinks created by the anchor_bgn()method.
-
- """
- if self.anchor:
- self.handle_data("[%d]" % len(self.anchorlist))
- self.anchor = None
-
- # --- Hook for images; should probably be overridden
-
- def handle_image(self, src, alt, *args):
- """This method is called to handle images.
-
- The default implementation simply passes the alt value to the
- handle_data() method.
-
- """
- self.handle_data(alt)
-
- # --------- Top level elememts
-
- def start_html(self, attrs): pass
- def end_html(self): pass
-
- def start_head(self, attrs): pass
- def end_head(self): pass
-
- def start_body(self, attrs): pass
- def end_body(self): pass
-
- # ------ Head elements
-
- def start_title(self, attrs):
- self.save_bgn()
-
- def end_title(self):
- self.title = self.save_end()
-
- def do_base(self, attrs):
- for a, v in attrs:
- if a == 'href':
- self.base = v
-
- def do_isindex(self, attrs):
- self.isindex = 1
-
- def do_link(self, attrs):
- pass
-
- def do_meta(self, attrs):
- pass
-
- def do_nextid(self, attrs): # Deprecated
- pass
-
- # ------ Body elements
-
- # --- Headings
-
- def start_h1(self, attrs):
- self.formatter.end_paragraph(1)
- self.formatter.push_font(('h1', 0, 1, 0))
-
- def end_h1(self):
- self.formatter.end_paragraph(1)
- self.formatter.pop_font()
-
- def start_h2(self, attrs):
- self.formatter.end_paragraph(1)
- self.formatter.push_font(('h2', 0, 1, 0))
-
- def end_h2(self):
- self.formatter.end_paragraph(1)
- self.formatter.pop_font()
-
- def start_h3(self, attrs):
- self.formatter.end_paragraph(1)
- self.formatter.push_font(('h3', 0, 1, 0))
-
- def end_h3(self):
- self.formatter.end_paragraph(1)
- self.formatter.pop_font()
-
- def start_h4(self, attrs):
- self.formatter.end_paragraph(1)
- self.formatter.push_font(('h4', 0, 1, 0))
-
- def end_h4(self):
- self.formatter.end_paragraph(1)
- self.formatter.pop_font()
-
- def start_h5(self, attrs):
- self.formatter.end_paragraph(1)
- self.formatter.push_font(('h5', 0, 1, 0))
-
- def end_h5(self):
- self.formatter.end_paragraph(1)
- self.formatter.pop_font()
-
- def start_h6(self, attrs):
- self.formatter.end_paragraph(1)
- self.formatter.push_font(('h6', 0, 1, 0))
-
- def end_h6(self):
- self.formatter.end_paragraph(1)
- self.formatter.pop_font()
-
- # --- Block Structuring Elements
-
- def do_p(self, attrs):
- self.formatter.end_paragraph(1)
-
- def start_pre(self, attrs):
- self.formatter.end_paragraph(1)
- self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
- self.nofill = self.nofill + 1
-
- def end_pre(self):
- self.formatter.end_paragraph(1)
- self.formatter.pop_font()
- self.nofill = max(0, self.nofill - 1)
-
- def start_xmp(self, attrs):
- self.start_pre(attrs)
- self.setliteral('xmp') # Tell SGML parser
-
- def end_xmp(self):
- self.end_pre()
-
- def start_listing(self, attrs):
- self.start_pre(attrs)
- self.setliteral('listing') # Tell SGML parser
-
- def end_listing(self):
- self.end_pre()
-
- def start_address(self, attrs):
- self.formatter.end_paragraph(0)
- self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
-
- def end_address(self):
- self.formatter.end_paragraph(0)
- self.formatter.pop_font()
-
- def start_blockquote(self, attrs):
- self.formatter.end_paragraph(1)
- self.formatter.push_margin('blockquote')
-
- def end_blockquote(self):
- self.formatter.end_paragraph(1)
- self.formatter.pop_margin()
-
- # --- List Elements
-
- def start_ul(self, attrs):
- self.formatter.end_paragraph(not self.list_stack)
- self.formatter.push_margin('ul')
- self.list_stack.append(['ul', '*', 0])
-
- def end_ul(self):
- if self.list_stack: del self.list_stack[-1]
- self.formatter.end_paragraph(not self.list_stack)
- self.formatter.pop_margin()
-
- def do_li(self, attrs):
- self.formatter.end_paragraph(0)
- if self.list_stack:
- [dummy, label, counter] = top = self.list_stack[-1]
- top[2] = counter = counter+1
- else:
- label, counter = '*', 0
- self.formatter.add_label_data(label, counter)
-
- def start_ol(self, attrs):
- self.formatter.end_paragraph(not self.list_stack)
- self.formatter.push_margin('ol')
- label = '1.'
- for a, v in attrs:
- if a == 'type':
- if len(v) == 1: v = v + '.'
- label = v
- self.list_stack.append(['ol', label, 0])
-
- def end_ol(self):
- if self.list_stack: del self.list_stack[-1]
- self.formatter.end_paragraph(not self.list_stack)
- self.formatter.pop_margin()
-
- def start_menu(self, attrs):
- self.start_ul(attrs)
-
- def end_menu(self):
- self.end_ul()
-
- def start_dir(self, attrs):
- self.start_ul(attrs)
-
- def end_dir(self):
- self.end_ul()
-
- def start_dl(self, attrs):
- self.formatter.end_paragraph(1)
- self.list_stack.append(['dl', '', 0])
-
- def end_dl(self):
- self.ddpop(1)
- if self.list_stack: del self.list_stack[-1]
-
- def do_dt(self, attrs):
- self.ddpop()
-
- def do_dd(self, attrs):
- self.ddpop()
- self.formatter.push_margin('dd')
- self.list_stack.append(['dd', '', 0])
-
- def ddpop(self, bl=0):
- self.formatter.end_paragraph(bl)
- if self.list_stack:
- if self.list_stack[-1][0] == 'dd':
- del self.list_stack[-1]
- self.formatter.pop_margin()
-
- # --- Phrase Markup
-
- # Idiomatic Elements
-
- def start_cite(self, attrs): self.start_i(attrs)
- def end_cite(self): self.end_i()
-
- def start_code(self, attrs): self.start_tt(attrs)
- def end_code(self): self.end_tt()
-
- def start_em(self, attrs): self.start_i(attrs)
- def end_em(self): self.end_i()
-
- def start_kbd(self, attrs): self.start_tt(attrs)
- def end_kbd(self): self.end_tt()
-
- def start_samp(self, attrs): self.start_tt(attrs)
- def end_samp(self): self.end_tt()
-
- def start_strong(self, attrs): self.start_b(attrs)
- def end_strong(self): self.end_b()
-
- def start_var(self, attrs): self.start_i(attrs)
- def end_var(self): self.end_i()
-
- # Typographic Elements
-
- def start_i(self, attrs):
- self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
- def end_i(self):
- self.formatter.pop_font()
-
- def start_b(self, attrs):
- self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
- def end_b(self):
- self.formatter.pop_font()
-
- def start_tt(self, attrs):
- self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
- def end_tt(self):
- self.formatter.pop_font()
-
- def start_a(self, attrs):
- href = ''
- name = ''
- type = ''
- for attrname, value in attrs:
- value = value.strip()
- if attrname == 'href':
- href = value
- if attrname == 'name':
- name = value
- if attrname == 'type':
- type = value.lower()
- self.anchor_bgn(href, name, type)
-
- def end_a(self):
- self.anchor_end()
-
- # --- Line Break
-
- def do_br(self, attrs):
- self.formatter.add_line_break()
-
- # --- Horizontal Rule
-
- def do_hr(self, attrs):
- self.formatter.add_hor_rule()
-
- # --- Image
-
- def do_img(self, attrs):
- align = ''
- alt = '(image)'
- ismap = ''
- src = ''
- width = 0
- height = 0
- for attrname, value in attrs:
- if attrname == 'align':
- align = value
- if attrname == 'alt':
- alt = value
- if attrname == 'ismap':
- ismap = value
- if attrname == 'src':
- src = value
- if attrname == 'width':
- try: width = int(value)
- except ValueError: pass
- if attrname == 'height':
- try: height = int(value)
- except ValueError: pass
- self.handle_image(src, alt, ismap, align, width, height)
-
- # --- Really Old Unofficial Deprecated Stuff
-
- def do_plaintext(self, attrs):
- self.start_pre(attrs)
- self.setnomoretags() # Tell SGML parser
-
- # --- Unhandled tags
-
- def unknown_starttag(self, tag, attrs):
- pass
-
- def unknown_endtag(self, tag):
- pass
-
-
-def test(args = None):
- import sys, formatter
-
- if not args:
- args = sys.argv[1:]
-
- silent = args and args[0] == '-s'
- if silent:
- del args[0]
-
- if args:
- file = args[0]
- else:
- file = 'test.html'
-
- if file == '-':
- f = sys.stdin
- else:
- try:
- f = open(file, 'r')
- except IOError, msg:
- print file, ":", msg
- sys.exit(1)
-
- data = f.read()
-
- if f is not sys.stdin:
- f.close()
-
- if silent:
- f = formatter.NullFormatter()
- else:
- f = formatter.AbstractFormatter(formatter.DumbWriter())
-
- p = HTMLParser(f)
- p.feed(data)
- p.close()
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/httplib.py b/sys/lib/python/httplib.py
deleted file mode 100644
index 403e4eed0..000000000
--- a/sys/lib/python/httplib.py
+++ /dev/null
@@ -1,1428 +0,0 @@
-"""HTTP/1.1 client library
-
-<intro stuff goes here>
-<other stuff, too>
-
-HTTPConnection goes through a number of "states", which define when a client
-may legally make another request or fetch the response for a particular
-request. This diagram details these state transitions:
-
- (null)
- |
- | HTTPConnection()
- v
- Idle
- |
- | putrequest()
- v
- Request-started
- |
- | ( putheader() )* endheaders()
- v
- Request-sent
- |
- | response = getresponse()
- v
- Unread-response [Response-headers-read]
- |\____________________
- | |
- | response.read() | putrequest()
- v v
- Idle Req-started-unread-response
- ______/|
- / |
- response.read() | | ( putheader() )* endheaders()
- v v
- Request-started Req-sent-unread-response
- |
- | response.read()
- v
- Request-sent
-
-This diagram presents the following rules:
- -- a second request may not be started until {response-headers-read}
- -- a response [object] cannot be retrieved until {request-sent}
- -- there is no differentiation between an unread response body and a
- partially read response body
-
-Note: this enforcement is applied by the HTTPConnection class. The
- HTTPResponse class does not enforce this state machine, which
- implies sophisticated clients may accelerate the request/response
- pipeline. Caution should be taken, though: accelerating the states
- beyond the above pattern may imply knowledge of the server's
- connection-close behavior for certain requests. For example, it
- is impossible to tell whether the server will close the connection
- UNTIL the response headers have been read; this means that further
- requests cannot be placed into the pipeline until it is known that
- the server will NOT be closing the connection.
-
-Logical State __state __response
-------------- ------- ----------
-Idle _CS_IDLE None
-Request-started _CS_REQ_STARTED None
-Request-sent _CS_REQ_SENT None
-Unread-response _CS_IDLE <response_class>
-Req-started-unread-response _CS_REQ_STARTED <response_class>
-Req-sent-unread-response _CS_REQ_SENT <response_class>
-"""
-
-import errno
-import mimetools
-import socket
-from urlparse import urlsplit
-
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-__all__ = ["HTTP", "HTTPResponse", "HTTPConnection", "HTTPSConnection",
- "HTTPException", "NotConnected", "UnknownProtocol",
- "UnknownTransferEncoding", "UnimplementedFileMode",
- "IncompleteRead", "InvalidURL", "ImproperConnectionState",
- "CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
- "BadStatusLine", "error", "responses"]
-
-HTTP_PORT = 80
-HTTPS_PORT = 443
-
-_UNKNOWN = 'UNKNOWN'
-
-# connection states
-_CS_IDLE = 'Idle'
-_CS_REQ_STARTED = 'Request-started'
-_CS_REQ_SENT = 'Request-sent'
-
-# status codes
-# informational
-CONTINUE = 100
-SWITCHING_PROTOCOLS = 101
-PROCESSING = 102
-
-# successful
-OK = 200
-CREATED = 201
-ACCEPTED = 202
-NON_AUTHORITATIVE_INFORMATION = 203
-NO_CONTENT = 204
-RESET_CONTENT = 205
-PARTIAL_CONTENT = 206
-MULTI_STATUS = 207
-IM_USED = 226
-
-# redirection
-MULTIPLE_CHOICES = 300
-MOVED_PERMANENTLY = 301
-FOUND = 302
-SEE_OTHER = 303
-NOT_MODIFIED = 304
-USE_PROXY = 305
-TEMPORARY_REDIRECT = 307
-
-# client error
-BAD_REQUEST = 400
-UNAUTHORIZED = 401
-PAYMENT_REQUIRED = 402
-FORBIDDEN = 403
-NOT_FOUND = 404
-METHOD_NOT_ALLOWED = 405
-NOT_ACCEPTABLE = 406
-PROXY_AUTHENTICATION_REQUIRED = 407
-REQUEST_TIMEOUT = 408
-CONFLICT = 409
-GONE = 410
-LENGTH_REQUIRED = 411
-PRECONDITION_FAILED = 412
-REQUEST_ENTITY_TOO_LARGE = 413
-REQUEST_URI_TOO_LONG = 414
-UNSUPPORTED_MEDIA_TYPE = 415
-REQUESTED_RANGE_NOT_SATISFIABLE = 416
-EXPECTATION_FAILED = 417
-UNPROCESSABLE_ENTITY = 422
-LOCKED = 423
-FAILED_DEPENDENCY = 424
-UPGRADE_REQUIRED = 426
-
-# server error
-INTERNAL_SERVER_ERROR = 500
-NOT_IMPLEMENTED = 501
-BAD_GATEWAY = 502
-SERVICE_UNAVAILABLE = 503
-GATEWAY_TIMEOUT = 504
-HTTP_VERSION_NOT_SUPPORTED = 505
-INSUFFICIENT_STORAGE = 507
-NOT_EXTENDED = 510
-
-# Mapping status codes to official W3C names
-responses = {
- 100: 'Continue',
- 101: 'Switching Protocols',
-
- 200: 'OK',
- 201: 'Created',
- 202: 'Accepted',
- 203: 'Non-Authoritative Information',
- 204: 'No Content',
- 205: 'Reset Content',
- 206: 'Partial Content',
-
- 300: 'Multiple Choices',
- 301: 'Moved Permanently',
- 302: 'Found',
- 303: 'See Other',
- 304: 'Not Modified',
- 305: 'Use Proxy',
- 306: '(Unused)',
- 307: 'Temporary Redirect',
-
- 400: 'Bad Request',
- 401: 'Unauthorized',
- 402: 'Payment Required',
- 403: 'Forbidden',
- 404: 'Not Found',
- 405: 'Method Not Allowed',
- 406: 'Not Acceptable',
- 407: 'Proxy Authentication Required',
- 408: 'Request Timeout',
- 409: 'Conflict',
- 410: 'Gone',
- 411: 'Length Required',
- 412: 'Precondition Failed',
- 413: 'Request Entity Too Large',
- 414: 'Request-URI Too Long',
- 415: 'Unsupported Media Type',
- 416: 'Requested Range Not Satisfiable',
- 417: 'Expectation Failed',
-
- 500: 'Internal Server Error',
- 501: 'Not Implemented',
- 502: 'Bad Gateway',
- 503: 'Service Unavailable',
- 504: 'Gateway Timeout',
- 505: 'HTTP Version Not Supported',
-}
-
-# maximal amount of data to read at one time in _safe_read
-MAXAMOUNT = 1048576
-
-class HTTPMessage(mimetools.Message):
-
- def addheader(self, key, value):
- """Add header for field key handling repeats."""
- prev = self.dict.get(key)
- if prev is None:
- self.dict[key] = value
- else:
- combined = ", ".join((prev, value))
- self.dict[key] = combined
-
- def addcontinue(self, key, more):
- """Add more field data from a continuation line."""
- prev = self.dict[key]
- self.dict[key] = prev + "\n " + more
-
- def readheaders(self):
- """Read header lines.
-
- Read header lines up to the entirely blank line that terminates them.
- The (normally blank) line that ends the headers is skipped, but not
- included in the returned list. If a non-header line ends the headers,
- (which is an error), an attempt is made to backspace over it; it is
- never included in the returned list.
-
- The variable self.status is set to the empty string if all went well,
- otherwise it is an error message. The variable self.headers is a
- completely uninterpreted list of lines contained in the header (so
- printing them will reproduce the header exactly as it appears in the
- file).
-
- If multiple header fields with the same name occur, they are combined
- according to the rules in RFC 2616 sec 4.2:
-
- Appending each subsequent field-value to the first, each separated
- by a comma. The order in which header fields with the same field-name
- are received is significant to the interpretation of the combined
- field value.
- """
- # XXX The implementation overrides the readheaders() method of
- # rfc822.Message. The base class design isn't amenable to
- # customized behavior here so the method here is a copy of the
- # base class code with a few small changes.
-
- self.dict = {}
- self.unixfrom = ''
- self.headers = hlist = []
- self.status = ''
- headerseen = ""
- firstline = 1
- startofline = unread = tell = None
- if hasattr(self.fp, 'unread'):
- unread = self.fp.unread
- elif self.seekable:
- tell = self.fp.tell
- while True:
- if tell:
- try:
- startofline = tell()
- except IOError:
- startofline = tell = None
- self.seekable = 0
- line = self.fp.readline()
- if not line:
- self.status = 'EOF in headers'
- break
- # Skip unix From name time lines
- if firstline and line.startswith('From '):
- self.unixfrom = self.unixfrom + line
- continue
- firstline = 0
- if headerseen and line[0] in ' \t':
- # XXX Not sure if continuation lines are handled properly
- # for http and/or for repeating headers
- # It's a continuation line.
- hlist.append(line)
- self.addcontinue(headerseen, line.strip())
- continue
- elif self.iscomment(line):
- # It's a comment. Ignore it.
- continue
- elif self.islast(line):
- # Note! No pushback here! The delimiter line gets eaten.
- break
- headerseen = self.isheader(line)
- if headerseen:
- # It's a legal header line, save it.
- hlist.append(line)
- self.addheader(headerseen, line[len(headerseen)+1:].strip())
- continue
- else:
- # It's not a header line; throw it back and stop here.
- if not self.dict:
- self.status = 'No headers'
- else:
- self.status = 'Non-header line where header expected'
- # Try to undo the read.
- if unread:
- unread(line)
- elif tell:
- self.fp.seek(startofline)
- else:
- self.status = self.status + '; bad seek'
- break
-
-class HTTPResponse:
-
- # strict: If true, raise BadStatusLine if the status line can't be
- # parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
- # false because it prevents clients from talking to HTTP/0.9
- # servers. Note that a response with a sufficiently corrupted
- # status line will look like an HTTP/0.9 response.
-
- # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
-
- def __init__(self, sock, debuglevel=0, strict=0, method=None):
- self.fp = sock.makefile('rb', 0)
- self.debuglevel = debuglevel
- self.strict = strict
- self._method = method
-
- self.msg = None
-
- # from the Status-Line of the response
- self.version = _UNKNOWN # HTTP-Version
- self.status = _UNKNOWN # Status-Code
- self.reason = _UNKNOWN # Reason-Phrase
-
- self.chunked = _UNKNOWN # is "chunked" being used?
- self.chunk_left = _UNKNOWN # bytes left to read in current chunk
- self.length = _UNKNOWN # number of bytes left in response
- self.will_close = _UNKNOWN # conn will close at end of response
-
- def _read_status(self):
- # Initialize with Simple-Response defaults
- line = self.fp.readline()
- if self.debuglevel > 0:
- print "reply:", repr(line)
- if not line:
- # Presumably, the server closed the connection before
- # sending a valid response.
- raise BadStatusLine(line)
- try:
- [version, status, reason] = line.split(None, 2)
- except ValueError:
- try:
- [version, status] = line.split(None, 1)
- reason = ""
- except ValueError:
- # empty version will cause next test to fail and status
- # will be treated as 0.9 response.
- version = ""
- if not version.startswith('HTTP/'):
- if self.strict:
- self.close()
- raise BadStatusLine(line)
- else:
- # assume it's a Simple-Response from an 0.9 server
- self.fp = LineAndFileWrapper(line, self.fp)
- return "HTTP/0.9", 200, ""
-
- # The status code is a three-digit number
- try:
- status = int(status)
- if status < 100 or status > 999:
- raise BadStatusLine(line)
- except ValueError:
- raise BadStatusLine(line)
- return version, status, reason
-
- def begin(self):
- if self.msg is not None:
- # we've already started reading the response
- return
-
- # read until we get a non-100 response
- while True:
- version, status, reason = self._read_status()
- if status != CONTINUE:
- break
- # skip the header from the 100 response
- while True:
- skip = self.fp.readline().strip()
- if not skip:
- break
- if self.debuglevel > 0:
- print "header:", skip
-
- self.status = status
- self.reason = reason.strip()
- if version == 'HTTP/1.0':
- self.version = 10
- elif version.startswith('HTTP/1.'):
- self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
- elif version == 'HTTP/0.9':
- self.version = 9
- else:
- raise UnknownProtocol(version)
-
- if self.version == 9:
- self.length = None
- self.chunked = 0
- self.will_close = 1
- self.msg = HTTPMessage(StringIO())
- return
-
- self.msg = HTTPMessage(self.fp, 0)
- if self.debuglevel > 0:
- for hdr in self.msg.headers:
- print "header:", hdr,
-
- # don't let the msg keep an fp
- self.msg.fp = None
-
- # are we using the chunked-style of transfer encoding?
- tr_enc = self.msg.getheader('transfer-encoding')
- if tr_enc and tr_enc.lower() == "chunked":
- self.chunked = 1
- self.chunk_left = None
- else:
- self.chunked = 0
-
- # will the connection close at the end of the response?
- self.will_close = self._check_close()
-
- # do we have a Content-Length?
- # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
- length = self.msg.getheader('content-length')
- if length and not self.chunked:
- try:
- self.length = int(length)
- except ValueError:
- self.length = None
- else:
- self.length = None
-
- # does the body have a fixed length? (of zero)
- if (status == NO_CONTENT or status == NOT_MODIFIED or
- 100 <= status < 200 or # 1xx codes
- self._method == 'HEAD'):
- self.length = 0
-
- # if the connection remains open, and we aren't using chunked, and
- # a content-length was not provided, then assume that the connection
- # WILL close.
- if not self.will_close and \
- not self.chunked and \
- self.length is None:
- self.will_close = 1
-
- def _check_close(self):
- conn = self.msg.getheader('connection')
- if self.version == 11:
- # An HTTP/1.1 proxy is assumed to stay open unless
- # explicitly closed.
- conn = self.msg.getheader('connection')
- if conn and "close" in conn.lower():
- return True
- return False
-
- # Some HTTP/1.0 implementations have support for persistent
- # connections, using rules different than HTTP/1.1.
-
- # For older HTTP, Keep-Alive indiciates persistent connection.
- if self.msg.getheader('keep-alive'):
- return False
-
- # At least Akamai returns a "Connection: Keep-Alive" header,
- # which was supposed to be sent by the client.
- if conn and "keep-alive" in conn.lower():
- return False
-
- # Proxy-Connection is a netscape hack.
- pconn = self.msg.getheader('proxy-connection')
- if pconn and "keep-alive" in pconn.lower():
- return False
-
- # otherwise, assume it will close
- return True
-
- def close(self):
- if self.fp:
- self.fp.close()
- self.fp = None
-
- def isclosed(self):
- # NOTE: it is possible that we will not ever call self.close(). This
- # case occurs when will_close is TRUE, length is None, and we
- # read up to the last byte, but NOT past it.
- #
- # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
- # called, meaning self.isclosed() is meaningful.
- return self.fp is None
-
- # XXX It would be nice to have readline and __iter__ for this, too.
-
- def read(self, amt=None):
- if self.fp is None:
- return ''
-
- if self.chunked:
- return self._read_chunked(amt)
-
- if amt is None:
- # unbounded read
- if self.length is None:
- s = self.fp.read()
- else:
- s = self._safe_read(self.length)
- self.length = 0
- self.close() # we read everything
- return s
-
- if self.length is not None:
- if amt > self.length:
- # clip the read to the "end of response"
- amt = self.length
-
- # we do not use _safe_read() here because this may be a .will_close
- # connection, and the user is reading more bytes than will be provided
- # (for example, reading in 1k chunks)
- s = self.fp.read(amt)
- if self.length is not None:
- self.length -= len(s)
-
- return s
-
- def _read_chunked(self, amt):
- assert self.chunked != _UNKNOWN
- chunk_left = self.chunk_left
- value = ''
-
- # XXX This accumulates chunks by repeated string concatenation,
- # which is not efficient as the number or size of chunks gets big.
- while True:
- if chunk_left is None:
- line = self.fp.readline()
- i = line.find(';')
- if i >= 0:
- line = line[:i] # strip chunk-extensions
- chunk_left = int(line, 16)
- if chunk_left == 0:
- break
- if amt is None:
- value += self._safe_read(chunk_left)
- elif amt < chunk_left:
- value += self._safe_read(amt)
- self.chunk_left = chunk_left - amt
- return value
- elif amt == chunk_left:
- value += self._safe_read(amt)
- self._safe_read(2) # toss the CRLF at the end of the chunk
- self.chunk_left = None
- return value
- else:
- value += self._safe_read(chunk_left)
- amt -= chunk_left
-
- # we read the whole chunk, get another
- self._safe_read(2) # toss the CRLF at the end of the chunk
- chunk_left = None
-
- # read and discard trailer up to the CRLF terminator
- ### note: we shouldn't have any trailers!
- while True:
- line = self.fp.readline()
- if line == '\r\n':
- break
-
- # we read everything; close the "file"
- self.close()
-
- return value
-
- def _safe_read(self, amt):
- """Read the number of bytes requested, compensating for partial reads.
-
- Normally, we have a blocking socket, but a read() can be interrupted
- by a signal (resulting in a partial read).
-
- Note that we cannot distinguish between EOF and an interrupt when zero
- bytes have been read. IncompleteRead() will be raised in this
- situation.
-
- This function should be used when <amt> bytes "should" be present for
- reading. If the bytes are truly not available (due to EOF), then the
- IncompleteRead exception can be used to detect the problem.
- """
- s = []
- while amt > 0:
- chunk = self.fp.read(min(amt, MAXAMOUNT))
- if not chunk:
- raise IncompleteRead(s)
- s.append(chunk)
- amt -= len(chunk)
- return ''.join(s)
-
- def getheader(self, name, default=None):
- if self.msg is None:
- raise ResponseNotReady()
- return self.msg.getheader(name, default)
-
- def getheaders(self):
- """Return list of (header, value) tuples."""
- if self.msg is None:
- raise ResponseNotReady()
- return self.msg.items()
-
-
-class HTTPConnection:
-
- _http_vsn = 11
- _http_vsn_str = 'HTTP/1.1'
-
- response_class = HTTPResponse
- default_port = HTTP_PORT
- auto_open = 1
- debuglevel = 0
- strict = 0
-
- def __init__(self, host, port=None, strict=None):
- self.sock = None
- self._buffer = []
- self.__response = None
- self.__state = _CS_IDLE
- self._method = None
-
- self._set_hostport(host, port)
- if strict is not None:
- self.strict = strict
-
- def _set_hostport(self, host, port):
- if port is None:
- i = host.rfind(':')
- j = host.rfind(']') # ipv6 addresses have [...]
- if i > j:
- try:
- port = int(host[i+1:])
- except ValueError:
- raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
- host = host[:i]
- else:
- port = self.default_port
- if host and host[0] == '[' and host[-1] == ']':
- host = host[1:-1]
- self.host = host
- self.port = port
-
- def set_debuglevel(self, level):
- self.debuglevel = level
-
- def connect(self):
- """Connect to the host and port specified in __init__."""
- msg = "getaddrinfo returns an empty list"
- for res in socket.getaddrinfo(self.host, self.port, 0,
- socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- try:
- self.sock = socket.socket(af, socktype, proto)
- if self.debuglevel > 0:
- print "connect: (%s, %s)" % (self.host, self.port)
- self.sock.connect(sa)
- except socket.error, msg:
- if self.debuglevel > 0:
- print 'connect fail:', (self.host, self.port)
- if self.sock:
- self.sock.close()
- self.sock = None
- continue
- break
- if not self.sock:
- raise socket.error, msg
-
- def close(self):
- """Close the connection to the HTTP server."""
- if self.sock:
- self.sock.close() # close it manually... there may be other refs
- self.sock = None
- if self.__response:
- self.__response.close()
- self.__response = None
- self.__state = _CS_IDLE
-
- def send(self, str):
- """Send `str' to the server."""
- if self.sock is None:
- if self.auto_open:
- self.connect()
- else:
- raise NotConnected()
-
- # send the data to the server. if we get a broken pipe, then close
- # the socket. we want to reconnect when somebody tries to send again.
- #
- # NOTE: we DO propagate the error, though, because we cannot simply
- # ignore the error... the caller will know if they can retry.
- if self.debuglevel > 0:
- print "send:", repr(str)
- try:
- self.sock.sendall(str)
- except socket.error, v:
- if v[0] == 32: # Broken pipe
- self.close()
- raise
-
- def _output(self, s):
- """Add a line of output to the current request buffer.
-
- Assumes that the line does *not* end with \\r\\n.
- """
- self._buffer.append(s)
-
- def _send_output(self):
- """Send the currently buffered request and clear the buffer.
-
- Appends an extra \\r\\n to the buffer.
- """
- self._buffer.extend(("", ""))
- msg = "\r\n".join(self._buffer)
- del self._buffer[:]
- self.send(msg)
-
- def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
- """Send a request to the server.
-
- `method' specifies an HTTP request method, e.g. 'GET'.
- `url' specifies the object being requested, e.g. '/index.html'.
- `skip_host' if True does not add automatically a 'Host:' header
- `skip_accept_encoding' if True does not add automatically an
- 'Accept-Encoding:' header
- """
-
- # if a prior response has been completed, then forget about it.
- if self.__response and self.__response.isclosed():
- self.__response = None
-
-
- # in certain cases, we cannot issue another request on this connection.
- # this occurs when:
- # 1) we are in the process of sending a request. (_CS_REQ_STARTED)
- # 2) a response to a previous request has signalled that it is going
- # to close the connection upon completion.
- # 3) the headers for the previous response have not been read, thus
- # we cannot determine whether point (2) is true. (_CS_REQ_SENT)
- #
- # if there is no prior response, then we can request at will.
- #
- # if point (2) is true, then we will have passed the socket to the
- # response (effectively meaning, "there is no prior response"), and
- # will open a new one when a new request is made.
- #
- # Note: if a prior response exists, then we *can* start a new request.
- # We are not allowed to begin fetching the response to this new
- # request, however, until that prior response is complete.
- #
- if self.__state == _CS_IDLE:
- self.__state = _CS_REQ_STARTED
- else:
- raise CannotSendRequest()
-
- # Save the method we use, we need it later in the response phase
- self._method = method
- if not url:
- url = '/'
- str = '%s %s %s' % (method, url, self._http_vsn_str)
-
- self._output(str)
-
- if self._http_vsn == 11:
- # Issue some standard headers for better HTTP/1.1 compliance
-
- if not skip_host:
- # this header is issued *only* for HTTP/1.1
- # connections. more specifically, this means it is
- # only issued when the client uses the new
- # HTTPConnection() class. backwards-compat clients
- # will be using HTTP/1.0 and those clients may be
- # issuing this header themselves. we should NOT issue
- # it twice; some web servers (such as Apache) barf
- # when they see two Host: headers
-
- # If we need a non-standard port,include it in the
- # header. If the request is going through a proxy,
- # but the host of the actual URL, not the host of the
- # proxy.
-
- netloc = ''
- if url.startswith('http'):
- nil, netloc, nil, nil, nil = urlsplit(url)
-
- if netloc:
- try:
- netloc_enc = netloc.encode("ascii")
- except UnicodeEncodeError:
- netloc_enc = netloc.encode("idna")
- self.putheader('Host', netloc_enc)
- else:
- try:
- host_enc = self.host.encode("ascii")
- except UnicodeEncodeError:
- host_enc = self.host.encode("idna")
- if self.port == HTTP_PORT:
- self.putheader('Host', host_enc)
- else:
- self.putheader('Host', "%s:%s" % (host_enc, self.port))
-
- # note: we are assuming that clients will not attempt to set these
- # headers since *this* library must deal with the
- # consequences. this also means that when the supporting
- # libraries are updated to recognize other forms, then this
- # code should be changed (removed or updated).
-
- # we only want a Content-Encoding of "identity" since we don't
- # support encodings such as x-gzip or x-deflate.
- if not skip_accept_encoding:
- self.putheader('Accept-Encoding', 'identity')
-
- # we can accept "chunked" Transfer-Encodings, but no others
- # NOTE: no TE header implies *only* "chunked"
- #self.putheader('TE', 'chunked')
-
- # if TE is supplied in the header, then it must appear in a
- # Connection header.
- #self.putheader('Connection', 'TE')
-
- else:
- # For HTTP/1.0, the server will assume "not chunked"
- pass
-
- def putheader(self, header, value):
- """Send a request header line to the server.
-
- For example: h.putheader('Accept', 'text/html')
- """
- if self.__state != _CS_REQ_STARTED:
- raise CannotSendHeader()
-
- str = '%s: %s' % (header, value)
- self._output(str)
-
- def endheaders(self):
- """Indicate that the last header line has been sent to the server."""
-
- if self.__state == _CS_REQ_STARTED:
- self.__state = _CS_REQ_SENT
- else:
- raise CannotSendHeader()
-
- self._send_output()
-
- def request(self, method, url, body=None, headers={}):
- """Send a complete request to the server."""
-
- try:
- self._send_request(method, url, body, headers)
- except socket.error, v:
- # trap 'Broken pipe' if we're allowed to automatically reconnect
- if v[0] != 32 or not self.auto_open:
- raise
- # try one more time
- self._send_request(method, url, body, headers)
-
- def _send_request(self, method, url, body, headers):
- # honour explicitly requested Host: and Accept-Encoding headers
- header_names = dict.fromkeys([k.lower() for k in headers])
- skips = {}
- if 'host' in header_names:
- skips['skip_host'] = 1
- if 'accept-encoding' in header_names:
- skips['skip_accept_encoding'] = 1
-
- self.putrequest(method, url, **skips)
-
- if body and ('content-length' not in header_names):
- self.putheader('Content-Length', str(len(body)))
- for hdr, value in headers.iteritems():
- self.putheader(hdr, value)
- self.endheaders()
-
- if body:
- self.send(body)
-
- def getresponse(self):
- "Get the response from the server."
-
- # if a prior response has been completed, then forget about it.
- if self.__response and self.__response.isclosed():
- self.__response = None
-
- #
- # if a prior response exists, then it must be completed (otherwise, we
- # cannot read this response's header to determine the connection-close
- # behavior)
- #
- # note: if a prior response existed, but was connection-close, then the
- # socket and response were made independent of this HTTPConnection
- # object since a new request requires that we open a whole new
- # connection
- #
- # this means the prior response had one of two states:
- # 1) will_close: this connection was reset and the prior socket and
- # response operate independently
- # 2) persistent: the response was retained and we await its
- # isclosed() status to become true.
- #
- if self.__state != _CS_REQ_SENT or self.__response:
- raise ResponseNotReady()
-
- if self.debuglevel > 0:
- response = self.response_class(self.sock, self.debuglevel,
- strict=self.strict,
- method=self._method)
- else:
- response = self.response_class(self.sock, strict=self.strict,
- method=self._method)
-
- response.begin()
- assert response.will_close != _UNKNOWN
- self.__state = _CS_IDLE
-
- if response.will_close:
- # this effectively passes the connection to the response
- self.close()
- else:
- # remember this, so we can tell when it is complete
- self.__response = response
-
- return response
-
-# The next several classes are used to define FakeSocket, a socket-like
-# interface to an SSL connection.
-
-# The primary complexity comes from faking a makefile() method. The
-# standard socket makefile() implementation calls dup() on the socket
-# file descriptor. As a consequence, clients can call close() on the
-# parent socket and its makefile children in any order. The underlying
-# socket isn't closed until they are all closed.
-
-# The implementation uses reference counting to keep the socket open
-# until the last client calls close(). SharedSocket keeps track of
-# the reference counting and SharedSocketClient provides an constructor
-# and close() method that call incref() and decref() correctly.
-
-class SharedSocket:
-
- def __init__(self, sock):
- self.sock = sock
- self._refcnt = 0
-
- def incref(self):
- self._refcnt += 1
-
- def decref(self):
- self._refcnt -= 1
- assert self._refcnt >= 0
- if self._refcnt == 0:
- self.sock.close()
-
- def __del__(self):
- self.sock.close()
-
-class SharedSocketClient:
-
- def __init__(self, shared):
- self._closed = 0
- self._shared = shared
- self._shared.incref()
- self._sock = shared.sock
-
- def close(self):
- if not self._closed:
- self._shared.decref()
- self._closed = 1
- self._shared = None
-
-class SSLFile(SharedSocketClient):
- """File-like object wrapping an SSL socket."""
-
- BUFSIZE = 8192
-
- def __init__(self, sock, ssl, bufsize=None):
- SharedSocketClient.__init__(self, sock)
- self._ssl = ssl
- self._buf = ''
- self._bufsize = bufsize or self.__class__.BUFSIZE
-
- def _read(self):
- buf = ''
- # put in a loop so that we retry on transient errors
- while True:
- try:
- buf = self._ssl.read(self._bufsize)
- except socket.sslerror, err:
- if (err[0] == socket.SSL_ERROR_WANT_READ
- or err[0] == socket.SSL_ERROR_WANT_WRITE):
- continue
- if (err[0] == socket.SSL_ERROR_ZERO_RETURN
- or err[0] == socket.SSL_ERROR_EOF):
- break
- raise
- except socket.error, err:
- if err[0] == errno.EINTR:
- continue
- if err[0] == errno.EBADF:
- # XXX socket was closed?
- break
- raise
- else:
- break
- return buf
-
- def read(self, size=None):
- L = [self._buf]
- avail = len(self._buf)
- while size is None or avail < size:
- s = self._read()
- if s == '':
- break
- L.append(s)
- avail += len(s)
- all = "".join(L)
- if size is None:
- self._buf = ''
- return all
- else:
- self._buf = all[size:]
- return all[:size]
-
- def readline(self):
- L = [self._buf]
- self._buf = ''
- while 1:
- i = L[-1].find("\n")
- if i >= 0:
- break
- s = self._read()
- if s == '':
- break
- L.append(s)
- if i == -1:
- # loop exited because there is no more data
- return "".join(L)
- else:
- all = "".join(L)
- # XXX could do enough bookkeeping not to do a 2nd search
- i = all.find("\n") + 1
- line = all[:i]
- self._buf = all[i:]
- return line
-
- def readlines(self, sizehint=0):
- total = 0
- list = []
- while True:
- line = self.readline()
- if not line:
- break
- list.append(line)
- total += len(line)
- if sizehint and total >= sizehint:
- break
- return list
-
- def fileno(self):
- return self._sock.fileno()
-
- def __iter__(self):
- return self
-
- def next(self):
- line = self.readline()
- if not line:
- raise StopIteration
- return line
-
-class FakeSocket(SharedSocketClient):
-
- class _closedsocket:
- def __getattr__(self, name):
- raise error(9, 'Bad file descriptor')
-
- def __init__(self, sock, ssl):
- sock = SharedSocket(sock)
- SharedSocketClient.__init__(self, sock)
- self._ssl = ssl
-
- def close(self):
- SharedSocketClient.close(self)
- self._sock = self.__class__._closedsocket()
-
- def makefile(self, mode, bufsize=None):
- if mode != 'r' and mode != 'rb':
- raise UnimplementedFileMode()
- return SSLFile(self._shared, self._ssl, bufsize)
-
- def send(self, stuff, flags = 0):
- return self._ssl.write(stuff)
-
- sendall = send
-
- def recv(self, len = 1024, flags = 0):
- return self._ssl.read(len)
-
- def __getattr__(self, attr):
- return getattr(self._sock, attr)
-
-
-class HTTPSConnection(HTTPConnection):
- "This class allows communication via SSL."
-
- default_port = HTTPS_PORT
-
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- strict=None):
- HTTPConnection.__init__(self, host, port, strict)
- self.key_file = key_file
- self.cert_file = cert_file
-
- def connect(self):
- "Connect to a host on a given (SSL) port."
-
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.connect((self.host, self.port))
- ssl = socket.ssl(sock, self.key_file, self.cert_file)
- self.sock = FakeSocket(sock, ssl)
-
-
-class HTTP:
- "Compatibility class with httplib.py from 1.5."
-
- _http_vsn = 10
- _http_vsn_str = 'HTTP/1.0'
-
- debuglevel = 0
-
- _connection_class = HTTPConnection
-
- def __init__(self, host='', port=None, strict=None):
- "Provide a default host, since the superclass requires one."
-
- # some joker passed 0 explicitly, meaning default port
- if port == 0:
- port = None
-
- # Note that we may pass an empty string as the host; this will throw
- # an error when we attempt to connect. Presumably, the client code
- # will call connect before then, with a proper host.
- self._setup(self._connection_class(host, port, strict))
-
- def _setup(self, conn):
- self._conn = conn
-
- # set up delegation to flesh out interface
- self.send = conn.send
- self.putrequest = conn.putrequest
- self.endheaders = conn.endheaders
- self.set_debuglevel = conn.set_debuglevel
-
- conn._http_vsn = self._http_vsn
- conn._http_vsn_str = self._http_vsn_str
-
- self.file = None
-
- def connect(self, host=None, port=None):
- "Accept arguments to set the host/port, since the superclass doesn't."
-
- if host is not None:
- self._conn._set_hostport(host, port)
- self._conn.connect()
-
- def getfile(self):
- "Provide a getfile, since the superclass' does not use this concept."
- return self.file
-
- def putheader(self, header, *values):
- "The superclass allows only one value argument."
- self._conn.putheader(header, '\r\n\t'.join(values))
-
- def getreply(self):
- """Compat definition since superclass does not define it.
-
- Returns a tuple consisting of:
- - server status code (e.g. '200' if all goes well)
- - server "reason" corresponding to status code
- - any RFC822 headers in the response from the server
- """
- try:
- response = self._conn.getresponse()
- except BadStatusLine, e:
- ### hmm. if getresponse() ever closes the socket on a bad request,
- ### then we are going to have problems with self.sock
-
- ### should we keep this behavior? do people use it?
- # keep the socket open (as a file), and return it
- self.file = self._conn.sock.makefile('rb', 0)
-
- # close our socket -- we want to restart after any protocol error
- self.close()
-
- self.headers = None
- return -1, e.line, None
-
- self.headers = response.msg
- self.file = response.fp
- return response.status, response.reason, response.msg
-
- def close(self):
- self._conn.close()
-
- # note that self.file == response.fp, which gets closed by the
- # superclass. just clear the object ref here.
- ### hmm. messy. if status==-1, then self.file is owned by us.
- ### well... we aren't explicitly closing, but losing this ref will
- ### do it
- self.file = None
-
-if hasattr(socket, 'ssl'):
- class HTTPS(HTTP):
- """Compatibility with 1.5 httplib interface
-
- Python 1.5.2 did not have an HTTPS class, but it defined an
- interface for sending http requests that is also useful for
- https.
- """
-
- _connection_class = HTTPSConnection
-
- def __init__(self, host='', port=None, key_file=None, cert_file=None,
- strict=None):
- # provide a default host, pass the X509 cert info
-
- # urf. compensate for bad input.
- if port == 0:
- port = None
- self._setup(self._connection_class(host, port, key_file,
- cert_file, strict))
-
- # we never actually use these for anything, but we keep them
- # here for compatibility with post-1.5.2 CVS.
- self.key_file = key_file
- self.cert_file = cert_file
-
-
-class HTTPException(Exception):
- # Subclasses that define an __init__ must call Exception.__init__
- # or define self.args. Otherwise, str() will fail.
- pass
-
-class NotConnected(HTTPException):
- pass
-
-class InvalidURL(HTTPException):
- pass
-
-class UnknownProtocol(HTTPException):
- def __init__(self, version):
- self.args = version,
- self.version = version
-
-class UnknownTransferEncoding(HTTPException):
- pass
-
-class UnimplementedFileMode(HTTPException):
- pass
-
-class IncompleteRead(HTTPException):
- def __init__(self, partial):
- self.args = partial,
- self.partial = partial
-
-class ImproperConnectionState(HTTPException):
- pass
-
-class CannotSendRequest(ImproperConnectionState):
- pass
-
-class CannotSendHeader(ImproperConnectionState):
- pass
-
-class ResponseNotReady(ImproperConnectionState):
- pass
-
-class BadStatusLine(HTTPException):
- def __init__(self, line):
- self.args = line,
- self.line = line
-
-# for backwards compatibility
-error = HTTPException
-
-class LineAndFileWrapper:
- """A limited file-like object for HTTP/0.9 responses."""
-
- # The status-line parsing code calls readline(), which normally
- # get the HTTP status line. For a 0.9 response, however, this is
- # actually the first line of the body! Clients need to get a
- # readable file object that contains that line.
-
- def __init__(self, line, file):
- self._line = line
- self._file = file
- self._line_consumed = 0
- self._line_offset = 0
- self._line_left = len(line)
-
- def __getattr__(self, attr):
- return getattr(self._file, attr)
-
- def _done(self):
- # called when the last byte is read from the line. After the
- # call, all read methods are delegated to the underlying file
- # object.
- self._line_consumed = 1
- self.read = self._file.read
- self.readline = self._file.readline
- self.readlines = self._file.readlines
-
- def read(self, amt=None):
- if self._line_consumed:
- return self._file.read(amt)
- assert self._line_left
- if amt is None or amt > self._line_left:
- s = self._line[self._line_offset:]
- self._done()
- if amt is None:
- return s + self._file.read()
- else:
- return s + self._file.read(amt - len(s))
- else:
- assert amt <= self._line_left
- i = self._line_offset
- j = i + amt
- s = self._line[i:j]
- self._line_offset = j
- self._line_left -= amt
- if self._line_left == 0:
- self._done()
- return s
-
- def readline(self):
- if self._line_consumed:
- return self._file.readline()
- assert self._line_left
- s = self._line[self._line_offset:]
- self._done()
- return s
-
- def readlines(self, size=None):
- if self._line_consumed:
- return self._file.readlines(size)
- assert self._line_left
- L = [self._line[self._line_offset:]]
- self._done()
- if size is None:
- return L + self._file.readlines()
- else:
- return L + self._file.readlines(size)
-
-def test():
- """Test this module.
-
- A hodge podge of tests collected here, because they have too many
- external dependencies for the regular test suite.
- """
-
- import sys
- import getopt
- opts, args = getopt.getopt(sys.argv[1:], 'd')
- dl = 0
- for o, a in opts:
- if o == '-d': dl = dl + 1
- host = 'www.python.org'
- selector = '/'
- if args[0:]: host = args[0]
- if args[1:]: selector = args[1]
- h = HTTP()
- h.set_debuglevel(dl)
- h.connect(host)
- h.putrequest('GET', selector)
- h.endheaders()
- status, reason, headers = h.getreply()
- print 'status =', status
- print 'reason =', reason
- print "read", len(h.getfile().read())
- print
- if headers:
- for header in headers.headers: print header.strip()
- print
-
- # minimal test that code to extract host from url works
- class HTTP11(HTTP):
- _http_vsn = 11
- _http_vsn_str = 'HTTP/1.1'
-
- h = HTTP11('www.python.org')
- h.putrequest('GET', 'http://www.python.org/~jeremy/')
- h.endheaders()
- h.getreply()
- h.close()
-
- if hasattr(socket, 'ssl'):
-
- for host, selector in (('sourceforge.net', '/projects/python'),
- ):
- print "https://%s%s" % (host, selector)
- hs = HTTPS()
- hs.set_debuglevel(dl)
- hs.connect(host)
- hs.putrequest('GET', selector)
- hs.endheaders()
- status, reason, headers = hs.getreply()
- print 'status =', status
- print 'reason =', reason
- print "read", len(hs.getfile().read())
- print
- if headers:
- for header in headers.headers: print header.strip()
- print
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/idlelib/AutoComplete.py b/sys/lib/python/idlelib/AutoComplete.py
deleted file mode 100644
index 708538662..000000000
--- a/sys/lib/python/idlelib/AutoComplete.py
+++ /dev/null
@@ -1,226 +0,0 @@
-"""AutoComplete.py - An IDLE extension for automatically completing names.
-
-This extension can complete either attribute names of file names. It can pop
-a window with all available names, for the user to select from.
-"""
-import os
-import sys
-import string
-
-from configHandler import idleConf
-
-import AutoCompleteWindow
-from HyperParser import HyperParser
-
-import __main__
-
-# This string includes all chars that may be in a file name (without a path
-# separator)
-FILENAME_CHARS = string.ascii_letters + string.digits + os.curdir + "._~#$:-"
-# This string includes all chars that may be in an identifier
-ID_CHARS = string.ascii_letters + string.digits + "_"
-
-# These constants represent the two different types of completions
-COMPLETE_ATTRIBUTES, COMPLETE_FILES = range(1, 2+1)
-
-class AutoComplete:
-
- menudefs = [
- ('edit', [
- ("Show completions", "<<force-open-completions>>"),
- ])
- ]
-
- popupwait = idleConf.GetOption("extensions", "AutoComplete",
- "popupwait", type="int", default=0)
-
- def __init__(self, editwin=None):
- if editwin == None: # subprocess and test
- self.editwin = None
- return
- self.editwin = editwin
- self.text = editwin.text
- self.autocompletewindow = None
-
- # id of delayed call, and the index of the text insert when the delayed
- # call was issued. If _delayed_completion_id is None, there is no
- # delayed call.
- self._delayed_completion_id = None
- self._delayed_completion_index = None
-
- def _make_autocomplete_window(self):
- return AutoCompleteWindow.AutoCompleteWindow(self.text)
-
- def _remove_autocomplete_window(self, event=None):
- if self.autocompletewindow:
- self.autocompletewindow.hide_window()
- self.autocompletewindow = None
-
- def force_open_completions_event(self, event):
- """Happens when the user really wants to open a completion list, even
- if a function call is needed.
- """
- self.open_completions(True, False, True)
-
- def try_open_completions_event(self, event):
- """Happens when it would be nice to open a completion list, but not
- really neccesary, for example after an dot, so function
- calls won't be made.
- """
- lastchar = self.text.get("insert-1c")
- if lastchar == ".":
- self._open_completions_later(False, False, False,
- COMPLETE_ATTRIBUTES)
- elif lastchar == os.sep:
- self._open_completions_later(False, False, False,
- COMPLETE_FILES)
-
- def autocomplete_event(self, event):
- """Happens when the user wants to complete his word, and if neccesary,
- open a completion list after that (if there is more than one
- completion)
- """
- if hasattr(event, "mc_state") and event.mc_state:
- # A modifier was pressed along with the tab, continue as usual.
- return
- if self.autocompletewindow and self.autocompletewindow.is_active():
- self.autocompletewindow.complete()
- return "break"
- else:
- opened = self.open_completions(False, True, True)
- if opened:
- return "break"
-
- def _open_completions_later(self, *args):
- self._delayed_completion_index = self.text.index("insert")
- if self._delayed_completion_id is not None:
- self.text.after_cancel(self._delayed_completion_id)
- self._delayed_completion_id = \
- self.text.after(self.popupwait, self._delayed_open_completions,
- *args)
-
- def _delayed_open_completions(self, *args):
- self._delayed_completion_id = None
- if self.text.index("insert") != self._delayed_completion_index:
- return
- self.open_completions(*args)
-
- def open_completions(self, evalfuncs, complete, userWantsWin, mode=None):
- """Find the completions and create the AutoCompleteWindow.
- Return True if successful (no syntax error or so found).
- if complete is True, then if there's nothing to complete and no
- start of completion, won't open completions and return False.
- If mode is given, will open a completion list only in this mode.
- """
- # Cancel another delayed call, if it exists.
- if self._delayed_completion_id is not None:
- self.text.after_cancel(self._delayed_completion_id)
- self._delayed_completion_id = None
-
- hp = HyperParser(self.editwin, "insert")
- curline = self.text.get("insert linestart", "insert")
- i = j = len(curline)
- if hp.is_in_string() and (not mode or mode==COMPLETE_FILES):
- self._remove_autocomplete_window()
- mode = COMPLETE_FILES
- while i and curline[i-1] in FILENAME_CHARS:
- i -= 1
- comp_start = curline[i:j]
- j = i
- while i and curline[i-1] in FILENAME_CHARS+os.sep:
- i -= 1
- comp_what = curline[i:j]
- elif hp.is_in_code() and (not mode or mode==COMPLETE_ATTRIBUTES):
- self._remove_autocomplete_window()
- mode = COMPLETE_ATTRIBUTES
- while i and curline[i-1] in ID_CHARS:
- i -= 1
- comp_start = curline[i:j]
- if i and curline[i-1] == '.':
- hp.set_index("insert-%dc" % (len(curline)-(i-1)))
- comp_what = hp.get_expression()
- if not comp_what or \
- (not evalfuncs and comp_what.find('(') != -1):
- return
- else:
- comp_what = ""
- else:
- return
-
- if complete and not comp_what and not comp_start:
- return
- comp_lists = self.fetch_completions(comp_what, mode)
- if not comp_lists[0]:
- return
- self.autocompletewindow = self._make_autocomplete_window()
- self.autocompletewindow.show_window(comp_lists,
- "insert-%dc" % len(comp_start),
- complete,
- mode,
- userWantsWin)
- return True
-
- def fetch_completions(self, what, mode):
- """Return a pair of lists of completions for something. The first list
- is a sublist of the second. Both are sorted.
-
- If there is a Python subprocess, get the comp. list there. Otherwise,
- either fetch_completions() is running in the subprocess itself or it
- was called in an IDLE EditorWindow before any script had been run.
-
- The subprocess environment is that of the most recently run script. If
- two unrelated modules are being edited some calltips in the current
- module may be inoperative if the module was not the last to run.
- """
- try:
- rpcclt = self.editwin.flist.pyshell.interp.rpcclt
- except:
- rpcclt = None
- if rpcclt:
- return rpcclt.remotecall("exec", "get_the_completion_list",
- (what, mode), {})
- else:
- if mode == COMPLETE_ATTRIBUTES:
- if what == "":
- namespace = __main__.__dict__.copy()
- namespace.update(__main__.__builtins__.__dict__)
- bigl = eval("dir()", namespace)
- bigl.sort()
- if "__all__" in bigl:
- smalll = eval("__all__", namespace)
- smalll.sort()
- else:
- smalll = filter(lambda s: s[:1] != '_', bigl)
- else:
- try:
- entity = self.get_entity(what)
- bigl = dir(entity)
- bigl.sort()
- if "__all__" in bigl:
- smalll = entity.__all__
- smalll.sort()
- else:
- smalll = filter(lambda s: s[:1] != '_', bigl)
- except:
- return [], []
-
- elif mode == COMPLETE_FILES:
- if what == "":
- what = "."
- try:
- expandedpath = os.path.expanduser(what)
- bigl = os.listdir(expandedpath)
- bigl.sort()
- smalll = filter(lambda s: s[:1] != '.', bigl)
- except OSError:
- return [], []
-
- if not smalll:
- smalll = bigl
- return smalll, bigl
-
- def get_entity(self, name):
- """Lookup name in a namespace spanning sys.modules and __main.dict__"""
- namespace = sys.modules.copy()
- namespace.update(__main__.__dict__)
- return eval(name, namespace)
diff --git a/sys/lib/python/idlelib/AutoCompleteWindow.py b/sys/lib/python/idlelib/AutoCompleteWindow.py
deleted file mode 100644
index d8bbff4a1..000000000
--- a/sys/lib/python/idlelib/AutoCompleteWindow.py
+++ /dev/null
@@ -1,393 +0,0 @@
-"""
-An auto-completion window for IDLE, used by the AutoComplete extension
-"""
-from Tkinter import *
-from MultiCall import MC_SHIFT
-import AutoComplete
-
-HIDE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-hide>>"
-HIDE_SEQUENCES = ("<FocusOut>", "<ButtonPress>")
-KEYPRESS_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keypress>>"
-# We need to bind event beyond <Key> so that the function will be called
-# before the default specific IDLE function
-KEYPRESS_SEQUENCES = ("<Key>", "<Key-BackSpace>", "<Key-Return>",
- "<Key-Up>", "<Key-Down>", "<Key-Home>", "<Key-End>")
-KEYRELEASE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keyrelease>>"
-KEYRELEASE_SEQUENCE = "<KeyRelease>"
-LISTUPDATE_SEQUENCE = "<ButtonRelease>"
-WINCONFIG_SEQUENCE = "<Configure>"
-DOUBLECLICK_SEQUENCE = "<Double-ButtonRelease>"
-
-class AutoCompleteWindow:
-
- def __init__(self, widget):
- # The widget (Text) on which we place the AutoCompleteWindow
- self.widget = widget
- # The widgets we create
- self.autocompletewindow = self.listbox = self.scrollbar = None
- # The default foreground and background of a selection. Saved because
- # they are changed to the regular colors of list items when the
- # completion start is not a prefix of the selected completion
- self.origselforeground = self.origselbackground = None
- # The list of completions
- self.completions = None
- # A list with more completions, or None
- self.morecompletions = None
- # The completion mode. Either AutoComplete.COMPLETE_ATTRIBUTES or
- # AutoComplete.COMPLETE_FILES
- self.mode = None
- # The current completion start, on the text box (a string)
- self.start = None
- # The index of the start of the completion
- self.startindex = None
- # The last typed start, used so that when the selection changes,
- # the new start will be as close as possible to the last typed one.
- self.lasttypedstart = None
- # Do we have an indication that the user wants the completion window
- # (for example, he clicked the list)
- self.userwantswindow = None
- # event ids
- self.hideid = self.keypressid = self.listupdateid = self.winconfigid \
- = self.keyreleaseid = self.doubleclickid = None
-
- def _change_start(self, newstart):
- i = 0
- while i < len(self.start) and i < len(newstart) and \
- self.start[i] == newstart[i]:
- i += 1
- if i < len(self.start):
- self.widget.delete("%s+%dc" % (self.startindex, i),
- "%s+%dc" % (self.startindex, len(self.start)))
- if i < len(newstart):
- self.widget.insert("%s+%dc" % (self.startindex, i),
- newstart[i:])
- self.start = newstart
-
- def _binary_search(self, s):
- """Find the first index in self.completions where completions[i] is
- greater or equal to s, or the last index if there is no such
- one."""
- i = 0; j = len(self.completions)
- while j > i:
- m = (i + j) // 2
- if self.completions[m] >= s:
- j = m
- else:
- i = m + 1
- return min(i, len(self.completions)-1)
-
- def _complete_string(self, s):
- """Assuming that s is the prefix of a string in self.completions,
- return the longest string which is a prefix of all the strings which
- s is a prefix of them. If s is not a prefix of a string, return s."""
- first = self._binary_search(s)
- if self.completions[first][:len(s)] != s:
- # There is not even one completion which s is a prefix of.
- return s
- # Find the end of the range of completions where s is a prefix of.
- i = first + 1
- j = len(self.completions)
- while j > i:
- m = (i + j) // 2
- if self.completions[m][:len(s)] != s:
- j = m
- else:
- i = m + 1
- last = i-1
-
- # We should return the maximum prefix of first and last
- i = len(s)
- while len(self.completions[first]) > i and \
- len(self.completions[last]) > i and \
- self.completions[first][i] == self.completions[last][i]:
- i += 1
- return self.completions[first][:i]
-
- def _selection_changed(self):
- """Should be called when the selection of the Listbox has changed.
- Updates the Listbox display and calls _change_start."""
- cursel = int(self.listbox.curselection()[0])
-
- self.listbox.see(cursel)
-
- lts = self.lasttypedstart
- selstart = self.completions[cursel]
- if self._binary_search(lts) == cursel:
- newstart = lts
- else:
- i = 0
- while i < len(lts) and i < len(selstart) and lts[i] == selstart[i]:
- i += 1
- while cursel > 0 and selstart[:i] <= self.completions[cursel-1]:
- i += 1
- newstart = selstart[:i]
- self._change_start(newstart)
-
- if self.completions[cursel][:len(self.start)] == self.start:
- # start is a prefix of the selected completion
- self.listbox.configure(selectbackground=self.origselbackground,
- selectforeground=self.origselforeground)
- else:
- self.listbox.configure(selectbackground=self.listbox.cget("bg"),
- selectforeground=self.listbox.cget("fg"))
- # If there are more completions, show them, and call me again.
- if self.morecompletions:
- self.completions = self.morecompletions
- self.morecompletions = None
- self.listbox.delete(0, END)
- for item in self.completions:
- self.listbox.insert(END, item)
- self.listbox.select_set(self._binary_search(self.start))
- self._selection_changed()
-
- def show_window(self, comp_lists, index, complete, mode, userWantsWin):
- """Show the autocomplete list, bind events.
- If complete is True, complete the text, and if there is exactly one
- matching completion, don't open a list."""
- # Handle the start we already have
- self.completions, self.morecompletions = comp_lists
- self.mode = mode
- self.startindex = self.widget.index(index)
- self.start = self.widget.get(self.startindex, "insert")
- if complete:
- completed = self._complete_string(self.start)
- self._change_start(completed)
- i = self._binary_search(completed)
- if self.completions[i] == completed and \
- (i == len(self.completions)-1 or
- self.completions[i+1][:len(completed)] != completed):
- # There is exactly one matching completion
- return
- self.userwantswindow = userWantsWin
- self.lasttypedstart = self.start
-
- # Put widgets in place
- self.autocompletewindow = acw = Toplevel(self.widget)
- # Put it in a position so that it is not seen.
- acw.wm_geometry("+10000+10000")
- # Make it float
- acw.wm_overrideredirect(1)
- try:
- # This command is only needed and available on Tk >= 8.4.0 for OSX
- # Without it, call tips intrude on the typing process by grabbing
- # the focus.
- acw.tk.call("::tk::unsupported::MacWindowStyle", "style", acw._w,
- "help", "noActivates")
- except TclError:
- pass
- self.scrollbar = scrollbar = Scrollbar(acw, orient=VERTICAL)
- self.listbox = listbox = Listbox(acw, yscrollcommand=scrollbar.set,
- exportselection=False, bg="white")
- for item in self.completions:
- listbox.insert(END, item)
- self.origselforeground = listbox.cget("selectforeground")
- self.origselbackground = listbox.cget("selectbackground")
- scrollbar.config(command=listbox.yview)
- scrollbar.pack(side=RIGHT, fill=Y)
- listbox.pack(side=LEFT, fill=BOTH, expand=True)
-
- # Initialize the listbox selection
- self.listbox.select_set(self._binary_search(self.start))
- self._selection_changed()
-
- # bind events
- self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
- self.hide_event)
- for seq in HIDE_SEQUENCES:
- self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
- self.keypressid = self.widget.bind(KEYPRESS_VIRTUAL_EVENT_NAME,
- self.keypress_event)
- for seq in KEYPRESS_SEQUENCES:
- self.widget.event_add(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
- self.keyreleaseid = self.widget.bind(KEYRELEASE_VIRTUAL_EVENT_NAME,
- self.keyrelease_event)
- self.widget.event_add(KEYRELEASE_VIRTUAL_EVENT_NAME,KEYRELEASE_SEQUENCE)
- self.listupdateid = listbox.bind(LISTUPDATE_SEQUENCE,
- self.listupdate_event)
- self.winconfigid = acw.bind(WINCONFIG_SEQUENCE, self.winconfig_event)
- self.doubleclickid = listbox.bind(DOUBLECLICK_SEQUENCE,
- self.doubleclick_event)
-
- def winconfig_event(self, event):
- if not self.is_active():
- return
- # Position the completion list window
- acw = self.autocompletewindow
- self.widget.see(self.startindex)
- x, y, cx, cy = self.widget.bbox(self.startindex)
- acw.wm_geometry("+%d+%d" % (x + self.widget.winfo_rootx(),
- y + self.widget.winfo_rooty() \
- -acw.winfo_height()))
-
-
- def hide_event(self, event):
- if not self.is_active():
- return
- self.hide_window()
-
- def listupdate_event(self, event):
- if not self.is_active():
- return
- self.userwantswindow = True
- self._selection_changed()
-
- def doubleclick_event(self, event):
- # Put the selected completion in the text, and close the list
- cursel = int(self.listbox.curselection()[0])
- self._change_start(self.completions[cursel])
- self.hide_window()
-
- def keypress_event(self, event):
- if not self.is_active():
- return
- keysym = event.keysym
- if hasattr(event, "mc_state"):
- state = event.mc_state
- else:
- state = 0
-
- if (len(keysym) == 1 or keysym in ("underscore", "BackSpace")
- or (self.mode==AutoComplete.COMPLETE_FILES and keysym in
- ("period", "minus"))) \
- and not (state & ~MC_SHIFT):
- # Normal editing of text
- if len(keysym) == 1:
- self._change_start(self.start + keysym)
- elif keysym == "underscore":
- self._change_start(self.start + '_')
- elif keysym == "period":
- self._change_start(self.start + '.')
- elif keysym == "minus":
- self._change_start(self.start + '-')
- else:
- # keysym == "BackSpace"
- if len(self.start) == 0:
- self.hide_window()
- return
- self._change_start(self.start[:-1])
- self.lasttypedstart = self.start
- self.listbox.select_clear(0, int(self.listbox.curselection()[0]))
- self.listbox.select_set(self._binary_search(self.start))
- self._selection_changed()
- return "break"
-
- elif keysym == "Return" and not state:
- # If start is a prefix of the selection, or there was an indication
- # that the user used the completion window, put the selected
- # completion in the text, and close the list.
- # Otherwise, close the window and let the event through.
- cursel = int(self.listbox.curselection()[0])
- if self.completions[cursel][:len(self.start)] == self.start or \
- self.userwantswindow:
- self._change_start(self.completions[cursel])
- self.hide_window()
- return "break"
- else:
- self.hide_window()
- return
-
- elif (self.mode == AutoComplete.COMPLETE_ATTRIBUTES and keysym in
- ("period", "space", "parenleft", "parenright", "bracketleft",
- "bracketright")) or \
- (self.mode == AutoComplete.COMPLETE_FILES and keysym in
- ("slash", "backslash", "quotedbl", "apostrophe")) \
- and not (state & ~MC_SHIFT):
- # If start is a prefix of the selection, but is not '' when
- # completing file names, put the whole
- # selected completion. Anyway, close the list.
- cursel = int(self.listbox.curselection()[0])
- if self.completions[cursel][:len(self.start)] == self.start \
- and (self.mode==AutoComplete.COMPLETE_ATTRIBUTES or self.start):
- self._change_start(self.completions[cursel])
- self.hide_window()
- return
-
- elif keysym in ("Home", "End", "Prior", "Next", "Up", "Down") and \
- not state:
- # Move the selection in the listbox
- self.userwantswindow = True
- cursel = int(self.listbox.curselection()[0])
- if keysym == "Home":
- newsel = 0
- elif keysym == "End":
- newsel = len(self.completions)-1
- elif keysym in ("Prior", "Next"):
- jump = self.listbox.nearest(self.listbox.winfo_height()) - \
- self.listbox.nearest(0)
- if keysym == "Prior":
- newsel = max(0, cursel-jump)
- else:
- assert keysym == "Next"
- newsel = min(len(self.completions)-1, cursel+jump)
- elif keysym == "Up":
- newsel = max(0, cursel-1)
- else:
- assert keysym == "Down"
- newsel = min(len(self.completions)-1, cursel+1)
- self.listbox.select_clear(cursel)
- self.listbox.select_set(newsel)
- self._selection_changed()
- return "break"
-
- elif (keysym == "Tab" and not state):
- # The user wants a completion, but it is handled by AutoComplete
- # (not AutoCompleteWindow), so ignore.
- self.userwantswindow = True
- return
-
- elif reduce(lambda x, y: x or y,
- [keysym.find(s) != -1 for s in ("Shift", "Control", "Alt",
- "Meta", "Command", "Option")
- ]):
- # A modifier key, so ignore
- return
-
- else:
- # Unknown event, close the window and let it through.
- self.hide_window()
- return
-
- def keyrelease_event(self, event):
- if not self.is_active():
- return
- if self.widget.index("insert") != \
- self.widget.index("%s+%dc" % (self.startindex, len(self.start))):
- # If we didn't catch an event which moved the insert, close window
- self.hide_window()
-
- def is_active(self):
- return self.autocompletewindow is not None
-
- def complete(self):
- self._change_start(self._complete_string(self.start))
- # The selection doesn't change.
-
- def hide_window(self):
- if not self.is_active():
- return
-
- # unbind events
- for seq in HIDE_SEQUENCES:
- self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
- self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
- self.hideid = None
- for seq in KEYPRESS_SEQUENCES:
- self.widget.event_delete(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
- self.widget.unbind(KEYPRESS_VIRTUAL_EVENT_NAME, self.keypressid)
- self.keypressid = None
- self.widget.event_delete(KEYRELEASE_VIRTUAL_EVENT_NAME,
- KEYRELEASE_SEQUENCE)
- self.widget.unbind(KEYRELEASE_VIRTUAL_EVENT_NAME, self.keyreleaseid)
- self.keyreleaseid = None
- self.listbox.unbind(LISTUPDATE_SEQUENCE, self.listupdateid)
- self.listupdateid = None
- self.autocompletewindow.unbind(WINCONFIG_SEQUENCE, self.winconfigid)
- self.winconfigid = None
-
- # destroy widgets
- self.scrollbar.destroy()
- self.scrollbar = None
- self.listbox.destroy()
- self.listbox = None
- self.autocompletewindow.destroy()
- self.autocompletewindow = None
diff --git a/sys/lib/python/idlelib/AutoExpand.py b/sys/lib/python/idlelib/AutoExpand.py
deleted file mode 100644
index 9e93d57d6..000000000
--- a/sys/lib/python/idlelib/AutoExpand.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import string
-import re
-
-###$ event <<expand-word>>
-###$ win <Alt-slash>
-###$ unix <Alt-slash>
-
-class AutoExpand:
-
- menudefs = [
- ('edit', [
- ('E_xpand Word', '<<expand-word>>'),
- ]),
- ]
-
- wordchars = string.ascii_letters + string.digits + "_"
-
- def __init__(self, editwin):
- self.text = editwin.text
- self.state = None
-
- def expand_word_event(self, event):
- curinsert = self.text.index("insert")
- curline = self.text.get("insert linestart", "insert lineend")
- if not self.state:
- words = self.getwords()
- index = 0
- else:
- words, index, insert, line = self.state
- if insert != curinsert or line != curline:
- words = self.getwords()
- index = 0
- if not words:
- self.text.bell()
- return "break"
- word = self.getprevword()
- self.text.delete("insert - %d chars" % len(word), "insert")
- newword = words[index]
- index = (index + 1) % len(words)
- if index == 0:
- self.text.bell() # Warn we cycled around
- self.text.insert("insert", newword)
- curinsert = self.text.index("insert")
- curline = self.text.get("insert linestart", "insert lineend")
- self.state = words, index, curinsert, curline
- return "break"
-
- def getwords(self):
- word = self.getprevword()
- if not word:
- return []
- before = self.text.get("1.0", "insert wordstart")
- wbefore = re.findall(r"\b" + word + r"\w+\b", before)
- del before
- after = self.text.get("insert wordend", "end")
- wafter = re.findall(r"\b" + word + r"\w+\b", after)
- del after
- if not wbefore and not wafter:
- return []
- words = []
- dict = {}
- # search backwards through words before
- wbefore.reverse()
- for w in wbefore:
- if dict.get(w):
- continue
- words.append(w)
- dict[w] = w
- # search onwards through words after
- for w in wafter:
- if dict.get(w):
- continue
- words.append(w)
- dict[w] = w
- words.append(word)
- return words
-
- def getprevword(self):
- line = self.text.get("insert linestart", "insert")
- i = len(line)
- while i > 0 and line[i-1] in self.wordchars:
- i = i-1
- return line[i:]
diff --git a/sys/lib/python/idlelib/Bindings.py b/sys/lib/python/idlelib/Bindings.py
deleted file mode 100644
index a3c9fc45f..000000000
--- a/sys/lib/python/idlelib/Bindings.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""Define the menu contents, hotkeys, and event bindings.
-
-There is additional configuration information in the EditorWindow class (and
-subclasses): the menus are created there based on the menu_specs (class)
-variable, and menus not created are silently skipped in the code here. This
-makes it possible, for example, to define a Debug menu which is only present in
-the PythonShell window, and a Format menu which is only present in the Editor
-windows.
-
-"""
-import sys
-from configHandler import idleConf
-
-menudefs = [
- # underscore prefixes character to underscore
- ('file', [
- ('_New Window', '<<open-new-window>>'),
- ('_Open...', '<<open-window-from-file>>'),
- ('Open _Module...', '<<open-module>>'),
- ('Class _Browser', '<<open-class-browser>>'),
- ('_Path Browser', '<<open-path-browser>>'),
- None,
- ('_Save', '<<save-window>>'),
- ('Save _As...', '<<save-window-as-file>>'),
- ('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
- None,
- ('Prin_t Window', '<<print-window>>'),
- None,
- ('_Close', '<<close-window>>'),
- ('E_xit', '<<close-all-windows>>'),
- ]),
- ('edit', [
- ('_Undo', '<<undo>>'),
- ('_Redo', '<<redo>>'),
- None,
- ('Cu_t', '<<cut>>'),
- ('_Copy', '<<copy>>'),
- ('_Paste', '<<paste>>'),
- ('Select _All', '<<select-all>>'),
- None,
- ('_Find...', '<<find>>'),
- ('Find A_gain', '<<find-again>>'),
- ('Find _Selection', '<<find-selection>>'),
- ('Find in Files...', '<<find-in-files>>'),
- ('R_eplace...', '<<replace>>'),
- ('Go to _Line', '<<goto-line>>'),
- ]),
-('format', [
- ('_Indent Region', '<<indent-region>>'),
- ('_Dedent Region', '<<dedent-region>>'),
- ('Comment _Out Region', '<<comment-region>>'),
- ('U_ncomment Region', '<<uncomment-region>>'),
- ('Tabify Region', '<<tabify-region>>'),
- ('Untabify Region', '<<untabify-region>>'),
- ('Toggle Tabs', '<<toggle-tabs>>'),
- ('New Indent Width', '<<change-indentwidth>>'),
- ]),
- ('run', [
- ('Python Shell', '<<open-python-shell>>'),
- ]),
- ('shell', [
- ('_View Last Restart', '<<view-restart>>'),
- ('_Restart Shell', '<<restart-shell>>'),
- ]),
- ('debug', [
- ('_Go to File/Line', '<<goto-file-line>>'),
- ('!_Debugger', '<<toggle-debugger>>'),
- ('_Stack Viewer', '<<open-stack-viewer>>'),
- ('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'),
- ]),
- ('options', [
- ('_Configure IDLE...', '<<open-config-dialog>>'),
- None,
- ]),
- ('help', [
- ('_About IDLE', '<<about-idle>>'),
- None,
- ('_IDLE Help', '<<help>>'),
- ('Python _Docs', '<<python-docs>>'),
- ]),
-]
-
-import sys
-if sys.platform == 'darwin' and '.app' in sys.executable:
- # Running as a proper MacOS application bundle. This block restructures
- # the menus a little to make them conform better to the HIG.
-
- quitItem = menudefs[0][1][-1]
- closeItem = menudefs[0][1][-2]
-
- # Remove the last 3 items of the file menu: a separator, close window and
- # quit. Close window will be reinserted just above the save item, where
- # it should be according to the HIG. Quit is in the application menu.
- del menudefs[0][1][-3:]
- menudefs[0][1].insert(6, closeItem)
-
- # Remove the 'About' entry from the help menu, it is in the application
- # menu
- del menudefs[-1][1][0:2]
-
- menudefs.insert(0,
- ('application', [
- ('About IDLE', '<<about-idle>>'),
- None,
- ('_Preferences....', '<<open-config-dialog>>'),
- ]))
-
-
-default_keydefs = idleConf.GetCurrentKeySet()
-
-del sys
diff --git a/sys/lib/python/idlelib/CREDITS.txt b/sys/lib/python/idlelib/CREDITS.txt
deleted file mode 100644
index 30561a9ea..000000000
--- a/sys/lib/python/idlelib/CREDITS.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Guido van Rossum, as well as being the creator of the Python language, is the
-original creator of IDLE. Other contributors prior to Version 0.8 include
-Mark Hammond, Jeremy Hylton, Tim Peters, and Moshe Zadka.
-
-IDLE's recent development has been carried out in the IDLEfork project.
-The objective was to develop a version of IDLE which had an execution
-environment which could be initialized prior to each run of user code.
-
-The IDLEfork project was initiated by David Scherer, with some help from Peter
-Schneider-Kamp and Nicholas Riley. David wrote the first version of the RPC
-code and designed a fast turn-around environment for VPython. Guido developed
-the RPC code and Remote Debugger currently integrated in IDLE. Bruce Sherwood
-contributed considerable time testing and suggesting improvements.
-
-Besides David and Guido, the main developers who have been active on IDLEfork
-are Stephen M. Gava, who implemented the configuration GUI, the new
-configuration system, and the About dialog, and Kurt B. Kaiser, who completed
-the integration of the RPC and remote debugger, implemented the threaded
-subprocess, and made a number of usability enhancements.
-
-Other contributors include Raymond Hettinger, Tony Lownds (Mac integration),
-Neal Norwitz (code check and clean-up), Ronald Oussoren (Mac integration),
-Noam Raphael (Code Context, Call Tips, many other patches), and Chui Tey (RPC
-integration, debugger integration and persistent breakpoints).
-
-Scott David Daniels, Tal Einat, Hernan Foffani, Christos Georgiou,
-Jim Jewett, Martin v. Löwis, Jason Orendorff, Josh Robb, Nigel Rowe,
-Bruce Sherwood, and Jeff Shute have submitted useful patches. Thanks, guys!
-
-For additional details refer to NEWS.txt and Changelog.
-
-Please contact the IDLE maintainer (kbk@shore.net) to have yourself included
-here if you are one of those we missed!
-
-
-
diff --git a/sys/lib/python/idlelib/CallTipWindow.py b/sys/lib/python/idlelib/CallTipWindow.py
deleted file mode 100644
index 22238855c..000000000
--- a/sys/lib/python/idlelib/CallTipWindow.py
+++ /dev/null
@@ -1,171 +0,0 @@
-"""A CallTip window class for Tkinter/IDLE.
-
-After ToolTip.py, which uses ideas gleaned from PySol
-Used by the CallTips IDLE extension.
-
-"""
-from Tkinter import *
-
-HIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-hide>>"
-HIDE_SEQUENCES = ("<Key-Escape>", "<FocusOut>")
-CHECKHIDE_VIRTUAL_EVENT_NAME = "<<calltipwindow-checkhide>>"
-CHECKHIDE_SEQUENCES = ("<KeyRelease>", "<ButtonRelease>")
-CHECKHIDE_TIME = 100 # miliseconds
-
-MARK_RIGHT = "calltipwindowregion_right"
-
-class CallTip:
-
- def __init__(self, widget):
- self.widget = widget
- self.tipwindow = self.label = None
- self.parenline = self.parencol = None
- self.lastline = None
- self.hideid = self.checkhideid = None
-
- def position_window(self):
- """Check if needs to reposition the window, and if so - do it."""
- curline = int(self.widget.index("insert").split('.')[0])
- if curline == self.lastline:
- return
- self.lastline = curline
- self.widget.see("insert")
- if curline == self.parenline:
- box = self.widget.bbox("%d.%d" % (self.parenline,
- self.parencol))
- else:
- box = self.widget.bbox("%d.0" % curline)
- if not box:
- box = list(self.widget.bbox("insert"))
- # align to left of window
- box[0] = 0
- box[2] = 0
- x = box[0] + self.widget.winfo_rootx() + 2
- y = box[1] + box[3] + self.widget.winfo_rooty()
- self.tipwindow.wm_geometry("+%d+%d" % (x, y))
-
- def showtip(self, text, parenleft, parenright):
- """Show the calltip, bind events which will close it and reposition it.
- """
- # truncate overly long calltip
- if len(text) >= 79:
- textlines = text.splitlines()
- for i, line in enumerate(textlines):
- if len(line) > 79:
- textlines[i] = line[:75] + ' ...'
- text = '\n'.join(textlines)
- self.text = text
- if self.tipwindow or not self.text:
- return
-
- self.widget.mark_set(MARK_RIGHT, parenright)
- self.parenline, self.parencol = map(
- int, self.widget.index(parenleft).split("."))
-
- self.tipwindow = tw = Toplevel(self.widget)
- self.position_window()
- # remove border on calltip window
- tw.wm_overrideredirect(1)
- try:
- # This command is only needed and available on Tk >= 8.4.0 for OSX
- # Without it, call tips intrude on the typing process by grabbing
- # the focus.
- tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w,
- "help", "noActivates")
- except TclError:
- pass
- self.label = Label(tw, text=self.text, justify=LEFT,
- background="#ffffe0", relief=SOLID, borderwidth=1,
- font = self.widget['font'])
- self.label.pack()
-
- self.checkhideid = self.widget.bind(CHECKHIDE_VIRTUAL_EVENT_NAME,
- self.checkhide_event)
- for seq in CHECKHIDE_SEQUENCES:
- self.widget.event_add(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
- self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
- self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
- self.hide_event)
- for seq in HIDE_SEQUENCES:
- self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
-
- def checkhide_event(self, event=None):
- if not self.tipwindow:
- # If the event was triggered by the same event that unbinded
- # this function, the function will be called nevertheless,
- # so do nothing in this case.
- return
- curline, curcol = map(int, self.widget.index("insert").split('.'))
- if curline < self.parenline or \
- (curline == self.parenline and curcol <= self.parencol) or \
- self.widget.compare("insert", ">", MARK_RIGHT):
- self.hidetip()
- else:
- self.position_window()
- self.widget.after(CHECKHIDE_TIME, self.checkhide_event)
-
- def hide_event(self, event):
- if not self.tipwindow:
- # See the explanation in checkhide_event.
- return
- self.hidetip()
-
- def hidetip(self):
- if not self.tipwindow:
- return
-
- for seq in CHECKHIDE_SEQUENCES:
- self.widget.event_delete(CHECKHIDE_VIRTUAL_EVENT_NAME, seq)
- self.widget.unbind(CHECKHIDE_VIRTUAL_EVENT_NAME, self.checkhideid)
- self.checkhideid = None
- for seq in HIDE_SEQUENCES:
- self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
- self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
- self.hideid = None
-
- self.label.destroy()
- self.label = None
- self.tipwindow.destroy()
- self.tipwindow = None
-
- self.widget.mark_unset(MARK_RIGHT)
- self.parenline = self.parencol = self.lastline = None
-
- def is_active(self):
- return bool(self.tipwindow)
-
-
-
-###############################
-#
-# Test Code
-#
-class container: # Conceptually an editor_window
- def __init__(self):
- root = Tk()
- text = self.text = Text(root)
- text.pack(side=LEFT, fill=BOTH, expand=1)
- text.insert("insert", "string.split")
- root.update()
- self.calltip = CallTip(text)
-
- text.event_add("<<calltip-show>>", "(")
- text.event_add("<<calltip-hide>>", ")")
- text.bind("<<calltip-show>>", self.calltip_show)
- text.bind("<<calltip-hide>>", self.calltip_hide)
-
- text.focus_set()
- root.mainloop()
-
- def calltip_show(self, event):
- self.calltip.showtip("Hello world")
-
- def calltip_hide(self, event):
- self.calltip.hidetip()
-
-def main():
- # Test code
- c=container()
-
-if __name__=='__main__':
- main()
diff --git a/sys/lib/python/idlelib/CallTips.py b/sys/lib/python/idlelib/CallTips.py
deleted file mode 100644
index 997eb13a0..000000000
--- a/sys/lib/python/idlelib/CallTips.py
+++ /dev/null
@@ -1,212 +0,0 @@
-"""CallTips.py - An IDLE Extension to Jog Your Memory
-
-Call Tips are floating windows which display function, class, and method
-parameter and docstring information when you type an opening parenthesis, and
-which disappear when you type a closing parenthesis.
-"""
-import sys
-import types
-
-import CallTipWindow
-from HyperParser import HyperParser
-
-import __main__
-
-class CallTips:
-
- menudefs = [
- ('edit', [
- ("Show call tip", "<<force-open-calltip>>"),
- ])
- ]
-
- def __init__(self, editwin=None):
- if editwin is None: # subprocess and test
- self.editwin = None
- return
- self.editwin = editwin
- self.text = editwin.text
- self.calltip = None
- self._make_calltip_window = self._make_tk_calltip_window
-
- def close(self):
- self._make_calltip_window = None
-
- def _make_tk_calltip_window(self):
- # See __init__ for usage
- return CallTipWindow.CallTip(self.text)
-
- def _remove_calltip_window(self, event=None):
- if self.calltip:
- self.calltip.hidetip()
- self.calltip = None
-
- def force_open_calltip_event(self, event):
- """Happens when the user really wants to open a CallTip, even if a
- function call is needed.
- """
- self.open_calltip(True)
-
- def try_open_calltip_event(self, event):
- """Happens when it would be nice to open a CallTip, but not really
- neccesary, for example after an opening bracket, so function calls
- won't be made.
- """
- self.open_calltip(False)
-
- def refresh_calltip_event(self, event):
- """If there is already a calltip window, check if it is still needed,
- and if so, reload it.
- """
- if self.calltip and self.calltip.is_active():
- self.open_calltip(False)
-
- def open_calltip(self, evalfuncs):
- self._remove_calltip_window()
-
- hp = HyperParser(self.editwin, "insert")
- sur_paren = hp.get_surrounding_brackets('(')
- if not sur_paren:
- return
- hp.set_index(sur_paren[0])
- name = hp.get_expression()
- if not name or (not evalfuncs and name.find('(') != -1):
- return
- arg_text = self.fetch_tip(name)
- if not arg_text:
- return
- self.calltip = self._make_calltip_window()
- self.calltip.showtip(arg_text, sur_paren[0], sur_paren[1])
-
- def fetch_tip(self, name):
- """Return the argument list and docstring of a function or class
-
- If there is a Python subprocess, get the calltip there. Otherwise,
- either fetch_tip() is running in the subprocess itself or it was called
- in an IDLE EditorWindow before any script had been run.
-
- The subprocess environment is that of the most recently run script. If
- two unrelated modules are being edited some calltips in the current
- module may be inoperative if the module was not the last to run.
-
- """
- try:
- rpcclt = self.editwin.flist.pyshell.interp.rpcclt
- except:
- rpcclt = None
- if rpcclt:
- return rpcclt.remotecall("exec", "get_the_calltip",
- (name,), {})
- else:
- entity = self.get_entity(name)
- return get_arg_text(entity)
-
- def get_entity(self, name):
- "Lookup name in a namespace spanning sys.modules and __main.dict__"
- if name:
- namespace = sys.modules.copy()
- namespace.update(__main__.__dict__)
- try:
- return eval(name, namespace)
- except:
- return None
-
-def _find_constructor(class_ob):
- # Given a class object, return a function object used for the
- # constructor (ie, __init__() ) or None if we can't find one.
- try:
- return class_ob.__init__.im_func
- except AttributeError:
- for base in class_ob.__bases__:
- rc = _find_constructor(base)
- if rc is not None: return rc
- return None
-
-def get_arg_text(ob):
- """Get a string describing the arguments for the given object"""
- argText = ""
- if ob is not None:
- argOffset = 0
- if type(ob) in (types.ClassType, types.TypeType):
- # Look for the highest __init__ in the class chain.
- fob = _find_constructor(ob)
- if fob is None:
- fob = lambda: None
- else:
- argOffset = 1
- elif type(ob)==types.MethodType:
- # bit of a hack for methods - turn it into a function
- # but we drop the "self" param.
- fob = ob.im_func
- argOffset = 1
- else:
- fob = ob
- # Try and build one for Python defined functions
- if type(fob) in [types.FunctionType, types.LambdaType]:
- try:
- realArgs = fob.func_code.co_varnames[argOffset:fob.func_code.co_argcount]
- defaults = fob.func_defaults or []
- defaults = list(map(lambda name: "=%s" % repr(name), defaults))
- defaults = [""] * (len(realArgs)-len(defaults)) + defaults
- items = map(lambda arg, dflt: arg+dflt, realArgs, defaults)
- if fob.func_code.co_flags & 0x4:
- items.append("...")
- if fob.func_code.co_flags & 0x8:
- items.append("***")
- argText = ", ".join(items)
- argText = "(%s)" % argText
- except:
- pass
- # See if we can use the docstring
- doc = getattr(ob, "__doc__", "")
- if doc:
- doc = doc.lstrip()
- pos = doc.find("\n")
- if pos < 0 or pos > 70:
- pos = 70
- if argText:
- argText += "\n"
- argText += doc[:pos]
- return argText
-
-#################################################
-#
-# Test code
-#
-if __name__=='__main__':
-
- def t1(): "()"
- def t2(a, b=None): "(a, b=None)"
- def t3(a, *args): "(a, ...)"
- def t4(*args): "(...)"
- def t5(a, *args): "(a, ...)"
- def t6(a, b=None, *args, **kw): "(a, b=None, ..., ***)"
-
- class TC:
- "(a=None, ...)"
- def __init__(self, a=None, *b): "(a=None, ...)"
- def t1(self): "()"
- def t2(self, a, b=None): "(a, b=None)"
- def t3(self, a, *args): "(a, ...)"
- def t4(self, *args): "(...)"
- def t5(self, a, *args): "(a, ...)"
- def t6(self, a, b=None, *args, **kw): "(a, b=None, ..., ***)"
-
- def test(tests):
- ct = CallTips()
- failed=[]
- for t in tests:
- expected = t.__doc__ + "\n" + t.__doc__
- name = t.__name__
- arg_text = ct.fetch_tip(name)
- if arg_text != expected:
- failed.append(t)
- print "%s - expected %s, but got %s" % (t, expected,
- get_arg_text(entity))
- print "%d of %d tests failed" % (len(failed), len(tests))
-
- tc = TC()
- tests = (t1, t2, t3, t4, t5, t6,
- TC, tc.t1, tc.t2, tc.t3, tc.t4, tc.t5, tc.t6)
-
- test(tests)
diff --git a/sys/lib/python/idlelib/ChangeLog b/sys/lib/python/idlelib/ChangeLog
deleted file mode 100644
index 985871bee..000000000
--- a/sys/lib/python/idlelib/ChangeLog
+++ /dev/null
@@ -1,1591 +0,0 @@
-Please refer to the IDLEfork and IDLE CVS repositories for
-change details subsequent to the 0.8.1 release.
-
-
-IDLEfork ChangeLog
-==================
-
-2001-07-20 11:35 elguavas
-
- * README.txt, NEWS.txt: bring up to date for 0.8.1 release
-
-2001-07-19 16:40 elguavas
-
- * IDLEFORK.html: replaced by IDLEFORK-index.html
-
-2001-07-19 16:39 elguavas
-
- * IDLEFORK-index.html: updated placeholder idlefork homepage
-
-2001-07-19 14:49 elguavas
-
- * ChangeLog, EditorWindow.py, INSTALLATION, NEWS.txt, README.txt,
- TODO.txt, idlever.py:
- minor tidy-ups ready for 0.8.1 alpha tarball release
-
-2001-07-17 15:12 kbk
-
- * INSTALLATION, setup.py: INSTALLATION: Remove the coexist.patch
- instructions
-
- **************** setup.py:
-
- Remove the idles script, add some words on IDLE Fork to the
- long_description, and clean up some line spacing.
-
-2001-07-17 15:01 kbk
-
- * coexist.patch: Put this in the attic, at least for now...
-
-2001-07-17 14:59 kbk
-
- * PyShell.py, idle, idles: Implement idle command interface as
- suggested by GvR [idle-dev] 16 July **************** PyShell: Added
- functionality:
-
- usage: idle.py [-c command] [-d] [-i] [-r script] [-s] [-t title]
- [arg] ...
-
- idle file(s) (without options) edit the file(s)
-
- -c cmd run the command in a shell -d enable the
- debugger -i open an interactive shell -i file(s) open a
- shell and also an editor window for each file -r script run a file
- as a script in a shell -s run $IDLESTARTUP or
- $PYTHONSTARTUP before anything else -t title set title of shell
- window
-
- Remaining arguments are applied to the command (-c) or script (-r).
-
- ****************** idles: Removed the idles script, not needed
-
- ****************** idle: Removed the IdleConf references, not
- required anymore
-
-2001-07-16 17:08 kbk
-
- * INSTALLATION, coexist.patch: Added installation instructions.
-
- Added a patch which modifies idlefork so that it can co-exist with
- "official" IDLE in the site-packages directory. This patch is not
- necessary if only idlefork IDLE is installed. See INSTALLATION for
- further details.
-
-2001-07-16 15:50 kbk
-
- * idles: Add a script "idles" which opens a Python Shell window.
-
- The default behaviour of idlefork idle is to open an editor window
- instead of a shell. Complex expressions may be run in a fresh
- environment by selecting "run". There are times, however, when a
- shell is desired. Though one can be started by "idle -t 'foo'",
- this script is more convenient. In addition, a shell and an editor
- window can be started in parallel by "idles -e foo.py".
-
-2001-07-16 15:25 kbk
-
- * PyShell.py: Call out IDLE Fork in startup message.
-
-2001-07-16 14:00 kbk
-
- * PyShell.py, setup.py: Add a script "idles" which opens a Python
- Shell window.
-
- The default behaviour of idlefork idle is to open an editor window
- instead of a shell. Complex expressions may be run in a fresh
- environment by selecting "run". There are times, however, when a
- shell is desired. Though one can be started by "idle -t 'foo'",
- this script is more convenient. In addition, a shell and an editor
- window can be started in parallel by "idles -e foo.py".
-
-2001-07-15 03:06 kbk
-
- * pyclbr.py, tabnanny.py: tabnanny and pyclbr are now found in /Lib
-
-2001-07-15 02:29 kbk
-
- * BrowserControl.py: Remove, was retained for 1.5.2 support
-
-2001-07-14 15:48 kbk
-
- * setup.py: Installing Idle to site-packages via Distutils does not
- copy the Idle help.txt file.
-
- Ref SF Python Patch 422471
-
-2001-07-14 15:26 kbk
-
- * keydefs.py: py-cvs-2001_07_13 (Rev 1.3) merge
-
- "Make copy, cut and paste events case insensitive. Reported by
- Patrick K. O'Brien on idle-dev. (Should other bindings follow
- suit?)" --GvR
-
-2001-07-14 15:21 kbk
-
- * idle.py: py-cvs-2001_07_13 (Rev 1.4) merge
-
- "Move the action of loading the configuration to the IdleConf
- module rather than the idle.py script. This has advantages and
- disadvantages; the biggest advantage being that we can more easily
- have an alternative main program." --GvR
-
-2001-07-14 15:18 kbk
-
- * extend.txt: py-cvs-2001_07_13 (Rev 1.4) merge
-
- "Quick update to the extension mechanism (extend.py is gone, long
- live config.txt)" --GvR
-
-2001-07-14 15:15 kbk
-
- * StackViewer.py: py-cvs-2001_07_13 (Rev 1.16) merge
-
- "Refactored, with some future plans in mind. This now uses the new
- gotofileline() method defined in FileList.py" --GvR
-
-2001-07-14 15:10 kbk
-
- * PyShell.py: py-cvs-2001_07_13 (Rev 1.34) merge
-
- "Amazing. A very subtle change in policy in descr-branch actually
- found a bug here. Here's the deal: Class PyShell derives from
- class OutputWindow. Method PyShell.close() wants to invoke its
- parent method, but because PyShell long ago was inherited from
- class PyShellEditorWindow, it invokes
- PyShelEditorWindow.close(self). Now, class PyShellEditorWindow
- itself derives from class OutputWindow, and inherits the close()
- method from there without overriding it. Under the old rules,
- PyShellEditorWindow.close would return an unbound method restricted
- to the class that defined the implementation of close(), which was
- OutputWindow.close. Under the new rules, the unbound method is
- restricted to the class whose method was requested, that is
- PyShellEditorWindow, and this was correctly trapped as an error."
- --GvR
-
-2001-07-14 14:59 kbk
-
- * PyParse.py: py-cvs-2001_07_13 (Rel 1.9) merge
-
- "Taught IDLE's autoident parser that "yield" is a keyword that
- begins a stmt. Along w/ the preceding change to keyword.py, making
- all this work w/ a future-stmt just looks harder and harder."
- --tim_one
-
- (From Rel 1.8: "Hack to make this still work with Python 1.5.2.
- ;-( " --fdrake)
-
-2001-07-14 14:51 kbk
-
- * IdleConf.py: py-cvs-2001_07_13 (Rel 1.7) merge
-
- "Move the action of loading the configuration to the IdleConf
- module rather than the idle.py script. This has advantages and
- disadvantages; the biggest advantage being that we can more easily
- have an alternative main program." --GvR
-
-2001-07-14 14:45 kbk
-
- * FileList.py: py-cvs-2000_07_13 (Rev 1.9) merge
-
- "Delete goodname() method, which is unused. Add gotofileline(), a
- convenience method which I intend to use in a variant. Rename
- test() to _test()." --GvR
-
- This was an interesting merge. The join completely missed removing
- goodname(), which was adjacent, but outside of, a small conflict.
- I only caught it by comparing the 1.1.3.2/1.1.3.3 diff. CVS ain't
- infallible.
-
-2001-07-14 13:58 kbk
-
- * EditorWindow.py: py-cvs-2000_07_13 (Rev 1.38) merge "Remove
- legacy support for the BrowserControl module; the webbrowser module
- has been included since Python 2.0, and that is the preferred
- interface." --fdrake
-
-2001-07-14 13:32 kbk
-
- * EditorWindow.py, FileList.py, IdleConf.py, PyParse.py,
- PyShell.py, StackViewer.py, extend.txt, idle.py, keydefs.py: Import
- the 2001 July 13 23:59 GMT version of Python CVS IDLE on the
- existing 1.1.3 vendor branch named py-cvs-vendor-branch. Release
- tag is py-cvs-2001_07_13.
-
-2001-07-14 12:02 kbk
-
- * Icons/python.gif: py-cvs-rel2_1 (Rev 1.2) merge Copied py-cvs rev
- 1.2 changed file to idlefork MAIN
-
-2001-07-14 11:58 kbk
-
- * Icons/minusnode.gif: py-cvs-rel2_1 (Rev 1.2) merge Copied py-cvs
- 1.2 changed file to idlefork MAIN
-
-2001-07-14 11:23 kbk
-
- * ScrolledList.py: py-cvs-rel2_1 (rev 1.5) merge - whitespace
- normalization
-
-2001-07-14 11:20 kbk
-
- * Separator.py: py-cvs-rel2_1 (Rev 1.3) merge - whitespace
- normalization
-
-2001-07-14 11:16 kbk
-
- * StackViewer.py: py-cvs-rel2_1 (Rev 1.15) merge - whitespace
- normalization
-
-2001-07-14 11:14 kbk
-
- * ToolTip.py: py-cvs-rel2_1 (Rev 1.2) merge - whitespace
- normalization
-
-2001-07-14 10:13 kbk
-
- * PyShell.py: cvs-py-rel2_1 (Rev 1.29 - 1.33) merge
-
- Merged the following py-cvs revs without conflict: 1.29 Reduce
- copyright text output at startup 1.30 Delay setting sys.args until
- Tkinter is fully initialized 1.31 Whitespace normalization 1.32
- Turn syntax warning into error when interactive 1.33 Fix warning
- initialization bug
-
- Note that module is extensively modified wrt py-cvs
-
-2001-07-14 06:33 kbk
-
- * PyParse.py: py-cvs-rel2_1 (Rev 1.6 - 1.8) merge Fix autoindent
- bug and deflect Unicode from text.get()
-
-2001-07-14 06:00 kbk
-
- * Percolator.py: py-cvs-rel2_1 (Rev 1.3) "move "from Tkinter import
- *" to module level" --jhylton
-
-2001-07-14 05:57 kbk
-
- * PathBrowser.py: py-cvs-rel2_1 (Rev 1.6) merge - whitespace
- normalization
-
-2001-07-14 05:49 kbk
-
- * ParenMatch.py: cvs-py-rel2_1 (Rev 1.5) merge - whitespace
- normalization
-
-2001-07-14 03:57 kbk
-
- * ObjectBrowser.py: py-cvs-rel2_1 (Rev 1.3) merge "Make the test
- program work outside IDLE." -- GvR
-
-2001-07-14 03:52 kbk
-
- * MultiStatusBar.py: py-cvs-rel2_1 (Rev 1.2) merge - whitespace
- normalization
-
-2001-07-14 03:44 kbk
-
- * MultiScrolledLists.py: py-cvs-rel2_1 (Rev 1.2) merge - whitespace
- normalization
-
-2001-07-14 03:40 kbk
-
- * IdleHistory.py: py-cvs-rel2_1 (Rev 1.4) merge - whitespace
- normalization
-
-2001-07-14 03:38 kbk
-
- * IdleConf.py: py-cvs-rel2_1 (Rev 1.6) merge - whitespace
- normalization
-
-2001-07-13 14:18 kbk
-
- * IOBinding.py: py-cvs-rel2_1 (Rev 1.4) merge - move "import *" to
- module level
-
-2001-07-13 14:12 kbk
-
- * FormatParagraph.py: py-cvs-rel2_1 (Rev 1.9) merge - whitespace
- normalization
-
-2001-07-13 14:07 kbk
-
- * FileList.py: py-cvs-rel2_1 (Rev 1.8) merge - whitespace
- normalization
-
-2001-07-13 13:35 kbk
-
- * EditorWindow.py: py-cvs-rel2_1 (Rev 1.33 - 1.37) merge
-
- VP IDLE version depended on VP's ExecBinding.py and spawn.py to get
- the path to the Windows Doc directory (relative to python.exe).
- Removed this conflicting code in favor of py-cvs updates which on
- Windows use a hard coded path relative to the location of this
- module. py-cvs updates include support for webbrowser.py. Module
- still has BrowserControl.py for 1.5.2 support.
-
- At this point, the differences wrt py-cvs relate to menu
- functionality.
-
-2001-07-13 11:30 kbk
-
- * ConfigParser.py: py-cvs-rel2_1 merge - Remove, lives in /Lib
-
-2001-07-13 10:10 kbk
-
- * Delegator.py: py-cvs-rel2_1 (Rev 1.3) merge - whitespace
- normalization
-
-2001-07-13 10:07 kbk
-
- * Debugger.py: py-cvs-rel2_1 (Rev 1.15) merge - whitespace
- normalization
-
-2001-07-13 10:04 kbk
-
- * ColorDelegator.py: py-cvs-rel2_1 (Rev 1.11 and 1.12) merge
- Colorize "as" after "import" / use DEBUG instead of __debug__
-
-2001-07-13 09:54 kbk
-
- * ClassBrowser.py: py-cvs-rel2_1 (Rev 1.12) merge - whitespace
- normalization
-
-2001-07-13 09:41 kbk
-
- * BrowserControl.py: py-cvs-rel2_1 (Rev 1.1) merge - New File -
- Force HEAD to trunk with -f Note: browser.py was renamed
- BrowserControl.py 10 May 2000. It provides a collection of classes
- and convenience functions to control external browsers "for 1.5.2
- support". It was removed from py-cvs 18 April 2001.
-
-2001-07-13 09:10 kbk
-
- * CallTips.py: py-cvs-rel2_1 (Rev 1.8) merge - whitespace
- normalization
-
-2001-07-13 08:26 kbk
-
- * CallTipWindow.py: py-cvs-rel2_1 (Rev 1.3) merge - whitespace
- normalization
-
-2001-07-13 08:13 kbk
-
- * AutoExpand.py: py-cvs-rel1_2 (Rev 1.4) merge, "Add Alt-slash to
- Unix keydefs (I somehow need it on RH 6.2). Get rid of assignment
- to unused self.text.wordlist." --GvR
-
-2001-07-12 16:54 elguavas
-
- * ReplaceDialog.py: py-cvs merge, python 1.5.2 compatibility
-
-2001-07-12 16:46 elguavas
-
- * ScriptBinding.py: py-cvs merge, better error dialog
-
-2001-07-12 16:38 elguavas
-
- * TODO.txt: py-cvs merge, additions
-
-2001-07-12 15:35 elguavas
-
- * WindowList.py: py-cvs merge, correct indentation
-
-2001-07-12 15:24 elguavas
-
- * config.txt: py-cvs merge, correct typo
-
-2001-07-12 15:21 elguavas
-
- * help.txt: py-cvs merge, update colour changing info
-
-2001-07-12 14:51 elguavas
-
- * idle.py: py-cvs merge, idle_dir loading changed
-
-2001-07-12 14:44 elguavas
-
- * idlever.py: py-cvs merge, version update
-
-2001-07-11 12:53 kbk
-
- * BrowserControl.py: Initial revision
-
-2001-07-11 12:53 kbk
-
- * AutoExpand.py, BrowserControl.py, CallTipWindow.py, CallTips.py,
- ClassBrowser.py, ColorDelegator.py, Debugger.py, Delegator.py,
- EditorWindow.py, FileList.py, FormatParagraph.py, IOBinding.py,
- IdleConf.py, IdleHistory.py, MultiScrolledLists.py,
- MultiStatusBar.py, ObjectBrowser.py, OutputWindow.py,
- ParenMatch.py, PathBrowser.py, Percolator.py, PyParse.py,
- PyShell.py, RemoteInterp.py, ReplaceDialog.py, ScriptBinding.py,
- ScrolledList.py, Separator.py, StackViewer.py, TODO.txt,
- ToolTip.py, WindowList.py, config.txt, help.txt, idle, idle.bat,
- idle.py, idlever.py, setup.py, Icons/minusnode.gif,
- Icons/python.gif: Import the release 2.1 version of Python CVS IDLE
- on the existing 1.1.3 vendor branch named py-cvs-vendor-branch,
- with release tag py-cvs-rel2_1.
-
-2001-07-11 12:34 kbk
-
- * AutoExpand.py, AutoIndent.py, Bindings.py, CallTipWindow.py,
- CallTips.py, ChangeLog, ClassBrowser.py, ColorDelegator.py,
- Debugger.py, Delegator.py, EditorWindow.py, FileList.py,
- FormatParagraph.py, FrameViewer.py, GrepDialog.py, IOBinding.py,
- IdleConf.py, IdleHistory.py, MultiScrolledLists.py,
- MultiStatusBar.py, NEWS.txt, ObjectBrowser.py, OldStackViewer.py,
- OutputWindow.py, ParenMatch.py, PathBrowser.py, Percolator.py,
- PyParse.py, PyShell.py, README.txt, RemoteInterp.py,
- ReplaceDialog.py, ScriptBinding.py, ScrolledList.py,
- SearchBinding.py, SearchDialog.py, SearchDialogBase.py,
- SearchEngine.py, Separator.py, StackViewer.py, TODO.txt,
- ToolTip.py, TreeWidget.py, UndoDelegator.py, WidgetRedirector.py,
- WindowList.py, ZoomHeight.py, __init__.py, config-unix.txt,
- config-win.txt, config.txt, eventparse.py, extend.txt, help.txt,
- idle.bat, idle.py, idle.pyw, idlever.py, keydefs.py, pyclbr.py,
- tabnanny.py, testcode.py, Icons/folder.gif, Icons/minusnode.gif,
- Icons/openfolder.gif, Icons/plusnode.gif, Icons/python.gif,
- Icons/tk.gif: Import the 9 March 2000 version of Python CVS IDLE as
- 1.1.3 vendor branch named py-cvs-vendor-branch.
-
-2001-07-04 13:43 kbk
-
- * Icons/: folder.gif, minusnode.gif, openfolder.gif, plusnode.gif,
- python.gif, tk.gif: Null commit with -f option to force an uprev
- and put HEADs firmly on the trunk.
-
-2001-07-04 13:15 kbk
-
- * AutoExpand.py, AutoIndent.py, Bindings.py, CallTipWindow.py,
- CallTips.py, ChangeLog, ClassBrowser.py, ColorDelegator.py,
- ConfigParser.py, Debugger.py, Delegator.py, EditorWindow.py,
- ExecBinding.py, FileList.py, FormatParagraph.py, FrameViewer.py,
- GrepDialog.py, IDLEFORK.html, IOBinding.py, IdleConf.py,
- IdleHistory.py, MultiScrolledLists.py, MultiStatusBar.py, NEWS.txt,
- ObjectBrowser.py, OldStackViewer.py, OutputWindow.py,
- ParenMatch.py, PathBrowser.py, Percolator.py, PyParse.py,
- PyShell.py, README.txt, Remote.py, RemoteInterp.py,
- ReplaceDialog.py, ScriptBinding.py, ScrolledList.py,
- SearchBinding.py, SearchDialog.py, SearchDialogBase.py,
- SearchEngine.py, Separator.py, StackViewer.py, TODO.txt,
- ToolTip.py, TreeWidget.py, UndoDelegator.py, WidgetRedirector.py,
- WindowList.py, ZoomHeight.py, __init__.py, config-unix.txt,
- config-win.txt, config.txt, eventparse.py, extend.txt, help.txt,
- idle, idle.bat, idle.py, idle.pyw, idlever.py, keydefs.py,
- loader.py, protocol.py, pyclbr.py, setup.py, spawn.py, tabnanny.py,
- testcode.py: Null commit with -f option to force an uprev and put
- HEADs firmly on the trunk.
-
-2001-06-27 10:24 elguavas
-
- * IDLEFORK.html: updated contact details
-
-2001-06-25 17:23 elguavas
-
- * idle, RemoteInterp.py, setup.py: Initial revision
-
-2001-06-25 17:23 elguavas
-
- * idle, RemoteInterp.py, setup.py: import current python cvs idle
- as a vendor branch
-
-2001-06-24 15:10 elguavas
-
- * IDLEFORK.html: tiny change to test new syncmail setup
-
-2001-06-24 14:41 elguavas
-
- * IDLEFORK.html: change to new developer contact, also a test
- commit for new syncmail setup
-
-2001-06-23 18:15 elguavas
-
- * IDLEFORK.html: tiny test update for revitalised idle-fork
-
-2000-09-24 17:29 nriley
-
- * protocol.py: Fixes for Python 1.6 compatibility - socket bind and
- connect get a tuple instead two arguments.
-
-2000-09-24 17:28 nriley
-
- * spawn.py: Change for Python 1.6 compatibility - UNIX's 'os'
- module defines 'spawnv' now, so we check for 'fork' first.
-
-2000-08-15 22:51 nowonder
-
- * IDLEFORK.html:
- corrected email address
-
-2000-08-15 22:47 nowonder
-
- * IDLEFORK.html:
- added .html file for http://idlefork.sourceforge.net
-
-2000-08-15 11:13 dscherer
-
- * AutoExpand.py, AutoIndent.py, Bindings.py, CallTipWindow.py,
- CallTips.py, __init__.py, ChangeLog, ClassBrowser.py,
- ColorDelegator.py, ConfigParser.py, Debugger.py, Delegator.py,
- FileList.py, FormatParagraph.py, FrameViewer.py, GrepDialog.py,
- IOBinding.py, IdleConf.py, IdleHistory.py, MultiScrolledLists.py,
- MultiStatusBar.py, NEWS.txt, ObjectBrowser.py, OldStackViewer.py,
- OutputWindow.py, ParenMatch.py, PathBrowser.py, Percolator.py,
- PyParse.py, PyShell.py, README.txt, ReplaceDialog.py,
- ScriptBinding.py, ScrolledList.py, SearchBinding.py,
- SearchDialog.py, SearchDialogBase.py, SearchEngine.py,
- Separator.py, StackViewer.py, TODO.txt, ToolTip.py, TreeWidget.py,
- UndoDelegator.py, WidgetRedirector.py, WindowList.py, help.txt,
- ZoomHeight.py, config-unix.txt, config-win.txt, config.txt,
- eventparse.py, extend.txt, idle.bat, idle.py, idle.pyw, idlever.py,
- keydefs.py, loader.py, pyclbr.py, tabnanny.py, testcode.py,
- EditorWindow.py, ExecBinding.py, Remote.py, protocol.py, spawn.py,
- Icons/folder.gif, Icons/minusnode.gif, Icons/openfolder.gif,
- Icons/plusnode.gif, Icons/python.gif, Icons/tk.gif: Initial
- revision
-
-2000-08-15 11:13 dscherer
-
- * AutoExpand.py, AutoIndent.py, Bindings.py, CallTipWindow.py,
- CallTips.py, __init__.py, ChangeLog, ClassBrowser.py,
- ColorDelegator.py, ConfigParser.py, Debugger.py, Delegator.py,
- FileList.py, FormatParagraph.py, FrameViewer.py, GrepDialog.py,
- IOBinding.py, IdleConf.py, IdleHistory.py, MultiScrolledLists.py,
- MultiStatusBar.py, NEWS.txt, ObjectBrowser.py, OldStackViewer.py,
- OutputWindow.py, ParenMatch.py, PathBrowser.py, Percolator.py,
- PyParse.py, PyShell.py, README.txt, ReplaceDialog.py,
- ScriptBinding.py, ScrolledList.py, SearchBinding.py,
- SearchDialog.py, SearchDialogBase.py, SearchEngine.py,
- Separator.py, StackViewer.py, TODO.txt, ToolTip.py, TreeWidget.py,
- UndoDelegator.py, WidgetRedirector.py, WindowList.py, help.txt,
- ZoomHeight.py, config-unix.txt, config-win.txt, config.txt,
- eventparse.py, extend.txt, idle.bat, idle.py, idle.pyw, idlever.py,
- keydefs.py, loader.py, pyclbr.py, tabnanny.py, testcode.py,
- EditorWindow.py, ExecBinding.py, Remote.py, protocol.py, spawn.py,
- Icons/folder.gif, Icons/minusnode.gif, Icons/openfolder.gif,
- Icons/plusnode.gif, Icons/python.gif, Icons/tk.gif: Modified IDLE
- from VPython 0.2
-
-
-original IDLE ChangeLog:
-========================
-
-Tue Feb 15 18:08:19 2000 Guido van Rossum <guido@cnri.reston.va.us>
-
- * NEWS.txt: Notice status bar and stack viewer.
-
- * EditorWindow.py: Support for Moshe's status bar.
-
- * MultiStatusBar.py: Status bar code -- by Moshe Zadka.
-
- * OldStackViewer.py:
- Adding the old stack viewer implementation back, for the debugger.
-
- * StackViewer.py: New stack viewer, uses a tree widget.
- (XXX: the debugger doesn't yet use this.)
-
- * WindowList.py:
- Correct a typo and remove an unqualified except that was hiding the error.
-
- * ClassBrowser.py: Add an XXX comment about the ClassBrowser AIP.
-
- * ChangeLog: Updated change log.
-
- * NEWS.txt: News update. Probably incomplete; what else is new?
-
- * README.txt:
- Updated for pending IDLE 0.5 release (still very rough -- just getting
- it out in a more convenient format than CVS).
-
- * TODO.txt: Tiny addition.
-
-Thu Sep 9 14:16:02 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * TODO.txt: A few new TODO entries.
-
-Thu Aug 26 23:06:22 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * Bindings.py: Add Python Documentation entry to Help menu.
-
- * EditorWindow.py:
- Find the help.txt file relative to __file__ or ".", not in sys.path.
- (Suggested by Moshe Zadka, but implemented differently.)
-
- Add <<python-docs>> event which, on Unix, brings up Netscape pointing
- to http://www.python.doc/current/ (a local copy would be nice but its
- location can't be predicted). Windows solution TBD.
-
-Wed Aug 11 14:55:43 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * TreeWidget.py:
- Moshe noticed an inconsistency in his comment, so I'm rephrasing it to
- be clearer.
-
- * TreeWidget.py:
- Patch inspired by Moshe Zadka to search for the Icons directory in the
- same directory as __file__, rather than searching for it along sys.path.
- This works better when idle is a package.
-
-Thu Jul 15 13:11:02 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * TODO.txt: New wishes.
-
-Sat Jul 10 13:17:35 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * IdlePrefs.py:
- Make the color for stderr red (i.e. the standard warning/danger/stop
- color) rather than green. Suggested by Sam Schulenburg.
-
-Fri Jun 25 17:26:34 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * PyShell.py: Close debugger when closing. This may break a cycle.
-
- * Debugger.py: Break cycle on close.
-
- * ClassBrowser.py: Destroy the tree when closing.
-
- * TreeWidget.py: Add destroy() method to recursively destroy a tree.
-
- * PyShell.py: Extend _close() to break cycles.
- Break some other cycles too (and destroy the root when done).
-
- * EditorWindow.py:
- Add _close() method that does the actual cleanup (close() asks the
- user what they want first if there's unsaved stuff, and may cancel).
- It closes more than before.
-
- Add unload_extensions() method to unload all extensions; called from
- _close(). It calls an extension's close() method if it has one.
-
- * Percolator.py: Add close() method that breaks cycles.
-
- * WidgetRedirector.py: Add unregister() method.
- Unregister everything at closing.
- Don't call close() in __del__, rely on explicit call to close().
-
- * IOBinding.py, FormatParagraph.py, CallTips.py:
- Add close() method that breaks a cycle.
-
-Fri Jun 11 15:03:00 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * AutoIndent.py, EditorWindow.py, FormatParagraph.py:
- Tim Peters smart.patch:
-
- EditorWindow.py:
-
- + Added get_tabwidth & set_tabwidth "virtual text" methods, that get/set the
- widget's view of what a tab means.
-
- + Moved TK_TABWIDTH_DEFAULT here from AutoIndent.
-
- + Renamed Mark's get_selection_index to get_selection_indices (sorry, Mark,
- but the name was plain wrong <wink>).
-
- FormatParagraph.py: renamed use of get_selection_index.
-
- AutoIndent.py:
-
- + Moved TK_TABWIDTH_DEFAULT to EditorWindow.
-
- + Rewrote set_indentation_params to use new VTW get/set_tabwidth methods.
-
- + Changed smart_backspace_event to delete whitespace back to closest
- preceding virtual tab stop or real character (note that this may require
- inserting characters if backspacing over a tab!).
-
- + Nuked almost references to the selection tag, in favor of using
- get_selection_indices. The sole exception is in set_region, for which no
- "set_selection" abstraction has yet been agreed upon.
-
- + Had too much fun using the spiffy new features of the format-paragraph
- cmd.
-
-Thu Jun 10 17:48:02 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * FormatParagraph.py:
- Code by Mark Hammond to format paragraphs embedded in comments.
- Read the comments (which I reformatted using the new feature :-)
- for some limitations.
-
- * EditorWindow.py:
- Added abstraction get_selection_index() (Mark Hammond). Also
- reformatted some comment blocks to show off a cool feature I'm about
- to check in next.
-
- * ClassBrowser.py:
- Adapt to the new pyclbr's support of listing top-level functions. If
- this functionality is not present (e.g. when used with a vintage
- Python 1.5.2 installation) top-level functions are not listed.
-
- (Hmm... Any distribution of IDLE 0.5 should probably include a copy
- of the new pyclbr.py!)
-
- * AutoIndent.py:
- Fix off-by-one error in Tim's recent change to comment_region(): the
- list of lines returned by get_region() contains an empty line at the
- end representing the start of the next line, and this shouldn't be
- commented out!
-
- * CallTips.py:
- Mark Hammond writes: Here is another change that allows it to work for
- class creation - tries to locate an __init__ function. Also updated
- the test code to reflect your new "***" change.
-
- * CallTipWindow.py:
- Mark Hammond writes: Tim's suggestion of copying the font for the
- CallTipWindow from the text control makes sense, and actually makes
- the control look better IMO.
-
-Wed Jun 9 20:34:57 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * CallTips.py:
- Append "..." if the appropriate flag (for varargs) in co_flags is set.
- Ditto "***" for kwargs.
-
-Tue Jun 8 13:06:07 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * ReplaceDialog.py:
- Hmm... Tim didn't turn "replace all" into a single undo block.
- I think I like it better if it os, so here.
-
- * ReplaceDialog.py: Tim Peters: made replacement atomic for undo/redo.
-
- * AutoIndent.py: Tim Peters:
-
- + Set usetabs=1. Editing pyclbr.py was driving me nuts <0.6 wink>.
- usetabs=1 is the Emacs pymode default too, and thanks to indentwidth !=
- tabwidth magical usetabs disabling, new files are still created with tabs
- turned off. The only implication is that if you open a file whose first
- indent is a single tab, IDLE will now magically use tabs for that file (and
- set indentwidth to 8). Note that the whole scheme doesn't work right for
- PythonWin, though, since Windows users typically set tabwidth to 4; Mark
- probably has to hide the IDLE algorithm from them (which he already knows).
-
- + Changed comment_region_event to stick "##" in front of every line. The
- "holes" previously left on blank lines were visually confusing (made it
- needlessly hard to figure out what to uncomment later).
-
-Mon Jun 7 15:38:40 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * TreeWidget.py, ObjectBrowser.py:
- Remove unnecessary reference to pyclbr from test() code.
-
- * PyParse.py: Tim Peters:
-
- Smarter logic for finding a parse synch point.
-
- Does a half to a fifth the work in normal cases; don't notice the speedup,
- but makes more breathing room for other extensions.
-
- Speeds terrible cases by at least a factor of 10. "Terrible" == e.g. you put
- """ at the start of Tkinter.py, undo it, zoom to the bottom, and start
- typing in code. Used to take about 8 seconds for ENTER to respond, now some
- large fraction of a second. The new code gets indented correctly, despite
- that it all remains "string colored" until the colorizer catches up (after
- which, ENTER appears instantaneous again).
-
-Fri Jun 4 19:21:19 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * extend.py: Might as well enable CallTips by default.
- If there are too many complaints I'll remove it again or fix it.
-
-Thu Jun 3 14:32:16 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * AutoIndent.py, EditorWindow.py, PyParse.py:
- New offerings by Tim Peters; he writes:
-
- IDLE is now the first Python editor in the Universe not confused by my
- doctest.py <wink>.
-
- As threatened, this defines IDLE's is_char_in_string function as a
- method of EditorWindow. You just need to define one similarly in
- whatever it is you pass as editwin to AutoIndent; looking at the
- EditorWindow.py part of the patch should make this clear.
-
- * GrepDialog.py: Enclose pattern in quotes in status message.
-
- * CallTips.py:
- Mark Hammond fixed some comments and improved the way the tip text is
- constructed.
-
-Wed Jun 2 18:18:57 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * CallTips.py:
- My fix to Mark's code: restore the universal check on <KeyRelease>.
- Always cancel on <Key-Escape> or <ButtonPress>.
-
- * CallTips.py:
- A version that Mark Hammond posted to the newsgroup. Has some newer
- stuff for getting the tip. Had to fix the Key-( and Key-) events
- for Unix. Will have to re-apply my patch for catching KeyRelease and
- ButtonRelease events.
-
- * CallTipWindow.py, CallTips.py:
- Call tips by Mark Hammond (plus tiny fix by me.)
-
- * IdleHistory.py:
- Changes by Mark Hammond: (1) support optional output_sep argument to
- the constructor so he can eliminate the sys.ps2 that PythonWin leaves
- in the source; (2) remove duplicate history items.
-
- * AutoIndent.py:
- Changes by Mark Hammond to allow using IDLE extensions in PythonWin as
- well: make three dialog routines instance variables.
-
- * EditorWindow.py:
- Change by Mark Hammond to allow using IDLE extensions in PythonWin as
- well: make three dialog routines instance variables.
-
-Tue Jun 1 20:06:44 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * AutoIndent.py: Hah! A fix of my own to Tim's code!
- Unix bindings for <<toggle-tabs>> and <<change-indentwidth>> were
- missing, and somehow that meant the events were never generated,
- even though they were in the menu. The new Unix bindings are now
- the same as the Windows bindings (M-t and M-u).
-
- * AutoIndent.py, PyParse.py, PyShell.py: Tim Peters again:
-
- The new version (attached) is fast enough all the time in every real module
- I have <whew!>. You can make it slow by, e.g., creating an open list with
- 5,000 90-character identifiers (+ trailing comma) each on its own line, then
- adding an item to the end -- but that still consumes less than a second on
- my P5-166. Response time in real code appears instantaneous.
-
- Fixed some bugs.
-
- New feature: when hitting ENTER and the cursor is beyond the line's leading
- indentation, whitespace is removed on both sides of the cursor; before
- whitespace was removed only on the left; e.g., assuming the cursor is
- between the comma and the space:
-
- def something(arg1, arg2):
- ^ cursor to the left of here, and hit ENTER
- arg2): # new line used to end up here
- arg2): # but now lines up the way you expect
-
- New hack: AutoIndent has grown a context_use_ps1 Boolean config option,
- defaulting to 0 (false) and set to 1 (only) by PyShell. Reason: handling
- the fancy stuff requires looking backward for a parsing synch point; ps1
- lines are the only sensible thing to look for in a shell window, but are a
- bad thing to look for in a file window (ps1 lines show up in my module
- docstrings often). PythonWin's shell should set this true too.
-
- Persistent problem: strings containing def/class can still screw things up
- completely. No improvement. Simplest workaround is on the user's head, and
- consists of inserting e.g.
-
- def _(): pass
-
- (or any other def/class) after the end of the multiline string that's
- screwing them up. This is especially irksome because IDLE's syntax coloring
- is *not* confused, so when this happens the colors don't match the
- indentation behavior they see.
-
- * AutoIndent.py: Tim Peters again:
-
- [Tim, after adding some bracket smarts to AutoIndent.py]
- > ...
- > What it can't possibly do without reparsing large gobs of text is
- > suggest a reasonable indent level after you've *closed* a bracket
- > left open on some previous line.
- > ...
-
- The attached can, and actually fast enough to use -- most of the time. The
- code is tricky beyond belief to achieve that, but it works so far; e.g.,
-
- return len(string.expandtabs(str[self.stmt_start :
- ^ indents to caret
- i],
- ^ indents to caret
- self.tabwidth)) + 1
- ^ indents to caret
-
- It's about as smart as pymode now, wrt both bracket and backslash
- continuation rules. It does require reparsing large gobs of text, and if it
- happens to find something that looks like a "def" or "class" or sys.ps1
- buried in a multiline string, but didn't suck up enough preceding text to
- see the start of the string, it's completely hosed. I can't repair that --
- it's just too slow to reparse from the start of the file all the time.
-
- AutoIndent has grown a new num_context_lines tuple attribute that controls
- how far to look back, and-- like other params --this could/should be made
- user-overridable at startup and per-file on the fly.
-
- * PyParse.py: New file by Tim Peters:
-
- One new file in the attached, PyParse.py. The LineStudier (whatever it was
- called <wink>) class was removed from AutoIndent; PyParse subsumes its
- functionality.
-
- * AutoIndent.py: Tim Peters keeps revising this module (more to come):
-
- Removed "New tabwidth" menu binding.
-
- Added "a tab means how many spaces?" dialog to block tabify and untabify. I
- think prompting for this is good now: they're usually at-most-once-per-file
- commands, and IDLE can't let them change tabwidth from the Tk default
- anymore, so IDLE can no longer presume to have any idea what a tab means.
-
- Irony: for the purpose of keeping comments aligned via tabs, Tk's
- non-default approach is much nicer than the Emacs/Notepad/Codewright/vi/etc
- approach.
-
- * EditorWindow.py:
- 1. Catch NameError on import (could be raised by case mismatch on Windows).
- 2. No longer need to reset pyclbr cache and show watch cursor when calling
- ClassBrowser -- the ClassBrowser takes care of pyclbr and the TreeWidget
- takes care of the watch cursor.
- 3. Reset the focus to the current window after error message about class
- browser on buffer without filename.
-
- * Icons/minusnode.gif, Icons/plusnode.gif: Missed a few.
-
- * ClassBrowser.py, PathBrowser.py: Rewritten based on TreeWidget.py
-
- * ObjectBrowser.py: Object browser, based on TreeWidget.py.
-
- * TreeWidget.py: Tree widget done right.
-
- * ToolTip.py: As yet unused code for tool tips.
-
- * ScriptBinding.py:
- Ensure sys.argv[0] is the script name on Run Script.
-
- * ZoomHeight.py: Move zoom height functionality to separate function.
-
- * Icons/folder.gif, Icons/openfolder.gif, Icons/python.gif, Icons/tk.gif:
- A few icons used by ../TreeWidget.py and its callers.
-
- * AutoIndent.py: New version by Tim Peters improves block opening test.
-
-Fri May 21 04:46:17 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * Attic/History.py, PyShell.py: Rename History to IdleHistory.
- Add isatty() to pseudo files.
-
- * StackViewer.py: Make initial stack viewer wider
-
- * TODO.txt: New wishes
-
- * AutoIndent.py, EditorWindow.py, PyShell.py:
- Much improved autoindent and handling of tabs,
- by Tim Peters.
-
-Mon May 3 15:49:52 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * AutoIndent.py, EditorWindow.py, FormatParagraph.py, UndoDelegator.py:
- Tim Peters writes:
-
- I'm still unsure, but couldn't stand the virtual event trickery so tried a
- different sin (adding undo_block_start/stop methods to the Text instance in
- EditorWindow.py). Like it or not, it's efficient and works <wink>. Better
- idea?
-
- Give the attached a whirl. Even if you hate the implementation, I think
- you'll like the results. Think I caught all the "block edit" cmds,
- including Format Paragraph, plus subtler ones involving smart indents and
- backspacing.
-
- * WidgetRedirector.py: Tim Peters writes:
-
- [W]hile trying to dope out how redirection works, stumbled into two
- possible glitches. In the first, it doesn't appear to make sense to try to
- rename a command that's already been destroyed; in the second, the name
- "previous" doesn't really bring to mind "ignore the previous value" <wink>.
-
-Fri Apr 30 19:39:25 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * __init__.py: Support for using idle as a package.
-
- * PathBrowser.py:
- Avoid listing files more than once (e.g. foomodule.so has two hits:
- once for foo + module.so, once for foomodule + .so).
-
-Mon Apr 26 22:20:38 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * ChangeLog, ColorDelegator.py, PyShell.py: Tim Peters strikes again:
-
- Ho ho ho -- that's trickier than it sounded! The colorizer is working with
- "line.col" strings instead of Text marks, and the absolute coordinates of
- the point of interest can change across the self.update call (voice of
- baffled experience, when two quick backspaces no longer fooled it, but a
- backspace followed by a quick ENTER did <wink>).
-
- Anyway, the attached appears to do the trick. CPU usage goes way up when
- typing quickly into a long triple-quoted string, but the latency is fine for
- me (a relatively fast typist on a relatively slow machine). Most of the
- changes here are left over from reducing the # of vrbl names to help me
- reason about the logic better; I hope the code is a *little* easier to
-
-Fri Apr 23 14:01:25 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * EditorWindow.py:
- Provide full arguments to __import__ so it works in packagized IDLE.
-
-Thu Apr 22 23:20:17 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * help.txt:
- Bunch of updates necessary due to recent changes; added docs for File
- menu, command line and color preferences.
-
- * Bindings.py: Remove obsolete 'script' menu.
-
- * TODO.txt: Several wishes fulfilled.
-
- * OutputWindow.py:
- Moved classes OnDemandOutputWindow and PseudoFile here,
- from ScriptBinding.py where they are no longer needed.
-
- * ScriptBinding.py:
- Mostly rewritten. Instead of the old Run module and Debug module,
- there are two new commands:
-
- Import module (F5) imports or reloads the module and also adds its
- name to the __main__ namespace. This gets executed in the PyShell
- window under control of its debug settings.
-
- Run script (Control-F5) is similar but executes the contents of the
- file directly in the __main__ namespace.
-
- * PyShell.py: Nits: document use of $IDLESTARTUP; display idle version
-
- * idlever.py: New version to celebrate new command line
-
- * OutputWindow.py: Added flush(), for completeness.
-
- * PyShell.py:
- A lot of changes to make the command line more useful. You can now do:
- idle.py -e file ... -- to edit files
- idle.py script arg ... -- to run a script
- idle.py -c cmd arg ... -- to run a command
- Other options, see also the usage message (also new!) for more details:
- -d -- enable debugger
- -s -- run $IDLESTARTUP or $PYTHONSTARTUP
- -t title -- set Python Shell window's title
- sys.argv is set accordingly, unless -e is used.
- sys.path is absolutized, and all relevant paths are inserted into it.
-
- Other changes:
- - the environment in which commands are executed is now the
- __main__ module
- - explicitly save sys.stdout etc., don't restore from sys.__stdout__
- - new interpreter methods execsource(), execfile(), stuffsource()
- - a few small nits
-
- * TODO.txt:
- Some more TODO items. Made up my mind about command line args,
- Run/Import, __main__.
-
- * ColorDelegator.py:
- Super-elegant patch by Tim Peters that speeds up colorization
- dramatically (up to 15 times he claims). Works by reading more than
- one line at a time, up to 100-line chunks (starting with one line and
- then doubling up to the limit). On a typical machine (e.g. Tim's
- P5-166) this doesn't reduce interactive responsiveness in a noticeable
- way.
-
-Wed Apr 21 15:49:34 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * ColorDelegator.py:
- Patch by Tim Peters to speed up colorizing of big multiline strings.
-
-Tue Apr 20 17:32:52 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * extend.txt:
- For an event 'foo-bar', the corresponding method must be called
- foo_bar_event(). Therefore, fix the references to zoom_height() in
- the example.
-
- * IdlePrefs.py: Restored the original IDLE color scheme.
-
- * PyShell.py, IdlePrefs.py, ColorDelegator.py, EditorWindow.py:
- Color preferences code by Loren Luke (massaged by me somewhat)
-
- * SearchEngine.py:
- Patch by Mark Favas: it fixes the search engine behaviour where an
- unsuccessful search wraps around and re-searches that part of the file
- between the start of the search and the end of the file - only really
- an issue for very large files, but... (also removes a redundant
- m.span() call).
-
-Mon Apr 19 16:26:02 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * TODO.txt: A few wishes are now fulfilled.
-
- * AutoIndent.py: Tim Peters implements some of my wishes:
-
- o Makes the tab key intelligently insert spaces when appropriate
- (see Help list banter twixt David Ascher and me; idea stolen from
- every other editor on earth <wink>).
-
- o newline_and_indent_event trims trailing whitespace on the old
- line (pymode and Codewright).
-
- o newline_and_indent_event no longer fooled by trailing whitespace or
- comment after ":" (pymode, PTUI).
-
- o newline_and_indent_event now reduces the new line's indentation after
- return, break, continue, raise and pass stmts (pymode).
-
- The last two are easy to fool in the presence of strings &
- continuations, but pymode requires Emacs's high-powered C parsing
- functions to avoid that in finite time.
-
-======================================================================
- Python release 1.5.2c1, IDLE version 0.4
-======================================================================
-
-Wed Apr 7 18:41:59 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * README.txt, NEWS.txt: New version.
-
- * idlever.py: Version bump awaiting impending new release.
- (Not much has changed :-( )
-
-Mon Mar 29 14:52:28 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * ScriptBinding.py, PyShell.py:
- At Tim Peters' recommendation, add a dummy flush() method to
- PseudoFile.
-
-Thu Mar 11 23:21:23 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * PathBrowser.py: Don't crash when sys.path contains an empty string.
-
- * Attic/Outline.py: This file was never supposed to be part of IDLE.
-
- * PathBrowser.py:
- - Don't crash in the case where a superclass is a string instead of a
- pyclbr.Class object; this can happen when the superclass is
- unrecognizable (to pyclbr), e.g. when module renaming is used.
-
- - Show a watch cursor when calling pyclbr (since it may take a while
- recursively parsing imported modules!).
-
-Wed Mar 10 05:18:02 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * EditorWindow.py, Bindings.py: Add PathBrowser to File module
-
- * PathBrowser.py: "Path browser" - 4 scrolled lists displaying:
- directories on sys.path
- modules in selected directory
- classes in selected module
- methods of selected class
-
- Sinlge clicking in a directory, module or class item updates the next
- column with info about the selected item. Double clicking in a
- module, class or method item opens the file (and selects the clicked
- item if it is a class or method).
-
- I guess eventually I should be using a tree widget for this, but the
- ones I've seen don't work well enough, so for now I use the old
- Smalltalk or NeXT style multi-column hierarchical browser.
-
- * MultiScrolledLists.py:
- New utility: multiple scrolled lists in parallel
-
- * ScrolledList.py: - White background.
- - Display "(None)" (or text of your choosing) when empty.
- - Don't set the focus.
-
-======================================================================
- Python release 1.5.2b2, IDLE version 0.3
-======================================================================
-
-Wed Feb 17 22:47:41 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * NEWS.txt: News in 0.3.
-
- * README.txt, idlever.py: Bump version to 0.3.
-
- * EditorWindow.py:
- After all, we don't need to call the callbacks ourselves!
-
- * WindowList.py:
- When deleting, call the callbacks *after* deleting the window from our list!
-
- * EditorWindow.py:
- Fix up the Windows menu via the new callback mechanism instead of
- depending on menu post commands (which don't work when the menu is
- torn off).
-
- * WindowList.py:
- Support callbacks to patch up Windows menus everywhere.
-
- * ChangeLog: Oh, why not. Checking in the Emacs-generated change log.
-
-Tue Feb 16 22:34:17 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * ScriptBinding.py:
- Only pop up the stack viewer when requested in the Debug menu.
-
-Mon Feb 8 22:27:49 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * WindowList.py: Don't crash if a window no longer exists.
-
- * TODO.txt: Restructured a bit.
-
-Mon Feb 1 23:06:17 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * PyShell.py: Add current dir or paths of file args to sys.path.
-
- * Debugger.py: Add canonic() function -- for brand new bdb.py feature.
-
- * StackViewer.py: Protect against accessing an empty stack.
-
-Fri Jan 29 20:44:45 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * ZoomHeight.py:
- Use only the height to decide whether to zoom in or out.
-
-Thu Jan 28 22:24:30 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * EditorWindow.py, FileList.py:
- Make sure the Tcl variables are shared between windows.
-
- * PyShell.py, EditorWindow.py, Bindings.py:
- Move menu/key binding code from Bindings.py to EditorWindow.py,
- with changed APIs -- it makes much more sense there.
- Also add a new feature: if the first character of a menu label is
- a '!', it gets a checkbox. Checkboxes are bound to Boolean Tcl variables
- that can be accessed through the new getvar/setvar/getrawvar API;
- the variable is named after the event to which the menu is bound.
-
- * Debugger.py: Add Quit button to the debugger window.
-
- * SearchDialog.py:
- When find_again() finds exactly the current selection, it's a failure.
-
- * idle.py, Attic/idle: Rename idle -> idle.py
-
-Mon Jan 18 15:18:57 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * EditorWindow.py, WindowList.py: Only deiconify when iconic.
-
- * TODO.txt: Misc
-
-Tue Jan 12 22:14:34 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * testcode.py, Attic/test.py:
- Renamed test.py to testcode.py so one can import Python's
- test package from inside IDLE. (Suggested by Jack Jansen.)
-
- * EditorWindow.py, ColorDelegator.py:
- Hack to close a window that is colorizing.
-
- * Separator.py: Vladimir Marangozov's patch:
- The separator dances too much and seems to jump by arbitrary amounts
- in arbitrary directions when I try to move it for resizing the frames.
- This patch makes it more quiet.
-
-Mon Jan 11 14:52:40 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * TODO.txt: Some requests have been fulfilled.
-
- * EditorWindow.py:
- Set the cursor to a watch when opening the class browser (which may
- take quite a while, browsing multiple files).
-
- Newer, better center() -- but assumes no wrapping.
-
- * SearchBinding.py:
- Got rid of debug print statement in goto_line_event().
-
- * ScriptBinding.py:
- I think I like it better if it prints the traceback even when it displays
- the stack viewer.
-
- * Debugger.py: Bind ESC to close-window.
-
- * ClassBrowser.py: Use a HSeparator between the classes and the items.
- Make the list of classes wider by default (40 chars).
- Bind ESC to close-window.
-
- * Separator.py:
- Separator classes (draggable divider between two panes).
-
-Sat Jan 9 22:01:33 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * WindowList.py:
- Don't traceback when wakeup() is called when the window has been destroyed.
- This can happen when a torn-of Windows menu references closed windows.
- And Tim Peters claims that the Windows menu is his favorite to tear off...
-
- * EditorWindow.py: Allow tearing off of the Windows menu.
-
- * StackViewer.py: Close on ESC.
-
- * help.txt: Updated a bunch of things (it was mostly still 0.1!)
-
- * extend.py: Added ScriptBinding to standard bindings.
-
- * ScriptBinding.py:
- This now actually works. See doc string. It can run a module (i.e.
- import or reload) or debug it (same with debugger control). Output
- goes to a fresh output window, only created when needed.
-
-======================================================================
- Python release 1.5.2b1, IDLE version 0.2
-======================================================================
-
-Fri Jan 8 17:26:02 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * README.txt, NEWS.txt: What's new in this release.
-
- * Bindings.py, PyShell.py:
- Paul Prescod's patches to allow the stack viewer to pop up when a
- traceback is printed.
-
-Thu Jan 7 00:12:15 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * FormatParagraph.py:
- Change paragraph width limit to 70 (like Emacs M-Q).
-
- * README.txt:
- Separating TODO from README. Slight reformulation of features. No
- exact release date.
-
- * TODO.txt: Separating TODO from README.
-
-Mon Jan 4 21:19:09 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * FormatParagraph.py:
- Hm. There was a boundary condition error at the end of the file too.
-
- * SearchBinding.py: Hm. Add Unix binding for replace, too.
-
- * keydefs.py: Ran eventparse.py again.
-
- * FormatParagraph.py: Added Unix Meta-q key binding;
- fix find_paragraph when at start of file.
-
- * AutoExpand.py: Added Meta-/ binding for Unix as alt for Alt-/.
-
- * SearchBinding.py:
- Add unix binding for grep (otherwise the menu entry doesn't work!)
-
- * ZoomHeight.py: Adjusted Unix height to work with fvwm96. :=(
-
- * GrepDialog.py: Need to import sys!
-
- * help.txt, extend.txt, README.txt: Formatted some paragraphs
-
- * extend.py, FormatParagraph.py:
- Add new extension to reformat a (text) paragraph.
-
- * ZoomHeight.py: Typo in Win specific height setting.
-
-Sun Jan 3 00:47:35 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * AutoIndent.py: Added something like Tim Peters' backspace patch.
-
- * ZoomHeight.py: Adapted to Unix (i.e., more hardcoded constants).
-
-Sat Jan 2 21:28:54 1999 Guido van Rossum <guido@cnri.reston.va.us>
-
- * keydefs.py, idlever.py, idle.pyw, idle.bat, help.txt, extend.txt, extend.py, eventparse.py, ZoomHeight.py, WindowList.py, UndoDelegator.py, StackViewer.py, SearchEngine.py, SearchDialogBase.py, SearchDialog.py, ScrolledList.py, SearchBinding.py, ScriptBinding.py, ReplaceDialog.py, Attic/README, README.txt, PyShell.py, Attic/PopupMenu.py, OutputWindow.py, IOBinding.py, Attic/HelpWindow.py, History.py, GrepDialog.py, FileList.py, FrameViewer.py, EditorWindow.py, Debugger.py, Delegator.py, ColorDelegator.py, Bindings.py, ClassBrowser.py, AutoExpand.py, AutoIndent.py:
- Checking in IDLE 0.2.
-
- Much has changed -- too much, in fact, to write down.
- The big news is that there's a standard way to write IDLE extensions;
- see extend.txt. Some sample extensions have been provided, and
- some existing code has been converted to extensions. Probably the
- biggest new user feature is a new search dialog with more options,
- search and replace, and even search in files (grep).
-
- This is exactly as downloaded from my laptop after returning
- from the holidays -- it hasn't even been tested on Unix yet.
-
-Fri Dec 18 15:52:54 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * FileList.py, ClassBrowser.py:
- Fix the class browser to work even when the file is not on sys.path.
-
-Tue Dec 8 20:39:36 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * Attic/turtle.py: Moved to Python 1.5.2/Lib
-
-Fri Nov 27 03:19:20 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * help.txt: Typo
-
- * EditorWindow.py, FileList.py: Support underlining of menu labels
-
- * Bindings.py:
- New approach, separate tables for menus (platform-independent) and key
- definitions (platform-specific), and generating accelerator strings
- automatically from the key definitions.
-
-Mon Nov 16 18:37:42 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * Attic/README: Clarify portability and main program.
-
- * Attic/README: Added intro for 0.1 release and append Grail notes.
-
-Mon Oct 26 18:49:00 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * Attic/turtle.py: root is now a global called _root
-
-Sat Oct 24 16:38:38 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * Attic/turtle.py: Raise the root window on reset().
- Different action on WM_DELETE_WINDOW is more likely to do the right thing,
- allowing us to destroy old windows.
-
- * Attic/turtle.py:
- Split the goto() function in two: _goto() is the internal one,
- using Canvas coordinates, and goto() uses turtle coordinates
- and accepts variable argument lists.
-
- * Attic/turtle.py: Cope with destruction of the window
-
- * Attic/turtle.py: Turtle graphics
-
- * Debugger.py: Use of Breakpoint class should be bdb.Breakpoint.
-
-Mon Oct 19 03:33:40 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * SearchBinding.py:
- Speed up the search a bit -- don't drag a mark around...
-
- * PyShell.py:
- Change our special entries from <console#N> to <pyshell#N>.
- Patch linecache.checkcache() to keep our special entries alive.
- Add popup menu to all editor windows to set a breakpoint.
-
- * Debugger.py:
- Use and pass through the 'force' flag to set_dict() where appropriate.
- Default source and globals checkboxes to false.
- Don't interact in user_return().
- Add primitive set_breakpoint() method.
-
- * ColorDelegator.py:
- Raise priority of 'sel' tag so its foreground (on Windows) will take
- priority over text colorization (which on Windows is almost the
- same color as the selection background).
-
- Define a tag and color for breakpoints ("BREAK").
-
- * Attic/PopupMenu.py: Disable "Open stack viewer" and "help" commands.
-
- * StackViewer.py:
- Add optional 'force' argument (default 0) to load_dict().
- If set, redo the display even if it's the same dict.
-
-Fri Oct 16 21:10:12 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * StackViewer.py: Do nothing when loading the same dict as before.
-
- * PyShell.py: Details for debugger interface.
-
- * Debugger.py:
- Restructured and more consistent. Save checkboxes across instantiations.
-
- * EditorWindow.py, Attic/README, Bindings.py:
- Get rid of conflicting ^X binding. Use ^W.
-
- * Debugger.py, StackViewer.py:
- Debugger can now show local and global variables.
-
- * Debugger.py: Oops
-
- * Debugger.py, PyShell.py: Better debugger support (show stack etc).
-
- * Attic/PopupMenu.py: Follow renames in StackViewer module
-
- * StackViewer.py:
- Rename classes to StackViewer (the widget) and StackBrowser (the toplevel).
-
- * ScrolledList.py: Add close() method
-
- * EditorWindow.py: Clarify 'Open Module' dialog text
-
- * StackViewer.py: Restructured into a browser and a widget.
-
-Thu Oct 15 23:27:08 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * ClassBrowser.py, ScrolledList.py:
- Generalized the scrolled list which is the base for the class and
- method browser into a separate class in its own module.
-
- * Attic/test.py: Cosmetic change
-
- * Debugger.py: Don't show function name if there is none
-
-Wed Oct 14 03:43:05 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * Debugger.py, PyShell.py: Polish the Debugger GUI a bit.
- Closing it now also does the right thing.
-
-Tue Oct 13 23:51:13 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * Debugger.py, PyShell.py, Bindings.py:
- Ad primitive debugger interface (so far it will step and show you the
- source, but it doesn't yet show the stack).
-
- * Attic/README: Misc
-
- * StackViewer.py: Whoops -- referenced self.top before it was set.
-
- * help.txt: Added history and completion commands.
-
- * help.txt: Updated
-
- * FileList.py: Add class browser functionality.
-
- * StackViewer.py:
- Add a close() method and bind to WM_DELETE_WINDOW protocol
-
- * PyShell.py: Clear the linecache before printing a traceback
-
- * Bindings.py: Added class browser binding.
-
- * ClassBrowser.py: Much improved, much left to do.
-
- * PyShell.py: Make the return key do what I mean more often.
-
- * ClassBrowser.py:
- Adding the beginnings of a Class browser. Incomplete, yet.
-
- * EditorWindow.py, Bindings.py:
- Add new command, "Open module". You select or type a module name,
- and it opens the source.
-
-Mon Oct 12 23:59:27 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * PyShell.py: Subsume functionality from Popup menu in Debug menu.
- Other stuff so the PyShell window can be resurrected from the Windows menu.
-
- * FileList.py: Get rid of PopUp menu.
- Create a simple Windows menu. (Imperfect when Untitled windows exist.)
- Add wakeup() method: deiconify, raise, focus.
-
- * EditorWindow.py: Generalize menu creation.
-
- * Bindings.py: Add Debug and Help menu items.
-
- * EditorWindow.py: Added a menu bar to every window.
-
- * Bindings.py: Add menu configuration to the event configuration.
-
- * Attic/PopupMenu.py: Pass a root to the help window.
-
- * SearchBinding.py:
- Add parent argument to 'to to line number' dialog box.
-
-Sat Oct 10 19:15:32 1998 Guido van Rossum <guido@cnri.reston.va.us>
-
- * StackViewer.py:
- Add a label at the top showing (very basic) help for the stack viewer.
- Add a label at the bottom showing the exception info.
-
- * Attic/test.py, Attic/idle: Add Unix main script and test program.
-
- * idle.pyw, help.txt, WidgetRedirector.py, UndoDelegator.py, StackViewer.py, SearchBinding.py, Attic/README, PyShell.py, Attic/PopupMenu.py, Percolator.py, Outline.py, IOBinding.py, History.py, Attic/HelpWindow.py, FrameViewer.py, FileList.py, EditorWindow.py, Delegator.py, ColorDelegator.py, Bindings.py, AutoIndent.py, AutoExpand.py:
- Initial checking of Tk-based Python IDE.
- Features: text editor with syntax coloring and undo;
- subclassed into interactive Python shell which adds history.
-
diff --git a/sys/lib/python/idlelib/ClassBrowser.py b/sys/lib/python/idlelib/ClassBrowser.py
deleted file mode 100644
index e5a60a513..000000000
--- a/sys/lib/python/idlelib/ClassBrowser.py
+++ /dev/null
@@ -1,221 +0,0 @@
-"""Class browser.
-
-XXX TO DO:
-
-- reparse when source changed (maybe just a button would be OK?)
- (or recheck on window popup)
-- add popup menu with more options (e.g. doc strings, base classes, imports)
-- show function argument list? (have to do pattern matching on source)
-- should the classes and methods lists also be in the module's menu bar?
-- add base classes to class browser tree
-"""
-
-import os
-import sys
-import pyclbr
-
-import PyShell
-from WindowList import ListedToplevel
-from TreeWidget import TreeNode, TreeItem, ScrolledCanvas
-from configHandler import idleConf
-
-class ClassBrowser:
-
- def __init__(self, flist, name, path):
- # XXX This API should change, if the file doesn't end in ".py"
- # XXX the code here is bogus!
- self.name = name
- self.file = os.path.join(path[0], self.name + ".py")
- self.init(flist)
-
- def close(self, event=None):
- self.top.destroy()
- self.node.destroy()
-
- def init(self, flist):
- self.flist = flist
- # reset pyclbr
- pyclbr._modules.clear()
- # create top
- self.top = top = ListedToplevel(flist.root)
- top.protocol("WM_DELETE_WINDOW", self.close)
- top.bind("<Escape>", self.close)
- self.settitle()
- top.focus_set()
- # create scrolled canvas
- theme = idleConf.GetOption('main','Theme','name')
- background = idleConf.GetHighlight(theme, 'normal')['background']
- sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1)
- sc.frame.pack(expand=1, fill="both")
- item = self.rootnode()
- self.node = node = TreeNode(sc.canvas, None, item)
- node.update()
- node.expand()
-
- def settitle(self):
- self.top.wm_title("Class Browser - " + self.name)
- self.top.wm_iconname("Class Browser")
-
- def rootnode(self):
- return ModuleBrowserTreeItem(self.file)
-
-class ModuleBrowserTreeItem(TreeItem):
-
- def __init__(self, file):
- self.file = file
-
- def GetText(self):
- return os.path.basename(self.file)
-
- def GetIconName(self):
- return "python"
-
- def GetSubList(self):
- sublist = []
- for name in self.listclasses():
- item = ClassBrowserTreeItem(name, self.classes, self.file)
- sublist.append(item)
- return sublist
-
- def OnDoubleClick(self):
- if os.path.normcase(self.file[-3:]) != ".py":
- return
- if not os.path.exists(self.file):
- return
- PyShell.flist.open(self.file)
-
- def IsExpandable(self):
- return os.path.normcase(self.file[-3:]) == ".py"
-
- def listclasses(self):
- dir, file = os.path.split(self.file)
- name, ext = os.path.splitext(file)
- if os.path.normcase(ext) != ".py":
- return []
- try:
- dict = pyclbr.readmodule_ex(name, [dir] + sys.path)
- except ImportError, msg:
- return []
- items = []
- self.classes = {}
- for key, cl in dict.items():
- if cl.module == name:
- s = key
- if hasattr(cl, 'super') and cl.super:
- supers = []
- for sup in cl.super:
- if type(sup) is type(''):
- sname = sup
- else:
- sname = sup.name
- if sup.module != cl.module:
- sname = "%s.%s" % (sup.module, sname)
- supers.append(sname)
- s = s + "(%s)" % ", ".join(supers)
- items.append((cl.lineno, s))
- self.classes[s] = cl
- items.sort()
- list = []
- for item, s in items:
- list.append(s)
- return list
-
-class ClassBrowserTreeItem(TreeItem):
-
- def __init__(self, name, classes, file):
- self.name = name
- self.classes = classes
- self.file = file
- try:
- self.cl = self.classes[self.name]
- except (IndexError, KeyError):
- self.cl = None
- self.isfunction = isinstance(self.cl, pyclbr.Function)
-
- def GetText(self):
- if self.isfunction:
- return "def " + self.name + "(...)"
- else:
- return "class " + self.name
-
- def GetIconName(self):
- if self.isfunction:
- return "python"
- else:
- return "folder"
-
- def IsExpandable(self):
- if self.cl:
- try:
- return not not self.cl.methods
- except AttributeError:
- return False
-
- def GetSubList(self):
- if not self.cl:
- return []
- sublist = []
- for name in self.listmethods():
- item = MethodBrowserTreeItem(name, self.cl, self.file)
- sublist.append(item)
- return sublist
-
- def OnDoubleClick(self):
- if not os.path.exists(self.file):
- return
- edit = PyShell.flist.open(self.file)
- if hasattr(self.cl, 'lineno'):
- lineno = self.cl.lineno
- edit.gotoline(lineno)
-
- def listmethods(self):
- if not self.cl:
- return []
- items = []
- for name, lineno in self.cl.methods.items():
- items.append((lineno, name))
- items.sort()
- list = []
- for item, name in items:
- list.append(name)
- return list
-
-class MethodBrowserTreeItem(TreeItem):
-
- def __init__(self, name, cl, file):
- self.name = name
- self.cl = cl
- self.file = file
-
- def GetText(self):
- return "def " + self.name + "(...)"
-
- def GetIconName(self):
- return "python" # XXX
-
- def IsExpandable(self):
- return 0
-
- def OnDoubleClick(self):
- if not os.path.exists(self.file):
- return
- edit = PyShell.flist.open(self.file)
- edit.gotoline(self.cl.methods[self.name])
-
-def main():
- try:
- file = __file__
- except NameError:
- file = sys.argv[0]
- if sys.argv[1:]:
- file = sys.argv[1]
- else:
- file = sys.argv[0]
- dir, file = os.path.split(file)
- name = os.path.splitext(file)[0]
- ClassBrowser(PyShell.flist, name, [dir])
- if sys.stdin is sys.__stdin__:
- mainloop()
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/idlelib/CodeContext.py b/sys/lib/python/idlelib/CodeContext.py
deleted file mode 100644
index 74d5b7024..000000000
--- a/sys/lib/python/idlelib/CodeContext.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""CodeContext - Extension to display the block context above the edit window
-
-Once code has scrolled off the top of a window, it can be difficult to
-determine which block you are in. This extension implements a pane at the top
-of each IDLE edit window which provides block structure hints. These hints are
-the lines which contain the block opening keywords, e.g. 'if', for the
-enclosing block. The number of hint lines is determined by the numlines
-variable in the CodeContext section of config-extensions.def. Lines which do
-not open blocks are not shown in the context hints pane.
-
-"""
-import Tkinter
-from configHandler import idleConf
-import re
-from sys import maxint as INFINITY
-
-BLOCKOPENERS = set(["class", "def", "elif", "else", "except", "finally", "for",
- "if", "try", "while", "with"])
-UPDATEINTERVAL = 100 # millisec
-FONTUPDATEINTERVAL = 1000 # millisec
-
-getspacesfirstword =\
- lambda s, c=re.compile(r"^(\s*)(\w*)"): c.match(s).groups()
-
-class CodeContext:
- menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]
-
- context_depth = idleConf.GetOption("extensions", "CodeContext",
- "numlines", type="int", default=3)
- bgcolor = idleConf.GetOption("extensions", "CodeContext",
- "bgcolor", type="str", default="LightGray")
- fgcolor = idleConf.GetOption("extensions", "CodeContext",
- "fgcolor", type="str", default="Black")
- def __init__(self, editwin):
- self.editwin = editwin
- self.text = editwin.text
- self.textfont = self.text["font"]
- self.label = None
- # self.info is a list of (line number, indent level, line text, block
- # keyword) tuples providing the block structure associated with
- # self.topvisible (the linenumber of the line displayed at the top of
- # the edit window). self.info[0] is initialized as a 'dummy' line which
- # starts the toplevel 'block' of the module.
- self.info = [(0, -1, "", False)]
- self.topvisible = 1
- visible = idleConf.GetOption("extensions", "CodeContext",
- "visible", type="bool", default=False)
- if visible:
- self.toggle_code_context_event()
- self.editwin.setvar('<<toggle-code-context>>', True)
- # Start two update cycles, one for context lines, one for font changes.
- self.text.after(UPDATEINTERVAL, self.timer_event)
- self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
-
- def toggle_code_context_event(self, event=None):
- if not self.label:
- self.pad_frame = Tkinter.Frame(self.editwin.top,
- bg=self.bgcolor, border=2,
- relief="sunken")
- self.label = Tkinter.Label(self.pad_frame,
- text="\n" * (self.context_depth - 1),
- anchor="w", justify="left",
- font=self.textfont,
- bg=self.bgcolor, fg=self.fgcolor,
- border=0,
- width=1, # Don't request more than we get
- )
- self.label.pack(side="top", fill="x", expand=True,
- padx=4, pady=0)
- self.pad_frame.pack(side="top", fill="x", expand=False,
- padx=0, pady=0,
- after=self.editwin.status_bar)
- else:
- self.label.destroy()
- self.pad_frame.destroy()
- self.label = None
- idleConf.SetOption("extensions", "CodeContext", "visible",
- str(self.label is not None))
- idleConf.SaveUserCfgFiles()
-
- def get_line_info(self, linenum):
- """Get the line indent value, text, and any block start keyword
-
- If the line does not start a block, the keyword value is False.
- The indentation of empty lines (or comment lines) is INFINITY.
-
- """
- text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
- spaces, firstword = getspacesfirstword(text)
- opener = firstword in BLOCKOPENERS and firstword
- if len(text) == len(spaces) or text[len(spaces)] == '#':
- indent = INFINITY
- else:
- indent = len(spaces)
- return indent, text, opener
-
- def get_context(self, new_topvisible, stopline=1, stopindent=0):
- """Get context lines, starting at new_topvisible and working backwards.
-
- Stop when stopline or stopindent is reached. Return a tuple of context
- data and the indent level at the top of the region inspected.
-
- """
- assert stopline > 0
- lines = []
- # The indentation level we are currently in:
- lastindent = INFINITY
- # For a line to be interesting, it must begin with a block opening
- # keyword, and have less indentation than lastindent.
- for linenum in xrange(new_topvisible, stopline-1, -1):
- indent, text, opener = self.get_line_info(linenum)
- if indent < lastindent:
- lastindent = indent
- if opener in ("else", "elif"):
- # We also show the if statement
- lastindent += 1
- if opener and linenum < new_topvisible and indent >= stopindent:
- lines.append((linenum, indent, text, opener))
- if lastindent <= stopindent:
- break
- lines.reverse()
- return lines, lastindent
-
- def update_code_context(self):
- """Update context information and lines visible in the context pane.
-
- """
- new_topvisible = int(self.text.index("@0,0").split('.')[0])
- if self.topvisible == new_topvisible: # haven't scrolled
- return
- if self.topvisible < new_topvisible: # scroll down
- lines, lastindent = self.get_context(new_topvisible,
- self.topvisible)
- # retain only context info applicable to the region
- # between topvisible and new_topvisible:
- while self.info[-1][1] >= lastindent:
- del self.info[-1]
- elif self.topvisible > new_topvisible: # scroll up
- stopindent = self.info[-1][1] + 1
- # retain only context info associated
- # with lines above new_topvisible:
- while self.info[-1][0] >= new_topvisible:
- stopindent = self.info[-1][1]
- del self.info[-1]
- lines, lastindent = self.get_context(new_topvisible,
- self.info[-1][0]+1,
- stopindent)
- self.info.extend(lines)
- self.topvisible = new_topvisible
-
- # empty lines in context pane:
- context_strings = [""] * max(0, self.context_depth - len(self.info))
- # followed by the context hint lines:
- context_strings += [x[2] for x in self.info[-self.context_depth:]]
- self.label["text"] = '\n'.join(context_strings)
-
- def timer_event(self):
- if self.label:
- self.update_code_context()
- self.text.after(UPDATEINTERVAL, self.timer_event)
-
- def font_timer_event(self):
- newtextfont = self.text["font"]
- if self.label and newtextfont != self.textfont:
- self.textfont = newtextfont
- self.label["font"] = self.textfont
- self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
diff --git a/sys/lib/python/idlelib/ColorDelegator.py b/sys/lib/python/idlelib/ColorDelegator.py
deleted file mode 100644
index e55f9e6b7..000000000
--- a/sys/lib/python/idlelib/ColorDelegator.py
+++ /dev/null
@@ -1,263 +0,0 @@
-import time
-import re
-import keyword
-import __builtin__
-from Tkinter import *
-from Delegator import Delegator
-from configHandler import idleConf
-
-DEBUG = False
-
-def any(name, alternates):
- "Return a named group pattern matching list of alternates."
- return "(?P<%s>" % name + "|".join(alternates) + ")"
-
-def make_pat():
- kw = r"\b" + any("KEYWORD", keyword.kwlist) + r"\b"
- builtinlist = [str(name) for name in dir(__builtin__)
- if not name.startswith('_')]
- # self.file = file("file") :
- # 1st 'file' colorized normal, 2nd as builtin, 3rd as string
- builtin = r"([^.'\"\\#]\b|^)" + any("BUILTIN", builtinlist) + r"\b"
- comment = any("COMMENT", [r"#[^\n]*"])
- sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
- dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
- sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
- dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?'
- string = any("STRING", [sq3string, dq3string, sqstring, dqstring])
- return kw + "|" + builtin + "|" + comment + "|" + string +\
- "|" + any("SYNC", [r"\n"])
-
-prog = re.compile(make_pat(), re.S)
-idprog = re.compile(r"\s+(\w+)", re.S)
-asprog = re.compile(r".*?\b(as)\b")
-
-class ColorDelegator(Delegator):
-
- def __init__(self):
- Delegator.__init__(self)
- self.prog = prog
- self.idprog = idprog
- self.asprog = asprog
- self.LoadTagDefs()
-
- def setdelegate(self, delegate):
- if self.delegate is not None:
- self.unbind("<<toggle-auto-coloring>>")
- Delegator.setdelegate(self, delegate)
- if delegate is not None:
- self.config_colors()
- self.bind("<<toggle-auto-coloring>>", self.toggle_colorize_event)
- self.notify_range("1.0", "end")
-
- def config_colors(self):
- for tag, cnf in self.tagdefs.items():
- if cnf:
- self.tag_configure(tag, **cnf)
- self.tag_raise('sel')
-
- def LoadTagDefs(self):
- theme = idleConf.GetOption('main','Theme','name')
- self.tagdefs = {
- "COMMENT": idleConf.GetHighlight(theme, "comment"),
- "KEYWORD": idleConf.GetHighlight(theme, "keyword"),
- "BUILTIN": idleConf.GetHighlight(theme, "builtin"),
- "STRING": idleConf.GetHighlight(theme, "string"),
- "DEFINITION": idleConf.GetHighlight(theme, "definition"),
- "SYNC": {'background':None,'foreground':None},
- "TODO": {'background':None,'foreground':None},
- "BREAK": idleConf.GetHighlight(theme, "break"),
- "ERROR": idleConf.GetHighlight(theme, "error"),
- # The following is used by ReplaceDialog:
- "hit": idleConf.GetHighlight(theme, "hit"),
- }
-
- if DEBUG: print 'tagdefs',self.tagdefs
-
- def insert(self, index, chars, tags=None):
- index = self.index(index)
- self.delegate.insert(index, chars, tags)
- self.notify_range(index, index + "+%dc" % len(chars))
-
- def delete(self, index1, index2=None):
- index1 = self.index(index1)
- self.delegate.delete(index1, index2)
- self.notify_range(index1)
-
- after_id = None
- allow_colorizing = True
- colorizing = False
-
- def notify_range(self, index1, index2=None):
- self.tag_add("TODO", index1, index2)
- if self.after_id:
- if DEBUG: print "colorizing already scheduled"
- return
- if self.colorizing:
- self.stop_colorizing = True
- if DEBUG: print "stop colorizing"
- if self.allow_colorizing:
- if DEBUG: print "schedule colorizing"
- self.after_id = self.after(1, self.recolorize)
-
- close_when_done = None # Window to be closed when done colorizing
-
- def close(self, close_when_done=None):
- if self.after_id:
- after_id = self.after_id
- self.after_id = None
- if DEBUG: print "cancel scheduled recolorizer"
- self.after_cancel(after_id)
- self.allow_colorizing = False
- self.stop_colorizing = True
- if close_when_done:
- if not self.colorizing:
- close_when_done.destroy()
- else:
- self.close_when_done = close_when_done
-
- def toggle_colorize_event(self, event):
- if self.after_id:
- after_id = self.after_id
- self.after_id = None
- if DEBUG: print "cancel scheduled recolorizer"
- self.after_cancel(after_id)
- if self.allow_colorizing and self.colorizing:
- if DEBUG: print "stop colorizing"
- self.stop_colorizing = True
- self.allow_colorizing = not self.allow_colorizing
- if self.allow_colorizing and not self.colorizing:
- self.after_id = self.after(1, self.recolorize)
- if DEBUG:
- print "auto colorizing turned",\
- self.allow_colorizing and "on" or "off"
- return "break"
-
- def recolorize(self):
- self.after_id = None
- if not self.delegate:
- if DEBUG: print "no delegate"
- return
- if not self.allow_colorizing:
- if DEBUG: print "auto colorizing is off"
- return
- if self.colorizing:
- if DEBUG: print "already colorizing"
- return
- try:
- self.stop_colorizing = False
- self.colorizing = True
- if DEBUG: print "colorizing..."
- t0 = time.clock()
- self.recolorize_main()
- t1 = time.clock()
- if DEBUG: print "%.3f seconds" % (t1-t0)
- finally:
- self.colorizing = False
- if self.allow_colorizing and self.tag_nextrange("TODO", "1.0"):
- if DEBUG: print "reschedule colorizing"
- self.after_id = self.after(1, self.recolorize)
- if self.close_when_done:
- top = self.close_when_done
- self.close_when_done = None
- top.destroy()
-
- def recolorize_main(self):
- next = "1.0"
- while True:
- item = self.tag_nextrange("TODO", next)
- if not item:
- break
- head, tail = item
- self.tag_remove("SYNC", head, tail)
- item = self.tag_prevrange("SYNC", head)
- if item:
- head = item[1]
- else:
- head = "1.0"
-
- chars = ""
- next = head
- lines_to_get = 1
- ok = False
- while not ok:
- mark = next
- next = self.index(mark + "+%d lines linestart" %
- lines_to_get)
- lines_to_get = min(lines_to_get * 2, 100)
- ok = "SYNC" in self.tag_names(next + "-1c")
- line = self.get(mark, next)
- ##print head, "get", mark, next, "->", repr(line)
- if not line:
- return
- for tag in self.tagdefs.keys():
- self.tag_remove(tag, mark, next)
- chars = chars + line
- m = self.prog.search(chars)
- while m:
- for key, value in m.groupdict().items():
- if value:
- a, b = m.span(key)
- self.tag_add(key,
- head + "+%dc" % a,
- head + "+%dc" % b)
- if value in ("def", "class"):
- m1 = self.idprog.match(chars, b)
- if m1:
- a, b = m1.span(1)
- self.tag_add("DEFINITION",
- head + "+%dc" % a,
- head + "+%dc" % b)
- elif value == "import":
- # color all the "as" words on same line, except
- # if in a comment; cheap approximation to the
- # truth
- if '#' in chars:
- endpos = chars.index('#')
- else:
- endpos = len(chars)
- while True:
- m1 = self.asprog.match(chars, b, endpos)
- if not m1:
- break
- a, b = m1.span(1)
- self.tag_add("KEYWORD",
- head + "+%dc" % a,
- head + "+%dc" % b)
- m = self.prog.search(chars, m.end())
- if "SYNC" in self.tag_names(next + "-1c"):
- head = next
- chars = ""
- else:
- ok = False
- if not ok:
- # We're in an inconsistent state, and the call to
- # update may tell us to stop. It may also change
- # the correct value for "next" (since this is a
- # line.col string, not a true mark). So leave a
- # crumb telling the next invocation to resume here
- # in case update tells us to leave.
- self.tag_add("TODO", next)
- self.update()
- if self.stop_colorizing:
- if DEBUG: print "colorizing stopped"
- return
-
- def removecolors(self):
- for tag in self.tagdefs.keys():
- self.tag_remove(tag, "1.0", "end")
-
-def main():
- from Percolator import Percolator
- root = Tk()
- root.wm_protocol("WM_DELETE_WINDOW", root.quit)
- text = Text(background="white")
- text.pack(expand=1, fill="both")
- text.focus_set()
- p = Percolator(text)
- d = ColorDelegator()
- p.insertfilter(d)
- root.mainloop()
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/idlelib/Debugger.py b/sys/lib/python/idlelib/Debugger.py
deleted file mode 100644
index f56460aad..000000000
--- a/sys/lib/python/idlelib/Debugger.py
+++ /dev/null
@@ -1,481 +0,0 @@
-import os
-import bdb
-import types
-from Tkinter import *
-from WindowList import ListedToplevel
-from ScrolledList import ScrolledList
-import macosxSupport
-
-
-class Idb(bdb.Bdb):
-
- def __init__(self, gui):
- self.gui = gui
- bdb.Bdb.__init__(self)
-
- def user_line(self, frame):
- if self.in_rpc_code(frame):
- self.set_step()
- return
- message = self.__frame2message(frame)
- self.gui.interaction(message, frame)
-
- def user_exception(self, frame, info):
- if self.in_rpc_code(frame):
- self.set_step()
- return
- message = self.__frame2message(frame)
- self.gui.interaction(message, frame, info)
-
- def in_rpc_code(self, frame):
- if frame.f_code.co_filename.count('rpc.py'):
- return True
- else:
- prev_frame = frame.f_back
- if prev_frame.f_code.co_filename.count('Debugger.py'):
- # (that test will catch both Debugger.py and RemoteDebugger.py)
- return False
- return self.in_rpc_code(prev_frame)
-
- def __frame2message(self, frame):
- code = frame.f_code
- filename = code.co_filename
- lineno = frame.f_lineno
- basename = os.path.basename(filename)
- message = "%s:%s" % (basename, lineno)
- if code.co_name != "?":
- message = "%s: %s()" % (message, code.co_name)
- return message
-
-
-class Debugger:
-
- vstack = vsource = vlocals = vglobals = None
-
- def __init__(self, pyshell, idb=None):
- if idb is None:
- idb = Idb(self)
- self.pyshell = pyshell
- self.idb = idb
- self.frame = None
- self.make_gui()
- self.interacting = 0
-
- def run(self, *args):
- try:
- self.interacting = 1
- return self.idb.run(*args)
- finally:
- self.interacting = 0
-
- def close(self, event=None):
- if self.interacting:
- self.top.bell()
- return
- if self.stackviewer:
- self.stackviewer.close(); self.stackviewer = None
- # Clean up pyshell if user clicked debugger control close widget.
- # (Causes a harmless extra cycle through close_debugger() if user
- # toggled debugger from pyshell Debug menu)
- self.pyshell.close_debugger()
- # Now close the debugger control window....
- self.top.destroy()
-
- def make_gui(self):
- pyshell = self.pyshell
- self.flist = pyshell.flist
- self.root = root = pyshell.root
- self.top = top = ListedToplevel(root)
- self.top.wm_title("Debug Control")
- self.top.wm_iconname("Debug")
- top.wm_protocol("WM_DELETE_WINDOW", self.close)
- self.top.bind("<Escape>", self.close)
- #
- self.bframe = bframe = Frame(top)
- self.bframe.pack(anchor="w")
- self.buttons = bl = []
- #
- self.bcont = b = Button(bframe, text="Go", command=self.cont)
- bl.append(b)
- self.bstep = b = Button(bframe, text="Step", command=self.step)
- bl.append(b)
- self.bnext = b = Button(bframe, text="Over", command=self.next)
- bl.append(b)
- self.bret = b = Button(bframe, text="Out", command=self.ret)
- bl.append(b)
- self.bret = b = Button(bframe, text="Quit", command=self.quit)
- bl.append(b)
- #
- for b in bl:
- b.configure(state="disabled")
- b.pack(side="left")
- #
- self.cframe = cframe = Frame(bframe)
- self.cframe.pack(side="left")
- #
- if not self.vstack:
- self.__class__.vstack = BooleanVar(top)
- self.vstack.set(1)
- self.bstack = Checkbutton(cframe,
- text="Stack", command=self.show_stack, variable=self.vstack)
- self.bstack.grid(row=0, column=0)
- if not self.vsource:
- self.__class__.vsource = BooleanVar(top)
- self.bsource = Checkbutton(cframe,
- text="Source", command=self.show_source, variable=self.vsource)
- self.bsource.grid(row=0, column=1)
- if not self.vlocals:
- self.__class__.vlocals = BooleanVar(top)
- self.vlocals.set(1)
- self.blocals = Checkbutton(cframe,
- text="Locals", command=self.show_locals, variable=self.vlocals)
- self.blocals.grid(row=1, column=0)
- if not self.vglobals:
- self.__class__.vglobals = BooleanVar(top)
- self.bglobals = Checkbutton(cframe,
- text="Globals", command=self.show_globals, variable=self.vglobals)
- self.bglobals.grid(row=1, column=1)
- #
- self.status = Label(top, anchor="w")
- self.status.pack(anchor="w")
- self.error = Label(top, anchor="w")
- self.error.pack(anchor="w", fill="x")
- self.errorbg = self.error.cget("background")
- #
- self.fstack = Frame(top, height=1)
- self.fstack.pack(expand=1, fill="both")
- self.flocals = Frame(top)
- self.flocals.pack(expand=1, fill="both")
- self.fglobals = Frame(top, height=1)
- self.fglobals.pack(expand=1, fill="both")
- #
- if self.vstack.get():
- self.show_stack()
- if self.vlocals.get():
- self.show_locals()
- if self.vglobals.get():
- self.show_globals()
-
- def interaction(self, message, frame, info=None):
- self.frame = frame
- self.status.configure(text=message)
- #
- if info:
- type, value, tb = info
- try:
- m1 = type.__name__
- except AttributeError:
- m1 = "%s" % str(type)
- if value is not None:
- try:
- m1 = "%s: %s" % (m1, str(value))
- except:
- pass
- bg = "yellow"
- else:
- m1 = ""
- tb = None
- bg = self.errorbg
- self.error.configure(text=m1, background=bg)
- #
- sv = self.stackviewer
- if sv:
- stack, i = self.idb.get_stack(self.frame, tb)
- sv.load_stack(stack, i)
- #
- self.show_variables(1)
- #
- if self.vsource.get():
- self.sync_source_line()
- #
- for b in self.buttons:
- b.configure(state="normal")
- #
- self.top.wakeup()
- self.root.mainloop()
- #
- for b in self.buttons:
- b.configure(state="disabled")
- self.status.configure(text="")
- self.error.configure(text="", background=self.errorbg)
- self.frame = None
-
- def sync_source_line(self):
- frame = self.frame
- if not frame:
- return
- filename, lineno = self.__frame2fileline(frame)
- if filename[:1] + filename[-1:] != "<>" and os.path.exists(filename):
- self.flist.gotofileline(filename, lineno)
-
- def __frame2fileline(self, frame):
- code = frame.f_code
- filename = code.co_filename
- lineno = frame.f_lineno
- return filename, lineno
-
- def cont(self):
- self.idb.set_continue()
- self.root.quit()
-
- def step(self):
- self.idb.set_step()
- self.root.quit()
-
- def next(self):
- self.idb.set_next(self.frame)
- self.root.quit()
-
- def ret(self):
- self.idb.set_return(self.frame)
- self.root.quit()
-
- def quit(self):
- self.idb.set_quit()
- self.root.quit()
-
- stackviewer = None
-
- def show_stack(self):
- if not self.stackviewer and self.vstack.get():
- self.stackviewer = sv = StackViewer(self.fstack, self.flist, self)
- if self.frame:
- stack, i = self.idb.get_stack(self.frame, None)
- sv.load_stack(stack, i)
- else:
- sv = self.stackviewer
- if sv and not self.vstack.get():
- self.stackviewer = None
- sv.close()
- self.fstack['height'] = 1
-
- def show_source(self):
- if self.vsource.get():
- self.sync_source_line()
-
- def show_frame(self, (frame, lineno)):
- self.frame = frame
- self.show_variables()
-
- localsviewer = None
- globalsviewer = None
-
- def show_locals(self):
- lv = self.localsviewer
- if self.vlocals.get():
- if not lv:
- self.localsviewer = NamespaceViewer(self.flocals, "Locals")
- else:
- if lv:
- self.localsviewer = None
- lv.close()
- self.flocals['height'] = 1
- self.show_variables()
-
- def show_globals(self):
- gv = self.globalsviewer
- if self.vglobals.get():
- if not gv:
- self.globalsviewer = NamespaceViewer(self.fglobals, "Globals")
- else:
- if gv:
- self.globalsviewer = None
- gv.close()
- self.fglobals['height'] = 1
- self.show_variables()
-
- def show_variables(self, force=0):
- lv = self.localsviewer
- gv = self.globalsviewer
- frame = self.frame
- if not frame:
- ldict = gdict = None
- else:
- ldict = frame.f_locals
- gdict = frame.f_globals
- if lv and gv and ldict is gdict:
- ldict = None
- if lv:
- lv.load_dict(ldict, force, self.pyshell.interp.rpcclt)
- if gv:
- gv.load_dict(gdict, force, self.pyshell.interp.rpcclt)
-
- def set_breakpoint_here(self, filename, lineno):
- self.idb.set_break(filename, lineno)
-
- def clear_breakpoint_here(self, filename, lineno):
- self.idb.clear_break(filename, lineno)
-
- def clear_file_breaks(self, filename):
- self.idb.clear_all_file_breaks(filename)
-
- def load_breakpoints(self):
- "Load PyShellEditorWindow breakpoints into subprocess debugger"
- pyshell_edit_windows = self.pyshell.flist.inversedict.keys()
- for editwin in pyshell_edit_windows:
- filename = editwin.io.filename
- try:
- for lineno in editwin.breakpoints:
- self.set_breakpoint_here(filename, lineno)
- except AttributeError:
- continue
-
-class StackViewer(ScrolledList):
-
- def __init__(self, master, flist, gui):
- if macosxSupport.runningAsOSXApp():
- # At least on with the stock AquaTk version on OSX 10.4 you'll
- # get an shaking GUI that eventually kills IDLE if the width
- # argument is specified.
- ScrolledList.__init__(self, master)
- else:
- ScrolledList.__init__(self, master, width=80)
- self.flist = flist
- self.gui = gui
- self.stack = []
-
- def load_stack(self, stack, index=None):
- self.stack = stack
- self.clear()
- for i in range(len(stack)):
- frame, lineno = stack[i]
- try:
- modname = frame.f_globals["__name__"]
- except:
- modname = "?"
- code = frame.f_code
- filename = code.co_filename
- funcname = code.co_name
- import linecache
- sourceline = linecache.getline(filename, lineno)
- import string
- sourceline = string.strip(sourceline)
- if funcname in ("?", "", None):
- item = "%s, line %d: %s" % (modname, lineno, sourceline)
- else:
- item = "%s.%s(), line %d: %s" % (modname, funcname,
- lineno, sourceline)
- if i == index:
- item = "> " + item
- self.append(item)
- if index is not None:
- self.select(index)
-
- def popup_event(self, event):
- "override base method"
- if self.stack:
- return ScrolledList.popup_event(self, event)
-
- def fill_menu(self):
- "override base method"
- menu = self.menu
- menu.add_command(label="Go to source line",
- command=self.goto_source_line)
- menu.add_command(label="Show stack frame",
- command=self.show_stack_frame)
-
- def on_select(self, index):
- "override base method"
- if 0 <= index < len(self.stack):
- self.gui.show_frame(self.stack[index])
-
- def on_double(self, index):
- "override base method"
- self.show_source(index)
-
- def goto_source_line(self):
- index = self.listbox.index("active")
- self.show_source(index)
-
- def show_stack_frame(self):
- index = self.listbox.index("active")
- if 0 <= index < len(self.stack):
- self.gui.show_frame(self.stack[index])
-
- def show_source(self, index):
- if not (0 <= index < len(self.stack)):
- return
- frame, lineno = self.stack[index]
- code = frame.f_code
- filename = code.co_filename
- if os.path.isfile(filename):
- edit = self.flist.open(filename)
- if edit:
- edit.gotoline(lineno)
-
-
-class NamespaceViewer:
-
- def __init__(self, master, title, dict=None):
- width = 0
- height = 40
- if dict:
- height = 20*len(dict) # XXX 20 == observed height of Entry widget
- self.master = master
- self.title = title
- import repr
- self.repr = repr.Repr()
- self.repr.maxstring = 60
- self.repr.maxother = 60
- self.frame = frame = Frame(master)
- self.frame.pack(expand=1, fill="both")
- self.label = Label(frame, text=title, borderwidth=2, relief="groove")
- self.label.pack(fill="x")
- self.vbar = vbar = Scrollbar(frame, name="vbar")
- vbar.pack(side="right", fill="y")
- self.canvas = canvas = Canvas(frame,
- height=min(300, max(40, height)),
- scrollregion=(0, 0, width, height))
- canvas.pack(side="left", fill="both", expand=1)
- vbar["command"] = canvas.yview
- canvas["yscrollcommand"] = vbar.set
- self.subframe = subframe = Frame(canvas)
- self.sfid = canvas.create_window(0, 0, window=subframe, anchor="nw")
- self.load_dict(dict)
-
- dict = -1
-
- def load_dict(self, dict, force=0, rpc_client=None):
- if dict is self.dict and not force:
- return
- subframe = self.subframe
- frame = self.frame
- for c in subframe.children.values():
- c.destroy()
- self.dict = None
- if not dict:
- l = Label(subframe, text="None")
- l.grid(row=0, column=0)
- else:
- names = dict.keys()
- names.sort()
- row = 0
- for name in names:
- value = dict[name]
- svalue = self.repr.repr(value) # repr(value)
- # Strip extra quotes caused by calling repr on the (already)
- # repr'd value sent across the RPC interface:
- if rpc_client:
- svalue = svalue[1:-1]
- l = Label(subframe, text=name)
- l.grid(row=row, column=0, sticky="nw")
- l = Entry(subframe, width=0, borderwidth=0)
- l.insert(0, svalue)
- l.grid(row=row, column=1, sticky="nw")
- row = row+1
- self.dict = dict
- # XXX Could we use a <Configure> callback for the following?
- subframe.update_idletasks() # Alas!
- width = subframe.winfo_reqwidth()
- height = subframe.winfo_reqheight()
- canvas = self.canvas
- self.canvas["scrollregion"] = (0, 0, width, height)
- if height > 300:
- canvas["height"] = 300
- frame.pack(expand=1)
- else:
- canvas["height"] = height
- frame.pack(expand=0)
-
- def close(self):
- self.frame.destroy()
diff --git a/sys/lib/python/idlelib/Delegator.py b/sys/lib/python/idlelib/Delegator.py
deleted file mode 100644
index 6125591fe..000000000
--- a/sys/lib/python/idlelib/Delegator.py
+++ /dev/null
@@ -1,33 +0,0 @@
-class Delegator:
-
- # The cache is only used to be able to change delegates!
-
- def __init__(self, delegate=None):
- self.delegate = delegate
- self.__cache = {}
-
- def __getattr__(self, name):
- attr = getattr(self.delegate, name) # May raise AttributeError
- setattr(self, name, attr)
- self.__cache[name] = attr
- return attr
-
- def resetcache(self):
- for key in self.__cache.keys():
- try:
- delattr(self, key)
- except AttributeError:
- pass
- self.__cache.clear()
-
- def cachereport(self):
- keys = self.__cache.keys()
- keys.sort()
- print keys
-
- def setdelegate(self, delegate):
- self.resetcache()
- self.delegate = delegate
-
- def getdelegate(self):
- return self.delegate
diff --git a/sys/lib/python/idlelib/EditorWindow.py b/sys/lib/python/idlelib/EditorWindow.py
deleted file mode 100644
index 259c7f340..000000000
--- a/sys/lib/python/idlelib/EditorWindow.py
+++ /dev/null
@@ -1,1511 +0,0 @@
-import sys
-import os
-import re
-import imp
-from itertools import count
-from Tkinter import *
-import tkSimpleDialog
-import tkMessageBox
-from MultiCall import MultiCallCreator
-
-import webbrowser
-import idlever
-import WindowList
-import SearchDialog
-import GrepDialog
-import ReplaceDialog
-import PyParse
-from configHandler import idleConf
-import aboutDialog, textView, configDialog
-import macosxSupport
-
-# The default tab setting for a Text widget, in average-width characters.
-TK_TABWIDTH_DEFAULT = 8
-
-def _find_module(fullname, path=None):
- """Version of imp.find_module() that handles hierarchical module names"""
-
- file = None
- for tgt in fullname.split('.'):
- if file is not None:
- file.close() # close intermediate files
- (file, filename, descr) = imp.find_module(tgt, path)
- if descr[2] == imp.PY_SOURCE:
- break # find but not load the source file
- module = imp.load_module(tgt, file, filename, descr)
- try:
- path = module.__path__
- except AttributeError:
- raise ImportError, 'No source for module ' + module.__name__
- return file, filename, descr
-
-class EditorWindow(object):
- from Percolator import Percolator
- from ColorDelegator import ColorDelegator
- from UndoDelegator import UndoDelegator
- from IOBinding import IOBinding, filesystemencoding, encoding
- import Bindings
- from Tkinter import Toplevel
- from MultiStatusBar import MultiStatusBar
-
- help_url = None
-
- def __init__(self, flist=None, filename=None, key=None, root=None):
- if EditorWindow.help_url is None:
- dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
- if sys.platform.count('linux'):
- # look for html docs in a couple of standard places
- pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
- if os.path.isdir('/var/www/html/python/'): # "python2" rpm
- dochome = '/var/www/html/python/index.html'
- else:
- basepath = '/usr/share/doc/' # standard location
- dochome = os.path.join(basepath, pyver,
- 'Doc', 'index.html')
- elif sys.platform[:3] == 'win':
- chmfile = os.path.join(sys.prefix, 'Doc',
- 'Python%d%d.chm' % sys.version_info[:2])
- if os.path.isfile(chmfile):
- dochome = chmfile
-
- elif macosxSupport.runningAsOSXApp():
- # documentation is stored inside the python framework
- dochome = os.path.join(sys.prefix,
- 'Resources/English.lproj/Documentation/index.html')
-
- dochome = os.path.normpath(dochome)
- if os.path.isfile(dochome):
- EditorWindow.help_url = dochome
- if sys.platform == 'darwin':
- # Safari requires real file:-URLs
- EditorWindow.help_url = 'file://' + EditorWindow.help_url
- else:
- EditorWindow.help_url = "http://www.python.org/doc/current"
- currentTheme=idleConf.CurrentTheme()
- self.flist = flist
- root = root or flist.root
- self.root = root
- try:
- sys.ps1
- except AttributeError:
- sys.ps1 = '>>> '
- self.menubar = Menu(root)
- self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
- if flist:
- self.tkinter_vars = flist.vars
- #self.top.instance_dict makes flist.inversedict avalable to
- #configDialog.py so it can access all EditorWindow instaces
- self.top.instance_dict = flist.inversedict
- else:
- self.tkinter_vars = {} # keys: Tkinter event names
- # values: Tkinter variable instances
- self.top.instance_dict = {}
- self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
- 'recent-files.lst')
- self.vbar = vbar = Scrollbar(top, name='vbar')
- self.text_frame = text_frame = Frame(top)
- self.width = idleConf.GetOption('main','EditorWindow','width')
- self.text = text = MultiCallCreator(Text)(
- text_frame, name='text', padx=5, wrap='none',
- foreground=idleConf.GetHighlight(currentTheme,
- 'normal',fgBg='fg'),
- background=idleConf.GetHighlight(currentTheme,
- 'normal',fgBg='bg'),
- highlightcolor=idleConf.GetHighlight(currentTheme,
- 'hilite',fgBg='fg'),
- highlightbackground=idleConf.GetHighlight(currentTheme,
- 'hilite',fgBg='bg'),
- insertbackground=idleConf.GetHighlight(currentTheme,
- 'cursor',fgBg='fg'),
- width=self.width,
- height=idleConf.GetOption('main','EditorWindow','height') )
- self.top.focused_widget = self.text
-
- self.createmenubar()
- self.apply_bindings()
-
- self.top.protocol("WM_DELETE_WINDOW", self.close)
- self.top.bind("<<close-window>>", self.close_event)
- if macosxSupport.runningAsOSXApp():
- # Command-W on editorwindows doesn't work without this.
- text.bind('<<close-window>>', self.close_event)
- text.bind("<<cut>>", self.cut)
- text.bind("<<copy>>", self.copy)
- text.bind("<<paste>>", self.paste)
- text.bind("<<center-insert>>", self.center_insert_event)
- text.bind("<<help>>", self.help_dialog)
- text.bind("<<python-docs>>", self.python_docs)
- text.bind("<<about-idle>>", self.about_dialog)
- text.bind("<<open-config-dialog>>", self.config_dialog)
- text.bind("<<open-module>>", self.open_module)
- text.bind("<<do-nothing>>", lambda event: "break")
- text.bind("<<select-all>>", self.select_all)
- text.bind("<<remove-selection>>", self.remove_selection)
- text.bind("<<find>>", self.find_event)
- text.bind("<<find-again>>", self.find_again_event)
- text.bind("<<find-in-files>>", self.find_in_files_event)
- text.bind("<<find-selection>>", self.find_selection_event)
- text.bind("<<replace>>", self.replace_event)
- text.bind("<<goto-line>>", self.goto_line_event)
- text.bind("<3>", self.right_menu_event)
- text.bind("<<smart-backspace>>",self.smart_backspace_event)
- text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
- text.bind("<<smart-indent>>",self.smart_indent_event)
- text.bind("<<indent-region>>",self.indent_region_event)
- text.bind("<<dedent-region>>",self.dedent_region_event)
- text.bind("<<comment-region>>",self.comment_region_event)
- text.bind("<<uncomment-region>>",self.uncomment_region_event)
- text.bind("<<tabify-region>>",self.tabify_region_event)
- text.bind("<<untabify-region>>",self.untabify_region_event)
- text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
- text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
- text.bind("<Left>", self.move_at_edge_if_selection(0))
- text.bind("<Right>", self.move_at_edge_if_selection(1))
- text.bind("<<del-word-left>>", self.del_word_left)
- text.bind("<<del-word-right>>", self.del_word_right)
-
- if flist:
- flist.inversedict[self] = key
- if key:
- flist.dict[key] = self
- text.bind("<<open-new-window>>", self.new_callback)
- text.bind("<<close-all-windows>>", self.flist.close_all_callback)
- text.bind("<<open-class-browser>>", self.open_class_browser)
- text.bind("<<open-path-browser>>", self.open_path_browser)
-
- self.set_status_bar()
- vbar['command'] = text.yview
- vbar.pack(side=RIGHT, fill=Y)
- text['yscrollcommand'] = vbar.set
- fontWeight = 'normal'
- if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
- fontWeight='bold'
- text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
- idleConf.GetOption('main', 'EditorWindow', 'font-size'),
- fontWeight))
- text_frame.pack(side=LEFT, fill=BOTH, expand=1)
- text.pack(side=TOP, fill=BOTH, expand=1)
- text.focus_set()
-
- # usetabs true -> literal tab characters are used by indent and
- # dedent cmds, possibly mixed with spaces if
- # indentwidth is not a multiple of tabwidth,
- # which will cause Tabnanny to nag!
- # false -> tab characters are converted to spaces by indent
- # and dedent cmds, and ditto TAB keystrokes
- # Although use-spaces=0 can be configured manually in config-main.def,
- # configuration of tabs v. spaces is not supported in the configuration
- # dialog. IDLE promotes the preferred Python indentation: use spaces!
- usespaces = idleConf.GetOption('main', 'Indent', 'use-spaces', type='bool')
- self.usetabs = not usespaces
-
- # tabwidth is the display width of a literal tab character.
- # CAUTION: telling Tk to use anything other than its default
- # tab setting causes it to use an entirely different tabbing algorithm,
- # treating tab stops as fixed distances from the left margin.
- # Nobody expects this, so for now tabwidth should never be changed.
- self.tabwidth = 8 # must remain 8 until Tk is fixed.
-
- # indentwidth is the number of screen characters per indent level.
- # The recommended Python indentation is four spaces.
- self.indentwidth = self.tabwidth
- self.set_notabs_indentwidth()
-
- # If context_use_ps1 is true, parsing searches back for a ps1 line;
- # else searches for a popular (if, def, ...) Python stmt.
- self.context_use_ps1 = False
-
- # When searching backwards for a reliable place to begin parsing,
- # first start num_context_lines[0] lines back, then
- # num_context_lines[1] lines back if that didn't work, and so on.
- # The last value should be huge (larger than the # of lines in a
- # conceivable file).
- # Making the initial values larger slows things down more often.
- self.num_context_lines = 50, 500, 5000000
-
- self.per = per = self.Percolator(text)
- if self.ispythonsource(filename):
- self.color = color = self.ColorDelegator()
- per.insertfilter(color)
- else:
- self.color = None
-
- self.undo = undo = self.UndoDelegator()
- per.insertfilter(undo)
- text.undo_block_start = undo.undo_block_start
- text.undo_block_stop = undo.undo_block_stop
- undo.set_saved_change_hook(self.saved_change_hook)
-
- # IOBinding implements file I/O and printing functionality
- self.io = io = self.IOBinding(self)
- io.set_filename_change_hook(self.filename_change_hook)
-
- # Create the recent files submenu
- self.recent_files_menu = Menu(self.menubar)
- self.menudict['file'].insert_cascade(3, label='Recent Files',
- underline=0,
- menu=self.recent_files_menu)
- self.update_recent_files_list()
-
- if filename:
- if os.path.exists(filename) and not os.path.isdir(filename):
- io.loadfile(filename)
- else:
- io.set_filename(filename)
- self.saved_change_hook()
-
- self.set_indentation_params(self.ispythonsource(filename))
-
- self.load_extensions()
-
- menu = self.menudict.get('windows')
- if menu:
- end = menu.index("end")
- if end is None:
- end = -1
- if end >= 0:
- menu.add_separator()
- end = end + 1
- self.wmenu_end = end
- WindowList.register_callback(self.postwindowsmenu)
-
- # Some abstractions so IDLE extensions are cross-IDE
- self.askyesno = tkMessageBox.askyesno
- self.askinteger = tkSimpleDialog.askinteger
- self.showerror = tkMessageBox.showerror
-
- def _filename_to_unicode(self, filename):
- """convert filename to unicode in order to display it in Tk"""
- if isinstance(filename, unicode) or not filename:
- return filename
- else:
- try:
- return filename.decode(self.filesystemencoding)
- except UnicodeDecodeError:
- # XXX
- try:
- return filename.decode(self.encoding)
- except UnicodeDecodeError:
- # byte-to-byte conversion
- return filename.decode('iso8859-1')
-
- def new_callback(self, event):
- dirname, basename = self.io.defaultfilename()
- self.flist.new(dirname)
- return "break"
-
- def set_status_bar(self):
- self.status_bar = self.MultiStatusBar(self.top)
- if macosxSupport.runningAsOSXApp():
- # Insert some padding to avoid obscuring some of the statusbar
- # by the resize widget.
- self.status_bar.set_label('_padding1', ' ', side=RIGHT)
- self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
- self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
- self.status_bar.pack(side=BOTTOM, fill=X)
- self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
- self.text.event_add("<<set-line-and-column>>",
- "<KeyRelease>", "<ButtonRelease>")
- self.text.after_idle(self.set_line_and_column)
-
- def set_line_and_column(self, event=None):
- line, column = self.text.index(INSERT).split('.')
- self.status_bar.set_label('column', 'Col: %s' % column)
- self.status_bar.set_label('line', 'Ln: %s' % line)
-
- menu_specs = [
- ("file", "_File"),
- ("edit", "_Edit"),
- ("format", "F_ormat"),
- ("run", "_Run"),
- ("options", "_Options"),
- ("windows", "_Windows"),
- ("help", "_Help"),
- ]
-
- if macosxSupport.runningAsOSXApp():
- del menu_specs[-3]
- menu_specs[-2] = ("windows", "_Window")
-
-
- def createmenubar(self):
- mbar = self.menubar
- self.menudict = menudict = {}
- for name, label in self.menu_specs:
- underline, label = prepstr(label)
- menudict[name] = menu = Menu(mbar, name=name)
- mbar.add_cascade(label=label, menu=menu, underline=underline)
-
- if sys.platform == 'darwin' and '.framework' in sys.executable:
- # Insert the application menu
- menudict['application'] = menu = Menu(mbar, name='apple')
- mbar.add_cascade(label='IDLE', menu=menu)
-
- self.fill_menus()
- self.base_helpmenu_length = self.menudict['help'].index(END)
- self.reset_help_menu_entries()
-
- def postwindowsmenu(self):
- # Only called when Windows menu exists
- menu = self.menudict['windows']
- end = menu.index("end")
- if end is None:
- end = -1
- if end > self.wmenu_end:
- menu.delete(self.wmenu_end+1, end)
- WindowList.add_windows_to_menu(menu)
-
- rmenu = None
-
- def right_menu_event(self, event):
- self.text.tag_remove("sel", "1.0", "end")
- self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
- if not self.rmenu:
- self.make_rmenu()
- rmenu = self.rmenu
- self.event = event
- iswin = sys.platform[:3] == 'win'
- if iswin:
- self.text.config(cursor="arrow")
- rmenu.tk_popup(event.x_root, event.y_root)
- if iswin:
- self.text.config(cursor="ibeam")
-
- rmenu_specs = [
- # ("Label", "<<virtual-event>>"), ...
- ("Close", "<<close-window>>"), # Example
- ]
-
- def make_rmenu(self):
- rmenu = Menu(self.text, tearoff=0)
- for label, eventname in self.rmenu_specs:
- def command(text=self.text, eventname=eventname):
- text.event_generate(eventname)
- rmenu.add_command(label=label, command=command)
- self.rmenu = rmenu
-
- def about_dialog(self, event=None):
- aboutDialog.AboutDialog(self.top,'About IDLE')
-
- def config_dialog(self, event=None):
- configDialog.ConfigDialog(self.top,'Settings')
-
- def help_dialog(self, event=None):
- fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
- textView.TextViewer(self.top,'Help',fn)
-
- def python_docs(self, event=None):
- if sys.platform[:3] == 'win':
- os.startfile(self.help_url)
- else:
- webbrowser.open(self.help_url)
- return "break"
-
- def cut(self,event):
- self.text.event_generate("<<Cut>>")
- return "break"
-
- def copy(self,event):
- if not self.text.tag_ranges("sel"):
- # There is no selection, so do nothing and maybe interrupt.
- return
- self.text.event_generate("<<Copy>>")
- return "break"
-
- def paste(self,event):
- self.text.event_generate("<<Paste>>")
- return "break"
-
- def select_all(self, event=None):
- self.text.tag_add("sel", "1.0", "end-1c")
- self.text.mark_set("insert", "1.0")
- self.text.see("insert")
- return "break"
-
- def remove_selection(self, event=None):
- self.text.tag_remove("sel", "1.0", "end")
- self.text.see("insert")
-
- def move_at_edge_if_selection(self, edge_index):
- """Cursor move begins at start or end of selection
-
- When a left/right cursor key is pressed create and return to Tkinter a
- function which causes a cursor move from the associated edge of the
- selection.
-
- """
- self_text_index = self.text.index
- self_text_mark_set = self.text.mark_set
- edges_table = ("sel.first+1c", "sel.last-1c")
- def move_at_edge(event):
- if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
- try:
- self_text_index("sel.first")
- self_text_mark_set("insert", edges_table[edge_index])
- except TclError:
- pass
- return move_at_edge
-
- def del_word_left(self, event):
- self.text.event_generate('<Meta-Delete>')
- return "break"
-
- def del_word_right(self, event):
- self.text.event_generate('<Meta-d>')
- return "break"
-
- def find_event(self, event):
- SearchDialog.find(self.text)
- return "break"
-
- def find_again_event(self, event):
- SearchDialog.find_again(self.text)
- return "break"
-
- def find_selection_event(self, event):
- SearchDialog.find_selection(self.text)
- return "break"
-
- def find_in_files_event(self, event):
- GrepDialog.grep(self.text, self.io, self.flist)
- return "break"
-
- def replace_event(self, event):
- ReplaceDialog.replace(self.text)
- return "break"
-
- def goto_line_event(self, event):
- text = self.text
- lineno = tkSimpleDialog.askinteger("Goto",
- "Go to line number:",parent=text)
- if lineno is None:
- return "break"
- if lineno <= 0:
- text.bell()
- return "break"
- text.mark_set("insert", "%d.0" % lineno)
- text.see("insert")
-
- def open_module(self, event=None):
- # XXX Shouldn't this be in IOBinding or in FileList?
- try:
- name = self.text.get("sel.first", "sel.last")
- except TclError:
- name = ""
- else:
- name = name.strip()
- name = tkSimpleDialog.askstring("Module",
- "Enter the name of a Python module\n"
- "to search on sys.path and open:",
- parent=self.text, initialvalue=name)
- if name:
- name = name.strip()
- if not name:
- return
- # XXX Ought to insert current file's directory in front of path
- try:
- (f, file, (suffix, mode, type)) = _find_module(name)
- except (NameError, ImportError), msg:
- tkMessageBox.showerror("Import error", str(msg), parent=self.text)
- return
- if type != imp.PY_SOURCE:
- tkMessageBox.showerror("Unsupported type",
- "%s is not a source module" % name, parent=self.text)
- return
- if f:
- f.close()
- if self.flist:
- self.flist.open(file)
- else:
- self.io.loadfile(file)
-
- def open_class_browser(self, event=None):
- filename = self.io.filename
- if not filename:
- tkMessageBox.showerror(
- "No filename",
- "This buffer has no associated filename",
- master=self.text)
- self.text.focus_set()
- return None
- head, tail = os.path.split(filename)
- base, ext = os.path.splitext(tail)
- import ClassBrowser
- ClassBrowser.ClassBrowser(self.flist, base, [head])
-
- def open_path_browser(self, event=None):
- import PathBrowser
- PathBrowser.PathBrowser(self.flist)
-
- def gotoline(self, lineno):
- if lineno is not None and lineno > 0:
- self.text.mark_set("insert", "%d.0" % lineno)
- self.text.tag_remove("sel", "1.0", "end")
- self.text.tag_add("sel", "insert", "insert +1l")
- self.center()
-
- def ispythonsource(self, filename):
- if not filename or os.path.isdir(filename):
- return True
- base, ext = os.path.splitext(os.path.basename(filename))
- if os.path.normcase(ext) in (".py", ".pyw"):
- return True
- try:
- f = open(filename)
- line = f.readline()
- f.close()
- except IOError:
- return False
- return line.startswith('#!') and line.find('python') >= 0
-
- def close_hook(self):
- if self.flist:
- self.flist.close_edit(self)
-
- def set_close_hook(self, close_hook):
- self.close_hook = close_hook
-
- def filename_change_hook(self):
- if self.flist:
- self.flist.filename_changed_edit(self)
- self.saved_change_hook()
- self.top.update_windowlist_registry(self)
- if self.ispythonsource(self.io.filename):
- self.addcolorizer()
- else:
- self.rmcolorizer()
-
- def addcolorizer(self):
- if self.color:
- return
- self.per.removefilter(self.undo)
- self.color = self.ColorDelegator()
- self.per.insertfilter(self.color)
- self.per.insertfilter(self.undo)
-
- def rmcolorizer(self):
- if not self.color:
- return
- self.color.removecolors()
- self.per.removefilter(self.undo)
- self.per.removefilter(self.color)
- self.color = None
- self.per.insertfilter(self.undo)
-
- def ResetColorizer(self):
- "Update the colour theme if it is changed"
- # Called from configDialog.py
- if self.color:
- self.color = self.ColorDelegator()
- self.per.insertfilter(self.color)
- theme = idleConf.GetOption('main','Theme','name')
- self.text.config(idleConf.GetHighlight(theme, "normal"))
-
- def ResetFont(self):
- "Update the text widgets' font if it is changed"
- # Called from configDialog.py
- fontWeight='normal'
- if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
- fontWeight='bold'
- self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
- idleConf.GetOption('main','EditorWindow','font-size'),
- fontWeight))
-
- def RemoveKeybindings(self):
- "Remove the keybindings before they are changed."
- # Called from configDialog.py
- self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
- for event, keylist in keydefs.items():
- self.text.event_delete(event, *keylist)
- for extensionName in self.get_standard_extension_names():
- xkeydefs = idleConf.GetExtensionBindings(extensionName)
- if xkeydefs:
- for event, keylist in xkeydefs.items():
- self.text.event_delete(event, *keylist)
-
- def ApplyKeybindings(self):
- "Update the keybindings after they are changed"
- # Called from configDialog.py
- self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
- self.apply_bindings()
- for extensionName in self.get_standard_extension_names():
- xkeydefs = idleConf.GetExtensionBindings(extensionName)
- if xkeydefs:
- self.apply_bindings(xkeydefs)
- #update menu accelerators
- menuEventDict = {}
- for menu in self.Bindings.menudefs:
- menuEventDict[menu[0]] = {}
- for item in menu[1]:
- if item:
- menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
- for menubarItem in self.menudict.keys():
- menu = self.menudict[menubarItem]
- end = menu.index(END) + 1
- for index in range(0, end):
- if menu.type(index) == 'command':
- accel = menu.entrycget(index, 'accelerator')
- if accel:
- itemName = menu.entrycget(index, 'label')
- event = ''
- if menuEventDict.has_key(menubarItem):
- if menuEventDict[menubarItem].has_key(itemName):
- event = menuEventDict[menubarItem][itemName]
- if event:
- accel = get_accelerator(keydefs, event)
- menu.entryconfig(index, accelerator=accel)
-
- def set_notabs_indentwidth(self):
- "Update the indentwidth if changed and not using tabs in this window"
- # Called from configDialog.py
- if not self.usetabs:
- self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
- type='int')
-
- def reset_help_menu_entries(self):
- "Update the additional help entries on the Help menu"
- help_list = idleConf.GetAllExtraHelpSourcesList()
- helpmenu = self.menudict['help']
- # first delete the extra help entries, if any
- helpmenu_length = helpmenu.index(END)
- if helpmenu_length > self.base_helpmenu_length:
- helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
- # then rebuild them
- if help_list:
- helpmenu.add_separator()
- for entry in help_list:
- cmd = self.__extra_help_callback(entry[1])
- helpmenu.add_command(label=entry[0], command=cmd)
- # and update the menu dictionary
- self.menudict['help'] = helpmenu
-
- def __extra_help_callback(self, helpfile):
- "Create a callback with the helpfile value frozen at definition time"
- def display_extra_help(helpfile=helpfile):
- if not helpfile.startswith(('www', 'http')):
- url = os.path.normpath(helpfile)
- if sys.platform[:3] == 'win':
- os.startfile(helpfile)
- else:
- webbrowser.open(helpfile)
- return display_extra_help
-
- def update_recent_files_list(self, new_file=None):
- "Load and update the recent files list and menus"
- rf_list = []
- if os.path.exists(self.recent_files_path):
- rf_list_file = open(self.recent_files_path,'r')
- try:
- rf_list = rf_list_file.readlines()
- finally:
- rf_list_file.close()
- if new_file:
- new_file = os.path.abspath(new_file) + '\n'
- if new_file in rf_list:
- rf_list.remove(new_file) # move to top
- rf_list.insert(0, new_file)
- # clean and save the recent files list
- bad_paths = []
- for path in rf_list:
- if '\0' in path or not os.path.exists(path[0:-1]):
- bad_paths.append(path)
- rf_list = [path for path in rf_list if path not in bad_paths]
- ulchars = "1234567890ABCDEFGHIJK"
- rf_list = rf_list[0:len(ulchars)]
- rf_file = open(self.recent_files_path, 'w')
- try:
- rf_file.writelines(rf_list)
- finally:
- rf_file.close()
- # for each edit window instance, construct the recent files menu
- for instance in self.top.instance_dict.keys():
- menu = instance.recent_files_menu
- menu.delete(1, END) # clear, and rebuild:
- for i, file in zip(count(), rf_list):
- file_name = file[0:-1] # zap \n
- # make unicode string to display non-ASCII chars correctly
- ufile_name = self._filename_to_unicode(file_name)
- callback = instance.__recent_file_callback(file_name)
- menu.add_command(label=ulchars[i] + " " + ufile_name,
- command=callback,
- underline=0)
-
- def __recent_file_callback(self, file_name):
- def open_recent_file(fn_closure=file_name):
- self.io.open(editFile=fn_closure)
- return open_recent_file
-
- def saved_change_hook(self):
- short = self.short_title()
- long = self.long_title()
- if short and long:
- title = short + " - " + long
- elif short:
- title = short
- elif long:
- title = long
- else:
- title = "Untitled"
- icon = short or long or title
- if not self.get_saved():
- title = "*%s*" % title
- icon = "*%s" % icon
- self.top.wm_title(title)
- self.top.wm_iconname(icon)
-
- def get_saved(self):
- return self.undo.get_saved()
-
- def set_saved(self, flag):
- self.undo.set_saved(flag)
-
- def reset_undo(self):
- self.undo.reset_undo()
-
- def short_title(self):
- filename = self.io.filename
- if filename:
- filename = os.path.basename(filename)
- # return unicode string to display non-ASCII chars correctly
- return self._filename_to_unicode(filename)
-
- def long_title(self):
- # return unicode string to display non-ASCII chars correctly
- return self._filename_to_unicode(self.io.filename or "")
-
- def center_insert_event(self, event):
- self.center()
-
- def center(self, mark="insert"):
- text = self.text
- top, bot = self.getwindowlines()
- lineno = self.getlineno(mark)
- height = bot - top
- newtop = max(1, lineno - height//2)
- text.yview(float(newtop))
-
- def getwindowlines(self):
- text = self.text
- top = self.getlineno("@0,0")
- bot = self.getlineno("@0,65535")
- if top == bot and text.winfo_height() == 1:
- # Geometry manager hasn't run yet
- height = int(text['height'])
- bot = top + height - 1
- return top, bot
-
- def getlineno(self, mark="insert"):
- text = self.text
- return int(float(text.index(mark)))
-
- def get_geometry(self):
- "Return (width, height, x, y)"
- geom = self.top.wm_geometry()
- m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
- tuple = (map(int, m.groups()))
- return tuple
-
- def close_event(self, event):
- self.close()
-
- def maybesave(self):
- if self.io:
- if not self.get_saved():
- if self.top.state()!='normal':
- self.top.deiconify()
- self.top.lower()
- self.top.lift()
- return self.io.maybesave()
-
- def close(self):
- reply = self.maybesave()
- if str(reply) != "cancel":
- self._close()
- return reply
-
- def _close(self):
- if self.io.filename:
- self.update_recent_files_list(new_file=self.io.filename)
- WindowList.unregister_callback(self.postwindowsmenu)
- if self.close_hook:
- self.close_hook()
- self.flist = None
- colorizing = 0
- self.unload_extensions()
- self.io.close(); self.io = None
- self.undo = None # XXX
- if self.color:
- colorizing = self.color.colorizing
- doh = colorizing and self.top
- self.color.close(doh) # Cancel colorization
- self.text = None
- self.tkinter_vars = None
- self.per.close(); self.per = None
- if not colorizing:
- self.top.destroy()
-
- def load_extensions(self):
- self.extensions = {}
- self.load_standard_extensions()
-
- def unload_extensions(self):
- for ins in self.extensions.values():
- if hasattr(ins, "close"):
- ins.close()
- self.extensions = {}
-
- def load_standard_extensions(self):
- for name in self.get_standard_extension_names():
- try:
- self.load_extension(name)
- except:
- print "Failed to load extension", repr(name)
- import traceback
- traceback.print_exc()
-
- def get_standard_extension_names(self):
- return idleConf.GetExtensions(editor_only=True)
-
- def load_extension(self, name):
- try:
- mod = __import__(name, globals(), locals(), [])
- except ImportError:
- print "\nFailed to import extension: ", name
- return
- cls = getattr(mod, name)
- keydefs = idleConf.GetExtensionBindings(name)
- if hasattr(cls, "menudefs"):
- self.fill_menus(cls.menudefs, keydefs)
- ins = cls(self)
- self.extensions[name] = ins
- if keydefs:
- self.apply_bindings(keydefs)
- for vevent in keydefs.keys():
- methodname = vevent.replace("-", "_")
- while methodname[:1] == '<':
- methodname = methodname[1:]
- while methodname[-1:] == '>':
- methodname = methodname[:-1]
- methodname = methodname + "_event"
- if hasattr(ins, methodname):
- self.text.bind(vevent, getattr(ins, methodname))
-
- def apply_bindings(self, keydefs=None):
- if keydefs is None:
- keydefs = self.Bindings.default_keydefs
- text = self.text
- text.keydefs = keydefs
- for event, keylist in keydefs.items():
- if keylist:
- text.event_add(event, *keylist)
-
- def fill_menus(self, menudefs=None, keydefs=None):
- """Add appropriate entries to the menus and submenus
-
- Menus that are absent or None in self.menudict are ignored.
- """
- if menudefs is None:
- menudefs = self.Bindings.menudefs
- if keydefs is None:
- keydefs = self.Bindings.default_keydefs
- menudict = self.menudict
- text = self.text
- for mname, entrylist in menudefs:
- menu = menudict.get(mname)
- if not menu:
- continue
- for entry in entrylist:
- if not entry:
- menu.add_separator()
- else:
- label, eventname = entry
- checkbutton = (label[:1] == '!')
- if checkbutton:
- label = label[1:]
- underline, label = prepstr(label)
- accelerator = get_accelerator(keydefs, eventname)
- def command(text=text, eventname=eventname):
- text.event_generate(eventname)
- if checkbutton:
- var = self.get_var_obj(eventname, BooleanVar)
- menu.add_checkbutton(label=label, underline=underline,
- command=command, accelerator=accelerator,
- variable=var)
- else:
- menu.add_command(label=label, underline=underline,
- command=command,
- accelerator=accelerator)
-
- def getvar(self, name):
- var = self.get_var_obj(name)
- if var:
- value = var.get()
- return value
- else:
- raise NameError, name
-
- def setvar(self, name, value, vartype=None):
- var = self.get_var_obj(name, vartype)
- if var:
- var.set(value)
- else:
- raise NameError, name
-
- def get_var_obj(self, name, vartype=None):
- var = self.tkinter_vars.get(name)
- if not var and vartype:
- # create a Tkinter variable object with self.text as master:
- self.tkinter_vars[name] = var = vartype(self.text)
- return var
-
- # Tk implementations of "virtual text methods" -- each platform
- # reusing IDLE's support code needs to define these for its GUI's
- # flavor of widget.
-
- # Is character at text_index in a Python string? Return 0 for
- # "guaranteed no", true for anything else. This info is expensive
- # to compute ab initio, but is probably already known by the
- # platform's colorizer.
-
- def is_char_in_string(self, text_index):
- if self.color:
- # Return true iff colorizer hasn't (re)gotten this far
- # yet, or the character is tagged as being in a string
- return self.text.tag_prevrange("TODO", text_index) or \
- "STRING" in self.text.tag_names(text_index)
- else:
- # The colorizer is missing: assume the worst
- return 1
-
- # If a selection is defined in the text widget, return (start,
- # end) as Tkinter text indices, otherwise return (None, None)
- def get_selection_indices(self):
- try:
- first = self.text.index("sel.first")
- last = self.text.index("sel.last")
- return first, last
- except TclError:
- return None, None
-
- # Return the text widget's current view of what a tab stop means
- # (equivalent width in spaces).
-
- def get_tabwidth(self):
- current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
- return int(current)
-
- # Set the text widget's current view of what a tab stop means.
-
- def set_tabwidth(self, newtabwidth):
- text = self.text
- if self.get_tabwidth() != newtabwidth:
- pixels = text.tk.call("font", "measure", text["font"],
- "-displayof", text.master,
- "n" * newtabwidth)
- text.configure(tabs=pixels)
-
- # If ispythonsource and guess are true, guess a good value for
- # indentwidth based on file content (if possible), and if
- # indentwidth != tabwidth set usetabs false.
- # In any case, adjust the Text widget's view of what a tab
- # character means.
-
- def set_indentation_params(self, ispythonsource, guess=True):
- if guess and ispythonsource:
- i = self.guess_indent()
- if 2 <= i <= 8:
- self.indentwidth = i
- if self.indentwidth != self.tabwidth:
- self.usetabs = False
- self.set_tabwidth(self.tabwidth)
-
- def smart_backspace_event(self, event):
- text = self.text
- first, last = self.get_selection_indices()
- if first and last:
- text.delete(first, last)
- text.mark_set("insert", first)
- return "break"
- # Delete whitespace left, until hitting a real char or closest
- # preceding virtual tab stop.
- chars = text.get("insert linestart", "insert")
- if chars == '':
- if text.compare("insert", ">", "1.0"):
- # easy: delete preceding newline
- text.delete("insert-1c")
- else:
- text.bell() # at start of buffer
- return "break"
- if chars[-1] not in " \t":
- # easy: delete preceding real char
- text.delete("insert-1c")
- return "break"
- # Ick. It may require *inserting* spaces if we back up over a
- # tab character! This is written to be clear, not fast.
- tabwidth = self.tabwidth
- have = len(chars.expandtabs(tabwidth))
- assert have > 0
- want = ((have - 1) // self.indentwidth) * self.indentwidth
- # Debug prompt is multilined....
- last_line_of_prompt = sys.ps1.split('\n')[-1]
- ncharsdeleted = 0
- while 1:
- if chars == last_line_of_prompt:
- break
- chars = chars[:-1]
- ncharsdeleted = ncharsdeleted + 1
- have = len(chars.expandtabs(tabwidth))
- if have <= want or chars[-1] not in " \t":
- break
- text.undo_block_start()
- text.delete("insert-%dc" % ncharsdeleted, "insert")
- if have < want:
- text.insert("insert", ' ' * (want - have))
- text.undo_block_stop()
- return "break"
-
- def smart_indent_event(self, event):
- # if intraline selection:
- # delete it
- # elif multiline selection:
- # do indent-region
- # else:
- # indent one level
- text = self.text
- first, last = self.get_selection_indices()
- text.undo_block_start()
- try:
- if first and last:
- if index2line(first) != index2line(last):
- return self.indent_region_event(event)
- text.delete(first, last)
- text.mark_set("insert", first)
- prefix = text.get("insert linestart", "insert")
- raw, effective = classifyws(prefix, self.tabwidth)
- if raw == len(prefix):
- # only whitespace to the left
- self.reindent_to(effective + self.indentwidth)
- else:
- # tab to the next 'stop' within or to right of line's text:
- if self.usetabs:
- pad = '\t'
- else:
- effective = len(prefix.expandtabs(self.tabwidth))
- n = self.indentwidth
- pad = ' ' * (n - effective % n)
- text.insert("insert", pad)
- text.see("insert")
- return "break"
- finally:
- text.undo_block_stop()
-
- def newline_and_indent_event(self, event):
- text = self.text
- first, last = self.get_selection_indices()
- text.undo_block_start()
- try:
- if first and last:
- text.delete(first, last)
- text.mark_set("insert", first)
- line = text.get("insert linestart", "insert")
- i, n = 0, len(line)
- while i < n and line[i] in " \t":
- i = i+1
- if i == n:
- # the cursor is in or at leading indentation in a continuation
- # line; just inject an empty line at the start
- text.insert("insert linestart", '\n')
- return "break"
- indent = line[:i]
- # strip whitespace before insert point unless it's in the prompt
- i = 0
- last_line_of_prompt = sys.ps1.split('\n')[-1]
- while line and line[-1] in " \t" and line != last_line_of_prompt:
- line = line[:-1]
- i = i+1
- if i:
- text.delete("insert - %d chars" % i, "insert")
- # strip whitespace after insert point
- while text.get("insert") in " \t":
- text.delete("insert")
- # start new line
- text.insert("insert", '\n')
-
- # adjust indentation for continuations and block
- # open/close first need to find the last stmt
- lno = index2line(text.index('insert'))
- y = PyParse.Parser(self.indentwidth, self.tabwidth)
- if not self.context_use_ps1:
- for context in self.num_context_lines:
- startat = max(lno - context, 1)
- startatindex = `startat` + ".0"
- rawtext = text.get(startatindex, "insert")
- y.set_str(rawtext)
- bod = y.find_good_parse_start(
- self.context_use_ps1,
- self._build_char_in_string_func(startatindex))
- if bod is not None or startat == 1:
- break
- y.set_lo(bod or 0)
- else:
- r = text.tag_prevrange("console", "insert")
- if r:
- startatindex = r[1]
- else:
- startatindex = "1.0"
- rawtext = text.get(startatindex, "insert")
- y.set_str(rawtext)
- y.set_lo(0)
-
- c = y.get_continuation_type()
- if c != PyParse.C_NONE:
- # The current stmt hasn't ended yet.
- if c == PyParse.C_STRING_FIRST_LINE:
- # after the first line of a string; do not indent at all
- pass
- elif c == PyParse.C_STRING_NEXT_LINES:
- # inside a string which started before this line;
- # just mimic the current indent
- text.insert("insert", indent)
- elif c == PyParse.C_BRACKET:
- # line up with the first (if any) element of the
- # last open bracket structure; else indent one
- # level beyond the indent of the line with the
- # last open bracket
- self.reindent_to(y.compute_bracket_indent())
- elif c == PyParse.C_BACKSLASH:
- # if more than one line in this stmt already, just
- # mimic the current indent; else if initial line
- # has a start on an assignment stmt, indent to
- # beyond leftmost =; else to beyond first chunk of
- # non-whitespace on initial line
- if y.get_num_lines_in_stmt() > 1:
- text.insert("insert", indent)
- else:
- self.reindent_to(y.compute_backslash_indent())
- else:
- assert 0, "bogus continuation type %r" % (c,)
- return "break"
-
- # This line starts a brand new stmt; indent relative to
- # indentation of initial line of closest preceding
- # interesting stmt.
- indent = y.get_base_indent_string()
- text.insert("insert", indent)
- if y.is_block_opener():
- self.smart_indent_event(event)
- elif indent and y.is_block_closer():
- self.smart_backspace_event(event)
- return "break"
- finally:
- text.see("insert")
- text.undo_block_stop()
-
- # Our editwin provides a is_char_in_string function that works
- # with a Tk text index, but PyParse only knows about offsets into
- # a string. This builds a function for PyParse that accepts an
- # offset.
-
- def _build_char_in_string_func(self, startindex):
- def inner(offset, _startindex=startindex,
- _icis=self.is_char_in_string):
- return _icis(_startindex + "+%dc" % offset)
- return inner
-
- def indent_region_event(self, event):
- head, tail, chars, lines = self.get_region()
- for pos in range(len(lines)):
- line = lines[pos]
- if line:
- raw, effective = classifyws(line, self.tabwidth)
- effective = effective + self.indentwidth
- lines[pos] = self._make_blanks(effective) + line[raw:]
- self.set_region(head, tail, chars, lines)
- return "break"
-
- def dedent_region_event(self, event):
- head, tail, chars, lines = self.get_region()
- for pos in range(len(lines)):
- line = lines[pos]
- if line:
- raw, effective = classifyws(line, self.tabwidth)
- effective = max(effective - self.indentwidth, 0)
- lines[pos] = self._make_blanks(effective) + line[raw:]
- self.set_region(head, tail, chars, lines)
- return "break"
-
- def comment_region_event(self, event):
- head, tail, chars, lines = self.get_region()
- for pos in range(len(lines) - 1):
- line = lines[pos]
- lines[pos] = '##' + line
- self.set_region(head, tail, chars, lines)
-
- def uncomment_region_event(self, event):
- head, tail, chars, lines = self.get_region()
- for pos in range(len(lines)):
- line = lines[pos]
- if not line:
- continue
- if line[:2] == '##':
- line = line[2:]
- elif line[:1] == '#':
- line = line[1:]
- lines[pos] = line
- self.set_region(head, tail, chars, lines)
-
- def tabify_region_event(self, event):
- head, tail, chars, lines = self.get_region()
- tabwidth = self._asktabwidth()
- for pos in range(len(lines)):
- line = lines[pos]
- if line:
- raw, effective = classifyws(line, tabwidth)
- ntabs, nspaces = divmod(effective, tabwidth)
- lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
- self.set_region(head, tail, chars, lines)
-
- def untabify_region_event(self, event):
- head, tail, chars, lines = self.get_region()
- tabwidth = self._asktabwidth()
- for pos in range(len(lines)):
- lines[pos] = lines[pos].expandtabs(tabwidth)
- self.set_region(head, tail, chars, lines)
-
- def toggle_tabs_event(self, event):
- if self.askyesno(
- "Toggle tabs",
- "Turn tabs " + ("on", "off")[self.usetabs] +
- "?\nIndent width " +
- ("will be", "remains at")[self.usetabs] + " 8." +
- "\n Note: a tab is always 8 columns",
- parent=self.text):
- self.usetabs = not self.usetabs
- # Try to prevent inconsistent indentation.
- # User must change indent width manually after using tabs.
- self.indentwidth = 8
- return "break"
-
- # XXX this isn't bound to anything -- see tabwidth comments
-## def change_tabwidth_event(self, event):
-## new = self._asktabwidth()
-## if new != self.tabwidth:
-## self.tabwidth = new
-## self.set_indentation_params(0, guess=0)
-## return "break"
-
- def change_indentwidth_event(self, event):
- new = self.askinteger(
- "Indent width",
- "New indent width (2-16)\n(Always use 8 when using tabs)",
- parent=self.text,
- initialvalue=self.indentwidth,
- minvalue=2,
- maxvalue=16)
- if new and new != self.indentwidth and not self.usetabs:
- self.indentwidth = new
- return "break"
-
- def get_region(self):
- text = self.text
- first, last = self.get_selection_indices()
- if first and last:
- head = text.index(first + " linestart")
- tail = text.index(last + "-1c lineend +1c")
- else:
- head = text.index("insert linestart")
- tail = text.index("insert lineend +1c")
- chars = text.get(head, tail)
- lines = chars.split("\n")
- return head, tail, chars, lines
-
- def set_region(self, head, tail, chars, lines):
- text = self.text
- newchars = "\n".join(lines)
- if newchars == chars:
- text.bell()
- return
- text.tag_remove("sel", "1.0", "end")
- text.mark_set("insert", head)
- text.undo_block_start()
- text.delete(head, tail)
- text.insert(head, newchars)
- text.undo_block_stop()
- text.tag_add("sel", head, "insert")
-
- # Make string that displays as n leading blanks.
-
- def _make_blanks(self, n):
- if self.usetabs:
- ntabs, nspaces = divmod(n, self.tabwidth)
- return '\t' * ntabs + ' ' * nspaces
- else:
- return ' ' * n
-
- # Delete from beginning of line to insert point, then reinsert
- # column logical (meaning use tabs if appropriate) spaces.
-
- def reindent_to(self, column):
- text = self.text
- text.undo_block_start()
- if text.compare("insert linestart", "!=", "insert"):
- text.delete("insert linestart", "insert")
- if column:
- text.insert("insert", self._make_blanks(column))
- text.undo_block_stop()
-
- def _asktabwidth(self):
- return self.askinteger(
- "Tab width",
- "Columns per tab? (2-16)",
- parent=self.text,
- initialvalue=self.indentwidth,
- minvalue=2,
- maxvalue=16) or self.tabwidth
-
- # Guess indentwidth from text content.
- # Return guessed indentwidth. This should not be believed unless
- # it's in a reasonable range (e.g., it will be 0 if no indented
- # blocks are found).
-
- def guess_indent(self):
- opener, indented = IndentSearcher(self.text, self.tabwidth).run()
- if opener and indented:
- raw, indentsmall = classifyws(opener, self.tabwidth)
- raw, indentlarge = classifyws(indented, self.tabwidth)
- else:
- indentsmall = indentlarge = 0
- return indentlarge - indentsmall
-
-# "line.col" -> line, as an int
-def index2line(index):
- return int(float(index))
-
-# Look at the leading whitespace in s.
-# Return pair (# of leading ws characters,
-# effective # of leading blanks after expanding
-# tabs to width tabwidth)
-
-def classifyws(s, tabwidth):
- raw = effective = 0
- for ch in s:
- if ch == ' ':
- raw = raw + 1
- effective = effective + 1
- elif ch == '\t':
- raw = raw + 1
- effective = (effective // tabwidth + 1) * tabwidth
- else:
- break
- return raw, effective
-
-import tokenize
-_tokenize = tokenize
-del tokenize
-
-class IndentSearcher(object):
-
- # .run() chews over the Text widget, looking for a block opener
- # and the stmt following it. Returns a pair,
- # (line containing block opener, line containing stmt)
- # Either or both may be None.
-
- def __init__(self, text, tabwidth):
- self.text = text
- self.tabwidth = tabwidth
- self.i = self.finished = 0
- self.blkopenline = self.indentedline = None
-
- def readline(self):
- if self.finished:
- return ""
- i = self.i = self.i + 1
- mark = repr(i) + ".0"
- if self.text.compare(mark, ">=", "end"):
- return ""
- return self.text.get(mark, mark + " lineend+1c")
-
- def tokeneater(self, type, token, start, end, line,
- INDENT=_tokenize.INDENT,
- NAME=_tokenize.NAME,
- OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
- if self.finished:
- pass
- elif type == NAME and token in OPENERS:
- self.blkopenline = line
- elif type == INDENT and self.blkopenline:
- self.indentedline = line
- self.finished = 1
-
- def run(self):
- save_tabsize = _tokenize.tabsize
- _tokenize.tabsize = self.tabwidth
- try:
- try:
- _tokenize.tokenize(self.readline, self.tokeneater)
- except _tokenize.TokenError:
- # since we cut off the tokenizer early, we can trigger
- # spurious errors
- pass
- finally:
- _tokenize.tabsize = save_tabsize
- return self.blkopenline, self.indentedline
-
-### end autoindent code ###
-
-def prepstr(s):
- # Helper to extract the underscore from a string, e.g.
- # prepstr("Co_py") returns (2, "Copy").
- i = s.find('_')
- if i >= 0:
- s = s[:i] + s[i+1:]
- return i, s
-
-
-keynames = {
- 'bracketleft': '[',
- 'bracketright': ']',
- 'slash': '/',
-}
-
-def get_accelerator(keydefs, eventname):
- keylist = keydefs.get(eventname)
- if not keylist:
- return ""
- s = keylist[0]
- s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
- s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
- s = re.sub("Key-", "", s)
- s = re.sub("Cancel","Ctrl-Break",s) # dscherer@cmu.edu
- s = re.sub("Control-", "Ctrl-", s)
- s = re.sub("-", "+", s)
- s = re.sub("><", " ", s)
- s = re.sub("<", "", s)
- s = re.sub(">", "", s)
- return s
-
-
-def fixwordbreaks(root):
- # Make sure that Tk's double-click and next/previous word
- # operations use our definition of a word (i.e. an identifier)
- tk = root.tk
- tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
- tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
- tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
-
-
-def test():
- root = Tk()
- fixwordbreaks(root)
- root.withdraw()
- if sys.argv[1:]:
- filename = sys.argv[1]
- else:
- filename = None
- edit = EditorWindow(root=root, filename=filename)
- edit.set_close_hook(root.quit)
- root.mainloop()
- root.destroy()
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/idlelib/FileList.py b/sys/lib/python/idlelib/FileList.py
deleted file mode 100644
index 4b5790102..000000000
--- a/sys/lib/python/idlelib/FileList.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import os
-from Tkinter import *
-import tkMessageBox
-
-
-class FileList:
-
- from EditorWindow import EditorWindow # class variable, may be overridden
- # e.g. by PyShellFileList
-
- def __init__(self, root):
- self.root = root
- self.dict = {}
- self.inversedict = {}
- self.vars = {} # For EditorWindow.getrawvar (shared Tcl variables)
-
- def open(self, filename, action=None):
- assert filename
- filename = self.canonize(filename)
- if os.path.isdir(filename):
- # This can happen when bad filename is passed on command line:
- tkMessageBox.showerror(
- "File Error",
- "%r is a directory." % (filename,),
- master=self.root)
- return None
- key = os.path.normcase(filename)
- if self.dict.has_key(key):
- edit = self.dict[key]
- edit.top.wakeup()
- return edit
- if action:
- # Don't create window, perform 'action', e.g. open in same window
- return action(filename)
- else:
- return self.EditorWindow(self, filename, key)
-
- def gotofileline(self, filename, lineno=None):
- edit = self.open(filename)
- if edit is not None and lineno is not None:
- edit.gotoline(lineno)
-
- def new(self, filename=None):
- return self.EditorWindow(self, filename)
-
- def close_all_callback(self, event):
- for edit in self.inversedict.keys():
- reply = edit.close()
- if reply == "cancel":
- break
- return "break"
-
- def close_edit(self, edit):
- try:
- key = self.inversedict[edit]
- except KeyError:
- print "Don't know this EditorWindow object. (close)"
- return
- if key:
- del self.dict[key]
- del self.inversedict[edit]
- if not self.inversedict:
- self.root.quit()
-
- def filename_changed_edit(self, edit):
- edit.saved_change_hook()
- try:
- key = self.inversedict[edit]
- except KeyError:
- print "Don't know this EditorWindow object. (rename)"
- return
- filename = edit.io.filename
- if not filename:
- if key:
- del self.dict[key]
- self.inversedict[edit] = None
- return
- filename = self.canonize(filename)
- newkey = os.path.normcase(filename)
- if newkey == key:
- return
- if self.dict.has_key(newkey):
- conflict = self.dict[newkey]
- self.inversedict[conflict] = None
- tkMessageBox.showerror(
- "Name Conflict",
- "You now have multiple edit windows open for %r" % (filename,),
- master=self.root)
- self.dict[newkey] = edit
- self.inversedict[edit] = newkey
- if key:
- try:
- del self.dict[key]
- except KeyError:
- pass
-
- def canonize(self, filename):
- if not os.path.isabs(filename):
- try:
- pwd = os.getcwd()
- except os.error:
- pass
- else:
- filename = os.path.join(pwd, filename)
- return os.path.normpath(filename)
-
-
-def _test():
- from EditorWindow import fixwordbreaks
- import sys
- root = Tk()
- fixwordbreaks(root)
- root.withdraw()
- flist = FileList(root)
- if sys.argv[1:]:
- for filename in sys.argv[1:]:
- flist.open(filename)
- else:
- flist.new()
- if flist.inversedict:
- root.mainloop()
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/idlelib/FormatParagraph.py b/sys/lib/python/idlelib/FormatParagraph.py
deleted file mode 100644
index ed9f28d12..000000000
--- a/sys/lib/python/idlelib/FormatParagraph.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Extension to format a paragraph
-
-# Does basic, standard text formatting, and also understands Python
-# comment blocks. Thus, for editing Python source code, this
-# extension is really only suitable for reformatting these comment
-# blocks or triple-quoted strings.
-
-# Known problems with comment reformatting:
-# * If there is a selection marked, and the first line of the
-# selection is not complete, the block will probably not be detected
-# as comments, and will have the normal "text formatting" rules
-# applied.
-# * If a comment block has leading whitespace that mixes tabs and
-# spaces, they will not be considered part of the same block.
-# * Fancy comments, like this bulleted list, arent handled :-)
-
-import re
-from configHandler import idleConf
-
-class FormatParagraph:
-
- menudefs = [
- ('format', [ # /s/edit/format dscherer@cmu.edu
- ('Format Paragraph', '<<format-paragraph>>'),
- ])
- ]
-
- def __init__(self, editwin):
- self.editwin = editwin
-
- def close(self):
- self.editwin = None
-
- def format_paragraph_event(self, event):
- maxformatwidth = int(idleConf.GetOption('main','FormatParagraph','paragraph'))
- text = self.editwin.text
- first, last = self.editwin.get_selection_indices()
- if first and last:
- data = text.get(first, last)
- comment_header = ''
- else:
- first, last, comment_header, data = \
- find_paragraph(text, text.index("insert"))
- if comment_header:
- # Reformat the comment lines - convert to text sans header.
- lines = data.split("\n")
- lines = map(lambda st, l=len(comment_header): st[l:], lines)
- data = "\n".join(lines)
- # Reformat to maxformatwidth chars or a 20 char width, whichever is greater.
- format_width = max(maxformatwidth - len(comment_header), 20)
- newdata = reformat_paragraph(data, format_width)
- # re-split and re-insert the comment header.
- newdata = newdata.split("\n")
- # If the block ends in a \n, we dont want the comment
- # prefix inserted after it. (Im not sure it makes sense to
- # reformat a comment block that isnt made of complete
- # lines, but whatever!) Can't think of a clean soltution,
- # so we hack away
- block_suffix = ""
- if not newdata[-1]:
- block_suffix = "\n"
- newdata = newdata[:-1]
- builder = lambda item, prefix=comment_header: prefix+item
- newdata = '\n'.join(map(builder, newdata)) + block_suffix
- else:
- # Just a normal text format
- newdata = reformat_paragraph(data, maxformatwidth)
- text.tag_remove("sel", "1.0", "end")
- if newdata != data:
- text.mark_set("insert", first)
- text.undo_block_start()
- text.delete(first, last)
- text.insert(first, newdata)
- text.undo_block_stop()
- else:
- text.mark_set("insert", last)
- text.see("insert")
-
-def find_paragraph(text, mark):
- lineno, col = map(int, mark.split("."))
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
- while text.compare("%d.0" % lineno, "<", "end") and is_all_white(line):
- lineno = lineno + 1
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
- first_lineno = lineno
- comment_header = get_comment_header(line)
- comment_header_len = len(comment_header)
- while get_comment_header(line)==comment_header and \
- not is_all_white(line[comment_header_len:]):
- lineno = lineno + 1
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
- last = "%d.0" % lineno
- # Search back to beginning of paragraph
- lineno = first_lineno - 1
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
- while lineno > 0 and \
- get_comment_header(line)==comment_header and \
- not is_all_white(line[comment_header_len:]):
- lineno = lineno - 1
- line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno)
- first = "%d.0" % (lineno+1)
- return first, last, comment_header, text.get(first, last)
-
-def reformat_paragraph(data, limit):
- lines = data.split("\n")
- i = 0
- n = len(lines)
- while i < n and is_all_white(lines[i]):
- i = i+1
- if i >= n:
- return data
- indent1 = get_indent(lines[i])
- if i+1 < n and not is_all_white(lines[i+1]):
- indent2 = get_indent(lines[i+1])
- else:
- indent2 = indent1
- new = lines[:i]
- partial = indent1
- while i < n and not is_all_white(lines[i]):
- # XXX Should take double space after period (etc.) into account
- words = re.split("(\s+)", lines[i])
- for j in range(0, len(words), 2):
- word = words[j]
- if not word:
- continue # Can happen when line ends in whitespace
- if len((partial + word).expandtabs()) > limit and \
- partial != indent1:
- new.append(partial.rstrip())
- partial = indent2
- partial = partial + word + " "
- if j+1 < len(words) and words[j+1] != " ":
- partial = partial + " "
- i = i+1
- new.append(partial.rstrip())
- # XXX Should reformat remaining paragraphs as well
- new.extend(lines[i:])
- return "\n".join(new)
-
-def is_all_white(line):
- return re.match(r"^\s*$", line) is not None
-
-def get_indent(line):
- return re.match(r"^(\s*)", line).group()
-
-def get_comment_header(line):
- m = re.match(r"^(\s*#*)", line)
- if m is None: return ""
- return m.group(1)
diff --git a/sys/lib/python/idlelib/GrepDialog.py b/sys/lib/python/idlelib/GrepDialog.py
deleted file mode 100644
index ab136bc11..000000000
--- a/sys/lib/python/idlelib/GrepDialog.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import os
-import fnmatch
-import sys
-from Tkinter import *
-import SearchEngine
-from SearchDialogBase import SearchDialogBase
-
-def grep(text, io=None, flist=None):
- root = text._root()
- engine = SearchEngine.get(root)
- if not hasattr(engine, "_grepdialog"):
- engine._grepdialog = GrepDialog(root, engine, flist)
- dialog = engine._grepdialog
- searchphrase = text.get("sel.first", "sel.last")
- dialog.open(text, searchphrase, io)
-
-class GrepDialog(SearchDialogBase):
-
- title = "Find in Files Dialog"
- icon = "Grep"
- needwrapbutton = 0
-
- def __init__(self, root, engine, flist):
- SearchDialogBase.__init__(self, root, engine)
- self.flist = flist
- self.globvar = StringVar(root)
- self.recvar = BooleanVar(root)
-
- def open(self, text, searchphrase, io=None):
- SearchDialogBase.open(self, text, searchphrase)
- if io:
- path = io.filename or ""
- else:
- path = ""
- dir, base = os.path.split(path)
- head, tail = os.path.splitext(base)
- if not tail:
- tail = ".py"
- self.globvar.set(os.path.join(dir, "*" + tail))
-
- def create_entries(self):
- SearchDialogBase.create_entries(self)
- self.globent = self.make_entry("In files:", self.globvar)
-
- def create_other_buttons(self):
- f = self.make_frame()
-
- btn = Checkbutton(f, anchor="w",
- variable=self.recvar,
- text="Recurse down subdirectories")
- btn.pack(side="top", fill="both")
- btn.select()
-
- def create_command_buttons(self):
- SearchDialogBase.create_command_buttons(self)
- self.make_button("Search Files", self.default_command, 1)
-
- def default_command(self, event=None):
- prog = self.engine.getprog()
- if not prog:
- return
- path = self.globvar.get()
- if not path:
- self.top.bell()
- return
- from OutputWindow import OutputWindow
- save = sys.stdout
- try:
- sys.stdout = OutputWindow(self.flist)
- self.grep_it(prog, path)
- finally:
- sys.stdout = save
-
- def grep_it(self, prog, path):
- dir, base = os.path.split(path)
- list = self.findfiles(dir, base, self.recvar.get())
- list.sort()
- self.close()
- pat = self.engine.getpat()
- print "Searching %r in %s ..." % (pat, path)
- hits = 0
- for fn in list:
- try:
- f = open(fn)
- except IOError, msg:
- print msg
- continue
- lineno = 0
- while 1:
- block = f.readlines(100000)
- if not block:
- break
- for line in block:
- lineno = lineno + 1
- if line[-1:] == '\n':
- line = line[:-1]
- if prog.search(line):
- sys.stdout.write("%s: %s: %s\n" % (fn, lineno, line))
- hits = hits + 1
- if hits:
- if hits == 1:
- s = ""
- else:
- s = "s"
- print "Found", hits, "hit%s." % s
- print "(Hint: right-click to open locations.)"
- else:
- print "No hits."
-
- def findfiles(self, dir, base, rec):
- try:
- names = os.listdir(dir or os.curdir)
- except os.error, msg:
- print msg
- return []
- list = []
- subdirs = []
- for name in names:
- fn = os.path.join(dir, name)
- if os.path.isdir(fn):
- subdirs.append(fn)
- else:
- if fnmatch.fnmatch(name, base):
- list.append(fn)
- if rec:
- for subdir in subdirs:
- list.extend(self.findfiles(subdir, base, rec))
- return list
-
- def close(self, event=None):
- if self.top:
- self.top.grab_release()
- self.top.withdraw()
diff --git a/sys/lib/python/idlelib/HISTORY.txt b/sys/lib/python/idlelib/HISTORY.txt
deleted file mode 100644
index c0faaad87..000000000
--- a/sys/lib/python/idlelib/HISTORY.txt
+++ /dev/null
@@ -1,296 +0,0 @@
-IDLE History
-============
-
-This file contains the release messages for previous IDLE releases.
-As you read on you go back to the dark ages of IDLE's history.
-
-
-What's New in IDLEfork 0.8.1?
-=============================
-
-*Release date: 22-Jul-2001*
-
-- New tarball released as a result of the 'revitalisation' of the IDLEfork
- project.
-
-- This release requires python 2.1 or better. Compatability with earlier
- versions of python (especially ancient ones like 1.5x) is no longer a
- priority in IDLEfork development.
-
-- This release is based on a merging of the earlier IDLE fork work with current
- cvs IDLE (post IDLE version 0.8), with some minor additional coding by Kurt
- B. Kaiser and Stephen M. Gava.
-
-- This release is basically functional but also contains some known breakages,
- particularly with running things from the shell window. Also the debugger is
- not working, but I believe this was the case with the previous IDLE fork
- release (0.7.1) as well.
-
-- This release is being made now to mark the point at which IDLEfork is
- launching into a new stage of development.
-
-- IDLEfork CVS will now be branched to enable further development and
- exploration of the two "execution in a remote process" patches submitted by
- David Scherer (David's is currently in IDLEfork) and GvR, while stabilisation
- and development of less heavyweight improvements (like user customisation)
- can continue on the trunk.
-
-
-What's New in IDLEfork 0.7.1?
-==============================
-
-*Release date: 15-Aug-2000*
-
-- First project tarball released.
-
-- This was the first release of IDLE fork, which at this stage was a
- combination of IDLE 0.5 and the VPython idle fork, with additional changes
- coded by David Scherer, Peter Schneider-Kamp and Nicholas Riley.
-
-
-
-IDLEfork 0.7.1 - 29 May 2000
------------------------------
-
- David Scherer <dscherer@cmu.edu>
-
-- This is a modification of the CVS version of IDLE 0.5, updated as of
- 2000-03-09. It is alpha software and might be unstable. If it breaks, you
- get to keep both pieces.
-
-- If you have problems or suggestions, you should either contact me or post to
- the list at http://www.python.org/mailman/listinfo/idle-dev (making it clear
- that you are using this modified version of IDLE).
-
-- Changes:
-
- - The ExecBinding module, a replacement for ScriptBinding, executes programs
- in a separate process, piping standard I/O through an RPC mechanism to an
- OnDemandOutputWindow in IDLE. It supports executing unnamed programs
- (through a temporary file). It does not yet support debugging.
-
- - When running programs with ExecBinding, tracebacks will be clipped to
- exclude system modules. If, however, a system module calls back into the
- user program, that part of the traceback will be shown.
-
- - The OnDemandOutputWindow class has been improved. In particular, it now
- supports a readline() function used to implement user input, and a
- scroll_clear() operation which is used to hide the output of a previous run
- by scrolling it out of the window.
-
- - Startup behavior has been changed. By default IDLE starts up with just a
- blank editor window, rather than an interactive window. Opening a file in
- such a blank window replaces the (nonexistent) contents of that window
- instead of creating another window. Because of the need to have a
- well-known port for the ExecBinding protocol, only one copy of IDLE can be
- running. Additional invocations use the RPC mechanism to report their
- command line arguments to the copy already running.
-
- - The menus have been reorganized. In particular, the excessively large
- 'edit' menu has been split up into 'edit', 'format', and 'run'.
-
- - 'Python Documentation' now works on Windows, if the win32api module is
- present.
-
- - A few key bindings have been changed: F1 now loads Python Documentation
- instead of the IDLE help; shift-TAB is now a synonym for unindent.
-
-- New modules:
-
- ExecBinding.py Executes program through loader
- loader.py Bootstraps user program
- protocol.py RPC protocol
- Remote.py User-process interpreter
- spawn.py OS-specific code to start programs
-
-- Files modified:
-
- autoindent.py ( bindings tweaked )
- bindings.py ( menus reorganized )
- config.txt ( execbinding enabled )
- editorwindow.py ( new menus, fixed 'Python Documentation' )
- filelist.py ( hook for "open in same window" )
- formatparagraph.py ( bindings tweaked )
- idle.bat ( removed absolute pathname )
- idle.pyw ( weird bug due to import with same name? )
- iobinding.py ( open in same window, EOL convention )
- keydefs.py ( bindings tweaked )
- outputwindow.py ( readline, scroll_clear, etc )
- pyshell.py ( changed startup behavior )
- readme.txt ( <Recursion on file with id=1234567> )
-
-
-
-IDLE 0.5 - February 2000 - Release Notes
-----------------------------------------
-
-This is an early release of IDLE, my own attempt at a Tkinter-based
-IDE for Python.
-
-(For a more detailed change log, see the file ChangeLog.)
-
-FEATURES
-
-IDLE has the following features:
-
-- coded in 100% pure Python, using the Tkinter GUI toolkit (i.e. Tcl/Tk)
-
-- cross-platform: works on Windows and Unix (on the Mac, there are
-currently problems with Tcl/Tk)
-
-- multi-window text editor with multiple undo, Python colorizing
-and many other features, e.g. smart indent and call tips
-
-- Python shell window (a.k.a. interactive interpreter)
-
-- debugger (not complete, but you can set breakpoints, view and step)
-
-USAGE
-
-The main program is in the file "idle.py"; on Unix, you should be able
-to run it by typing "./idle.py" to your shell. On Windows, you can
-run it by double-clicking it; you can use idle.pyw to avoid popping up
-a DOS console. If you want to pass command line arguments on Windows,
-use the batch file idle.bat.
-
-Command line arguments: files passed on the command line are executed,
-not opened for editing, unless you give the -e command line option.
-Try "./idle.py -h" to see other command line options.
-
-IDLE requires Python 1.5.2, so it is currently only usable with a
-Python 1.5.2 distribution. (An older version of IDLE is distributed
-with Python 1.5.2; you can drop this version on top of it.)
-
-COPYRIGHT
-
-IDLE is covered by the standard Python copyright notice
-(http://www.python.org/doc/Copyright.html).
-
-
-New in IDLE 0.5 (2/15/2000)
----------------------------
-
-Tons of stuff, much of it contributed by Tim Peters and Mark Hammond:
-
-- Status bar, displaying current line/column (Moshe Zadka).
-
-- Better stack viewer, using tree widget. (XXX Only used by Stack
-Viewer menu, not by the debugger.)
-
-- Format paragraph now recognizes Python block comments and reformats
-them correctly (MH)
-
-- New version of pyclbr.py parses top-level functions and understands
-much more of Python's syntax; this is reflected in the class and path
-browsers (TP)
-
-- Much better auto-indent; knows how to indent the insides of
-multi-line statements (TP)
-
-- Call tip window pops up when you type the name of a known function
-followed by an open parenthesis. Hit ESC or click elsewhere in the
-window to close the tip window (MH)
-
-- Comment out region now inserts ## to make it stand out more (TP)
-
-- New path and class browsers based on a tree widget that looks
-familiar to Windows users
-
-- Reworked script running commands to be more intuitive: I/O now
-always goes to the *Python Shell* window, and raw_input() works
-correctly. You use F5 to import/reload a module: this adds the module
-name to the __main__ namespace. You use Control-F5 to run a script:
-this runs the script *in* the __main__ namespace. The latter also
-sets sys.argv[] to the script name
-
-
-New in IDLE 0.4 (4/7/99)
-------------------------
-
-Most important change: a new menu entry "File -> Path browser", shows
-a 4-column hierarchical browser which lets you browse sys.path,
-directories, modules, and classes. Yes, it's a superset of the Class
-browser menu entry. There's also a new internal module,
-MultiScrolledLists.py, which provides the framework for this dialog.
-
-
-New in IDLE 0.3 (2/17/99)
--------------------------
-
-Most important changes:
-
-- Enabled support for running a module, with or without the debugger.
-Output goes to a new window. Pressing F5 in a module is effectively a
-reload of that module; Control-F5 loads it under the debugger.
-
-- Re-enable tearing off the Windows menu, and make a torn-off Windows
-menu update itself whenever a window is opened or closed.
-
-- Menu items can now be have a checkbox (when the menu label starts
-with "!"); use this for the Debugger and "Auto-open stack viewer"
-(was: JIT stack viewer) menu items.
-
-- Added a Quit button to the Debugger API.
-
-- The current directory is explicitly inserted into sys.path.
-
-- Fix the debugger (when using Python 1.5.2b2) to use canonical
-filenames for breakpoints, so these actually work. (There's still a
-lot of work to be done to the management of breakpoints in the
-debugger though.)
-
-- Closing a window that is still colorizing now actually works.
-
-- Allow dragging of the separator between the two list boxes in the
-class browser.
-
-- Bind ESC to "close window" of the debugger, stack viewer and class
-browser. It removes the selection highlighting in regular text
-windows. (These are standard Windows conventions.)
-
-
-New in IDLE 0.2 (1/8/99)
-------------------------
-
-Lots of changes; here are the highlights:
-
-General:
-
-- You can now write and configure your own IDLE extension modules; see
-extend.txt.
-
-
-File menu:
-
-The command to open the Python shell window is now in the File menu.
-
-
-Edit menu:
-
-New Find dialog with more options; replace dialog; find in files dialog.
-
-Commands to tabify or untabify a region.
-
-Command to format a paragraph.
-
-
-Debug menu:
-
-JIT (Just-In-Time) stack viewer toggle -- if set, the stack viewer
-automaticall pops up when you get a traceback.
-
-Windows menu:
-
-Zoom height -- make the window full height.
-
-
-Help menu:
-
-The help text now show up in a regular window so you can search and
-even edit it if you like.
-
-
-
-IDLE 0.1 was distributed with the Python 1.5.2b1 release on 12/22/98.
-
-======================================================================
diff --git a/sys/lib/python/idlelib/HyperParser.py b/sys/lib/python/idlelib/HyperParser.py
deleted file mode 100644
index 519de74d1..000000000
--- a/sys/lib/python/idlelib/HyperParser.py
+++ /dev/null
@@ -1,241 +0,0 @@
-"""
-HyperParser
-===========
-This module defines the HyperParser class, which provides advanced parsing
-abilities for the ParenMatch and other extensions.
-The HyperParser uses PyParser. PyParser is intended mostly to give information
-on the proper indentation of code. HyperParser gives some information on the
-structure of code, used by extensions to help the user.
-"""
-
-import string
-import keyword
-import PyParse
-
-class HyperParser:
-
- def __init__(self, editwin, index):
- """Initialize the HyperParser to analyze the surroundings of the given
- index.
- """
-
- self.editwin = editwin
- self.text = text = editwin.text
-
- parser = PyParse.Parser(editwin.indentwidth, editwin.tabwidth)
-
- def index2line(index):
- return int(float(index))
- lno = index2line(text.index(index))
-
- if not editwin.context_use_ps1:
- for context in editwin.num_context_lines:
- startat = max(lno - context, 1)
- startatindex = `startat` + ".0"
- stopatindex = "%d.end" % lno
- # We add the newline because PyParse requires a newline at end.
- # We add a space so that index won't be at end of line, so that
- # its status will be the same as the char before it, if should.
- parser.set_str(text.get(startatindex, stopatindex)+' \n')
- bod = parser.find_good_parse_start(
- editwin._build_char_in_string_func(startatindex))
- if bod is not None or startat == 1:
- break
- parser.set_lo(bod or 0)
- else:
- r = text.tag_prevrange("console", index)
- if r:
- startatindex = r[1]
- else:
- startatindex = "1.0"
- stopatindex = "%d.end" % lno
- # We add the newline because PyParse requires a newline at end.
- # We add a space so that index won't be at end of line, so that
- # its status will be the same as the char before it, if should.
- parser.set_str(text.get(startatindex, stopatindex)+' \n')
- parser.set_lo(0)
-
- # We want what the parser has, except for the last newline and space.
- self.rawtext = parser.str[:-2]
- # As far as I can see, parser.str preserves the statement we are in,
- # so that stopatindex can be used to synchronize the string with the
- # text box indices.
- self.stopatindex = stopatindex
- self.bracketing = parser.get_last_stmt_bracketing()
- # find which pairs of bracketing are openers. These always correspond
- # to a character of rawtext.
- self.isopener = [i>0 and self.bracketing[i][1] > self.bracketing[i-1][1]
- for i in range(len(self.bracketing))]
-
- self.set_index(index)
-
- def set_index(self, index):
- """Set the index to which the functions relate. Note that it must be
- in the same statement.
- """
- indexinrawtext = \
- len(self.rawtext) - len(self.text.get(index, self.stopatindex))
- if indexinrawtext < 0:
- raise ValueError("The index given is before the analyzed statement")
- self.indexinrawtext = indexinrawtext
- # find the rightmost bracket to which index belongs
- self.indexbracket = 0
- while self.indexbracket < len(self.bracketing)-1 and \
- self.bracketing[self.indexbracket+1][0] < self.indexinrawtext:
- self.indexbracket += 1
- if self.indexbracket < len(self.bracketing)-1 and \
- self.bracketing[self.indexbracket+1][0] == self.indexinrawtext and \
- not self.isopener[self.indexbracket+1]:
- self.indexbracket += 1
-
- def is_in_string(self):
- """Is the index given to the HyperParser is in a string?"""
- # The bracket to which we belong should be an opener.
- # If it's an opener, it has to have a character.
- return self.isopener[self.indexbracket] and \
- self.rawtext[self.bracketing[self.indexbracket][0]] in ('"', "'")
-
- def is_in_code(self):
- """Is the index given to the HyperParser is in a normal code?"""
- return not self.isopener[self.indexbracket] or \
- self.rawtext[self.bracketing[self.indexbracket][0]] not in \
- ('#', '"', "'")
-
- def get_surrounding_brackets(self, openers='([{', mustclose=False):
- """If the index given to the HyperParser is surrounded by a bracket
- defined in openers (or at least has one before it), return the
- indices of the opening bracket and the closing bracket (or the
- end of line, whichever comes first).
- If it is not surrounded by brackets, or the end of line comes before
- the closing bracket and mustclose is True, returns None.
- """
- bracketinglevel = self.bracketing[self.indexbracket][1]
- before = self.indexbracket
- while not self.isopener[before] or \
- self.rawtext[self.bracketing[before][0]] not in openers or \
- self.bracketing[before][1] > bracketinglevel:
- before -= 1
- if before < 0:
- return None
- bracketinglevel = min(bracketinglevel, self.bracketing[before][1])
- after = self.indexbracket + 1
- while after < len(self.bracketing) and \
- self.bracketing[after][1] >= bracketinglevel:
- after += 1
-
- beforeindex = self.text.index("%s-%dc" %
- (self.stopatindex, len(self.rawtext)-self.bracketing[before][0]))
- if after >= len(self.bracketing) or \
- self.bracketing[after][0] > len(self.rawtext):
- if mustclose:
- return None
- afterindex = self.stopatindex
- else:
- # We are after a real char, so it is a ')' and we give the index
- # before it.
- afterindex = self.text.index("%s-%dc" %
- (self.stopatindex,
- len(self.rawtext)-(self.bracketing[after][0]-1)))
-
- return beforeindex, afterindex
-
- # This string includes all chars that may be in a white space
- _whitespace_chars = " \t\n\\"
- # This string includes all chars that may be in an identifier
- _id_chars = string.ascii_letters + string.digits + "_"
- # This string includes all chars that may be the first char of an identifier
- _id_first_chars = string.ascii_letters + "_"
-
- # Given a string and pos, return the number of chars in the identifier
- # which ends at pos, or 0 if there is no such one. Saved words are not
- # identifiers.
- def _eat_identifier(self, str, limit, pos):
- i = pos
- while i > limit and str[i-1] in self._id_chars:
- i -= 1
- if i < pos and (str[i] not in self._id_first_chars or \
- keyword.iskeyword(str[i:pos])):
- i = pos
- return pos - i
-
- def get_expression(self):
- """Return a string with the Python expression which ends at the given
- index, which is empty if there is no real one.
- """
- if not self.is_in_code():
- raise ValueError("get_expression should only be called if index "\
- "is inside a code.")
-
- rawtext = self.rawtext
- bracketing = self.bracketing
-
- brck_index = self.indexbracket
- brck_limit = bracketing[brck_index][0]
- pos = self.indexinrawtext
-
- last_identifier_pos = pos
- postdot_phase = True
-
- while 1:
- # Eat whitespaces, comments, and if postdot_phase is False - one dot
- while 1:
- if pos>brck_limit and rawtext[pos-1] in self._whitespace_chars:
- # Eat a whitespace
- pos -= 1
- elif not postdot_phase and \
- pos > brck_limit and rawtext[pos-1] == '.':
- # Eat a dot
- pos -= 1
- postdot_phase = True
- # The next line will fail if we are *inside* a comment, but we
- # shouldn't be.
- elif pos == brck_limit and brck_index > 0 and \
- rawtext[bracketing[brck_index-1][0]] == '#':
- # Eat a comment
- brck_index -= 2
- brck_limit = bracketing[brck_index][0]
- pos = bracketing[brck_index+1][0]
- else:
- # If we didn't eat anything, quit.
- break
-
- if not postdot_phase:
- # We didn't find a dot, so the expression end at the last
- # identifier pos.
- break
-
- ret = self._eat_identifier(rawtext, brck_limit, pos)
- if ret:
- # There is an identifier to eat
- pos = pos - ret
- last_identifier_pos = pos
- # Now, in order to continue the search, we must find a dot.
- postdot_phase = False
- # (the loop continues now)
-
- elif pos == brck_limit:
- # We are at a bracketing limit. If it is a closing bracket,
- # eat the bracket, otherwise, stop the search.
- level = bracketing[brck_index][1]
- while brck_index > 0 and bracketing[brck_index-1][1] > level:
- brck_index -= 1
- if bracketing[brck_index][0] == brck_limit:
- # We were not at the end of a closing bracket
- break
- pos = bracketing[brck_index][0]
- brck_index -= 1
- brck_limit = bracketing[brck_index][0]
- last_identifier_pos = pos
- if rawtext[pos] in "([":
- # [] and () may be used after an identifier, so we
- # continue. postdot_phase is True, so we don't allow a dot.
- pass
- else:
- # We can't continue after other types of brackets
- break
-
- else:
- # We've found an operator or something.
- break
-
- return rawtext[last_identifier_pos:self.indexinrawtext]
diff --git a/sys/lib/python/idlelib/IOBinding.py b/sys/lib/python/idlelib/IOBinding.py
deleted file mode 100644
index deeb5c527..000000000
--- a/sys/lib/python/idlelib/IOBinding.py
+++ /dev/null
@@ -1,584 +0,0 @@
-# changes by dscherer@cmu.edu
-# - IOBinding.open() replaces the current window with the opened file,
-# if the current window is both unmodified and unnamed
-# - IOBinding.loadfile() interprets Windows, UNIX, and Macintosh
-# end-of-line conventions, instead of relying on the standard library,
-# which will only understand the local convention.
-
-import os
-import types
-import sys
-import codecs
-import tempfile
-import tkFileDialog
-import tkMessageBox
-import re
-from Tkinter import *
-from SimpleDialog import SimpleDialog
-
-from configHandler import idleConf
-
-try:
- from codecs import BOM_UTF8
-except ImportError:
- # only available since Python 2.3
- BOM_UTF8 = '\xef\xbb\xbf'
-
-# Try setting the locale, so that we can find out
-# what encoding to use
-try:
- import locale
- locale.setlocale(locale.LC_CTYPE, "")
-except (ImportError, locale.Error):
- pass
-
-# Encoding for file names
-filesystemencoding = sys.getfilesystemencoding()
-
-encoding = "ascii"
-if sys.platform == 'win32':
- # On Windows, we could use "mbcs". However, to give the user
- # a portable encoding name, we need to find the code page
- try:
- encoding = locale.getdefaultlocale()[1]
- codecs.lookup(encoding)
- except LookupError:
- pass
-else:
- try:
- # Different things can fail here: the locale module may not be
- # loaded, it may not offer nl_langinfo, or CODESET, or the
- # resulting codeset may be unknown to Python. We ignore all
- # these problems, falling back to ASCII
- encoding = locale.nl_langinfo(locale.CODESET)
- if encoding is None or encoding is '':
- # situation occurs on Mac OS X
- encoding = 'ascii'
- codecs.lookup(encoding)
- except (NameError, AttributeError, LookupError):
- # Try getdefaultlocale well: it parses environment variables,
- # which may give a clue. Unfortunately, getdefaultlocale has
- # bugs that can cause ValueError.
- try:
- encoding = locale.getdefaultlocale()[1]
- if encoding is None or encoding is '':
- # situation occurs on Mac OS X
- encoding = 'ascii'
- codecs.lookup(encoding)
- except (ValueError, LookupError):
- pass
-
-encoding = encoding.lower()
-
-coding_re = re.compile("coding[:=]\s*([-\w_.]+)")
-
-class EncodingMessage(SimpleDialog):
- "Inform user that an encoding declaration is needed."
- def __init__(self, master, enc):
- self.should_edit = False
-
- self.root = top = Toplevel(master)
- top.bind("<Return>", self.return_event)
- top.bind("<Escape>", self.do_ok)
- top.protocol("WM_DELETE_WINDOW", self.wm_delete_window)
- top.wm_title("I/O Warning")
- top.wm_iconname("I/O Warning")
- self.top = top
-
- l1 = Label(top,
- text="Non-ASCII found, yet no encoding declared. Add a line like")
- l1.pack(side=TOP, anchor=W)
- l2 = Entry(top, font="courier")
- l2.insert(0, "# -*- coding: %s -*-" % enc)
- # For some reason, the text is not selectable anymore if the
- # widget is disabled.
- # l2['state'] = DISABLED
- l2.pack(side=TOP, anchor = W, fill=X)
- l3 = Label(top, text="to your file\n"
- "Choose OK to save this file as %s\n"
- "Edit your general options to silence this warning" % enc)
- l3.pack(side=TOP, anchor = W)
-
- buttons = Frame(top)
- buttons.pack(side=TOP, fill=X)
- # Both return and cancel mean the same thing: do nothing
- self.default = self.cancel = 0
- b1 = Button(buttons, text="Ok", default="active",
- command=self.do_ok)
- b1.pack(side=LEFT, fill=BOTH, expand=1)
- b2 = Button(buttons, text="Edit my file",
- command=self.do_edit)
- b2.pack(side=LEFT, fill=BOTH, expand=1)
-
- self._set_transient(master)
-
- def do_ok(self):
- self.done(0)
-
- def do_edit(self):
- self.done(1)
-
-def coding_spec(str):
- """Return the encoding declaration according to PEP 263.
-
- Raise LookupError if the encoding is declared but unknown.
- """
- # Only consider the first two lines
- str = str.split("\n")[:2]
- str = "\n".join(str)
-
- match = coding_re.search(str)
- if not match:
- return None
- name = match.group(1)
- # Check whether the encoding is known
- import codecs
- try:
- codecs.lookup(name)
- except LookupError:
- # The standard encoding error does not indicate the encoding
- raise LookupError, "Unknown encoding "+name
- return name
-
-
-class IOBinding:
-
- def __init__(self, editwin):
- self.editwin = editwin
- self.text = editwin.text
- self.__id_open = self.text.bind("<<open-window-from-file>>", self.open)
- self.__id_save = self.text.bind("<<save-window>>", self.save)
- self.__id_saveas = self.text.bind("<<save-window-as-file>>",
- self.save_as)
- self.__id_savecopy = self.text.bind("<<save-copy-of-window-as-file>>",
- self.save_a_copy)
- self.fileencoding = None
- self.__id_print = self.text.bind("<<print-window>>", self.print_window)
-
- def close(self):
- # Undo command bindings
- self.text.unbind("<<open-window-from-file>>", self.__id_open)
- self.text.unbind("<<save-window>>", self.__id_save)
- self.text.unbind("<<save-window-as-file>>",self.__id_saveas)
- self.text.unbind("<<save-copy-of-window-as-file>>", self.__id_savecopy)
- self.text.unbind("<<print-window>>", self.__id_print)
- # Break cycles
- self.editwin = None
- self.text = None
- self.filename_change_hook = None
-
- def get_saved(self):
- return self.editwin.get_saved()
-
- def set_saved(self, flag):
- self.editwin.set_saved(flag)
-
- def reset_undo(self):
- self.editwin.reset_undo()
-
- filename_change_hook = None
-
- def set_filename_change_hook(self, hook):
- self.filename_change_hook = hook
-
- filename = None
- dirname = None
-
- def set_filename(self, filename):
- if filename and os.path.isdir(filename):
- self.filename = None
- self.dirname = filename
- else:
- self.filename = filename
- self.dirname = None
- self.set_saved(1)
- if self.filename_change_hook:
- self.filename_change_hook()
-
- def open(self, event=None, editFile=None):
- if self.editwin.flist:
- if not editFile:
- filename = self.askopenfile()
- else:
- filename=editFile
- if filename:
- # If the current window has no filename and hasn't been
- # modified, we replace its contents (no loss). Otherwise
- # we open a new window. But we won't replace the
- # shell window (which has an interp(reter) attribute), which
- # gets set to "not modified" at every new prompt.
- try:
- interp = self.editwin.interp
- except:
- interp = None
- if not self.filename and self.get_saved() and not interp:
- self.editwin.flist.open(filename, self.loadfile)
- else:
- self.editwin.flist.open(filename)
- else:
- self.text.focus_set()
- return "break"
- #
- # Code for use outside IDLE:
- if self.get_saved():
- reply = self.maybesave()
- if reply == "cancel":
- self.text.focus_set()
- return "break"
- if not editFile:
- filename = self.askopenfile()
- else:
- filename=editFile
- if filename:
- self.loadfile(filename)
- else:
- self.text.focus_set()
- return "break"
-
- eol = r"(\r\n)|\n|\r" # \r\n (Windows), \n (UNIX), or \r (Mac)
- eol_re = re.compile(eol)
- eol_convention = os.linesep # Default
-
- def loadfile(self, filename):
- try:
- # open the file in binary mode so that we can handle
- # end-of-line convention ourselves.
- f = open(filename,'rb')
- chars = f.read()
- f.close()
- except IOError, msg:
- tkMessageBox.showerror("I/O Error", str(msg), master=self.text)
- return False
-
- chars = self.decode(chars)
- # We now convert all end-of-lines to '\n's
- firsteol = self.eol_re.search(chars)
- if firsteol:
- self.eol_convention = firsteol.group(0)
- if isinstance(self.eol_convention, unicode):
- # Make sure it is an ASCII string
- self.eol_convention = self.eol_convention.encode("ascii")
- chars = self.eol_re.sub(r"\n", chars)
-
- self.text.delete("1.0", "end")
- self.set_filename(None)
- self.text.insert("1.0", chars)
- self.reset_undo()
- self.set_filename(filename)
- self.text.mark_set("insert", "1.0")
- self.text.see("insert")
- self.updaterecentfileslist(filename)
- return True
-
- def decode(self, chars):
- """Create a Unicode string
-
- If that fails, let Tcl try its best
- """
- # Check presence of a UTF-8 signature first
- if chars.startswith(BOM_UTF8):
- try:
- chars = chars[3:].decode("utf-8")
- except UnicodeError:
- # has UTF-8 signature, but fails to decode...
- return chars
- else:
- # Indicates that this file originally had a BOM
- self.fileencoding = BOM_UTF8
- return chars
- # Next look for coding specification
- try:
- enc = coding_spec(chars)
- except LookupError, name:
- tkMessageBox.showerror(
- title="Error loading the file",
- message="The encoding '%s' is not known to this Python "\
- "installation. The file may not display correctly" % name,
- master = self.text)
- enc = None
- if enc:
- try:
- return unicode(chars, enc)
- except UnicodeError:
- pass
- # If it is ASCII, we need not to record anything
- try:
- return unicode(chars, 'ascii')
- except UnicodeError:
- pass
- # Finally, try the locale's encoding. This is deprecated;
- # the user should declare a non-ASCII encoding
- try:
- chars = unicode(chars, encoding)
- self.fileencoding = encoding
- except UnicodeError:
- pass
- return chars
-
- def maybesave(self):
- if self.get_saved():
- return "yes"
- message = "Do you want to save %s before closing?" % (
- self.filename or "this untitled document")
- m = tkMessageBox.Message(
- title="Save On Close",
- message=message,
- icon=tkMessageBox.QUESTION,
- type=tkMessageBox.YESNOCANCEL,
- master=self.text)
- reply = m.show()
- if reply == "yes":
- self.save(None)
- if not self.get_saved():
- reply = "cancel"
- self.text.focus_set()
- return reply
-
- def save(self, event):
- if not self.filename:
- self.save_as(event)
- else:
- if self.writefile(self.filename):
- self.set_saved(1)
- try:
- self.editwin.store_file_breaks()
- except AttributeError: # may be a PyShell
- pass
- self.text.focus_set()
- return "break"
-
- def save_as(self, event):
- filename = self.asksavefile()
- if filename:
- if self.writefile(filename):
- self.set_filename(filename)
- self.set_saved(1)
- try:
- self.editwin.store_file_breaks()
- except AttributeError:
- pass
- self.text.focus_set()
- self.updaterecentfileslist(filename)
- return "break"
-
- def save_a_copy(self, event):
- filename = self.asksavefile()
- if filename:
- self.writefile(filename)
- self.text.focus_set()
- self.updaterecentfileslist(filename)
- return "break"
-
- def writefile(self, filename):
- self.fixlastline()
- chars = self.encode(self.text.get("1.0", "end-1c"))
- if self.eol_convention != "\n":
- chars = chars.replace("\n", self.eol_convention)
- try:
- f = open(filename, "wb")
- f.write(chars)
- f.flush()
- f.close()
- return True
- except IOError, msg:
- tkMessageBox.showerror("I/O Error", str(msg),
- master=self.text)
- return False
-
- def encode(self, chars):
- if isinstance(chars, types.StringType):
- # This is either plain ASCII, or Tk was returning mixed-encoding
- # text to us. Don't try to guess further.
- return chars
- # See whether there is anything non-ASCII in it.
- # If not, no need to figure out the encoding.
- try:
- return chars.encode('ascii')
- except UnicodeError:
- pass
- # If there is an encoding declared, try this first.
- try:
- enc = coding_spec(chars)
- failed = None
- except LookupError, msg:
- failed = msg
- enc = None
- if enc:
- try:
- return chars.encode(enc)
- except UnicodeError:
- failed = "Invalid encoding '%s'" % enc
- if failed:
- tkMessageBox.showerror(
- "I/O Error",
- "%s. Saving as UTF-8" % failed,
- master = self.text)
- # If there was a UTF-8 signature, use that. This should not fail
- if self.fileencoding == BOM_UTF8 or failed:
- return BOM_UTF8 + chars.encode("utf-8")
- # Try the original file encoding next, if any
- if self.fileencoding:
- try:
- return chars.encode(self.fileencoding)
- except UnicodeError:
- tkMessageBox.showerror(
- "I/O Error",
- "Cannot save this as '%s' anymore. Saving as UTF-8" \
- % self.fileencoding,
- master = self.text)
- return BOM_UTF8 + chars.encode("utf-8")
- # Nothing was declared, and we had not determined an encoding
- # on loading. Recommend an encoding line.
- config_encoding = idleConf.GetOption("main","EditorWindow",
- "encoding")
- if config_encoding == 'utf-8':
- # User has requested that we save files as UTF-8
- return BOM_UTF8 + chars.encode("utf-8")
- ask_user = True
- try:
- chars = chars.encode(encoding)
- enc = encoding
- if config_encoding == 'locale':
- ask_user = False
- except UnicodeError:
- chars = BOM_UTF8 + chars.encode("utf-8")
- enc = "utf-8"
- if not ask_user:
- return chars
- dialog = EncodingMessage(self.editwin.top, enc)
- dialog.go()
- if dialog.num == 1:
- # User asked us to edit the file
- encline = "# -*- coding: %s -*-\n" % enc
- firstline = self.text.get("1.0", "2.0")
- if firstline.startswith("#!"):
- # Insert encoding after #! line
- self.text.insert("2.0", encline)
- else:
- self.text.insert("1.0", encline)
- return self.encode(self.text.get("1.0", "end-1c"))
- return chars
-
- def fixlastline(self):
- c = self.text.get("end-2c")
- if c != '\n':
- self.text.insert("end-1c", "\n")
-
- def print_window(self, event):
- tempfilename = None
- saved = self.get_saved()
- if saved:
- filename = self.filename
- # shell undo is reset after every prompt, looks saved, probably isn't
- if not saved or filename is None:
- # XXX KBK 08Jun03 Wouldn't it be better to ask the user to save?
- (tfd, tempfilename) = tempfile.mkstemp(prefix='IDLE_tmp_')
- filename = tempfilename
- os.close(tfd)
- if not self.writefile(tempfilename):
- os.unlink(tempfilename)
- return "break"
- platform=os.name
- printPlatform=1
- if platform == 'posix': #posix platform
- command = idleConf.GetOption('main','General',
- 'print-command-posix')
- command = command + " 2>&1"
- elif platform == 'nt': #win32 platform
- command = idleConf.GetOption('main','General','print-command-win')
- else: #no printing for this platform
- printPlatform=0
- if printPlatform: #we can try to print for this platform
- command = command % filename
- pipe = os.popen(command, "r")
- # things can get ugly on NT if there is no printer available.
- output = pipe.read().strip()
- status = pipe.close()
- if status:
- output = "Printing failed (exit status 0x%x)\n" % \
- status + output
- if output:
- output = "Printing command: %s\n" % repr(command) + output
- tkMessageBox.showerror("Print status", output, master=self.text)
- else: #no printing for this platform
- message="Printing is not enabled for this platform: %s" % platform
- tkMessageBox.showinfo("Print status", message, master=self.text)
- if tempfilename:
- os.unlink(tempfilename)
- return "break"
-
- opendialog = None
- savedialog = None
-
- filetypes = [
- ("Python and text files", "*.py *.pyw *.txt", "TEXT"),
- ("All text files", "*", "TEXT"),
- ("All files", "*"),
- ]
-
- def askopenfile(self):
- dir, base = self.defaultfilename("open")
- if not self.opendialog:
- self.opendialog = tkFileDialog.Open(master=self.text,
- filetypes=self.filetypes)
- filename = self.opendialog.show(initialdir=dir, initialfile=base)
- if isinstance(filename, unicode):
- filename = filename.encode(filesystemencoding)
- return filename
-
- def defaultfilename(self, mode="open"):
- if self.filename:
- return os.path.split(self.filename)
- elif self.dirname:
- return self.dirname, ""
- else:
- try:
- pwd = os.getcwd()
- except os.error:
- pwd = ""
- return pwd, ""
-
- def asksavefile(self):
- dir, base = self.defaultfilename("save")
- if not self.savedialog:
- self.savedialog = tkFileDialog.SaveAs(master=self.text,
- filetypes=self.filetypes)
- filename = self.savedialog.show(initialdir=dir, initialfile=base)
- if isinstance(filename, unicode):
- filename = filename.encode(filesystemencoding)
- return filename
-
- def updaterecentfileslist(self,filename):
- "Update recent file list on all editor windows"
- self.editwin.update_recent_files_list(filename)
-
-def test():
- root = Tk()
- class MyEditWin:
- def __init__(self, text):
- self.text = text
- self.flist = None
- self.text.bind("<Control-o>", self.open)
- self.text.bind("<Control-s>", self.save)
- self.text.bind("<Alt-s>", self.save_as)
- self.text.bind("<Alt-z>", self.save_a_copy)
- def get_saved(self): return 0
- def set_saved(self, flag): pass
- def reset_undo(self): pass
- def open(self, event):
- self.text.event_generate("<<open-window-from-file>>")
- def save(self, event):
- self.text.event_generate("<<save-window>>")
- def save_as(self, event):
- self.text.event_generate("<<save-window-as-file>>")
- def save_a_copy(self, event):
- self.text.event_generate("<<save-copy-of-window-as-file>>")
- text = Text(root)
- text.pack()
- text.focus_set()
- editwin = MyEditWin(text)
- io = IOBinding(editwin)
- root.mainloop()
-
-if __name__ == "__main__":
- test()
diff --git a/sys/lib/python/idlelib/Icons/folder.gif b/sys/lib/python/idlelib/Icons/folder.gif
deleted file mode 100644
index effe8dc8a..000000000
--- a/sys/lib/python/idlelib/Icons/folder.gif
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/idlelib/Icons/idle.icns b/sys/lib/python/idlelib/Icons/idle.icns
deleted file mode 100644
index f65e3130f..000000000
--- a/sys/lib/python/idlelib/Icons/idle.icns
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/idlelib/Icons/minusnode.gif b/sys/lib/python/idlelib/Icons/minusnode.gif
deleted file mode 100644
index c72e46ff8..000000000
--- a/sys/lib/python/idlelib/Icons/minusnode.gif
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/idlelib/Icons/openfolder.gif b/sys/lib/python/idlelib/Icons/openfolder.gif
deleted file mode 100644
index 24aea1beb..000000000
--- a/sys/lib/python/idlelib/Icons/openfolder.gif
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/idlelib/Icons/plusnode.gif b/sys/lib/python/idlelib/Icons/plusnode.gif
deleted file mode 100644
index 13ace90eb..000000000
--- a/sys/lib/python/idlelib/Icons/plusnode.gif
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/idlelib/Icons/python.gif b/sys/lib/python/idlelib/Icons/python.gif
deleted file mode 100644
index 58271edec..000000000
--- a/sys/lib/python/idlelib/Icons/python.gif
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/idlelib/Icons/tk.gif b/sys/lib/python/idlelib/Icons/tk.gif
deleted file mode 100644
index a603f5ecb..000000000
--- a/sys/lib/python/idlelib/Icons/tk.gif
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/idlelib/IdleHistory.py b/sys/lib/python/idlelib/IdleHistory.py
deleted file mode 100644
index 960242f78..000000000
--- a/sys/lib/python/idlelib/IdleHistory.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from configHandler import idleConf
-
-class History:
-
- def __init__(self, text, output_sep = "\n"):
- self.text = text
- self.history = []
- self.history_prefix = None
- self.history_pointer = None
- self.output_sep = output_sep
- self.cyclic = idleConf.GetOption("main", "History", "cyclic", 1, "bool")
- text.bind("<<history-previous>>", self.history_prev)
- text.bind("<<history-next>>", self.history_next)
-
- def history_next(self, event):
- self.history_do(0)
- return "break"
-
- def history_prev(self, event):
- self.history_do(1)
- return "break"
-
- def _get_source(self, start, end):
- # Get source code from start index to end index. Lines in the
- # text control may be separated by sys.ps2 .
- lines = self.text.get(start, end).split(self.output_sep)
- return "\n".join(lines)
-
- def _put_source(self, where, source):
- output = self.output_sep.join(source.split("\n"))
- self.text.insert(where, output)
-
- def history_do(self, reverse):
- nhist = len(self.history)
- pointer = self.history_pointer
- prefix = self.history_prefix
- if pointer is not None and prefix is not None:
- if self.text.compare("insert", "!=", "end-1c") or \
- self._get_source("iomark", "end-1c") != self.history[pointer]:
- pointer = prefix = None
- if pointer is None or prefix is None:
- prefix = self._get_source("iomark", "end-1c")
- if reverse:
- pointer = nhist
- else:
- if self.cyclic:
- pointer = -1
- else:
- self.text.bell()
- return
- nprefix = len(prefix)
- while 1:
- if reverse:
- pointer = pointer - 1
- else:
- pointer = pointer + 1
- if pointer < 0 or pointer >= nhist:
- self.text.bell()
- if not self.cyclic and pointer < 0:
- return
- else:
- if self._get_source("iomark", "end-1c") != prefix:
- self.text.delete("iomark", "end-1c")
- self._put_source("iomark", prefix)
- pointer = prefix = None
- break
- item = self.history[pointer]
- if item[:nprefix] == prefix and len(item) > nprefix:
- self.text.delete("iomark", "end-1c")
- self._put_source("iomark", item)
- break
- self.text.mark_set("insert", "end-1c")
- self.text.see("insert")
- self.text.tag_remove("sel", "1.0", "end")
- self.history_pointer = pointer
- self.history_prefix = prefix
-
- def history_store(self, source):
- source = source.strip()
- if len(source) > 2:
- # avoid duplicates
- try:
- self.history.remove(source)
- except ValueError:
- pass
- self.history.append(source)
- self.history_pointer = None
- self.history_prefix = None
diff --git a/sys/lib/python/idlelib/MultiCall.py b/sys/lib/python/idlelib/MultiCall.py
deleted file mode 100644
index 547df13ee..000000000
--- a/sys/lib/python/idlelib/MultiCall.py
+++ /dev/null
@@ -1,406 +0,0 @@
-"""
-MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
-example), but enables multiple calls of functions per virtual event - all
-matching events will be called, not only the most specific one. This is done
-by wrapping the event functions - event_add, event_delete and event_info.
-MultiCall recognizes only a subset of legal event sequences. Sequences which
-are not recognized are treated by the original Tk handling mechanism. A
-more-specific event will be called before a less-specific event.
-
-The recognized sequences are complete one-event sequences (no emacs-style
-Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
-Key/Button Press/Release events can have modifiers.
-The recognized modifiers are Shift, Control, Option and Command for Mac, and
-Control, Alt, Shift, Meta/M for other platforms.
-
-For all events which were handled by MultiCall, a new member is added to the
-event instance passed to the binded functions - mc_type. This is one of the
-event type constants defined in this module (such as MC_KEYPRESS).
-For Key/Button events (which are handled by MultiCall and may receive
-modifiers), another member is added - mc_state. This member gives the state
-of the recognized modifiers, as a combination of the modifier constants
-also defined in this module (for example, MC_SHIFT).
-Using these members is absolutely portable.
-
-The order by which events are called is defined by these rules:
-1. A more-specific event will be called before a less-specific event.
-2. A recently-binded event will be called before a previously-binded event,
- unless this conflicts with the first rule.
-Each function will be called at most once for each event.
-"""
-
-import sys
-import os
-import string
-import re
-import Tkinter
-
-# the event type constants, which define the meaning of mc_type
-MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
-MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
-MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
-MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
-MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
-# the modifier state constants, which define the meaning of mc_state
-MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
-MC_OPTION = 1<<6; MC_COMMAND = 1<<7
-
-# define the list of modifiers, to be used in complex event types.
-if sys.platform == "darwin" and sys.executable.count(".app"):
- _modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
- _modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
-else:
- _modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
- _modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
-
-# a dictionary to map a modifier name into its number
-_modifier_names = dict([(name, number)
- for number in range(len(_modifiers))
- for name in _modifiers[number]])
-
-# A binder is a class which binds functions to one type of event. It has two
-# methods: bind and unbind, which get a function and a parsed sequence, as
-# returned by _parse_sequence(). There are two types of binders:
-# _SimpleBinder handles event types with no modifiers and no detail.
-# No Python functions are called when no events are binded.
-# _ComplexBinder handles event types with modifiers and a detail.
-# A Python function is called each time an event is generated.
-
-class _SimpleBinder:
- def __init__(self, type, widget, widgetinst):
- self.type = type
- self.sequence = '<'+_types[type][0]+'>'
- self.widget = widget
- self.widgetinst = widgetinst
- self.bindedfuncs = []
- self.handlerid = None
-
- def bind(self, triplet, func):
- if not self.handlerid:
- def handler(event, l = self.bindedfuncs, mc_type = self.type):
- event.mc_type = mc_type
- wascalled = {}
- for i in range(len(l)-1, -1, -1):
- func = l[i]
- if func not in wascalled:
- wascalled[func] = True
- r = func(event)
- if r:
- return r
- self.handlerid = self.widget.bind(self.widgetinst,
- self.sequence, handler)
- self.bindedfuncs.append(func)
-
- def unbind(self, triplet, func):
- self.bindedfuncs.remove(func)
- if not self.bindedfuncs:
- self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
- self.handlerid = None
-
- def __del__(self):
- if self.handlerid:
- self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
-
-# An int in range(1 << len(_modifiers)) represents a combination of modifiers
-# (if the least significent bit is on, _modifiers[0] is on, and so on).
-# _state_subsets gives for each combination of modifiers, or *state*,
-# a list of the states which are a subset of it. This list is ordered by the
-# number of modifiers is the state - the most specific state comes first.
-_states = range(1 << len(_modifiers))
-_state_names = [reduce(lambda x, y: x + y,
- [_modifiers[i][0]+'-' for i in range(len(_modifiers))
- if (1 << i) & s],
- "")
- for s in _states]
-_state_subsets = map(lambda i: filter(lambda j: not (j & (~i)), _states),
- _states)
-for l in _state_subsets:
- l.sort(lambda a, b, nummod = lambda x: len(filter(lambda i: (1<<i) & x,
- range(len(_modifiers)))):
- nummod(b) - nummod(a))
-# _state_codes gives for each state, the portable code to be passed as mc_state
-_state_codes = [reduce(lambda x, y: x | y,
- [_modifier_masks[i] for i in range(len(_modifiers))
- if (1 << i) & s],
- 0)
- for s in _states]
-
-class _ComplexBinder:
- # This class binds many functions, and only unbinds them when it is deleted.
- # self.handlerids is the list of seqs and ids of binded handler functions.
- # The binded functions sit in a dictionary of lists of lists, which maps
- # a detail (or None) and a state into a list of functions.
- # When a new detail is discovered, handlers for all the possible states
- # are binded.
-
- def __create_handler(self, lists, mc_type, mc_state):
- def handler(event, lists = lists,
- mc_type = mc_type, mc_state = mc_state,
- ishandlerrunning = self.ishandlerrunning,
- doafterhandler = self.doafterhandler):
- ishandlerrunning[:] = [True]
- event.mc_type = mc_type
- event.mc_state = mc_state
- wascalled = {}
- r = None
- for l in lists:
- for i in range(len(l)-1, -1, -1):
- func = l[i]
- if func not in wascalled:
- wascalled[func] = True
- r = l[i](event)
- if r:
- break
- if r:
- break
- ishandlerrunning[:] = []
- # Call all functions in doafterhandler and remove them from list
- while doafterhandler:
- doafterhandler.pop()()
- if r:
- return r
- return handler
-
- def __init__(self, type, widget, widgetinst):
- self.type = type
- self.typename = _types[type][0]
- self.widget = widget
- self.widgetinst = widgetinst
- self.bindedfuncs = {None: [[] for s in _states]}
- self.handlerids = []
- # we don't want to change the lists of functions while a handler is
- # running - it will mess up the loop and anyway, we usually want the
- # change to happen from the next event. So we have a list of functions
- # for the handler to run after it finishes calling the binded functions.
- # It calls them only once.
- # ishandlerrunning is a list. An empty one means no, otherwise - yes.
- # this is done so that it would be mutable.
- self.ishandlerrunning = []
- self.doafterhandler = []
- for s in _states:
- lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
- handler = self.__create_handler(lists, type, _state_codes[s])
- seq = '<'+_state_names[s]+self.typename+'>'
- self.handlerids.append((seq, self.widget.bind(self.widgetinst,
- seq, handler)))
-
- def bind(self, triplet, func):
- if not self.bindedfuncs.has_key(triplet[2]):
- self.bindedfuncs[triplet[2]] = [[] for s in _states]
- for s in _states:
- lists = [ self.bindedfuncs[detail][i]
- for detail in (triplet[2], None)
- for i in _state_subsets[s] ]
- handler = self.__create_handler(lists, self.type,
- _state_codes[s])
- seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
- self.handlerids.append((seq, self.widget.bind(self.widgetinst,
- seq, handler)))
- doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
- if not self.ishandlerrunning:
- doit()
- else:
- self.doafterhandler.append(doit)
-
- def unbind(self, triplet, func):
- doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
- if not self.ishandlerrunning:
- doit()
- else:
- self.doafterhandler.append(doit)
-
- def __del__(self):
- for seq, id in self.handlerids:
- self.widget.unbind(self.widgetinst, seq, id)
-
-# define the list of event types to be handled by MultiEvent. the order is
-# compatible with the definition of event type constants.
-_types = (
- ("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
- ("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
- ("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
- ("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
- ("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
- ("Visibility",),
-)
-
-# which binder should be used for every event type?
-_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
-
-# A dictionary to map a type name into its number
-_type_names = dict([(name, number)
- for number in range(len(_types))
- for name in _types[number]])
-
-_keysym_re = re.compile(r"^\w+$")
-_button_re = re.compile(r"^[1-5]$")
-def _parse_sequence(sequence):
- """Get a string which should describe an event sequence. If it is
- successfully parsed as one, return a tuple containing the state (as an int),
- the event type (as an index of _types), and the detail - None if none, or a
- string if there is one. If the parsing is unsuccessful, return None.
- """
- if not sequence or sequence[0] != '<' or sequence[-1] != '>':
- return None
- words = string.split(sequence[1:-1], '-')
-
- modifiers = 0
- while words and words[0] in _modifier_names:
- modifiers |= 1 << _modifier_names[words[0]]
- del words[0]
-
- if words and words[0] in _type_names:
- type = _type_names[words[0]]
- del words[0]
- else:
- return None
-
- if _binder_classes[type] is _SimpleBinder:
- if modifiers or words:
- return None
- else:
- detail = None
- else:
- # _ComplexBinder
- if type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
- type_re = _keysym_re
- else:
- type_re = _button_re
-
- if not words:
- detail = None
- elif len(words) == 1 and type_re.match(words[0]):
- detail = words[0]
- else:
- return None
-
- return modifiers, type, detail
-
-def _triplet_to_sequence(triplet):
- if triplet[2]:
- return '<'+_state_names[triplet[0]]+_types[triplet[1]][0]+'-'+ \
- triplet[2]+'>'
- else:
- return '<'+_state_names[triplet[0]]+_types[triplet[1]][0]+'>'
-
-_multicall_dict = {}
-def MultiCallCreator(widget):
- """Return a MultiCall class which inherits its methods from the
- given widget class (for example, Tkinter.Text). This is used
- instead of a templating mechanism.
- """
- if widget in _multicall_dict:
- return _multicall_dict[widget]
-
- class MultiCall (widget):
- assert issubclass(widget, Tkinter.Misc)
-
- def __init__(self, *args, **kwargs):
- apply(widget.__init__, (self,)+args, kwargs)
- # a dictionary which maps a virtual event to a tuple with:
- # 0. the function binded
- # 1. a list of triplets - the sequences it is binded to
- self.__eventinfo = {}
- self.__binders = [_binder_classes[i](i, widget, self)
- for i in range(len(_types))]
-
- def bind(self, sequence=None, func=None, add=None):
- #print "bind(%s, %s, %s) called." % (sequence, func, add)
- if type(sequence) is str and len(sequence) > 2 and \
- sequence[:2] == "<<" and sequence[-2:] == ">>":
- if sequence in self.__eventinfo:
- ei = self.__eventinfo[sequence]
- if ei[0] is not None:
- for triplet in ei[1]:
- self.__binders[triplet[1]].unbind(triplet, ei[0])
- ei[0] = func
- if ei[0] is not None:
- for triplet in ei[1]:
- self.__binders[triplet[1]].bind(triplet, func)
- else:
- self.__eventinfo[sequence] = [func, []]
- return widget.bind(self, sequence, func, add)
-
- def unbind(self, sequence, funcid=None):
- if type(sequence) is str and len(sequence) > 2 and \
- sequence[:2] == "<<" and sequence[-2:] == ">>" and \
- sequence in self.__eventinfo:
- func, triplets = self.__eventinfo[sequence]
- if func is not None:
- for triplet in triplets:
- self.__binders[triplet[1]].unbind(triplet, func)
- self.__eventinfo[sequence][0] = None
- return widget.unbind(self, sequence, funcid)
-
- def event_add(self, virtual, *sequences):
- #print "event_add(%s,%s) was called"%(repr(virtual),repr(sequences))
- if virtual not in self.__eventinfo:
- self.__eventinfo[virtual] = [None, []]
-
- func, triplets = self.__eventinfo[virtual]
- for seq in sequences:
- triplet = _parse_sequence(seq)
- if triplet is None:
- #print >> sys.stderr, "Seq. %s was added by Tkinter."%seq
- widget.event_add(self, virtual, seq)
- else:
- if func is not None:
- self.__binders[triplet[1]].bind(triplet, func)
- triplets.append(triplet)
-
- def event_delete(self, virtual, *sequences):
- if virtual not in self.__eventinfo:
- return
- func, triplets = self.__eventinfo[virtual]
- for seq in sequences:
- triplet = _parse_sequence(seq)
- if triplet is None:
- #print >> sys.stderr, "Seq. %s was deleted by Tkinter."%seq
- widget.event_delete(self, virtual, seq)
- else:
- if func is not None:
- self.__binders[triplet[1]].unbind(triplet, func)
- triplets.remove(triplet)
-
- def event_info(self, virtual=None):
- if virtual is None or virtual not in self.__eventinfo:
- return widget.event_info(self, virtual)
- else:
- return tuple(map(_triplet_to_sequence,
- self.__eventinfo[virtual][1])) + \
- widget.event_info(self, virtual)
-
- def __del__(self):
- for virtual in self.__eventinfo:
- func, triplets = self.__eventinfo[virtual]
- if func:
- for triplet in triplets:
- self.__binders[triplet[1]].unbind(triplet, func)
-
-
- _multicall_dict[widget] = MultiCall
- return MultiCall
-
-if __name__ == "__main__":
- # Test
- root = Tkinter.Tk()
- text = MultiCallCreator(Tkinter.Text)(root)
- text.pack()
- def bindseq(seq, n=[0]):
- def handler(event):
- print seq
- text.bind("<<handler%d>>"%n[0], handler)
- text.event_add("<<handler%d>>"%n[0], seq)
- n[0] += 1
- bindseq("<Key>")
- bindseq("<Control-Key>")
- bindseq("<Alt-Key-a>")
- bindseq("<Control-Key-a>")
- bindseq("<Alt-Control-Key-a>")
- bindseq("<Key-b>")
- bindseq("<Control-Button-1>")
- bindseq("<Alt-Button-1>")
- bindseq("<FocusOut>")
- bindseq("<Enter>")
- bindseq("<Leave>")
- root.mainloop()
diff --git a/sys/lib/python/idlelib/MultiStatusBar.py b/sys/lib/python/idlelib/MultiStatusBar.py
deleted file mode 100644
index 2d4c5473d..000000000
--- a/sys/lib/python/idlelib/MultiStatusBar.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from Tkinter import *
-
-class MultiStatusBar(Frame):
-
- def __init__(self, master=None, **kw):
- if master is None:
- master = Tk()
- Frame.__init__(self, master, **kw)
- self.labels = {}
-
- def set_label(self, name, text='', side=LEFT):
- if not self.labels.has_key(name):
- label = Label(self, bd=1, relief=SUNKEN, anchor=W)
- label.pack(side=side)
- self.labels[name] = label
- else:
- label = self.labels[name]
- label.config(text=text)
-
-def _test():
- b = Frame()
- c = Text(b)
- c.pack(side=TOP)
- a = MultiStatusBar(b)
- a.set_label("one", "hello")
- a.set_label("two", "world")
- a.pack(side=BOTTOM, fill=X)
- b.pack()
- b.mainloop()
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/idlelib/NEWS.txt b/sys/lib/python/idlelib/NEWS.txt
deleted file mode 100644
index 54ce807d6..000000000
--- a/sys/lib/python/idlelib/NEWS.txt
+++ /dev/null
@@ -1,613 +0,0 @@
-What's New in IDLE 1.2.1?
-=========================
-
-*Release date: 18-APR-2007*
-
-What's New in IDLE 1.2.1c1?
-===========================
-
-*Release date: 05-APR-2007*
-
-What's New in IDLE 1.2?
-=======================
-
-*Release date: 19-SEP-2006*
-
-What's New in IDLE 1.2c2?
-=========================
-
-*Release date: 12-SEP-2006*
-
-What's New in IDLE 1.2c1?
-=========================
-
-*Release date: 17-AUG-2006*
-
-- File menu hotkeys: there were three 'p' assignments. Reassign the
- 'Save Copy As' and 'Print' hotkeys to 'y' and 't'. Change the
- Shell hotkey from 's' to 'l'.
-
-- IDLE honors new quit() and exit() commands from site.py Quitter() object.
- Patch 1540892, Jim Jewett
-
-- The 'with' statement is now a Code Context block opener.
- Patch 1540851, Jim Jewett
-
-- Retrieval of previous shell command was not always preserving indentation
- (since 1.2a1) Patch 1528468 Tal Einat.
-
-- Changing tokenize (39046) to detect dedent broke tabnanny check (since 1.2a1)
-
-- ToggleTab dialog was setting indent to 8 even if cancelled (since 1.2a1).
-
-- When used w/o subprocess, all exceptions were preceded by an error
- message claiming they were IDLE internal errors (since 1.2a1).
-
-What's New in IDLE 1.2b3?
-=========================
-
-*Release date: 03-AUG-2006*
-
-- EditorWindow.test() was failing. Bug 1417598
-
-- EditorWindow failed when used stand-alone if sys.ps1 not set.
- Bug 1010370 Dave Florek
-
-- Tooltips failed on new-syle class __init__ args. Bug 1027566 Loren Guthrie
-
-- Avoid occasional failure to detect closing paren properly.
- Patch 1407280 Tal Einat
-
-- Rebinding Tab key was inserting 'tab' instead of 'Tab'. Bug 1179168.
-
-- Colorizer now handles #<builtin> correctly, also unicode strings and
- 'as' keyword in comment directly following import command. Closes 1325071.
- Patch 1479219 Tal Einat
-
-What's New in IDLE 1.2b2?
-=========================
-
-*Release date: 11-JUL-2006*
-
-What's New in IDLE 1.2b1?
-=========================
-
-*Release date: 20-JUN-2006*
-
-What's New in IDLE 1.2a2?
-=========================
-
-*Release date: 27-APR-2006*
-
-What's New in IDLE 1.2a1?
-=========================
-
-*Release date: 05-APR-2006*
-
-- Source file f.flush() after writing; trying to avoid lossage if user
- kills GUI.
-
-- Options / Keys / Advanced dialog made functional. Also, allow binding
- of 'movement' keys.
-
-- 'syntax' patch adds improved calltips and a new class attribute listbox.
- MultiCall module allows binding multiple actions to an event.
- Patch 906702 Noam Raphael
-
-- Better indentation after first line of string continuation.
- IDLEfork Patch 681992, Noam Raphael
-
-- Fixed CodeContext alignment problem, following suggestion from Tal Einat.
-
-- Increased performance in CodeContext extension Patch 936169 Noam Raphael
-
-- Mac line endings were incorrect when pasting code from some browsers
- when using X11 and the Fink distribution. Python Bug 1263656.
-
-- <Enter> when cursor is on a previous command retrieves that command. Instead
- of replacing the input line, the previous command is now appended to the
- input line. Indentation is preserved, and undo is enabled.
- Patch 1196917 Jeff Shute
-
-- Clarify "tab/space" Error Dialog and "Tab Width" Dialog associated with
- the Untabify command.
-
-- Corrected "tab/space" Error Dialog to show correct menu for Untabify.
- Patch 1196980 Jeff Shute
-
-- New files are colorized by default, and colorizing is removed when
- saving as non-Python files. Patch 1196895 Jeff Shute
- Closes Python Bugs 775012 and 800432, partial fix IDLEfork 763524
-
-- Improve subprocess link error notification.
-
-- run.py: use Queue's blocking feature instead of sleeping in the main
- loop. Patch # 1190163 Michiel de Hoon
-
-- Add config-main option to make the 'history' feature non-cyclic.
- Default remains cyclic. Python Patch 914546 Noam Raphael.
-
-- Removed ability to configure tabs indent from Options dialog. This 'feature'
- has never worked and no one has complained. It is still possible to set a
- default tabs (v. spaces) indent 'manually' via config-main.def (or to turn on
- tabs for the current EditorWindow via the Format menu) but IDLE will
- encourage indentation via spaces.
-
-- Enable setting the indentation width using the Options dialog.
- Bug # 783877
-
-- Add keybindings for del-word-left and del-word-right.
-
-- Discourage using an indent width other than 8 when using tabs to indent
- Python code.
-
-- Restore use of EditorWindow.set_indentation_params(), was dead code since
- Autoindent was merged into EditorWindow. This allows IDLE to conform to the
- indentation width of a loaded file. (But it still will not switch to tabs
- even if the file uses tabs.) Any change in indent width is local to that
- window.
-
-- Add Tabnanny check before Run/F5, not just when Checking module.
-
-- If an extension can't be loaded, print warning and skip it instead of
- erroring out.
-
-- Improve error handling when .idlerc can't be created (warn and exit).
-
-- The GUI was hanging if the shell window was closed while a raw_input()
- was pending. Restored the quit() of the readline() mainloop().
- http://mail.python.org/pipermail/idle-dev/2004-December/002307.html
-
-- The remote procedure call module rpc.py can now access data attributes of
- remote registered objects. Changes to these attributes are local, however.
-
-What's New in IDLE 1.1?
-=======================
-
-*Release date: 30-NOV-2004*
-
-- On OpenBSD, terminating IDLE with ctrl-c from the command line caused a
- stuck subprocess MainThread because only the SocketThread was exiting.
-
-What's New in IDLE 1.1b3/rc1?
-=============================
-
-*Release date: 18-NOV-2004*
-
-- Saving a Keyset w/o making changes (by using the "Save as New Custom Key Set"
- button) caused IDLE to fail on restart (no new keyset was created in
- config-keys.cfg). Also true for Theme/highlights. Python Bug 1064535.
-
-- A change to the linecache.py API caused IDLE to exit when an exception was
- raised while running without the subprocess (-n switch). Python Bug 1063840.
-
-What's New in IDLE 1.1b2?
-=========================
-
-*Release date: 03-NOV-2004*
-
-- When paragraph reformat width was made configurable, a bug was
- introduced that caused reformatting of comment blocks to ignore how
- far the block was indented, effectively adding the indentation width
- to the reformat width. This has been repaired, and the reformat
- width is again a bound on the total width of reformatted lines.
-
-What's New in IDLE 1.1b1?
-=========================
-
-*Release date: 15-OCT-2004*
-
-
-What's New in IDLE 1.1a3?
-=========================
-
-*Release date: 02-SEP-2004*
-
-- Improve keyboard focus binding, especially in Windows menu. Improve
- window raising, especially in the Windows menu and in the debugger.
- IDLEfork 763524.
-
-- If user passes a non-existant filename on the commandline, just
- open a new file, don't raise a dialog. IDLEfork 854928.
-
-
-What's New in IDLE 1.1a2?
-=========================
-
-*Release date: 05-AUG-2004*
-
-- EditorWindow.py was not finding the .chm help file on Windows. Typo
- at Rev 1.54. Python Bug 990954
-
-- checking sys.platform for substring 'win' was breaking IDLE docs on Mac
- (darwin). Also, Mac Safari browser requires full file:// URIs. SF 900580.
-
-
-What's New in IDLE 1.1a1?
-=========================
-
-*Release date: 08-JUL-2004*
-
-- Redirect the warning stream to the shell during the ScriptBinding check of
- user code and format the warning similarly to an exception for both that
- check and for runtime warnings raised in the subprocess.
-
-- CodeContext hint pane visibility state is now persistent across sessions.
- The pane no longer appears in the shell window. Added capability to limit
- extensions to shell window or editor windows. Noam Raphael addition
- to Patch 936169.
-
-- Paragraph reformat width is now a configurable parameter in the
- Options GUI.
-
-- New Extension: CodeContext. Provides block structuring hints for code
- which has scrolled above an edit window. Patch 936169 Noam Raphael.
-
-- If nulls somehow got into the strings in recent-files.lst
- EditorWindow.update_recent_files_list() was failing. Python Bug 931336.
-
-- If the normal background is changed via Configure/Highlighting, it will
- update immediately, thanks to the previously mentioned patch by Nigel Rowe.
-
-- Add a highlight theme for builtin keywords. Python Patch 805830 Nigel Rowe
- This also fixed IDLEfork bug [ 693418 ] Normal text background color not
- refreshed and Python bug [897872 ] Unknown color name on HP-UX
-
-- rpc.py:SocketIO - Large modules were generating large pickles when downloaded
- to the execution server. The return of the OK response from the subprocess
- initialization was interfering and causing the sending socket to be not
- ready. Add an IO ready test to fix this. Moved the polling IO ready test
- into pollpacket().
-
-- Fix typo in rpc.py, s/b "pickle.PicklingError" not "pickle.UnpicklingError".
-
-- Added a Tk error dialog to run.py inform the user if the subprocess can't
- connect to the user GUI process. Added a timeout to the GUI's listening
- socket. Added Tk error dialogs to PyShell.py to announce a failure to bind
- the port or connect to the subprocess. Clean up error handling during
- connection initiation phase. This is an update of Python Patch 778323.
-
-- Print correct exception even if source file changed since shell was
- restarted. IDLEfork Patch 869012 Noam Raphael
-
-- Keybindings with the Shift modifier now work correctly. So do bindings which
- use the Space key. Limit unmodified user keybindings to the function keys.
- Python Bug 775353, IDLEfork Bugs 755647, 761557
-
-- After an exception, run.py was not setting the exception vector. Noam
- Raphael suggested correcting this so pdb's postmortem pm() would work.
- IDLEfork Patch 844675
-
-- IDLE now does not fail to save the file anymore if the Tk buffer is not a
- Unicode string, yet eol_convention is. Python Bugs 774680, 788378
-
-- IDLE didn't start correctly when Python was installed in "Program Files" on
- W2K and XP. Python Bugs 780451, 784183
-
-- config-main.def documentation incorrectly referred to idle- instead of
- config- filenames. SF 782759 Also added note about .idlerc location.
-
-
-What's New in IDLE 1.0?
-=======================
-
-*Release date: 29-Jul-2003*
-
-- Added a banner to the shell discussing warnings possibly raised by personal
- firewall software. Added same comment to README.txt.
-
-
-What's New in IDLE 1.0 release candidate 2?
-===========================================
-
-*Release date: 24-Jul-2003*
-
-- Calltip error when docstring was None Python Bug 775541
-
-
-What's New in IDLE 1.0 release candidate 1?
-===========================================
-
-*Release date: 18-Jul-2003*
-
-- Updated extend.txt, help.txt, and config-extensions.def to correctly
- reflect the current status of the configuration system. Python Bug 768469
-
-- Fixed: Call Tip Trimming May Loop Forever. Python Patch 769142 (Daniels)
-
-- Replaced apply(f, args, kwds) with f(*args, **kwargs) to improve performance
- Python Patch 768187
-
-- Break or continue statements outside a loop were causing IDLE crash
- Python Bug 767794
-
-- Convert Unicode strings from readline to IOBinding.encoding. Also set
- sys.std{in|out|err}.encoding, for both the local and the subprocess case.
- SF IDLEfork patch 682347.
-
-
-What's New in IDLE 1.0b2?
-=========================
-
-*Release date: 29-Jun-2003*
-
-- Extend AboutDialog.ViewFile() to support file encodings. Make the CREDITS
- file Latin-1.
-
-- Updated the About dialog to reflect re-integration into Python. Provide
- buttons to display Python's NEWS, License, and Credits, plus additional
- buttons for IDLE's README and NEWS.
-
-- TextViewer() now has a third parameter which allows inserting text into the
- viewer instead of reading from a file.
-
-- (Created the .../Lib/idlelib directory in the Python CVS, which is a clone of
- IDLEfork modified to install in the Python environment. The code in the
- interrupt module has been moved to thread.interrupt_main(). )
-
-- Printing the Shell window was failing if it was not saved first SF 748975
-
-- When using the Search in Files dialog, if the user had a selection
- highlighted in his Editor window, insert it into the dialog search field.
-
-- The Python Shell entry was disappearing from the Windows menu.
-
-- Update the Windows file list when a file name change occurs
-
-- Change to File / Open Module: always pop up the dialog, using the current
- selection as the default value. This is easier to use habitually.
-
-- Avoided a problem with starting the subprocess when 'localhost' doesn't
- resolve to the user's loopback interface. SF 747772
-
-- Fixed an issue with highlighted errors never de-colorizing. SF 747677. Also
- improved notification of Tabnanny Token Error.
-
-- File / New will by default save in the directory of the Edit window from
- which it was initiated. SF 748973 Guido van Rossum patch.
-
-
-What's New in IDLEfork 0.9b1?
-=============================
-
-*Release date: 02-Jun-2003*
-
-- The current working directory of the execution environment (and shell
- following completion of execution) is now that of the module being run.
-
-- Added the delete-exitfunc option to config-main.def. (This option is not
- included in the Options dialog.) Setting this to True (the default) will
- cause IDLE to not run sys.exitfunc/atexit when the subprocess exits.
-
-- IDLE now preserves the line ending codes when editing a file produced on
- a different platform. SF 661759, SF 538584
-
-- Reduced default editor font size to 10 point and increased window height
- to provide a better initial impression on Windows.
-
-- Options / Fonts/Tabs / Set Base Editor Font: List box was not highlighting
- the default font when first installed on Windows. SF 661676
-
-- Added Autosave feature: when user runs code from edit window, if the file
- has been modified IDLE will silently save it if Autosave is enabled. The
- option is set in the Options dialog, and the default is to prompt the
- user to save the file. SF 661318 Bruce Sherwood patch.
-
-- Improved the RESTART annotation in the shell window when the user restarts
- the shell while it is generating output. Also improved annotation when user
- repeatedly hammers the Ctrl-F6 restart.
-
-- Allow IDLE to run when not installed and cwd is not the IDLE directory
- SF Patch 686254 "Run IDLEfork from any directory without set-up" - Raphael
-
-- When a module is run from an EditorWindow: if its directory is not in
- sys.path, prepend it. This allows the module to import other modules in
- the same directory. Do the same for a script run from the command line.
-
-- Correctly restart the subprocess if it is running user code and the user
- attempts to run some other module or restarts the shell. Do the same if
- the link is broken and it is possible to restart the subprocess and re-
- connect to the GUI. SF RFE 661321.
-
-- Improved exception reporting when running commands or scripts from the
- command line.
-
-- Added a -n command line switch to start IDLE without the subprocess.
- Removed the Shell menu when running in that mode. Updated help messages.
-
-- Added a comment to the shell startup header to indicate when IDLE is not
- using the subprocess.
-
-- Restore the ability to run without the subprocess. This can be important for
- some platforms or configurations. (Running without the subprocess allows the
- debugger to trace through parts of IDLE itself, which may or may not be
- desirable, depending on your point of view. In addition, the traditional
- reload/import tricks must be use if user source code is changed.) This is
- helpful for developing IDLE using IDLE, because one instance can be used to
- edit the code and a separate instance run to test changes. (Multiple
- concurrent IDLE instances with subprocesses is a future feature)
-
-- Improve the error message a user gets when saving a file with non-ASCII
- characters and no source encoding is specified. Done by adding a dialog
- 'EncodingMessage', which contains the line to add in a fixed-font entry
- widget, and which has a button to add that line to the file automatically.
- Also, add a configuration option 'EditorWindow/encoding', which has three
- possible values: none, utf-8, and locale. None is the default: IDLE will show
- this dialog when non-ASCII characters are encountered. utf-8 means that files
- with non-ASCII characters are saved as utf-8-with-bom. locale means that
- files are saved in the locale's encoding; the dialog is only displayed if the
- source contains characters outside the locale's charset. SF 710733 - Loewis
-
-- Improved I/O response by tweaking the wait parameter in various
- calls to signal.signal().
-
-- Implemented a threaded subprocess which allows interrupting a pass
- loop in user code using the 'interrupt' extension. User code runs
- in MainThread, while the RPCServer is handled by SockThread. This is
- necessary because Windows doesn't support signals.
-
-- Implemented the 'interrupt' extension module, which allows a subthread
- to raise a KeyboardInterrupt in the main thread.
-
-- Attempting to save the shell raised an error related to saving
- breakpoints, which are not implemented in the shell
-
-- Provide a correct message when 'exit' or 'quit' are entered at the
- IDLE command prompt SF 695861
-
-- Eliminate extra blank line in shell output caused by not flushing
- stdout when user code ends with an unterminated print. SF 695861
-
-- Moved responsibility for exception formatting (i.e. pruning IDLE internal
- calls) out of rpc.py into the client and server.
-
-- Exit IDLE cleanly even when doing subprocess I/O
-
-- Handle subprocess interrupt with an RPC message.
-
-- Restart the subprocess if it terminates itself. (VPython programs do that)
-
-- Support subclassing of exceptions, including in the shell, by moving the
- exception formatting to the subprocess.
-
-
-
-What's New in IDLEfork 0.9 Alpha 2?
-===================================
-
-*Release date: 27-Jan-2003*
-
-- Updated INSTALL.txt to clarify use of the python2 rpm.
-
-- Improved formatting in IDLE Help.
-
-- Run menu: Replace "Run Script" with "Run Module".
-
-- Code encountering an unhandled exception under the debugger now shows
- the correct traceback, with IDLE internal levels pruned out.
-
-- If an exception occurs entirely in IDLE, don't prune the IDLE internal
- modules from the traceback displayed.
-
-- Class Browser and Path Browser now use Alt-Key-2 for vertical zoom.
-
-- IDLE icons will now install correctly even when setup.py is run from the
- build directory
-
-- Class Browser now compatible with Python2.3 version of pyclbr.py
-
-- Left cursor move in presence of selected text now moves from left end
- of the selection.
-
-- Add Meta keybindings to "IDLE Classic Windows" to handle reversed
- Alt/Meta on some Linux distros.
-
-- Change default: IDLE now starts with Python Shell.
-
-- Removed the File Path from the Additional Help Sources scrolled list.
-
-- Add capability to access Additional Help Sources on the web if the
- Help File Path begins with //http or www. (Otherwise local path is
- validated, as before.)
-
-- Additional Help Sources were not being posted on the Help menu in the
- order entered. Implement sorting the list by [HelpFiles] 'option'
- number.
-
-- Add Browse button to New Help Source dialog. Arrange to start in
- Python/Doc if platform is Windows, otherwise start in current directory.
-
-- Put the Additional Help Sources directly on the Help menu instead of in
- an Extra Help cascade menu. Rearrange the Help menu so the Additional
- Help Sources come last. Update help.txt appropriately.
-
-- Fix Tk root pop-ups in configSectionNameDialog.py and configDialog.py
-
-- Uniform capitalization in General tab of ConfigDialog, update the doc string.
-
-- Fix bug in ConfigDialog where SaveAllChangedConfig() was unexpectedly
- deleting Additional Help Sources from the user's config file.
-
-- Make configHelpSourceEdit OK button the default and bind <Return>
-
-- Fix Tk root pop-ups in configHelpSourceEdit: error dialogs not attached
- to parents.
-
-- Use os.startfile() to open both Additional Help and Python Help on the
- Windows platform. The application associated with the file type will act as
- the viewer. Windows help files (.chm) are now supported via the
- Settings/General/Additional Help facility.
-
-- If Python Help files are installed locally on Linux, use them instead of
- accessing python.org.
-
-- Make the methods for finding the Python help docs more robust, and make
- them work in the installed configuration, also.
-
-- On the Save Before Run dialog, make the OK button the default. One
- less mouse action!
-
-- Add a method: EditorWindow.get_geometry() for future use in implementing
- window location persistence.
-
-- Removed the "Help/Advice" menu entry. Thanks, David! We'll remember!
-
-- Change the "Classic Windows" theme's paste key to be <ctrl-v>.
-
-- Rearrange the Shell menu to put Stack Viewer entries adjacent.
-
-- Add the ability to restart the subprocess interpreter from the shell window;
- add an associated menu entry "Shell/Restart" with binding Control-F6. Update
- IDLE help.
-
-- Upon a restart, annotate the shell window with a "restart boundary". Add a
- shell window menu "Shell/View Restart" with binding F6 to jump to the most
- recent restart boundary.
-
-- Add Shell menu to Python Shell; change "Settings" to "Options".
-
-- Remove incorrect comment in setup.py: IDLEfork is now installed as a package.
-
-- Add INSTALL.txt, HISTORY.txt, NEWS.txt to installed configuration.
-
-- In installer text, fix reference to Visual Python, should be VPython.
- Properly credit David Scherer.
-
-- Modified idle, idle.py, idle.pyw to improve exception handling.
-
-
-What's New in IDLEfork 0.9 Alpha 1?
-===================================
-
-*Release date: 31-Dec-2002*
-
-- First release of major new functionality. For further details refer to
- Idle-dev and/or the Sourceforge CVS.
-
-- Adapted to the Mac platform.
-
-- Overhauled the IDLE startup options and revised the idle -h help message,
- which provides details of command line usage.
-
-- Multiple bug fixes and usability enhancements.
-
-- Introduced the new RPC implementation, which includes a debugger. The output
- of user code is to the shell, and the shell may be used to inspect the
- environment after the run has finished. (In version 0.8.1 the shell
- environment was separate from the environment of the user code.)
-
-- Introduced the configuration GUI and a new About dialog.
-
-- Removed David Scherer's Remote Procedure Call code and replaced with Guido
- van Rossum's. GvR code has support for the IDLE debugger and uses the shell
- to inspect the environment of code Run from an Edit window. Files removed:
- ExecBinding.py, loader.py, protocol.py, Remote.py, spawn.py
-
---------------------------------------------------------------------
-Refer to HISTORY.txt for additional information on earlier releases.
---------------------------------------------------------------------
-
-
-
-
-
diff --git a/sys/lib/python/idlelib/ObjectBrowser.py b/sys/lib/python/idlelib/ObjectBrowser.py
deleted file mode 100644
index a2a6cee49..000000000
--- a/sys/lib/python/idlelib/ObjectBrowser.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# XXX TO DO:
-# - popup menu
-# - support partial or total redisplay
-# - more doc strings
-# - tooltips
-
-# object browser
-
-# XXX TO DO:
-# - for classes/modules, add "open source" to object browser
-
-from TreeWidget import TreeItem, TreeNode, ScrolledCanvas
-
-from repr import Repr
-
-myrepr = Repr()
-myrepr.maxstring = 100
-myrepr.maxother = 100
-
-class ObjectTreeItem(TreeItem):
- def __init__(self, labeltext, object, setfunction=None):
- self.labeltext = labeltext
- self.object = object
- self.setfunction = setfunction
- def GetLabelText(self):
- return self.labeltext
- def GetText(self):
- return myrepr.repr(self.object)
- def GetIconName(self):
- if not self.IsExpandable():
- return "python"
- def IsEditable(self):
- return self.setfunction is not None
- def SetText(self, text):
- try:
- value = eval(text)
- self.setfunction(value)
- except:
- pass
- else:
- self.object = value
- def IsExpandable(self):
- return not not dir(self.object)
- def GetSubList(self):
- keys = dir(self.object)
- sublist = []
- for key in keys:
- try:
- value = getattr(self.object, key)
- except AttributeError:
- continue
- item = make_objecttreeitem(
- str(key) + " =",
- value,
- lambda value, key=key, object=self.object:
- setattr(object, key, value))
- sublist.append(item)
- return sublist
-
-class InstanceTreeItem(ObjectTreeItem):
- def IsExpandable(self):
- return True
- def GetSubList(self):
- sublist = ObjectTreeItem.GetSubList(self)
- sublist.insert(0,
- make_objecttreeitem("__class__ =", self.object.__class__))
- return sublist
-
-class ClassTreeItem(ObjectTreeItem):
- def IsExpandable(self):
- return True
- def GetSubList(self):
- sublist = ObjectTreeItem.GetSubList(self)
- if len(self.object.__bases__) == 1:
- item = make_objecttreeitem("__bases__[0] =",
- self.object.__bases__[0])
- else:
- item = make_objecttreeitem("__bases__ =", self.object.__bases__)
- sublist.insert(0, item)
- return sublist
-
-class AtomicObjectTreeItem(ObjectTreeItem):
- def IsExpandable(self):
- return 0
-
-class SequenceTreeItem(ObjectTreeItem):
- def IsExpandable(self):
- return len(self.object) > 0
- def keys(self):
- return range(len(self.object))
- def GetSubList(self):
- sublist = []
- for key in self.keys():
- try:
- value = self.object[key]
- except KeyError:
- continue
- def setfunction(value, key=key, object=self.object):
- object[key] = value
- item = make_objecttreeitem("%r:" % (key,), value, setfunction)
- sublist.append(item)
- return sublist
-
-class DictTreeItem(SequenceTreeItem):
- def keys(self):
- keys = self.object.keys()
- try:
- keys.sort()
- except:
- pass
- return keys
-
-from types import *
-
-dispatch = {
- IntType: AtomicObjectTreeItem,
- LongType: AtomicObjectTreeItem,
- FloatType: AtomicObjectTreeItem,
- StringType: AtomicObjectTreeItem,
- TupleType: SequenceTreeItem,
- ListType: SequenceTreeItem,
- DictType: DictTreeItem,
- InstanceType: InstanceTreeItem,
- ClassType: ClassTreeItem,
-}
-
-def make_objecttreeitem(labeltext, object, setfunction=None):
- t = type(object)
- if dispatch.has_key(t):
- c = dispatch[t]
- else:
- c = ObjectTreeItem
- return c(labeltext, object, setfunction)
-
-# Test script
-
-def _test():
- import sys
- from Tkinter import Tk
- root = Tk()
- root.configure(bd=0, bg="yellow")
- root.focus_set()
- sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
- sc.frame.pack(expand=1, fill="both")
- item = make_objecttreeitem("sys", sys)
- node = TreeNode(sc.canvas, None, item)
- node.update()
- root.mainloop()
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/idlelib/OutputWindow.py b/sys/lib/python/idlelib/OutputWindow.py
deleted file mode 100644
index 787e9b0bb..000000000
--- a/sys/lib/python/idlelib/OutputWindow.py
+++ /dev/null
@@ -1,157 +0,0 @@
-from Tkinter import *
-from EditorWindow import EditorWindow
-import re
-import tkMessageBox
-import IOBinding
-
-class OutputWindow(EditorWindow):
-
- """An editor window that can serve as an output file.
-
- Also the future base class for the Python shell window.
- This class has no input facilities.
- """
-
- def __init__(self, *args):
- EditorWindow.__init__(self, *args)
- self.text.bind("<<goto-file-line>>", self.goto_file_line)
-
- # Customize EditorWindow
-
- def ispythonsource(self, filename):
- # No colorization needed
- return 0
-
- def short_title(self):
- return "Output"
-
- def maybesave(self):
- # Override base class method -- don't ask any questions
- if self.get_saved():
- return "yes"
- else:
- return "no"
-
- # Act as output file
-
- def write(self, s, tags=(), mark="insert"):
- # Tk assumes that byte strings are Latin-1;
- # we assume that they are in the locale's encoding
- if isinstance(s, str):
- try:
- s = unicode(s, IOBinding.encoding)
- except UnicodeError:
- # some other encoding; let Tcl deal with it
- pass
- self.text.insert(mark, s, tags)
- self.text.see(mark)
- self.text.update()
-
- def writelines(self, l):
- map(self.write, l)
-
- def flush(self):
- pass
-
- # Our own right-button menu
-
- rmenu_specs = [
- ("Go to file/line", "<<goto-file-line>>"),
- ]
-
- file_line_pats = [
- r'file "([^"]*)", line (\d+)',
- r'([^\s]+)\((\d+)\)',
- r'([^\s]+):\s*(\d+):',
- ]
-
- file_line_progs = None
-
- def goto_file_line(self, event=None):
- if self.file_line_progs is None:
- l = []
- for pat in self.file_line_pats:
- l.append(re.compile(pat, re.IGNORECASE))
- self.file_line_progs = l
- # x, y = self.event.x, self.event.y
- # self.text.mark_set("insert", "@%d,%d" % (x, y))
- line = self.text.get("insert linestart", "insert lineend")
- result = self._file_line_helper(line)
- if not result:
- # Try the previous line. This is handy e.g. in tracebacks,
- # where you tend to right-click on the displayed source line
- line = self.text.get("insert -1line linestart",
- "insert -1line lineend")
- result = self._file_line_helper(line)
- if not result:
- tkMessageBox.showerror(
- "No special line",
- "The line you point at doesn't look like "
- "a valid file name followed by a line number.",
- master=self.text)
- return
- filename, lineno = result
- edit = self.flist.open(filename)
- edit.gotoline(lineno)
-
- def _file_line_helper(self, line):
- for prog in self.file_line_progs:
- m = prog.search(line)
- if m:
- break
- else:
- return None
- filename, lineno = m.group(1, 2)
- try:
- f = open(filename, "r")
- f.close()
- except IOError:
- return None
- try:
- return filename, int(lineno)
- except TypeError:
- return None
-
-# These classes are currently not used but might come in handy
-
-class OnDemandOutputWindow:
-
- tagdefs = {
- # XXX Should use IdlePrefs.ColorPrefs
- "stdout": {"foreground": "blue"},
- "stderr": {"foreground": "#007700"},
- }
-
- def __init__(self, flist):
- self.flist = flist
- self.owin = None
-
- def write(self, s, tags, mark):
- if not self.owin:
- self.setup()
- self.owin.write(s, tags, mark)
-
- def setup(self):
- self.owin = owin = OutputWindow(self.flist)
- text = owin.text
- for tag, cnf in self.tagdefs.items():
- if cnf:
- text.tag_configure(tag, **cnf)
- text.tag_raise('sel')
- self.write = self.owin.write
-
-#class PseudoFile:
-#
-# def __init__(self, owin, tags, mark="end"):
-# self.owin = owin
-# self.tags = tags
-# self.mark = mark
-
-# def write(self, s):
-# self.owin.write(s, self.tags, self.mark)
-
-# def writelines(self, l):
-# map(self.write, l)
-
-# def flush(self):
-# pass
diff --git a/sys/lib/python/idlelib/ParenMatch.py b/sys/lib/python/idlelib/ParenMatch.py
deleted file mode 100644
index 250ae8b70..000000000
--- a/sys/lib/python/idlelib/ParenMatch.py
+++ /dev/null
@@ -1,172 +0,0 @@
-"""ParenMatch -- An IDLE extension for parenthesis matching.
-
-When you hit a right paren, the cursor should move briefly to the left
-paren. Paren here is used generically; the matching applies to
-parentheses, square brackets, and curly braces.
-"""
-
-from HyperParser import HyperParser
-from configHandler import idleConf
-
-_openers = {')':'(',']':'[','}':'{'}
-CHECK_DELAY = 100 # milliseconds
-
-class ParenMatch:
- """Highlight matching parentheses
-
- There are three supported styles of paren matching, based loosely
- on the Emacs options. The style is selected based on the
- HILITE_STYLE attribute; it can be changed using the set_style
- method.
-
- The supported styles are:
-
- default -- When a right paren is typed, highlight the matching
- left paren for 1/2 sec.
-
- expression -- When a right paren is typed, highlight the entire
- expression from the left paren to the right paren.
-
- TODO:
- - extend IDLE with configuration dialog to change options
- - implement rest of Emacs highlight styles (see below)
- - print mismatch warning in IDLE status window
-
- Note: In Emacs, there are several styles of highlight where the
- matching paren is highlighted whenever the cursor is immediately
- to the right of a right paren. I don't know how to do that in Tk,
- so I haven't bothered.
- """
- menudefs = [
- ('edit', [
- ("Show surrounding parens", "<<flash-paren>>"),
- ])
- ]
- STYLE = idleConf.GetOption('extensions','ParenMatch','style',
- default='expression')
- FLASH_DELAY = idleConf.GetOption('extensions','ParenMatch','flash-delay',
- type='int',default=500)
- HILITE_CONFIG = idleConf.GetHighlight(idleConf.CurrentTheme(),'hilite')
- BELL = idleConf.GetOption('extensions','ParenMatch','bell',
- type='bool',default=1)
-
- RESTORE_VIRTUAL_EVENT_NAME = "<<parenmatch-check-restore>>"
- # We want the restore event be called before the usual return and
- # backspace events.
- RESTORE_SEQUENCES = ("<KeyPress>", "<ButtonPress>",
- "<Key-Return>", "<Key-BackSpace>")
-
- def __init__(self, editwin):
- self.editwin = editwin
- self.text = editwin.text
- # Bind the check-restore event to the function restore_event,
- # so that we can then use activate_restore (which calls event_add)
- # and deactivate_restore (which calls event_delete).
- editwin.text.bind(self.RESTORE_VIRTUAL_EVENT_NAME,
- self.restore_event)
- self.counter = 0
- self.is_restore_active = 0
- self.set_style(self.STYLE)
-
- def activate_restore(self):
- if not self.is_restore_active:
- for seq in self.RESTORE_SEQUENCES:
- self.text.event_add(self.RESTORE_VIRTUAL_EVENT_NAME, seq)
- self.is_restore_active = True
-
- def deactivate_restore(self):
- if self.is_restore_active:
- for seq in self.RESTORE_SEQUENCES:
- self.text.event_delete(self.RESTORE_VIRTUAL_EVENT_NAME, seq)
- self.is_restore_active = False
-
- def set_style(self, style):
- self.STYLE = style
- if style == "default":
- self.create_tag = self.create_tag_default
- self.set_timeout = self.set_timeout_last
- elif style == "expression":
- self.create_tag = self.create_tag_expression
- self.set_timeout = self.set_timeout_none
-
- def flash_paren_event(self, event):
- indices = HyperParser(self.editwin, "insert").get_surrounding_brackets()
- if indices is None:
- self.warn_mismatched()
- return
- self.activate_restore()
- self.create_tag(indices)
- self.set_timeout_last()
-
- def paren_closed_event(self, event):
- # If it was a shortcut and not really a closing paren, quit.
- closer = self.text.get("insert-1c")
- if closer not in _openers:
- return
- hp = HyperParser(self.editwin, "insert-1c")
- if not hp.is_in_code():
- return
- indices = hp.get_surrounding_brackets(_openers[closer], True)
- if indices is None:
- self.warn_mismatched()
- return
- self.activate_restore()
- self.create_tag(indices)
- self.set_timeout()
-
- def restore_event(self, event=None):
- self.text.tag_delete("paren")
- self.deactivate_restore()
- self.counter += 1 # disable the last timer, if there is one.
-
- def handle_restore_timer(self, timer_count):
- if timer_count == self.counter:
- self.restore_event()
-
- def warn_mismatched(self):
- if self.BELL:
- self.text.bell()
-
- # any one of the create_tag_XXX methods can be used depending on
- # the style
-
- def create_tag_default(self, indices):
- """Highlight the single paren that matches"""
- self.text.tag_add("paren", indices[0])
- self.text.tag_config("paren", self.HILITE_CONFIG)
-
- def create_tag_expression(self, indices):
- """Highlight the entire expression"""
- if self.text.get(indices[1]) in (')', ']', '}'):
- rightindex = indices[1]+"+1c"
- else:
- rightindex = indices[1]
- self.text.tag_add("paren", indices[0], rightindex)
- self.text.tag_config("paren", self.HILITE_CONFIG)
-
- # any one of the set_timeout_XXX methods can be used depending on
- # the style
-
- def set_timeout_none(self):
- """Highlight will remain until user input turns it off
- or the insert has moved"""
- # After CHECK_DELAY, call a function which disables the "paren" tag
- # if the event is for the most recent timer and the insert has changed,
- # or schedules another call for itself.
- self.counter += 1
- def callme(callme, self=self, c=self.counter,
- index=self.text.index("insert")):
- if index != self.text.index("insert"):
- self.handle_restore_timer(c)
- else:
- self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
- self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
-
- def set_timeout_last(self):
- """The last highlight created will be removed after .5 sec"""
- # associate a counter with an event; only disable the "paren"
- # tag if the event is for the most recent timer.
- self.counter += 1
- self.editwin.text_frame.after(self.FLASH_DELAY,
- lambda self=self, c=self.counter: \
- self.handle_restore_timer(c))
diff --git a/sys/lib/python/idlelib/PathBrowser.py b/sys/lib/python/idlelib/PathBrowser.py
deleted file mode 100644
index 86cd2707d..000000000
--- a/sys/lib/python/idlelib/PathBrowser.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import os
-import sys
-import imp
-
-from TreeWidget import TreeItem
-from ClassBrowser import ClassBrowser, ModuleBrowserTreeItem
-
-class PathBrowser(ClassBrowser):
-
- def __init__(self, flist):
- self.init(flist)
-
- def settitle(self):
- self.top.wm_title("Path Browser")
- self.top.wm_iconname("Path Browser")
-
- def rootnode(self):
- return PathBrowserTreeItem()
-
-class PathBrowserTreeItem(TreeItem):
-
- def GetText(self):
- return "sys.path"
-
- def GetSubList(self):
- sublist = []
- for dir in sys.path:
- item = DirBrowserTreeItem(dir)
- sublist.append(item)
- return sublist
-
-class DirBrowserTreeItem(TreeItem):
-
- def __init__(self, dir, packages=[]):
- self.dir = dir
- self.packages = packages
-
- def GetText(self):
- if not self.packages:
- return self.dir
- else:
- return self.packages[-1] + ": package"
-
- def GetSubList(self):
- try:
- names = os.listdir(self.dir or os.curdir)
- except os.error:
- return []
- packages = []
- for name in names:
- file = os.path.join(self.dir, name)
- if self.ispackagedir(file):
- nn = os.path.normcase(name)
- packages.append((nn, name, file))
- packages.sort()
- sublist = []
- for nn, name, file in packages:
- item = DirBrowserTreeItem(file, self.packages + [name])
- sublist.append(item)
- for nn, name in self.listmodules(names):
- item = ModuleBrowserTreeItem(os.path.join(self.dir, name))
- sublist.append(item)
- return sublist
-
- def ispackagedir(self, file):
- if not os.path.isdir(file):
- return 0
- init = os.path.join(file, "__init__.py")
- return os.path.exists(init)
-
- def listmodules(self, allnames):
- modules = {}
- suffixes = imp.get_suffixes()
- sorted = []
- for suff, mode, flag in suffixes:
- i = -len(suff)
- for name in allnames[:]:
- normed_name = os.path.normcase(name)
- if normed_name[i:] == suff:
- mod_name = name[:i]
- if not modules.has_key(mod_name):
- modules[mod_name] = None
- sorted.append((normed_name, name))
- allnames.remove(name)
- sorted.sort()
- return sorted
-
-def main():
- import PyShell
- PathBrowser(PyShell.flist)
- if sys.stdin is sys.__stdin__:
- mainloop()
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/idlelib/Percolator.py b/sys/lib/python/idlelib/Percolator.py
deleted file mode 100644
index ebbcba9e6..000000000
--- a/sys/lib/python/idlelib/Percolator.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from WidgetRedirector import WidgetRedirector
-from Delegator import Delegator
-
-class Percolator:
-
- def __init__(self, text):
- # XXX would be nice to inherit from Delegator
- self.text = text
- self.redir = WidgetRedirector(text)
- self.top = self.bottom = Delegator(text)
- self.bottom.insert = self.redir.register("insert", self.insert)
- self.bottom.delete = self.redir.register("delete", self.delete)
- self.filters = []
-
- def close(self):
- while self.top is not self.bottom:
- self.removefilter(self.top)
- self.top = None
- self.bottom.setdelegate(None); self.bottom = None
- self.redir.close(); self.redir = None
- self.text = None
-
- def insert(self, index, chars, tags=None):
- # Could go away if inheriting from Delegator
- self.top.insert(index, chars, tags)
-
- def delete(self, index1, index2=None):
- # Could go away if inheriting from Delegator
- self.top.delete(index1, index2)
-
- def insertfilter(self, filter):
- # Perhaps rename to pushfilter()?
- assert isinstance(filter, Delegator)
- assert filter.delegate is None
- filter.setdelegate(self.top)
- self.top = filter
-
- def removefilter(self, filter):
- # XXX Perhaps should only support popfilter()?
- assert isinstance(filter, Delegator)
- assert filter.delegate is not None
- f = self.top
- if f is filter:
- self.top = filter.delegate
- filter.setdelegate(None)
- else:
- while f.delegate is not filter:
- assert f is not self.bottom
- f.resetcache()
- f = f.delegate
- f.setdelegate(filter.delegate)
- filter.setdelegate(None)
-
-
-def main():
- class Tracer(Delegator):
- def __init__(self, name):
- self.name = name
- Delegator.__init__(self, None)
- def insert(self, *args):
- print self.name, ": insert", args
- self.delegate.insert(*args)
- def delete(self, *args):
- print self.name, ": delete", args
- self.delegate.delete(*args)
- root = Tk()
- root.wm_protocol("WM_DELETE_WINDOW", root.quit)
- text = Text()
- text.pack()
- text.focus_set()
- p = Percolator(text)
- t1 = Tracer("t1")
- t2 = Tracer("t2")
- p.insertfilter(t1)
- p.insertfilter(t2)
- root.mainloop()
- p.removefilter(t2)
- root.mainloop()
- p.insertfilter(t2)
- p.removefilter(t1)
- root.mainloop()
-
-if __name__ == "__main__":
- from Tkinter import *
- main()
diff --git a/sys/lib/python/idlelib/PyParse.py b/sys/lib/python/idlelib/PyParse.py
deleted file mode 100644
index 1a9db6743..000000000
--- a/sys/lib/python/idlelib/PyParse.py
+++ /dev/null
@@ -1,594 +0,0 @@
-import re
-import sys
-
-# Reason last stmt is continued (or C_NONE if it's not).
-(C_NONE, C_BACKSLASH, C_STRING_FIRST_LINE,
- C_STRING_NEXT_LINES, C_BRACKET) = range(5)
-
-if 0: # for throwaway debugging output
- def dump(*stuff):
- sys.__stdout__.write(" ".join(map(str, stuff)) + "\n")
-
-# Find what looks like the start of a popular stmt.
-
-_synchre = re.compile(r"""
- ^
- [ \t]*
- (?: while
- | else
- | def
- | return
- | assert
- | break
- | class
- | continue
- | elif
- | try
- | except
- | raise
- | import
- | yield
- )
- \b
-""", re.VERBOSE | re.MULTILINE).search
-
-# Match blank line or non-indenting comment line.
-
-_junkre = re.compile(r"""
- [ \t]*
- (?: \# \S .* )?
- \n
-""", re.VERBOSE).match
-
-# Match any flavor of string; the terminating quote is optional
-# so that we're robust in the face of incomplete program text.
-
-_match_stringre = re.compile(r"""
- \""" [^"\\]* (?:
- (?: \\. | "(?!"") )
- [^"\\]*
- )*
- (?: \""" )?
-
-| " [^"\\\n]* (?: \\. [^"\\\n]* )* "?
-
-| ''' [^'\\]* (?:
- (?: \\. | '(?!'') )
- [^'\\]*
- )*
- (?: ''' )?
-
-| ' [^'\\\n]* (?: \\. [^'\\\n]* )* '?
-""", re.VERBOSE | re.DOTALL).match
-
-# Match a line that starts with something interesting;
-# used to find the first item of a bracket structure.
-
-_itemre = re.compile(r"""
- [ \t]*
- [^\s#\\] # if we match, m.end()-1 is the interesting char
-""", re.VERBOSE).match
-
-# Match start of stmts that should be followed by a dedent.
-
-_closere = re.compile(r"""
- \s*
- (?: return
- | break
- | continue
- | raise
- | pass
- )
- \b
-""", re.VERBOSE).match
-
-# Chew up non-special chars as quickly as possible. If match is
-# successful, m.end() less 1 is the index of the last boring char
-# matched. If match is unsuccessful, the string starts with an
-# interesting char.
-
-_chew_ordinaryre = re.compile(r"""
- [^[\](){}#'"\\]+
-""", re.VERBOSE).match
-
-# Build translation table to map uninteresting chars to "x", open
-# brackets to "(", and close brackets to ")".
-
-_tran = ['x'] * 256
-for ch in "({[":
- _tran[ord(ch)] = '('
-for ch in ")}]":
- _tran[ord(ch)] = ')'
-for ch in "\"'\\\n#":
- _tran[ord(ch)] = ch
-_tran = ''.join(_tran)
-del ch
-
-try:
- UnicodeType = type(unicode(""))
-except NameError:
- UnicodeType = None
-
-class Parser:
-
- def __init__(self, indentwidth, tabwidth):
- self.indentwidth = indentwidth
- self.tabwidth = tabwidth
-
- def set_str(self, str):
- assert len(str) == 0 or str[-1] == '\n'
- if type(str) is UnicodeType:
- # The parse functions have no idea what to do with Unicode, so
- # replace all Unicode characters with "x". This is "safe"
- # so long as the only characters germane to parsing the structure
- # of Python are 7-bit ASCII. It's *necessary* because Unicode
- # strings don't have a .translate() method that supports
- # deletechars.
- uniphooey = str
- str = []
- push = str.append
- for raw in map(ord, uniphooey):
- push(raw < 127 and chr(raw) or "x")
- str = "".join(str)
- self.str = str
- self.study_level = 0
-
- # Return index of a good place to begin parsing, as close to the
- # end of the string as possible. This will be the start of some
- # popular stmt like "if" or "def". Return None if none found:
- # the caller should pass more prior context then, if possible, or
- # if not (the entire program text up until the point of interest
- # has already been tried) pass 0 to set_lo.
- #
- # This will be reliable iff given a reliable is_char_in_string
- # function, meaning that when it says "no", it's absolutely
- # guaranteed that the char is not in a string.
-
- def find_good_parse_start(self, is_char_in_string=None,
- _synchre=_synchre):
- str, pos = self.str, None
-
- if not is_char_in_string:
- # no clue -- make the caller pass everything
- return None
-
- # Peek back from the end for a good place to start,
- # but don't try too often; pos will be left None, or
- # bumped to a legitimate synch point.
- limit = len(str)
- for tries in range(5):
- i = str.rfind(":\n", 0, limit)
- if i < 0:
- break
- i = str.rfind('\n', 0, i) + 1 # start of colon line
- m = _synchre(str, i, limit)
- if m and not is_char_in_string(m.start()):
- pos = m.start()
- break
- limit = i
- if pos is None:
- # Nothing looks like a block-opener, or stuff does
- # but is_char_in_string keeps returning true; most likely
- # we're in or near a giant string, the colorizer hasn't
- # caught up enough to be helpful, or there simply *aren't*
- # any interesting stmts. In any of these cases we're
- # going to have to parse the whole thing to be sure, so
- # give it one last try from the start, but stop wasting
- # time here regardless of the outcome.
- m = _synchre(str)
- if m and not is_char_in_string(m.start()):
- pos = m.start()
- return pos
-
- # Peeking back worked; look forward until _synchre no longer
- # matches.
- i = pos + 1
- while 1:
- m = _synchre(str, i)
- if m:
- s, i = m.span()
- if not is_char_in_string(s):
- pos = s
- else:
- break
- return pos
-
- # Throw away the start of the string. Intended to be called with
- # find_good_parse_start's result.
-
- def set_lo(self, lo):
- assert lo == 0 or self.str[lo-1] == '\n'
- if lo > 0:
- self.str = self.str[lo:]
-
- # As quickly as humanly possible <wink>, find the line numbers (0-
- # based) of the non-continuation lines.
- # Creates self.{goodlines, continuation}.
-
- def _study1(self):
- if self.study_level >= 1:
- return
- self.study_level = 1
-
- # Map all uninteresting characters to "x", all open brackets
- # to "(", all close brackets to ")", then collapse runs of
- # uninteresting characters. This can cut the number of chars
- # by a factor of 10-40, and so greatly speed the following loop.
- str = self.str
- str = str.translate(_tran)
- str = str.replace('xxxxxxxx', 'x')
- str = str.replace('xxxx', 'x')
- str = str.replace('xx', 'x')
- str = str.replace('xx', 'x')
- str = str.replace('\nx', '\n')
- # note that replacing x\n with \n would be incorrect, because
- # x may be preceded by a backslash
-
- # March over the squashed version of the program, accumulating
- # the line numbers of non-continued stmts, and determining
- # whether & why the last stmt is a continuation.
- continuation = C_NONE
- level = lno = 0 # level is nesting level; lno is line number
- self.goodlines = goodlines = [0]
- push_good = goodlines.append
- i, n = 0, len(str)
- while i < n:
- ch = str[i]
- i = i+1
-
- # cases are checked in decreasing order of frequency
- if ch == 'x':
- continue
-
- if ch == '\n':
- lno = lno + 1
- if level == 0:
- push_good(lno)
- # else we're in an unclosed bracket structure
- continue
-
- if ch == '(':
- level = level + 1
- continue
-
- if ch == ')':
- if level:
- level = level - 1
- # else the program is invalid, but we can't complain
- continue
-
- if ch == '"' or ch == "'":
- # consume the string
- quote = ch
- if str[i-1:i+2] == quote * 3:
- quote = quote * 3
- firstlno = lno
- w = len(quote) - 1
- i = i+w
- while i < n:
- ch = str[i]
- i = i+1
-
- if ch == 'x':
- continue
-
- if str[i-1:i+w] == quote:
- i = i+w
- break
-
- if ch == '\n':
- lno = lno + 1
- if w == 0:
- # unterminated single-quoted string
- if level == 0:
- push_good(lno)
- break
- continue
-
- if ch == '\\':
- assert i < n
- if str[i] == '\n':
- lno = lno + 1
- i = i+1
- continue
-
- # else comment char or paren inside string
-
- else:
- # didn't break out of the loop, so we're still
- # inside a string
- if (lno - 1) == firstlno:
- # before the previous \n in str, we were in the first
- # line of the string
- continuation = C_STRING_FIRST_LINE
- else:
- continuation = C_STRING_NEXT_LINES
- continue # with outer loop
-
- if ch == '#':
- # consume the comment
- i = str.find('\n', i)
- assert i >= 0
- continue
-
- assert ch == '\\'
- assert i < n
- if str[i] == '\n':
- lno = lno + 1
- if i+1 == n:
- continuation = C_BACKSLASH
- i = i+1
-
- # The last stmt may be continued for all 3 reasons.
- # String continuation takes precedence over bracket
- # continuation, which beats backslash continuation.
- if (continuation != C_STRING_FIRST_LINE
- and continuation != C_STRING_NEXT_LINES and level > 0):
- continuation = C_BRACKET
- self.continuation = continuation
-
- # Push the final line number as a sentinel value, regardless of
- # whether it's continued.
- assert (continuation == C_NONE) == (goodlines[-1] == lno)
- if goodlines[-1] != lno:
- push_good(lno)
-
- def get_continuation_type(self):
- self._study1()
- return self.continuation
-
- # study1 was sufficient to determine the continuation status,
- # but doing more requires looking at every character. study2
- # does this for the last interesting statement in the block.
- # Creates:
- # self.stmt_start, stmt_end
- # slice indices of last interesting stmt
- # self.stmt_bracketing
- # the bracketing structure of the last interesting stmt;
- # for example, for the statement "say(boo) or die", stmt_bracketing
- # will be [(0, 0), (3, 1), (8, 0)]. Strings and comments are
- # treated as brackets, for the matter.
- # self.lastch
- # last non-whitespace character before optional trailing
- # comment
- # self.lastopenbracketpos
- # if continuation is C_BRACKET, index of last open bracket
-
- def _study2(self):
- if self.study_level >= 2:
- return
- self._study1()
- self.study_level = 2
-
- # Set p and q to slice indices of last interesting stmt.
- str, goodlines = self.str, self.goodlines
- i = len(goodlines) - 1
- p = len(str) # index of newest line
- while i:
- assert p
- # p is the index of the stmt at line number goodlines[i].
- # Move p back to the stmt at line number goodlines[i-1].
- q = p
- for nothing in range(goodlines[i-1], goodlines[i]):
- # tricky: sets p to 0 if no preceding newline
- p = str.rfind('\n', 0, p-1) + 1
- # The stmt str[p:q] isn't a continuation, but may be blank
- # or a non-indenting comment line.
- if _junkre(str, p):
- i = i-1
- else:
- break
- if i == 0:
- # nothing but junk!
- assert p == 0
- q = p
- self.stmt_start, self.stmt_end = p, q
-
- # Analyze this stmt, to find the last open bracket (if any)
- # and last interesting character (if any).
- lastch = ""
- stack = [] # stack of open bracket indices
- push_stack = stack.append
- bracketing = [(p, 0)]
- while p < q:
- # suck up all except ()[]{}'"#\\
- m = _chew_ordinaryre(str, p, q)
- if m:
- # we skipped at least one boring char
- newp = m.end()
- # back up over totally boring whitespace
- i = newp - 1 # index of last boring char
- while i >= p and str[i] in " \t\n":
- i = i-1
- if i >= p:
- lastch = str[i]
- p = newp
- if p >= q:
- break
-
- ch = str[p]
-
- if ch in "([{":
- push_stack(p)
- bracketing.append((p, len(stack)))
- lastch = ch
- p = p+1
- continue
-
- if ch in ")]}":
- if stack:
- del stack[-1]
- lastch = ch
- p = p+1
- bracketing.append((p, len(stack)))
- continue
-
- if ch == '"' or ch == "'":
- # consume string
- # Note that study1 did this with a Python loop, but
- # we use a regexp here; the reason is speed in both
- # cases; the string may be huge, but study1 pre-squashed
- # strings to a couple of characters per line. study1
- # also needed to keep track of newlines, and we don't
- # have to.
- bracketing.append((p, len(stack)+1))
- lastch = ch
- p = _match_stringre(str, p, q).end()
- bracketing.append((p, len(stack)))
- continue
-
- if ch == '#':
- # consume comment and trailing newline
- bracketing.append((p, len(stack)+1))
- p = str.find('\n', p, q) + 1
- assert p > 0
- bracketing.append((p, len(stack)))
- continue
-
- assert ch == '\\'
- p = p+1 # beyond backslash
- assert p < q
- if str[p] != '\n':
- # the program is invalid, but can't complain
- lastch = ch + str[p]
- p = p+1 # beyond escaped char
-
- # end while p < q:
-
- self.lastch = lastch
- if stack:
- self.lastopenbracketpos = stack[-1]
- self.stmt_bracketing = tuple(bracketing)
-
- # Assuming continuation is C_BRACKET, return the number
- # of spaces the next line should be indented.
-
- def compute_bracket_indent(self):
- self._study2()
- assert self.continuation == C_BRACKET
- j = self.lastopenbracketpos
- str = self.str
- n = len(str)
- origi = i = str.rfind('\n', 0, j) + 1
- j = j+1 # one beyond open bracket
- # find first list item; set i to start of its line
- while j < n:
- m = _itemre(str, j)
- if m:
- j = m.end() - 1 # index of first interesting char
- extra = 0
- break
- else:
- # this line is junk; advance to next line
- i = j = str.find('\n', j) + 1
- else:
- # nothing interesting follows the bracket;
- # reproduce the bracket line's indentation + a level
- j = i = origi
- while str[j] in " \t":
- j = j+1
- extra = self.indentwidth
- return len(str[i:j].expandtabs(self.tabwidth)) + extra
-
- # Return number of physical lines in last stmt (whether or not
- # it's an interesting stmt! this is intended to be called when
- # continuation is C_BACKSLASH).
-
- def get_num_lines_in_stmt(self):
- self._study1()
- goodlines = self.goodlines
- return goodlines[-1] - goodlines[-2]
-
- # Assuming continuation is C_BACKSLASH, return the number of spaces
- # the next line should be indented. Also assuming the new line is
- # the first one following the initial line of the stmt.
-
- def compute_backslash_indent(self):
- self._study2()
- assert self.continuation == C_BACKSLASH
- str = self.str
- i = self.stmt_start
- while str[i] in " \t":
- i = i+1
- startpos = i
-
- # See whether the initial line starts an assignment stmt; i.e.,
- # look for an = operator
- endpos = str.find('\n', startpos) + 1
- found = level = 0
- while i < endpos:
- ch = str[i]
- if ch in "([{":
- level = level + 1
- i = i+1
- elif ch in ")]}":
- if level:
- level = level - 1
- i = i+1
- elif ch == '"' or ch == "'":
- i = _match_stringre(str, i, endpos).end()
- elif ch == '#':
- break
- elif level == 0 and ch == '=' and \
- (i == 0 or str[i-1] not in "=<>!") and \
- str[i+1] != '=':
- found = 1
- break
- else:
- i = i+1
-
- if found:
- # found a legit =, but it may be the last interesting
- # thing on the line
- i = i+1 # move beyond the =
- found = re.match(r"\s*\\", str[i:endpos]) is None
-
- if not found:
- # oh well ... settle for moving beyond the first chunk
- # of non-whitespace chars
- i = startpos
- while str[i] not in " \t\n":
- i = i+1
-
- return len(str[self.stmt_start:i].expandtabs(\
- self.tabwidth)) + 1
-
- # Return the leading whitespace on the initial line of the last
- # interesting stmt.
-
- def get_base_indent_string(self):
- self._study2()
- i, n = self.stmt_start, self.stmt_end
- j = i
- str = self.str
- while j < n and str[j] in " \t":
- j = j + 1
- return str[i:j]
-
- # Did the last interesting stmt open a block?
-
- def is_block_opener(self):
- self._study2()
- return self.lastch == ':'
-
- # Did the last interesting stmt close a block?
-
- def is_block_closer(self):
- self._study2()
- return _closere(self.str, self.stmt_start) is not None
-
- # index of last open bracket ({[, or None if none
- lastopenbracketpos = None
-
- def get_last_open_bracket_pos(self):
- self._study2()
- return self.lastopenbracketpos
-
- # the structure of the bracketing of the last interesting statement,
- # in the format defined in _study2, or None if the text didn't contain
- # anything
- stmt_bracketing = None
-
- def get_last_stmt_bracketing(self):
- self._study2()
- return self.stmt_bracketing
diff --git a/sys/lib/python/idlelib/PyShell.py b/sys/lib/python/idlelib/PyShell.py
deleted file mode 100644
index 1bdd0a64c..000000000
--- a/sys/lib/python/idlelib/PyShell.py
+++ /dev/null
@@ -1,1441 +0,0 @@
-#! /usr/bin/env python
-
-import os
-import os.path
-import sys
-import string
-import getopt
-import re
-import socket
-import time
-import threading
-import traceback
-import types
-import macosxSupport
-
-import linecache
-from code import InteractiveInterpreter
-
-try:
- from Tkinter import *
-except ImportError:
- print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
- "Your Python may not be configured for Tk. **"
- sys.exit(1)
-import tkMessageBox
-
-from EditorWindow import EditorWindow, fixwordbreaks
-from FileList import FileList
-from ColorDelegator import ColorDelegator
-from UndoDelegator import UndoDelegator
-from OutputWindow import OutputWindow
-from configHandler import idleConf
-import idlever
-
-import rpc
-import Debugger
-import RemoteDebugger
-
-IDENTCHARS = string.ascii_letters + string.digits + "_"
-LOCALHOST = '127.0.0.1'
-
-try:
- from signal import SIGTERM
-except ImportError:
- SIGTERM = 15
-
-# Override warnings module to write to warning_stream. Initialize to send IDLE
-# internal warnings to the console. ScriptBinding.check_syntax() will
-# temporarily redirect the stream to the shell window to display warnings when
-# checking user's code.
-global warning_stream
-warning_stream = sys.__stderr__
-try:
- import warnings
-except ImportError:
- pass
-else:
- def idle_showwarning(message, category, filename, lineno):
- file = warning_stream
- try:
- file.write(warnings.formatwarning(message, category, filename, lineno))
- except IOError:
- pass ## file (probably __stderr__) is invalid, warning dropped.
- warnings.showwarning = idle_showwarning
- def idle_formatwarning(message, category, filename, lineno):
- """Format warnings the IDLE way"""
- s = "\nWarning (from warnings module):\n"
- s += ' File \"%s\", line %s\n' % (filename, lineno)
- line = linecache.getline(filename, lineno).strip()
- if line:
- s += " %s\n" % line
- s += "%s: %s\n>>> " % (category.__name__, message)
- return s
- warnings.formatwarning = idle_formatwarning
-
-def extended_linecache_checkcache(filename=None,
- orig_checkcache=linecache.checkcache):
- """Extend linecache.checkcache to preserve the <pyshell#...> entries
-
- Rather than repeating the linecache code, patch it to save the
- <pyshell#...> entries, call the original linecache.checkcache()
- (which destroys them), and then restore the saved entries.
-
- orig_checkcache is bound at definition time to the original
- method, allowing it to be patched.
-
- """
- cache = linecache.cache
- save = {}
- for filename in cache.keys():
- if filename[:1] + filename[-1:] == '<>':
- save[filename] = cache[filename]
- orig_checkcache()
- cache.update(save)
-
-# Patch linecache.checkcache():
-linecache.checkcache = extended_linecache_checkcache
-
-
-class PyShellEditorWindow(EditorWindow):
- "Regular text edit window in IDLE, supports breakpoints"
-
- def __init__(self, *args):
- self.breakpoints = []
- EditorWindow.__init__(self, *args)
- self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
- self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
- self.text.bind("<<open-python-shell>>", self.flist.open_shell)
-
- self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
- 'breakpoints.lst')
- # whenever a file is changed, restore breakpoints
- if self.io.filename: self.restore_file_breaks()
- def filename_changed_hook(old_hook=self.io.filename_change_hook,
- self=self):
- self.restore_file_breaks()
- old_hook()
- self.io.set_filename_change_hook(filename_changed_hook)
-
- rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
- ("Clear Breakpoint", "<<clear-breakpoint-here>>")]
-
- def set_breakpoint(self, lineno):
- text = self.text
- filename = self.io.filename
- text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
- try:
- i = self.breakpoints.index(lineno)
- except ValueError: # only add if missing, i.e. do once
- self.breakpoints.append(lineno)
- try: # update the subprocess debugger
- debug = self.flist.pyshell.interp.debugger
- debug.set_breakpoint_here(filename, lineno)
- except: # but debugger may not be active right now....
- pass
-
- def set_breakpoint_here(self, event=None):
- text = self.text
- filename = self.io.filename
- if not filename:
- text.bell()
- return
- lineno = int(float(text.index("insert")))
- self.set_breakpoint(lineno)
-
- def clear_breakpoint_here(self, event=None):
- text = self.text
- filename = self.io.filename
- if not filename:
- text.bell()
- return
- lineno = int(float(text.index("insert")))
- try:
- self.breakpoints.remove(lineno)
- except:
- pass
- text.tag_remove("BREAK", "insert linestart",\
- "insert lineend +1char")
- try:
- debug = self.flist.pyshell.interp.debugger
- debug.clear_breakpoint_here(filename, lineno)
- except:
- pass
-
- def clear_file_breaks(self):
- if self.breakpoints:
- text = self.text
- filename = self.io.filename
- if not filename:
- text.bell()
- return
- self.breakpoints = []
- text.tag_remove("BREAK", "1.0", END)
- try:
- debug = self.flist.pyshell.interp.debugger
- debug.clear_file_breaks(filename)
- except:
- pass
-
- def store_file_breaks(self):
- "Save breakpoints when file is saved"
- # XXX 13 Dec 2002 KBK Currently the file must be saved before it can
- # be run. The breaks are saved at that time. If we introduce
- # a temporary file save feature the save breaks functionality
- # needs to be re-verified, since the breaks at the time the
- # temp file is created may differ from the breaks at the last
- # permanent save of the file. Currently, a break introduced
- # after a save will be effective, but not persistent.
- # This is necessary to keep the saved breaks synched with the
- # saved file.
- #
- # Breakpoints are set as tagged ranges in the text. Certain
- # kinds of edits cause these ranges to be deleted: Inserting
- # or deleting a line just before a breakpoint, and certain
- # deletions prior to a breakpoint. These issues need to be
- # investigated and understood. It's not clear if they are
- # Tk issues or IDLE issues, or whether they can actually
- # be fixed. Since a modified file has to be saved before it is
- # run, and since self.breakpoints (from which the subprocess
- # debugger is loaded) is updated during the save, the visible
- # breaks stay synched with the subprocess even if one of these
- # unexpected breakpoint deletions occurs.
- breaks = self.breakpoints
- filename = self.io.filename
- try:
- lines = open(self.breakpointPath,"r").readlines()
- except IOError:
- lines = []
- new_file = open(self.breakpointPath,"w")
- for line in lines:
- if not line.startswith(filename + '='):
- new_file.write(line)
- self.update_breakpoints()
- breaks = self.breakpoints
- if breaks:
- new_file.write(filename + '=' + str(breaks) + '\n')
- new_file.close()
-
- def restore_file_breaks(self):
- self.text.update() # this enables setting "BREAK" tags to be visible
- filename = self.io.filename
- if filename is None:
- return
- if os.path.isfile(self.breakpointPath):
- lines = open(self.breakpointPath,"r").readlines()
- for line in lines:
- if line.startswith(filename + '='):
- breakpoint_linenumbers = eval(line[len(filename)+1:])
- for breakpoint_linenumber in breakpoint_linenumbers:
- self.set_breakpoint(breakpoint_linenumber)
-
- def update_breakpoints(self):
- "Retrieves all the breakpoints in the current window"
- text = self.text
- ranges = text.tag_ranges("BREAK")
- linenumber_list = self.ranges_to_linenumbers(ranges)
- self.breakpoints = linenumber_list
-
- def ranges_to_linenumbers(self, ranges):
- lines = []
- for index in range(0, len(ranges), 2):
- lineno = int(float(ranges[index]))
- end = int(float(ranges[index+1]))
- while lineno < end:
- lines.append(lineno)
- lineno += 1
- return lines
-
-# XXX 13 Dec 2002 KBK Not used currently
-# def saved_change_hook(self):
-# "Extend base method - clear breaks if module is modified"
-# if not self.get_saved():
-# self.clear_file_breaks()
-# EditorWindow.saved_change_hook(self)
-
- def _close(self):
- "Extend base method - clear breaks when module is closed"
- self.clear_file_breaks()
- EditorWindow._close(self)
-
-
-class PyShellFileList(FileList):
- "Extend base class: IDLE supports a shell and breakpoints"
-
- # override FileList's class variable, instances return PyShellEditorWindow
- # instead of EditorWindow when new edit windows are created.
- EditorWindow = PyShellEditorWindow
-
- pyshell = None
-
- def open_shell(self, event=None):
- if self.pyshell:
- self.pyshell.top.wakeup()
- else:
- self.pyshell = PyShell(self)
- if self.pyshell:
- if not self.pyshell.begin():
- return None
- return self.pyshell
-
-
-class ModifiedColorDelegator(ColorDelegator):
- "Extend base class: colorizer for the shell window itself"
-
- def __init__(self):
- ColorDelegator.__init__(self)
- self.LoadTagDefs()
-
- def recolorize_main(self):
- self.tag_remove("TODO", "1.0", "iomark")
- self.tag_add("SYNC", "1.0", "iomark")
- ColorDelegator.recolorize_main(self)
-
- def LoadTagDefs(self):
- ColorDelegator.LoadTagDefs(self)
- theme = idleConf.GetOption('main','Theme','name')
- self.tagdefs.update({
- "stdin": {'background':None,'foreground':None},
- "stdout": idleConf.GetHighlight(theme, "stdout"),
- "stderr": idleConf.GetHighlight(theme, "stderr"),
- "console": idleConf.GetHighlight(theme, "console"),
- None: idleConf.GetHighlight(theme, "normal"),
- })
-
-class ModifiedUndoDelegator(UndoDelegator):
- "Extend base class: forbid insert/delete before the I/O mark"
-
- def insert(self, index, chars, tags=None):
- try:
- if self.delegate.compare(index, "<", "iomark"):
- self.delegate.bell()
- return
- except TclError:
- pass
- UndoDelegator.insert(self, index, chars, tags)
-
- def delete(self, index1, index2=None):
- try:
- if self.delegate.compare(index1, "<", "iomark"):
- self.delegate.bell()
- return
- except TclError:
- pass
- UndoDelegator.delete(self, index1, index2)
-
-
-class MyRPCClient(rpc.RPCClient):
-
- def handle_EOF(self):
- "Override the base class - just re-raise EOFError"
- raise EOFError
-
-
-class ModifiedInterpreter(InteractiveInterpreter):
-
- def __init__(self, tkconsole):
- self.tkconsole = tkconsole
- locals = sys.modules['__main__'].__dict__
- InteractiveInterpreter.__init__(self, locals=locals)
- self.save_warnings_filters = None
- self.restarting = False
- self.subprocess_arglist = self.build_subprocess_arglist()
-
- port = 8833
- rpcclt = None
- rpcpid = None
-
- def spawn_subprocess(self):
- args = self.subprocess_arglist
- self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
-
- def build_subprocess_arglist(self):
- w = ['-W' + s for s in sys.warnoptions]
- if 1/2 > 0: # account for new division
- w.append('-Qnew')
- # Maybe IDLE is installed and is being accessed via sys.path,
- # or maybe it's not installed and the idle.py script is being
- # run from the IDLE source directory.
- del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
- default=False, type='bool')
- if __name__ == 'idlelib.PyShell':
- command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
- else:
- command = "__import__('run').main(%r)" % (del_exitf,)
- if sys.platform[:3] == 'win' and ' ' in sys.executable:
- # handle embedded space in path by quoting the argument
- decorated_exec = '"%s"' % sys.executable
- else:
- decorated_exec = sys.executable
- return [decorated_exec] + w + ["-c", command, str(self.port)]
-
- def start_subprocess(self):
- # spawning first avoids passing a listening socket to the subprocess
- self.spawn_subprocess()
- #time.sleep(20) # test to simulate GUI not accepting connection
- addr = (LOCALHOST, self.port)
- # Idle starts listening for connection on localhost
- for i in range(3):
- time.sleep(i)
- try:
- self.rpcclt = MyRPCClient(addr)
- break
- except socket.error, err:
- pass
- else:
- self.display_port_binding_error()
- return None
- # Accept the connection from the Python execution server
- self.rpcclt.listening_sock.settimeout(10)
- try:
- self.rpcclt.accept()
- except socket.timeout, err:
- self.display_no_subprocess_error()
- return None
- self.rpcclt.register("stdin", self.tkconsole)
- self.rpcclt.register("stdout", self.tkconsole.stdout)
- self.rpcclt.register("stderr", self.tkconsole.stderr)
- self.rpcclt.register("flist", self.tkconsole.flist)
- self.rpcclt.register("linecache", linecache)
- self.rpcclt.register("interp", self)
- self.transfer_path()
- self.poll_subprocess()
- return self.rpcclt
-
- def restart_subprocess(self):
- if self.restarting:
- return self.rpcclt
- self.restarting = True
- # close only the subprocess debugger
- debug = self.getdebugger()
- if debug:
- try:
- # Only close subprocess debugger, don't unregister gui_adap!
- RemoteDebugger.close_subprocess_debugger(self.rpcclt)
- except:
- pass
- # Kill subprocess, spawn a new one, accept connection.
- self.rpcclt.close()
- self.unix_terminate()
- console = self.tkconsole
- was_executing = console.executing
- console.executing = False
- self.spawn_subprocess()
- try:
- self.rpcclt.accept()
- except socket.timeout, err:
- self.display_no_subprocess_error()
- return None
- self.transfer_path()
- # annotate restart in shell window and mark it
- console.text.delete("iomark", "end-1c")
- if was_executing:
- console.write('\n')
- console.showprompt()
- halfbar = ((int(console.width) - 16) // 2) * '='
- console.write(halfbar + ' RESTART ' + halfbar)
- console.text.mark_set("restart", "end-1c")
- console.text.mark_gravity("restart", "left")
- console.showprompt()
- # restart subprocess debugger
- if debug:
- # Restarted debugger connects to current instance of debug GUI
- gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
- # reload remote debugger breakpoints for all PyShellEditWindows
- debug.load_breakpoints()
- self.restarting = False
- return self.rpcclt
-
- def __request_interrupt(self):
- self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
-
- def interrupt_subprocess(self):
- threading.Thread(target=self.__request_interrupt).start()
-
- def kill_subprocess(self):
- try:
- self.rpcclt.close()
- except AttributeError: # no socket
- pass
- self.unix_terminate()
- self.tkconsole.executing = False
- self.rpcclt = None
-
- def unix_terminate(self):
- "UNIX: make sure subprocess is terminated and collect status"
- if hasattr(os, 'kill'):
- try:
- os.kill(self.rpcpid, SIGTERM)
- except OSError:
- # process already terminated:
- return
- else:
- try:
- os.waitpid(self.rpcpid, 0)
- except OSError:
- return
-
- def transfer_path(self):
- self.runcommand("""if 1:
- import sys as _sys
- _sys.path = %r
- del _sys
- \n""" % (sys.path,))
-
- active_seq = None
-
- def poll_subprocess(self):
- clt = self.rpcclt
- if clt is None:
- return
- try:
- response = clt.pollresponse(self.active_seq, wait=0.05)
- except (EOFError, IOError, KeyboardInterrupt):
- # lost connection or subprocess terminated itself, restart
- # [the KBI is from rpc.SocketIO.handle_EOF()]
- if self.tkconsole.closing:
- return
- response = None
- self.restart_subprocess()
- if response:
- self.tkconsole.resetoutput()
- self.active_seq = None
- how, what = response
- console = self.tkconsole.console
- if how == "OK":
- if what is not None:
- print >>console, repr(what)
- elif how == "EXCEPTION":
- if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
- self.remote_stack_viewer()
- elif how == "ERROR":
- errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
- print >>sys.__stderr__, errmsg, what
- print >>console, errmsg, what
- # we received a response to the currently active seq number:
- try:
- self.tkconsole.endexecuting()
- except AttributeError: # shell may have closed
- pass
- # Reschedule myself
- if not self.tkconsole.closing:
- self.tkconsole.text.after(self.tkconsole.pollinterval,
- self.poll_subprocess)
-
- debugger = None
-
- def setdebugger(self, debugger):
- self.debugger = debugger
-
- def getdebugger(self):
- return self.debugger
-
- def open_remote_stack_viewer(self):
- """Initiate the remote stack viewer from a separate thread.
-
- This method is called from the subprocess, and by returning from this
- method we allow the subprocess to unblock. After a bit the shell
- requests the subprocess to open the remote stack viewer which returns a
- static object looking at the last exceptiopn. It is queried through
- the RPC mechanism.
-
- """
- self.tkconsole.text.after(300, self.remote_stack_viewer)
- return
-
- def remote_stack_viewer(self):
- import RemoteObjectBrowser
- oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
- if oid is None:
- self.tkconsole.root.bell()
- return
- item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
- from TreeWidget import ScrolledCanvas, TreeNode
- top = Toplevel(self.tkconsole.root)
- theme = idleConf.GetOption('main','Theme','name')
- background = idleConf.GetHighlight(theme, 'normal')['background']
- sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
- sc.frame.pack(expand=1, fill="both")
- node = TreeNode(sc.canvas, None, item)
- node.expand()
- # XXX Should GC the remote tree when closing the window
-
- gid = 0
-
- def execsource(self, source):
- "Like runsource() but assumes complete exec source"
- filename = self.stuffsource(source)
- self.execfile(filename, source)
-
- def execfile(self, filename, source=None):
- "Execute an existing file"
- if source is None:
- source = open(filename, "r").read()
- try:
- code = compile(source, filename, "exec")
- except (OverflowError, SyntaxError):
- self.tkconsole.resetoutput()
- tkerr = self.tkconsole.stderr
- print>>tkerr, '*** Error in script or command!\n'
- print>>tkerr, 'Traceback (most recent call last):'
- InteractiveInterpreter.showsyntaxerror(self, filename)
- self.tkconsole.showprompt()
- else:
- self.runcode(code)
-
- def runsource(self, source):
- "Extend base class method: Stuff the source in the line cache first"
- filename = self.stuffsource(source)
- self.more = 0
- self.save_warnings_filters = warnings.filters[:]
- warnings.filterwarnings(action="error", category=SyntaxWarning)
- if isinstance(source, types.UnicodeType):
- import IOBinding
- try:
- source = source.encode(IOBinding.encoding)
- except UnicodeError:
- self.tkconsole.resetoutput()
- self.write("Unsupported characters in input\n")
- return
- try:
- # InteractiveInterpreter.runsource() calls its runcode() method,
- # which is overridden (see below)
- return InteractiveInterpreter.runsource(self, source, filename)
- finally:
- if self.save_warnings_filters is not None:
- warnings.filters[:] = self.save_warnings_filters
- self.save_warnings_filters = None
-
- def stuffsource(self, source):
- "Stuff source in the filename cache"
- filename = "<pyshell#%d>" % self.gid
- self.gid = self.gid + 1
- lines = source.split("\n")
- linecache.cache[filename] = len(source)+1, 0, lines, filename
- return filename
-
- def prepend_syspath(self, filename):
- "Prepend sys.path with file's directory if not already included"
- self.runcommand("""if 1:
- _filename = %r
- import sys as _sys
- from os.path import dirname as _dirname
- _dir = _dirname(_filename)
- if not _dir in _sys.path:
- _sys.path.insert(0, _dir)
- del _filename, _sys, _dirname, _dir
- \n""" % (filename,))
-
- def showsyntaxerror(self, filename=None):
- """Extend base class method: Add Colorizing
-
- Color the offending position instead of printing it and pointing at it
- with a caret.
-
- """
- text = self.tkconsole.text
- stuff = self.unpackerror()
- if stuff:
- msg, lineno, offset, line = stuff
- if lineno == 1:
- pos = "iomark + %d chars" % (offset-1)
- else:
- pos = "iomark linestart + %d lines + %d chars" % \
- (lineno-1, offset-1)
- text.tag_add("ERROR", pos)
- text.see(pos)
- char = text.get(pos)
- if char and char in IDENTCHARS:
- text.tag_add("ERROR", pos + " wordstart", pos)
- self.tkconsole.resetoutput()
- self.write("SyntaxError: %s\n" % str(msg))
- else:
- self.tkconsole.resetoutput()
- InteractiveInterpreter.showsyntaxerror(self, filename)
- self.tkconsole.showprompt()
-
- def unpackerror(self):
- type, value, tb = sys.exc_info()
- ok = type is SyntaxError
- if ok:
- try:
- msg, (dummy_filename, lineno, offset, line) = value
- if not offset:
- offset = 0
- except:
- ok = 0
- if ok:
- return msg, lineno, offset, line
- else:
- return None
-
- def showtraceback(self):
- "Extend base class method to reset output properly"
- self.tkconsole.resetoutput()
- self.checklinecache()
- InteractiveInterpreter.showtraceback(self)
- if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
- self.tkconsole.open_stack_viewer()
-
- def checklinecache(self):
- c = linecache.cache
- for key in c.keys():
- if key[:1] + key[-1:] != "<>":
- del c[key]
-
- def runcommand(self, code):
- "Run the code without invoking the debugger"
- # The code better not raise an exception!
- if self.tkconsole.executing:
- self.display_executing_dialog()
- return 0
- if self.rpcclt:
- self.rpcclt.remotequeue("exec", "runcode", (code,), {})
- else:
- exec code in self.locals
- return 1
-
- def runcode(self, code):
- "Override base class method"
- if self.tkconsole.executing:
- self.interp.restart_subprocess()
- self.checklinecache()
- if self.save_warnings_filters is not None:
- warnings.filters[:] = self.save_warnings_filters
- self.save_warnings_filters = None
- debugger = self.debugger
- try:
- self.tkconsole.beginexecuting()
- try:
- if not debugger and self.rpcclt is not None:
- self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
- (code,), {})
- elif debugger:
- debugger.run(code, self.locals)
- else:
- exec code in self.locals
- except SystemExit:
- if not self.tkconsole.closing:
- if tkMessageBox.askyesno(
- "Exit?",
- "Do you want to exit altogether?",
- default="yes",
- master=self.tkconsole.text):
- raise
- else:
- self.showtraceback()
- else:
- raise
- except:
- if use_subprocess:
- print >> self.tkconsole.stderr, \
- "IDLE internal error in runcode()"
- self.showtraceback()
- if use_subprocess:
- self.tkconsole.endexecuting()
- finally:
- if not use_subprocess:
- try:
- self.tkconsole.endexecuting()
- except AttributeError: # shell may have closed
- pass
-
- def write(self, s):
- "Override base class method"
- self.tkconsole.stderr.write(s)
-
- def display_port_binding_error(self):
- tkMessageBox.showerror(
- "Port Binding Error",
- "IDLE can't bind TCP/IP port 8833, which is necessary to "
- "communicate with its Python execution server. Either "
- "no networking is installed on this computer or another "
- "process (another IDLE?) is using the port. Run IDLE with the -n "
- "command line switch to start without a subprocess and refer to "
- "Help/IDLE Help 'Running without a subprocess' for further "
- "details.",
- master=self.tkconsole.text)
-
- def display_no_subprocess_error(self):
- tkMessageBox.showerror(
- "Subprocess Startup Error",
- "IDLE's subprocess didn't make connection. Either IDLE can't "
- "start a subprocess or personal firewall software is blocking "
- "the connection.",
- master=self.tkconsole.text)
-
- def display_executing_dialog(self):
- tkMessageBox.showerror(
- "Already executing",
- "The Python Shell window is already executing a command; "
- "please wait until it is finished.",
- master=self.tkconsole.text)
-
-
-class PyShell(OutputWindow):
-
- shell_title = "Python Shell"
-
- # Override classes
- ColorDelegator = ModifiedColorDelegator
- UndoDelegator = ModifiedUndoDelegator
-
- # Override menus
- menu_specs = [
- ("file", "_File"),
- ("edit", "_Edit"),
- ("debug", "_Debug"),
- ("options", "_Options"),
- ("windows", "_Windows"),
- ("help", "_Help"),
- ]
-
- if macosxSupport.runningAsOSXApp():
- del menu_specs[-3]
- menu_specs[-2] = ("windows", "_Window")
-
-
- # New classes
- from IdleHistory import History
-
- def __init__(self, flist=None):
- if use_subprocess:
- ms = self.menu_specs
- if ms[2][0] != "shell":
- ms.insert(2, ("shell", "She_ll"))
- self.interp = ModifiedInterpreter(self)
- if flist is None:
- root = Tk()
- fixwordbreaks(root)
- root.withdraw()
- flist = PyShellFileList(root)
- #
- OutputWindow.__init__(self, flist, None, None)
- #
-## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
- self.usetabs = True
- # indentwidth must be 8 when using tabs. See note in EditorWindow:
- self.indentwidth = 8
- self.context_use_ps1 = True
- #
- text = self.text
- text.configure(wrap="char")
- text.bind("<<newline-and-indent>>", self.enter_callback)
- text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
- text.bind("<<interrupt-execution>>", self.cancel_callback)
- text.bind("<<beginning-of-line>>", self.home_callback)
- text.bind("<<end-of-file>>", self.eof_callback)
- text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
- text.bind("<<toggle-debugger>>", self.toggle_debugger)
- text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
- if use_subprocess:
- text.bind("<<view-restart>>", self.view_restart_mark)
- text.bind("<<restart-shell>>", self.restart_shell)
- #
- self.save_stdout = sys.stdout
- self.save_stderr = sys.stderr
- self.save_stdin = sys.stdin
- import IOBinding
- self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
- self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
- self.console = PseudoFile(self, "console", IOBinding.encoding)
- if not use_subprocess:
- sys.stdout = self.stdout
- sys.stderr = self.stderr
- sys.stdin = self
- #
- self.history = self.History(self.text)
- #
- self.pollinterval = 50 # millisec
-
- def get_standard_extension_names(self):
- return idleConf.GetExtensions(shell_only=True)
-
- reading = False
- executing = False
- canceled = False
- endoffile = False
- closing = False
-
- def set_warning_stream(self, stream):
- global warning_stream
- warning_stream = stream
-
- def get_warning_stream(self):
- return warning_stream
-
- def toggle_debugger(self, event=None):
- if self.executing:
- tkMessageBox.showerror("Don't debug now",
- "You can only toggle the debugger when idle",
- master=self.text)
- self.set_debugger_indicator()
- return "break"
- else:
- db = self.interp.getdebugger()
- if db:
- self.close_debugger()
- else:
- self.open_debugger()
-
- def set_debugger_indicator(self):
- db = self.interp.getdebugger()
- self.setvar("<<toggle-debugger>>", not not db)
-
- def toggle_jit_stack_viewer(self, event=None):
- pass # All we need is the variable
-
- def close_debugger(self):
- db = self.interp.getdebugger()
- if db:
- self.interp.setdebugger(None)
- db.close()
- if self.interp.rpcclt:
- RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
- self.resetoutput()
- self.console.write("[DEBUG OFF]\n")
- sys.ps1 = ">>> "
- self.showprompt()
- self.set_debugger_indicator()
-
- def open_debugger(self):
- if self.interp.rpcclt:
- dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
- self)
- else:
- dbg_gui = Debugger.Debugger(self)
- self.interp.setdebugger(dbg_gui)
- dbg_gui.load_breakpoints()
- sys.ps1 = "[DEBUG ON]\n>>> "
- self.showprompt()
- self.set_debugger_indicator()
-
- def beginexecuting(self):
- "Helper for ModifiedInterpreter"
- self.resetoutput()
- self.executing = 1
-
- def endexecuting(self):
- "Helper for ModifiedInterpreter"
- self.executing = 0
- self.canceled = 0
- self.showprompt()
-
- def close(self):
- "Extend EditorWindow.close()"
- if self.executing:
- response = tkMessageBox.askokcancel(
- "Kill?",
- "The program is still running!\n Do you want to kill it?",
- default="ok",
- parent=self.text)
- if response == False:
- return "cancel"
- if self.reading:
- self.top.quit()
- self.canceled = True
- self.closing = True
- # Wait for poll_subprocess() rescheduling to stop
- self.text.after(2 * self.pollinterval, self.close2)
-
- def close2(self):
- return EditorWindow.close(self)
-
- def _close(self):
- "Extend EditorWindow._close(), shut down debugger and execution server"
- self.close_debugger()
- if use_subprocess:
- self.interp.kill_subprocess()
- # Restore std streams
- sys.stdout = self.save_stdout
- sys.stderr = self.save_stderr
- sys.stdin = self.save_stdin
- # Break cycles
- self.interp = None
- self.console = None
- self.flist.pyshell = None
- self.history = None
- EditorWindow._close(self)
-
- def ispythonsource(self, filename):
- "Override EditorWindow method: never remove the colorizer"
- return True
-
- def short_title(self):
- return self.shell_title
-
- COPYRIGHT = \
- 'Type "copyright", "credits" or "license()" for more information.'
-
- firewallmessage = """
- ****************************************************************
- Personal firewall software may warn about the connection IDLE
- makes to its subprocess using this computer's internal loopback
- interface. This connection is not visible on any external
- interface and no data is sent to or received from the Internet.
- ****************************************************************
- """
-
- def begin(self):
- self.resetoutput()
- if use_subprocess:
- nosub = ''
- client = self.interp.start_subprocess()
- if not client:
- self.close()
- return False
- else:
- nosub = "==== No Subprocess ===="
- self.write("Python %s on %s\n%s\n%s\nIDLE %s %s\n" %
- (sys.version, sys.platform, self.COPYRIGHT,
- self.firewallmessage, idlever.IDLE_VERSION, nosub))
- self.showprompt()
- import Tkinter
- Tkinter._default_root = None # 03Jan04 KBK What's this?
- return True
-
- def readline(self):
- save = self.reading
- try:
- self.reading = 1
- self.top.mainloop() # nested mainloop()
- finally:
- self.reading = save
- line = self.text.get("iomark", "end-1c")
- if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
- line = "\n"
- if isinstance(line, unicode):
- import IOBinding
- try:
- line = line.encode(IOBinding.encoding)
- except UnicodeError:
- pass
- self.resetoutput()
- if self.canceled:
- self.canceled = 0
- if not use_subprocess:
- raise KeyboardInterrupt
- if self.endoffile:
- self.endoffile = 0
- line = ""
- return line
-
- def isatty(self):
- return True
-
- def cancel_callback(self, event=None):
- try:
- if self.text.compare("sel.first", "!=", "sel.last"):
- return # Active selection -- always use default binding
- except:
- pass
- if not (self.executing or self.reading):
- self.resetoutput()
- self.interp.write("KeyboardInterrupt\n")
- self.showprompt()
- return "break"
- self.endoffile = 0
- self.canceled = 1
- if (self.executing and self.interp.rpcclt):
- if self.interp.getdebugger():
- self.interp.restart_subprocess()
- else:
- self.interp.interrupt_subprocess()
- if self.reading:
- self.top.quit() # exit the nested mainloop() in readline()
- return "break"
-
- def eof_callback(self, event):
- if self.executing and not self.reading:
- return # Let the default binding (delete next char) take over
- if not (self.text.compare("iomark", "==", "insert") and
- self.text.compare("insert", "==", "end-1c")):
- return # Let the default binding (delete next char) take over
- if not self.executing:
- self.resetoutput()
- self.close()
- else:
- self.canceled = 0
- self.endoffile = 1
- self.top.quit()
- return "break"
-
- def home_callback(self, event):
- if event.state != 0 and event.keysym == "Home":
- return # <Modifier-Home>; fall back to class binding
- if self.text.compare("iomark", "<=", "insert") and \
- self.text.compare("insert linestart", "<=", "iomark"):
- self.text.mark_set("insert", "iomark")
- self.text.tag_remove("sel", "1.0", "end")
- self.text.see("insert")
- return "break"
-
- def linefeed_callback(self, event):
- # Insert a linefeed without entering anything (still autoindented)
- if self.reading:
- self.text.insert("insert", "\n")
- self.text.see("insert")
- else:
- self.newline_and_indent_event(event)
- return "break"
-
- def enter_callback(self, event):
- if self.executing and not self.reading:
- return # Let the default binding (insert '\n') take over
- # If some text is selected, recall the selection
- # (but only if this before the I/O mark)
- try:
- sel = self.text.get("sel.first", "sel.last")
- if sel:
- if self.text.compare("sel.last", "<=", "iomark"):
- self.recall(sel, event)
- return "break"
- except:
- pass
- # If we're strictly before the line containing iomark, recall
- # the current line, less a leading prompt, less leading or
- # trailing whitespace
- if self.text.compare("insert", "<", "iomark linestart"):
- # Check if there's a relevant stdin range -- if so, use it
- prev = self.text.tag_prevrange("stdin", "insert")
- if prev and self.text.compare("insert", "<", prev[1]):
- self.recall(self.text.get(prev[0], prev[1]), event)
- return "break"
- next = self.text.tag_nextrange("stdin", "insert")
- if next and self.text.compare("insert lineend", ">=", next[0]):
- self.recall(self.text.get(next[0], next[1]), event)
- return "break"
- # No stdin mark -- just get the current line, less any prompt
- indices = self.text.tag_nextrange("console", "insert linestart")
- if indices and \
- self.text.compare(indices[0], "<=", "insert linestart"):
- self.recall(self.text.get(indices[1], "insert lineend"), event)
- else:
- self.recall(self.text.get("insert linestart", "insert lineend"), event)
- return "break"
- # If we're between the beginning of the line and the iomark, i.e.
- # in the prompt area, move to the end of the prompt
- if self.text.compare("insert", "<", "iomark"):
- self.text.mark_set("insert", "iomark")
- # If we're in the current input and there's only whitespace
- # beyond the cursor, erase that whitespace first
- s = self.text.get("insert", "end-1c")
- if s and not s.strip():
- self.text.delete("insert", "end-1c")
- # If we're in the current input before its last line,
- # insert a newline right at the insert point
- if self.text.compare("insert", "<", "end-1c linestart"):
- self.newline_and_indent_event(event)
- return "break"
- # We're in the last line; append a newline and submit it
- self.text.mark_set("insert", "end-1c")
- if self.reading:
- self.text.insert("insert", "\n")
- self.text.see("insert")
- else:
- self.newline_and_indent_event(event)
- self.text.tag_add("stdin", "iomark", "end-1c")
- self.text.update_idletasks()
- if self.reading:
- self.top.quit() # Break out of recursive mainloop() in raw_input()
- else:
- self.runit()
- return "break"
-
- def recall(self, s, event):
- # remove leading and trailing empty or whitespace lines
- s = re.sub(r'^\s*\n', '' , s)
- s = re.sub(r'\n\s*$', '', s)
- lines = s.split('\n')
- self.text.undo_block_start()
- try:
- self.text.tag_remove("sel", "1.0", "end")
- self.text.mark_set("insert", "end-1c")
- prefix = self.text.get("insert linestart", "insert")
- if prefix.rstrip().endswith(':'):
- self.newline_and_indent_event(event)
- prefix = self.text.get("insert linestart", "insert")
- self.text.insert("insert", lines[0].strip())
- if len(lines) > 1:
- orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
- new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
- for line in lines[1:]:
- if line.startswith(orig_base_indent):
- # replace orig base indentation with new indentation
- line = new_base_indent + line[len(orig_base_indent):]
- self.text.insert('insert', '\n'+line.rstrip())
- finally:
- self.text.see("insert")
- self.text.undo_block_stop()
-
- def runit(self):
- line = self.text.get("iomark", "end-1c")
- # Strip off last newline and surrounding whitespace.
- # (To allow you to hit return twice to end a statement.)
- i = len(line)
- while i > 0 and line[i-1] in " \t":
- i = i-1
- if i > 0 and line[i-1] == "\n":
- i = i-1
- while i > 0 and line[i-1] in " \t":
- i = i-1
- line = line[:i]
- more = self.interp.runsource(line)
-
- def open_stack_viewer(self, event=None):
- if self.interp.rpcclt:
- return self.interp.remote_stack_viewer()
- try:
- sys.last_traceback
- except:
- tkMessageBox.showerror("No stack trace",
- "There is no stack trace yet.\n"
- "(sys.last_traceback is not defined)",
- master=self.text)
- return
- from StackViewer import StackBrowser
- sv = StackBrowser(self.root, self.flist)
-
- def view_restart_mark(self, event=None):
- self.text.see("iomark")
- self.text.see("restart")
-
- def restart_shell(self, event=None):
- self.interp.restart_subprocess()
-
- def showprompt(self):
- self.resetoutput()
- try:
- s = str(sys.ps1)
- except:
- s = ""
- self.console.write(s)
- self.text.mark_set("insert", "end-1c")
- self.set_line_and_column()
- self.io.reset_undo()
-
- def resetoutput(self):
- source = self.text.get("iomark", "end-1c")
- if self.history:
- self.history.history_store(source)
- if self.text.get("end-2c") != "\n":
- self.text.insert("end-1c", "\n")
- self.text.mark_set("iomark", "end-1c")
- self.set_line_and_column()
- sys.stdout.softspace = 0
-
- def write(self, s, tags=()):
- try:
- self.text.mark_gravity("iomark", "right")
- OutputWindow.write(self, s, tags, "iomark")
- self.text.mark_gravity("iomark", "left")
- except:
- pass
- if self.canceled:
- self.canceled = 0
- if not use_subprocess:
- raise KeyboardInterrupt
-
-class PseudoFile(object):
-
- def __init__(self, shell, tags, encoding=None):
- self.shell = shell
- self.tags = tags
- self.softspace = 0
- self.encoding = encoding
-
- def write(self, s):
- self.shell.write(s, self.tags)
-
- def writelines(self, l):
- map(self.write, l)
-
- def flush(self):
- pass
-
- def isatty(self):
- return True
-
-
-usage_msg = """\
-
-USAGE: idle [-deins] [-t title] [file]*
- idle [-dns] [-t title] (-c cmd | -r file) [arg]*
- idle [-dns] [-t title] - [arg]*
-
- -h print this help message and exit
- -n run IDLE without a subprocess (see Help/IDLE Help for details)
-
-The following options will override the IDLE 'settings' configuration:
-
- -e open an edit window
- -i open a shell window
-
-The following options imply -i and will open a shell:
-
- -c cmd run the command in a shell, or
- -r file run script from file
-
- -d enable the debugger
- -s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
- -t title set title of shell window
-
-A default edit window will be bypassed when -c, -r, or - are used.
-
-[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
-
-Examples:
-
-idle
- Open an edit window or shell depending on IDLE's configuration.
-
-idle foo.py foobar.py
- Edit the files, also open a shell if configured to start with shell.
-
-idle -est "Baz" foo.py
- Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
- window with the title "Baz".
-
-idle -c "import sys; print sys.argv" "foo"
- Open a shell window and run the command, passing "-c" in sys.argv[0]
- and "foo" in sys.argv[1].
-
-idle -d -s -r foo.py "Hello World"
- Open a shell window, run a startup script, enable the debugger, and
- run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
- sys.argv[1].
-
-echo "import sys; print sys.argv" | idle - "foobar"
- Open a shell window, run the script piped in, passing '' in sys.argv[0]
- and "foobar" in sys.argv[1].
-"""
-
-def main():
- global flist, root, use_subprocess
-
- use_subprocess = True
- enable_shell = False
- enable_edit = False
- debug = False
- cmd = None
- script = None
- startup = False
- try:
- opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
- except getopt.error, msg:
- sys.stderr.write("Error: %s\n" % str(msg))
- sys.stderr.write(usage_msg)
- sys.exit(2)
- for o, a in opts:
- if o == '-c':
- cmd = a
- enable_shell = True
- if o == '-d':
- debug = True
- enable_shell = True
- if o == '-e':
- enable_edit = True
- if o == '-h':
- sys.stdout.write(usage_msg)
- sys.exit()
- if o == '-i':
- enable_shell = True
- if o == '-n':
- use_subprocess = False
- if o == '-r':
- script = a
- if os.path.isfile(script):
- pass
- else:
- print "No script file: ", script
- sys.exit()
- enable_shell = True
- if o == '-s':
- startup = True
- enable_shell = True
- if o == '-t':
- PyShell.shell_title = a
- enable_shell = True
- if args and args[0] == '-':
- cmd = sys.stdin.read()
- enable_shell = True
- # process sys.argv and sys.path:
- for i in range(len(sys.path)):
- sys.path[i] = os.path.abspath(sys.path[i])
- if args and args[0] == '-':
- sys.argv = [''] + args[1:]
- elif cmd:
- sys.argv = ['-c'] + args
- elif script:
- sys.argv = [script] + args
- elif args:
- enable_edit = True
- pathx = []
- for filename in args:
- pathx.append(os.path.dirname(filename))
- for dir in pathx:
- dir = os.path.abspath(dir)
- if not dir in sys.path:
- sys.path.insert(0, dir)
- else:
- dir = os.getcwd()
- if not dir in sys.path:
- sys.path.insert(0, dir)
- # check the IDLE settings configuration (but command line overrides)
- edit_start = idleConf.GetOption('main', 'General',
- 'editor-on-startup', type='bool')
- enable_edit = enable_edit or edit_start
- enable_shell = enable_shell or not edit_start
- # start editor and/or shell windows:
- root = Tk(className="Idle")
-
- fixwordbreaks(root)
- root.withdraw()
- flist = PyShellFileList(root)
- macosxSupport.setupApp(root, flist)
-
- if enable_edit:
- if not (cmd or script):
- for filename in args:
- flist.open(filename)
- if not args:
- flist.new()
- if enable_shell:
- shell = flist.open_shell()
- if not shell:
- return # couldn't open shell
-
- if macosxSupport.runningAsOSXApp() and flist.dict:
- # On OSX: when the user has double-clicked on a file that causes
- # IDLE to be launched the shell window will open just in front of
- # the file she wants to see. Lower the interpreter window when
- # there are open files.
- shell.top.lower()
-
- shell = flist.pyshell
- # handle remaining options:
- if debug:
- shell.open_debugger()
- if startup:
- filename = os.environ.get("IDLESTARTUP") or \
- os.environ.get("PYTHONSTARTUP")
- if filename and os.path.isfile(filename):
- shell.interp.execfile(filename)
- if shell and cmd or script:
- shell.interp.runcommand("""if 1:
- import sys as _sys
- _sys.argv = %r
- del _sys
- \n""" % (sys.argv,))
- if cmd:
- shell.interp.execsource(cmd)
- elif script:
- shell.interp.prepend_syspath(script)
- shell.interp.execfile(script)
-
- root.mainloop()
- root.destroy()
-
-if __name__ == "__main__":
- sys.modules['PyShell'] = sys.modules['__main__']
- main()
diff --git a/sys/lib/python/idlelib/README.txt b/sys/lib/python/idlelib/README.txt
deleted file mode 100644
index 101f7eba1..000000000
--- a/sys/lib/python/idlelib/README.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-IDLE is Python's Tkinter-based Integrated DeveLopment Environment.
-
-IDLE emphasizes a lightweight, clean design with a simple user interface.
-Although it is suitable for beginners, even advanced users will find that
-IDLE has everything they really need to develop pure Python code.
-
-IDLE features a multi-window text editor with multiple undo, Python colorizing,
-and many other capabilities, e.g. smart indent, call tips, and autocompletion.
-
-The editor has comprehensive search functions, including searching through
-multiple files. Class browsers and path browsers provide fast access to
-code objects from a top level viewpoint without dealing with code folding.
-
-There is a Python Shell window which features colorizing and command recall.
-
-IDLE executes Python code in a separate process, which is restarted for each
-Run (F5) initiated from an editor window. The environment can also be
-restarted from the Shell window without restarting IDLE.
-
-This enhancement has often been requested, and is now finally available. The
-magic "reload/import *" incantations are no longer required when editing and
-testing a module two or three steps down the import chain.
-
-(Personal firewall software may warn about the connection IDLE makes to its
-subprocess using this computer's internal loopback interface. This connection
-is not visible on any external interface and no data is sent to or received
-from the Internet.)
-
-It is possible to interrupt tightly looping user code, even on Windows.
-
-Applications which cannot support subprocesses and/or sockets can still run
-IDLE in a single process.
-
-IDLE has an integrated debugger with stepping, persistent breakpoints, and call
-stack visibility.
-
-There is a GUI configuration manager which makes it easy to select fonts,
-colors, keybindings, and startup options. This facility includes a feature
-which allows the user to specify additional help sources, either locally or on
-the web.
-
-IDLE is coded in 100% pure Python, using the Tkinter GUI toolkit (Tk/Tcl)
-and is cross-platform, working on Unix, Mac, and Windows.
-
-IDLE accepts command line arguments. Try idle -h to see the options.
-
-
-If you find bugs or have suggestions, let us know about them by using the
-Python Bug Tracker:
-
-http://sourceforge.net/projects/python
-
-Patches are always appreciated at the Python Patch Tracker, and change
-requests should be posted to the RFE Tracker.
-
-For further details and links, read the Help files and check the IDLE home
-page at
-
-http://www.python.org/idle/
-
-There is a mail list for IDLE: idle-dev@python.org. You can join at
-
-http://mail.python.org/mailman/listinfo/idle-dev
diff --git a/sys/lib/python/idlelib/RemoteDebugger.py b/sys/lib/python/idlelib/RemoteDebugger.py
deleted file mode 100644
index 74085c36f..000000000
--- a/sys/lib/python/idlelib/RemoteDebugger.py
+++ /dev/null
@@ -1,381 +0,0 @@
-"""Support for remote Python debugging.
-
-Some ASCII art to describe the structure:
-
- IN PYTHON SUBPROCESS # IN IDLE PROCESS
- #
- # oid='gui_adapter'
- +----------+ # +------------+ +-----+
- | GUIProxy |--remote#call-->| GUIAdapter |--calls-->| GUI |
-+-----+--calls-->+----------+ # +------------+ +-----+
-| Idb | # /
-+-----+<-calls--+------------+ # +----------+<--calls-/
- | IdbAdapter |<--remote#call--| IdbProxy |
- +------------+ # +----------+
- oid='idb_adapter' #
-
-The purpose of the Proxy and Adapter classes is to translate certain
-arguments and return values that cannot be transported through the RPC
-barrier, in particular frame and traceback objects.
-
-"""
-
-import sys
-import types
-import rpc
-import Debugger
-
-debugging = 0
-
-idb_adap_oid = "idb_adapter"
-gui_adap_oid = "gui_adapter"
-
-#=======================================
-#
-# In the PYTHON subprocess:
-
-frametable = {}
-dicttable = {}
-codetable = {}
-tracebacktable = {}
-
-def wrap_frame(frame):
- fid = id(frame)
- frametable[fid] = frame
- return fid
-
-def wrap_info(info):
- "replace info[2], a traceback instance, by its ID"
- if info is None:
- return None
- else:
- traceback = info[2]
- assert isinstance(traceback, types.TracebackType)
- traceback_id = id(traceback)
- tracebacktable[traceback_id] = traceback
- modified_info = (info[0], info[1], traceback_id)
- return modified_info
-
-class GUIProxy:
-
- def __init__(self, conn, gui_adap_oid):
- self.conn = conn
- self.oid = gui_adap_oid
-
- def interaction(self, message, frame, info=None):
- # calls rpc.SocketIO.remotecall() via run.MyHandler instance
- # pass frame and traceback object IDs instead of the objects themselves
- self.conn.remotecall(self.oid, "interaction",
- (message, wrap_frame(frame), wrap_info(info)),
- {})
-
-class IdbAdapter:
-
- def __init__(self, idb):
- self.idb = idb
-
- #----------called by an IdbProxy----------
-
- def set_step(self):
- self.idb.set_step()
-
- def set_quit(self):
- self.idb.set_quit()
-
- def set_continue(self):
- self.idb.set_continue()
-
- def set_next(self, fid):
- frame = frametable[fid]
- self.idb.set_next(frame)
-
- def set_return(self, fid):
- frame = frametable[fid]
- self.idb.set_return(frame)
-
- def get_stack(self, fid, tbid):
- ##print >>sys.__stderr__, "get_stack(%r, %r)" % (fid, tbid)
- frame = frametable[fid]
- if tbid is None:
- tb = None
- else:
- tb = tracebacktable[tbid]
- stack, i = self.idb.get_stack(frame, tb)
- ##print >>sys.__stderr__, "get_stack() ->", stack
- stack = [(wrap_frame(frame), k) for frame, k in stack]
- ##print >>sys.__stderr__, "get_stack() ->", stack
- return stack, i
-
- def run(self, cmd):
- import __main__
- self.idb.run(cmd, __main__.__dict__)
-
- def set_break(self, filename, lineno):
- msg = self.idb.set_break(filename, lineno)
- return msg
-
- def clear_break(self, filename, lineno):
- msg = self.idb.clear_break(filename, lineno)
- return msg
-
- def clear_all_file_breaks(self, filename):
- msg = self.idb.clear_all_file_breaks(filename)
- return msg
-
- #----------called by a FrameProxy----------
-
- def frame_attr(self, fid, name):
- frame = frametable[fid]
- return getattr(frame, name)
-
- def frame_globals(self, fid):
- frame = frametable[fid]
- dict = frame.f_globals
- did = id(dict)
- dicttable[did] = dict
- return did
-
- def frame_locals(self, fid):
- frame = frametable[fid]
- dict = frame.f_locals
- did = id(dict)
- dicttable[did] = dict
- return did
-
- def frame_code(self, fid):
- frame = frametable[fid]
- code = frame.f_code
- cid = id(code)
- codetable[cid] = code
- return cid
-
- #----------called by a CodeProxy----------
-
- def code_name(self, cid):
- code = codetable[cid]
- return code.co_name
-
- def code_filename(self, cid):
- code = codetable[cid]
- return code.co_filename
-
- #----------called by a DictProxy----------
-
- def dict_keys(self, did):
- dict = dicttable[did]
- return dict.keys()
-
- def dict_item(self, did, key):
- dict = dicttable[did]
- value = dict[key]
- value = repr(value)
- return value
-
-#----------end class IdbAdapter----------
-
-
-def start_debugger(rpchandler, gui_adap_oid):
- """Start the debugger and its RPC link in the Python subprocess
-
- Start the subprocess side of the split debugger and set up that side of the
- RPC link by instantiating the GUIProxy, Idb debugger, and IdbAdapter
- objects and linking them together. Register the IdbAdapter with the
- RPCServer to handle RPC requests from the split debugger GUI via the
- IdbProxy.
-
- """
- gui_proxy = GUIProxy(rpchandler, gui_adap_oid)
- idb = Debugger.Idb(gui_proxy)
- idb_adap = IdbAdapter(idb)
- rpchandler.register(idb_adap_oid, idb_adap)
- return idb_adap_oid
-
-
-#=======================================
-#
-# In the IDLE process:
-
-
-class FrameProxy:
-
- def __init__(self, conn, fid):
- self._conn = conn
- self._fid = fid
- self._oid = "idb_adapter"
- self._dictcache = {}
-
- def __getattr__(self, name):
- if name[:1] == "_":
- raise AttributeError, name
- if name == "f_code":
- return self._get_f_code()
- if name == "f_globals":
- return self._get_f_globals()
- if name == "f_locals":
- return self._get_f_locals()
- return self._conn.remotecall(self._oid, "frame_attr",
- (self._fid, name), {})
-
- def _get_f_code(self):
- cid = self._conn.remotecall(self._oid, "frame_code", (self._fid,), {})
- return CodeProxy(self._conn, self._oid, cid)
-
- def _get_f_globals(self):
- did = self._conn.remotecall(self._oid, "frame_globals",
- (self._fid,), {})
- return self._get_dict_proxy(did)
-
- def _get_f_locals(self):
- did = self._conn.remotecall(self._oid, "frame_locals",
- (self._fid,), {})
- return self._get_dict_proxy(did)
-
- def _get_dict_proxy(self, did):
- if self._dictcache.has_key(did):
- return self._dictcache[did]
- dp = DictProxy(self._conn, self._oid, did)
- self._dictcache[did] = dp
- return dp
-
-
-class CodeProxy:
-
- def __init__(self, conn, oid, cid):
- self._conn = conn
- self._oid = oid
- self._cid = cid
-
- def __getattr__(self, name):
- if name == "co_name":
- return self._conn.remotecall(self._oid, "code_name",
- (self._cid,), {})
- if name == "co_filename":
- return self._conn.remotecall(self._oid, "code_filename",
- (self._cid,), {})
-
-
-class DictProxy:
-
- def __init__(self, conn, oid, did):
- self._conn = conn
- self._oid = oid
- self._did = did
-
- def keys(self):
- return self._conn.remotecall(self._oid, "dict_keys", (self._did,), {})
-
- def __getitem__(self, key):
- return self._conn.remotecall(self._oid, "dict_item",
- (self._did, key), {})
-
- def __getattr__(self, name):
- ##print >>sys.__stderr__, "failed DictProxy.__getattr__:", name
- raise AttributeError, name
-
-
-class GUIAdapter:
-
- def __init__(self, conn, gui):
- self.conn = conn
- self.gui = gui
-
- def interaction(self, message, fid, modified_info):
- ##print "interaction: (%s, %s, %s)" % (message, fid, modified_info)
- frame = FrameProxy(self.conn, fid)
- self.gui.interaction(message, frame, modified_info)
-
-
-class IdbProxy:
-
- def __init__(self, conn, shell, oid):
- self.oid = oid
- self.conn = conn
- self.shell = shell
-
- def call(self, methodname, *args, **kwargs):
- ##print "**IdbProxy.call %s %s %s" % (methodname, args, kwargs)
- value = self.conn.remotecall(self.oid, methodname, args, kwargs)
- ##print "**IdbProxy.call %s returns %r" % (methodname, value)
- return value
-
- def run(self, cmd, locals):
- # Ignores locals on purpose!
- seq = self.conn.asyncqueue(self.oid, "run", (cmd,), {})
- self.shell.interp.active_seq = seq
-
- def get_stack(self, frame, tbid):
- # passing frame and traceback IDs, not the objects themselves
- stack, i = self.call("get_stack", frame._fid, tbid)
- stack = [(FrameProxy(self.conn, fid), k) for fid, k in stack]
- return stack, i
-
- def set_continue(self):
- self.call("set_continue")
-
- def set_step(self):
- self.call("set_step")
-
- def set_next(self, frame):
- self.call("set_next", frame._fid)
-
- def set_return(self, frame):
- self.call("set_return", frame._fid)
-
- def set_quit(self):
- self.call("set_quit")
-
- def set_break(self, filename, lineno):
- msg = self.call("set_break", filename, lineno)
- return msg
-
- def clear_break(self, filename, lineno):
- msg = self.call("clear_break", filename, lineno)
- return msg
-
- def clear_all_file_breaks(self, filename):
- msg = self.call("clear_all_file_breaks", filename)
- return msg
-
-def start_remote_debugger(rpcclt, pyshell):
- """Start the subprocess debugger, initialize the debugger GUI and RPC link
-
- Request the RPCServer start the Python subprocess debugger and link. Set
- up the Idle side of the split debugger by instantiating the IdbProxy,
- debugger GUI, and debugger GUIAdapter objects and linking them together.
-
- Register the GUIAdapter with the RPCClient to handle debugger GUI
- interaction requests coming from the subprocess debugger via the GUIProxy.
-
- The IdbAdapter will pass execution and environment requests coming from the
- Idle debugger GUI to the subprocess debugger via the IdbProxy.
-
- """
- global idb_adap_oid
-
- idb_adap_oid = rpcclt.remotecall("exec", "start_the_debugger",\
- (gui_adap_oid,), {})
- idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
- gui = Debugger.Debugger(pyshell, idb_proxy)
- gui_adap = GUIAdapter(rpcclt, gui)
- rpcclt.register(gui_adap_oid, gui_adap)
- return gui
-
-def close_remote_debugger(rpcclt):
- """Shut down subprocess debugger and Idle side of debugger RPC link
-
- Request that the RPCServer shut down the subprocess debugger and link.
- Unregister the GUIAdapter, which will cause a GC on the Idle process
- debugger and RPC link objects. (The second reference to the debugger GUI
- is deleted in PyShell.close_remote_debugger().)
-
- """
- close_subprocess_debugger(rpcclt)
- rpcclt.unregister(gui_adap_oid)
-
-def close_subprocess_debugger(rpcclt):
- rpcclt.remotecall("exec", "stop_the_debugger", (idb_adap_oid,), {})
-
-def restart_subprocess_debugger(rpcclt):
- idb_adap_oid_ret = rpcclt.remotecall("exec", "start_the_debugger",\
- (gui_adap_oid,), {})
- assert idb_adap_oid_ret == idb_adap_oid, 'Idb restarted with different oid'
diff --git a/sys/lib/python/idlelib/RemoteObjectBrowser.py b/sys/lib/python/idlelib/RemoteObjectBrowser.py
deleted file mode 100644
index 6ba339137..000000000
--- a/sys/lib/python/idlelib/RemoteObjectBrowser.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import rpc
-
-def remote_object_tree_item(item):
- wrapper = WrappedObjectTreeItem(item)
- oid = id(wrapper)
- rpc.objecttable[oid] = wrapper
- return oid
-
-class WrappedObjectTreeItem:
- # Lives in PYTHON subprocess
-
- def __init__(self, item):
- self.__item = item
-
- def __getattr__(self, name):
- value = getattr(self.__item, name)
- return value
-
- def _GetSubList(self):
- list = self.__item._GetSubList()
- return map(remote_object_tree_item, list)
-
-class StubObjectTreeItem:
- # Lives in IDLE process
-
- def __init__(self, sockio, oid):
- self.sockio = sockio
- self.oid = oid
-
- def __getattr__(self, name):
- value = rpc.MethodProxy(self.sockio, self.oid, name)
- return value
-
- def _GetSubList(self):
- list = self.sockio.remotecall(self.oid, "_GetSubList", (), {})
- return [StubObjectTreeItem(self.sockio, oid) for oid in list]
diff --git a/sys/lib/python/idlelib/ReplaceDialog.py b/sys/lib/python/idlelib/ReplaceDialog.py
deleted file mode 100644
index c8eb1c8c4..000000000
--- a/sys/lib/python/idlelib/ReplaceDialog.py
+++ /dev/null
@@ -1,167 +0,0 @@
-from Tkinter import *
-import SearchEngine
-from SearchDialogBase import SearchDialogBase
-
-def replace(text):
- root = text._root()
- engine = SearchEngine.get(root)
- if not hasattr(engine, "_replacedialog"):
- engine._replacedialog = ReplaceDialog(root, engine)
- dialog = engine._replacedialog
- dialog.open(text)
-
-class ReplaceDialog(SearchDialogBase):
-
- title = "Replace Dialog"
- icon = "Replace"
-
- def __init__(self, root, engine):
- SearchDialogBase.__init__(self, root, engine)
- self.replvar = StringVar(root)
-
- def open(self, text):
- SearchDialogBase.open(self, text)
- try:
- first = text.index("sel.first")
- except TclError:
- first = None
- try:
- last = text.index("sel.last")
- except TclError:
- last = None
- first = first or text.index("insert")
- last = last or first
- self.show_hit(first, last)
- self.ok = 1
-
- def create_entries(self):
- SearchDialogBase.create_entries(self)
- self.replent = self.make_entry("Replace with:", self.replvar)
-
- def create_command_buttons(self):
- SearchDialogBase.create_command_buttons(self)
- self.make_button("Find", self.find_it)
- self.make_button("Replace", self.replace_it)
- self.make_button("Replace+Find", self.default_command, 1)
- self.make_button("Replace All", self.replace_all)
-
- def find_it(self, event=None):
- self.do_find(0)
-
- def replace_it(self, event=None):
- if self.do_find(self.ok):
- self.do_replace()
-
- def default_command(self, event=None):
- if self.do_find(self.ok):
- self.do_replace()
- self.do_find(0)
-
- def replace_all(self, event=None):
- prog = self.engine.getprog()
- if not prog:
- return
- repl = self.replvar.get()
- text = self.text
- res = self.engine.search_text(text, prog)
- if not res:
- text.bell()
- return
- text.tag_remove("sel", "1.0", "end")
- text.tag_remove("hit", "1.0", "end")
- line = res[0]
- col = res[1].start()
- if self.engine.iswrap():
- line = 1
- col = 0
- ok = 1
- first = last = None
- # XXX ought to replace circular instead of top-to-bottom when wrapping
- text.undo_block_start()
- while 1:
- res = self.engine.search_forward(text, prog, line, col, 0, ok)
- if not res:
- break
- line, m = res
- chars = text.get("%d.0" % line, "%d.0" % (line+1))
- orig = m.group()
- new = m.expand(repl)
- i, j = m.span()
- first = "%d.%d" % (line, i)
- last = "%d.%d" % (line, j)
- if new == orig:
- text.mark_set("insert", last)
- else:
- text.mark_set("insert", first)
- if first != last:
- text.delete(first, last)
- if new:
- text.insert(first, new)
- col = i + len(new)
- ok = 0
- text.undo_block_stop()
- if first and last:
- self.show_hit(first, last)
- self.close()
-
- def do_find(self, ok=0):
- if not self.engine.getprog():
- return False
- text = self.text
- res = self.engine.search_text(text, None, ok)
- if not res:
- text.bell()
- return False
- line, m = res
- i, j = m.span()
- first = "%d.%d" % (line, i)
- last = "%d.%d" % (line, j)
- self.show_hit(first, last)
- self.ok = 1
- return True
-
- def do_replace(self):
- prog = self.engine.getprog()
- if not prog:
- return False
- text = self.text
- try:
- first = pos = text.index("sel.first")
- last = text.index("sel.last")
- except TclError:
- pos = None
- if not pos:
- first = last = pos = text.index("insert")
- line, col = SearchEngine.get_line_col(pos)
- chars = text.get("%d.0" % line, "%d.0" % (line+1))
- m = prog.match(chars, col)
- if not prog:
- return False
- new = m.expand(self.replvar.get())
- text.mark_set("insert", first)
- text.undo_block_start()
- if m.group():
- text.delete(first, last)
- if new:
- text.insert(first, new)
- text.undo_block_stop()
- self.show_hit(first, text.index("insert"))
- self.ok = 0
- return True
-
- def show_hit(self, first, last):
- text = self.text
- text.mark_set("insert", first)
- text.tag_remove("sel", "1.0", "end")
- text.tag_add("sel", first, last)
- text.tag_remove("hit", "1.0", "end")
- if first == last:
- text.tag_add("hit", first)
- else:
- text.tag_add("hit", first, last)
- text.see("insert")
- text.update_idletasks()
-
- def close(self, event=None):
- SearchDialogBase.close(self, event)
- self.text.tag_remove("hit", "1.0", "end")
diff --git a/sys/lib/python/idlelib/ScriptBinding.py b/sys/lib/python/idlelib/ScriptBinding.py
deleted file mode 100644
index f325ad1d2..000000000
--- a/sys/lib/python/idlelib/ScriptBinding.py
+++ /dev/null
@@ -1,210 +0,0 @@
-"""Extension to execute code outside the Python shell window.
-
-This adds the following commands:
-
-- Check module does a full syntax check of the current module.
- It also runs the tabnanny to catch any inconsistent tabs.
-
-- Run module executes the module's code in the __main__ namespace. The window
- must have been saved previously. The module is added to sys.modules, and is
- also added to the __main__ namespace.
-
-XXX GvR Redesign this interface (yet again) as follows:
-
-- Present a dialog box for ``Run Module''
-
-- Allow specify command line arguments in the dialog box
-
-"""
-
-import os
-import re
-import string
-import tabnanny
-import tokenize
-import tkMessageBox
-import PyShell
-
-from configHandler import idleConf
-
-IDENTCHARS = string.ascii_letters + string.digits + "_"
-
-indent_message = """Error: Inconsistent indentation detected!
-
-1) Your indentation is outright incorrect (easy to fix), OR
-
-2) Your indentation mixes tabs and spaces.
-
-To fix case 2, change all tabs to spaces by using Edit->Select All followed \
-by Format->Untabify Region and specify the number of columns used by each tab.
-"""
-
-class ScriptBinding:
-
- menudefs = [
- ('run', [None,
- ('Check Module', '<<check-module>>'),
- ('Run Module', '<<run-module>>'), ]), ]
-
- def __init__(self, editwin):
- self.editwin = editwin
- # Provide instance variables referenced by Debugger
- # XXX This should be done differently
- self.flist = self.editwin.flist
- self.root = self.editwin.root
-
- def check_module_event(self, event):
- filename = self.getfilename()
- if not filename:
- return
- if not self.tabnanny(filename):
- return
- self.checksyntax(filename)
-
- def tabnanny(self, filename):
- f = open(filename, 'r')
- try:
- tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
- except tokenize.TokenError, msg:
- msgtxt, (lineno, start) = msg
- self.editwin.gotoline(lineno)
- self.errorbox("Tabnanny Tokenizing Error",
- "Token Error: %s" % msgtxt)
- return False
- except tabnanny.NannyNag, nag:
- # The error messages from tabnanny are too confusing...
- self.editwin.gotoline(nag.get_lineno())
- self.errorbox("Tab/space error", indent_message)
- return False
- except IndentationError:
- # From tokenize(), let compile() in checksyntax find it again.
- pass
- return True
-
- def checksyntax(self, filename):
- self.shell = shell = self.flist.open_shell()
- saved_stream = shell.get_warning_stream()
- shell.set_warning_stream(shell.stderr)
- f = open(filename, 'r')
- source = f.read()
- f.close()
- if '\r' in source:
- source = re.sub(r"\r\n", "\n", source)
- source = re.sub(r"\r", "\n", source)
- if source and source[-1] != '\n':
- source = source + '\n'
- text = self.editwin.text
- text.tag_remove("ERROR", "1.0", "end")
- try:
- try:
- # If successful, return the compiled code
- return compile(source, filename, "exec")
- except (SyntaxError, OverflowError), err:
- try:
- msg, (errorfilename, lineno, offset, line) = err
- if not errorfilename:
- err.args = msg, (filename, lineno, offset, line)
- err.filename = filename
- self.colorize_syntax_error(msg, lineno, offset)
- except:
- msg = "*** " + str(err)
- self.errorbox("Syntax error",
- "There's an error in your program:\n" + msg)
- return False
- finally:
- shell.set_warning_stream(saved_stream)
-
- def colorize_syntax_error(self, msg, lineno, offset):
- text = self.editwin.text
- pos = "0.0 + %d lines + %d chars" % (lineno-1, offset-1)
- text.tag_add("ERROR", pos)
- char = text.get(pos)
- if char and char in IDENTCHARS:
- text.tag_add("ERROR", pos + " wordstart", pos)
- if '\n' == text.get(pos): # error at line end
- text.mark_set("insert", pos)
- else:
- text.mark_set("insert", pos + "+1c")
- text.see(pos)
-
- def run_module_event(self, event):
- """Run the module after setting up the environment.
-
- First check the syntax. If OK, make sure the shell is active and
- then transfer the arguments, set the run environment's working
- directory to the directory of the module being executed and also
- add that directory to its sys.path if not already included.
-
- """
- filename = self.getfilename()
- if not filename:
- return
- if not self.tabnanny(filename):
- return
- code = self.checksyntax(filename)
- if not code:
- return
- shell = self.shell
- interp = shell.interp
- if PyShell.use_subprocess:
- shell.restart_shell()
- dirname = os.path.dirname(filename)
- # XXX Too often this discards arguments the user just set...
- interp.runcommand("""if 1:
- _filename = %r
- import sys as _sys
- from os.path import basename as _basename
- if (not _sys.argv or
- _basename(_sys.argv[0]) != _basename(_filename)):
- _sys.argv = [_filename]
- import os as _os
- _os.chdir(%r)
- del _filename, _sys, _basename, _os
- \n""" % (filename, dirname))
- interp.prepend_syspath(filename)
- # XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still
- # go to __stderr__. With subprocess, they go to the shell.
- # Need to change streams in PyShell.ModifiedInterpreter.
- interp.runcode(code)
-
- def getfilename(self):
- """Get source filename. If not saved, offer to save (or create) file
-
- The debugger requires a source file. Make sure there is one, and that
- the current version of the source buffer has been saved. If the user
- declines to save or cancels the Save As dialog, return None.
-
- If the user has configured IDLE for Autosave, the file will be
- silently saved if it already exists and is dirty.
-
- """
- filename = self.editwin.io.filename
- if not self.editwin.get_saved():
- autosave = idleConf.GetOption('main', 'General',
- 'autosave', type='bool')
- if autosave and filename:
- self.editwin.io.save(None)
- else:
- reply = self.ask_save_dialog()
- self.editwin.text.focus_set()
- if reply == "ok":
- self.editwin.io.save(None)
- filename = self.editwin.io.filename
- else:
- filename = None
- return filename
-
- def ask_save_dialog(self):
- msg = "Source Must Be Saved\n" + 5*' ' + "OK to Save?"
- mb = tkMessageBox.Message(title="Save Before Run or Check",
- message=msg,
- icon=tkMessageBox.QUESTION,
- type=tkMessageBox.OKCANCEL,
- default=tkMessageBox.OK,
- master=self.editwin.text)
- return mb.show()
-
- def errorbox(self, title, message):
- # XXX This should really be a function of EditorWindow...
- tkMessageBox.showerror(title, message, master=self.editwin.text)
- self.editwin.text.focus_set()
diff --git a/sys/lib/python/idlelib/ScrolledList.py b/sys/lib/python/idlelib/ScrolledList.py
deleted file mode 100644
index 921193657..000000000
--- a/sys/lib/python/idlelib/ScrolledList.py
+++ /dev/null
@@ -1,139 +0,0 @@
-from Tkinter import *
-
-class ScrolledList:
-
- default = "(None)"
-
- def __init__(self, master, **options):
- # Create top frame, with scrollbar and listbox
- self.master = master
- self.frame = frame = Frame(master)
- self.frame.pack(fill="both", expand=1)
- self.vbar = vbar = Scrollbar(frame, name="vbar")
- self.vbar.pack(side="right", fill="y")
- self.listbox = listbox = Listbox(frame, exportselection=0,
- background="white")
- if options:
- listbox.configure(options)
- listbox.pack(expand=1, fill="both")
- # Tie listbox and scrollbar together
- vbar["command"] = listbox.yview
- listbox["yscrollcommand"] = vbar.set
- # Bind events to the list box
- listbox.bind("<ButtonRelease-1>", self.click_event)
- listbox.bind("<Double-ButtonRelease-1>", self.double_click_event)
- listbox.bind("<ButtonPress-3>", self.popup_event)
- listbox.bind("<Key-Up>", self.up_event)
- listbox.bind("<Key-Down>", self.down_event)
- # Mark as empty
- self.clear()
-
- def close(self):
- self.frame.destroy()
-
- def clear(self):
- self.listbox.delete(0, "end")
- self.empty = 1
- self.listbox.insert("end", self.default)
-
- def append(self, item):
- if self.empty:
- self.listbox.delete(0, "end")
- self.empty = 0
- self.listbox.insert("end", str(item))
-
- def get(self, index):
- return self.listbox.get(index)
-
- def click_event(self, event):
- self.listbox.activate("@%d,%d" % (event.x, event.y))
- index = self.listbox.index("active")
- self.select(index)
- self.on_select(index)
- return "break"
-
- def double_click_event(self, event):
- index = self.listbox.index("active")
- self.select(index)
- self.on_double(index)
- return "break"
-
- menu = None
-
- def popup_event(self, event):
- if not self.menu:
- self.make_menu()
- menu = self.menu
- self.listbox.activate("@%d,%d" % (event.x, event.y))
- index = self.listbox.index("active")
- self.select(index)
- menu.tk_popup(event.x_root, event.y_root)
-
- def make_menu(self):
- menu = Menu(self.listbox, tearoff=0)
- self.menu = menu
- self.fill_menu()
-
- def up_event(self, event):
- index = self.listbox.index("active")
- if self.listbox.selection_includes(index):
- index = index - 1
- else:
- index = self.listbox.size() - 1
- if index < 0:
- self.listbox.bell()
- else:
- self.select(index)
- self.on_select(index)
- return "break"
-
- def down_event(self, event):
- index = self.listbox.index("active")
- if self.listbox.selection_includes(index):
- index = index + 1
- else:
- index = 0
- if index >= self.listbox.size():
- self.listbox.bell()
- else:
- self.select(index)
- self.on_select(index)
- return "break"
-
- def select(self, index):
- self.listbox.focus_set()
- self.listbox.activate(index)
- self.listbox.selection_clear(0, "end")
- self.listbox.selection_set(index)
- self.listbox.see(index)
-
- # Methods to override for specific actions
-
- def fill_menu(self):
- pass
-
- def on_select(self, index):
- pass
-
- def on_double(self, index):
- pass
-
-
-def test():
- root = Tk()
- root.protocol("WM_DELETE_WINDOW", root.destroy)
- class MyScrolledList(ScrolledList):
- def fill_menu(self): self.menu.add_command(label="pass")
- def on_select(self, index): print "select", self.get(index)
- def on_double(self, index): print "double", self.get(index)
- s = MyScrolledList(root)
- for i in range(30):
- s.append("item %02d" % i)
- return root
-
-def main():
- root = test()
- root.mainloop()
-
-if __name__ == '__main__':
- main()
diff --git a/sys/lib/python/idlelib/SearchDialog.py b/sys/lib/python/idlelib/SearchDialog.py
deleted file mode 100644
index d7124d67f..000000000
--- a/sys/lib/python/idlelib/SearchDialog.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from Tkinter import *
-import SearchEngine
-from SearchDialogBase import SearchDialogBase
-
-
-def _setup(text):
- root = text._root()
- engine = SearchEngine.get(root)
- if not hasattr(engine, "_searchdialog"):
- engine._searchdialog = SearchDialog(root, engine)
- return engine._searchdialog
-
-def find(text):
- pat = text.get("sel.first", "sel.last")
- return _setup(text).open(text,pat)
-
-def find_again(text):
- return _setup(text).find_again(text)
-
-def find_selection(text):
- return _setup(text).find_selection(text)
-
-class SearchDialog(SearchDialogBase):
-
- def create_widgets(self):
- f = SearchDialogBase.create_widgets(self)
- self.make_button("Find", self.default_command, 1)
-
- def default_command(self, event=None):
- if not self.engine.getprog():
- return
- if self.find_again(self.text):
- self.close()
-
- def find_again(self, text):
- if not self.engine.getpat():
- self.open(text)
- return False
- if not self.engine.getprog():
- return False
- res = self.engine.search_text(text)
- if res:
- line, m = res
- i, j = m.span()
- first = "%d.%d" % (line, i)
- last = "%d.%d" % (line, j)
- try:
- selfirst = text.index("sel.first")
- sellast = text.index("sel.last")
- if selfirst == first and sellast == last:
- text.bell()
- return False
- except TclError:
- pass
- text.tag_remove("sel", "1.0", "end")
- text.tag_add("sel", first, last)
- text.mark_set("insert", self.engine.isback() and first or last)
- text.see("insert")
- return True
- else:
- text.bell()
- return False
-
- def find_selection(self, text):
- pat = text.get("sel.first", "sel.last")
- if pat:
- self.engine.setcookedpat(pat)
- return self.find_again(text)
diff --git a/sys/lib/python/idlelib/SearchDialogBase.py b/sys/lib/python/idlelib/SearchDialogBase.py
deleted file mode 100644
index f63e7ae37..000000000
--- a/sys/lib/python/idlelib/SearchDialogBase.py
+++ /dev/null
@@ -1,140 +0,0 @@
-from Tkinter import *
-
-class SearchDialogBase:
-
- title = "Search Dialog"
- icon = "Search"
- needwrapbutton = 1
-
- def __init__(self, root, engine):
- self.root = root
- self.engine = engine
- self.top = None
-
- def open(self, text, searchphrase=None):
- self.text = text
- if not self.top:
- self.create_widgets()
- else:
- self.top.deiconify()
- self.top.tkraise()
- if searchphrase:
- self.ent.delete(0,"end")
- self.ent.insert("end",searchphrase)
- self.ent.focus_set()
- self.ent.selection_range(0, "end")
- self.ent.icursor(0)
- self.top.grab_set()
-
- def close(self, event=None):
- if self.top:
- self.top.grab_release()
- self.top.withdraw()
-
- def create_widgets(self):
- top = Toplevel(self.root)
- top.bind("<Return>", self.default_command)
- top.bind("<Escape>", self.close)
- top.protocol("WM_DELETE_WINDOW", self.close)
- top.wm_title(self.title)
- top.wm_iconname(self.icon)
- self.top = top
-
- self.row = 0
- self.top.grid_columnconfigure(0, pad=2, weight=0)
- self.top.grid_columnconfigure(1, pad=2, minsize=100, weight=100)
-
- self.create_entries()
- self.create_option_buttons()
- self.create_other_buttons()
- return self.create_command_buttons()
-
- def make_entry(self, label, var):
- l = Label(self.top, text=label)
- l.grid(row=self.row, column=0, sticky="nw")
- e = Entry(self.top, textvariable=var, exportselection=0)
- e.grid(row=self.row, column=1, sticky="nwe")
- self.row = self.row + 1
- return e
-
- def make_frame(self,labeltext=None):
- if labeltext:
- l = Label(self.top, text=labeltext)
- l.grid(row=self.row, column=0, sticky="nw")
- f = Frame(self.top)
- f.grid(row=self.row, column=1, columnspan=1, sticky="nwe")
- self.row = self.row + 1
- return f
-
- def make_button(self, label, command, isdef=0):
- b = Button(self.buttonframe,
- text=label, command=command,
- default=isdef and "active" or "normal")
- cols,rows=self.buttonframe.grid_size()
- b.grid(pady=1,row=rows,column=0,sticky="ew")
- self.buttonframe.grid(rowspan=rows+1)
- return b
-
- def create_entries(self):
- self.ent = self.make_entry("Find:", self.engine.patvar)
-
- def create_option_buttons(self):
- f = self.make_frame("Options")
-
- btn = Checkbutton(f, anchor="w",
- variable=self.engine.revar,
- text="Regular expression")
- btn.pack(side="left", fill="both")
- if self.engine.isre():
- btn.select()
-
- btn = Checkbutton(f, anchor="w",
- variable=self.engine.casevar,
- text="Match case")
- btn.pack(side="left", fill="both")
- if self.engine.iscase():
- btn.select()
-
- btn = Checkbutton(f, anchor="w",
- variable=self.engine.wordvar,
- text="Whole word")
- btn.pack(side="left", fill="both")
- if self.engine.isword():
- btn.select()
-
- if self.needwrapbutton:
- btn = Checkbutton(f, anchor="w",
- variable=self.engine.wrapvar,
- text="Wrap around")
- btn.pack(side="left", fill="both")
- if self.engine.iswrap():
- btn.select()
-
- def create_other_buttons(self):
- f = self.make_frame("Direction")
-
- #lbl = Label(f, text="Direction: ")
- #lbl.pack(side="left")
-
- btn = Radiobutton(f, anchor="w",
- variable=self.engine.backvar, value=1,
- text="Up")
- btn.pack(side="left", fill="both")
- if self.engine.isback():
- btn.select()
-
- btn = Radiobutton(f, anchor="w",
- variable=self.engine.backvar, value=0,
- text="Down")
- btn.pack(side="left", fill="both")
- if not self.engine.isback():
- btn.select()
-
- def create_command_buttons(self):
- #
- # place button frame on the right
- f = self.buttonframe = Frame(self.top)
- f.grid(row=0,column=2,padx=2,pady=2,ipadx=2,ipady=2)
-
- b = self.make_button("close", self.close)
- b.lower()
diff --git a/sys/lib/python/idlelib/SearchEngine.py b/sys/lib/python/idlelib/SearchEngine.py
deleted file mode 100644
index cc40a00c5..000000000
--- a/sys/lib/python/idlelib/SearchEngine.py
+++ /dev/null
@@ -1,220 +0,0 @@
-import re
-from Tkinter import *
-import tkMessageBox
-
-def get(root):
- if not hasattr(root, "_searchengine"):
- root._searchengine = SearchEngine(root)
- # XXX This will never garbage-collect -- who cares
- return root._searchengine
-
-class SearchEngine:
-
- def __init__(self, root):
- self.root = root
- # State shared by search, replace, and grep;
- # the search dialogs bind these to UI elements.
- self.patvar = StringVar(root) # search pattern
- self.revar = BooleanVar(root) # regular expression?
- self.casevar = BooleanVar(root) # match case?
- self.wordvar = BooleanVar(root) # match whole word?
- self.wrapvar = BooleanVar(root) # wrap around buffer?
- self.wrapvar.set(1) # (on by default)
- self.backvar = BooleanVar(root) # search backwards?
-
- # Access methods
-
- def getpat(self):
- return self.patvar.get()
-
- def setpat(self, pat):
- self.patvar.set(pat)
-
- def isre(self):
- return self.revar.get()
-
- def iscase(self):
- return self.casevar.get()
-
- def isword(self):
- return self.wordvar.get()
-
- def iswrap(self):
- return self.wrapvar.get()
-
- def isback(self):
- return self.backvar.get()
-
- # Higher level access methods
-
- def getcookedpat(self):
- pat = self.getpat()
- if not self.isre():
- pat = re.escape(pat)
- if self.isword():
- pat = r"\b%s\b" % pat
- return pat
-
- def getprog(self):
- pat = self.getpat()
- if not pat:
- self.report_error(pat, "Empty regular expression")
- return None
- pat = self.getcookedpat()
- flags = 0
- if not self.iscase():
- flags = flags | re.IGNORECASE
- try:
- prog = re.compile(pat, flags)
- except re.error, what:
- try:
- msg, col = what
- except:
- msg = str(what)
- col = -1
- self.report_error(pat, msg, col)
- return None
- return prog
-
- def report_error(self, pat, msg, col=-1):
- # Derived class could overrid this with something fancier
- msg = "Error: " + str(msg)
- if pat:
- msg = msg + "\np\Pattern: " + str(pat)
- if col >= 0:
- msg = msg + "\nOffset: " + str(col)
- tkMessageBox.showerror("Regular expression error",
- msg, master=self.root)
-
- def setcookedpat(self, pat):
- if self.isre():
- pat = re.escape(pat)
- self.setpat(pat)
-
- def search_text(self, text, prog=None, ok=0):
- """Search a text widget for the pattern.
-
- If prog is given, it should be the precompiled pattern.
- Return a tuple (lineno, matchobj); None if not found.
-
- This obeys the wrap and direction (back) settings.
-
- The search starts at the selection (if there is one) or
- at the insert mark (otherwise). If the search is forward,
- it starts at the right of the selection; for a backward
- search, it starts at the left end. An empty match exactly
- at either end of the selection (or at the insert mark if
- there is no selection) is ignored unless the ok flag is true
- -- this is done to guarantee progress.
-
- If the search is allowed to wrap around, it will return the
- original selection if (and only if) it is the only match.
-
- """
- if not prog:
- prog = self.getprog()
- if not prog:
- return None # Compilation failed -- stop
- wrap = self.wrapvar.get()
- first, last = get_selection(text)
- if self.isback():
- if ok:
- start = last
- else:
- start = first
- line, col = get_line_col(start)
- res = self.search_backward(text, prog, line, col, wrap, ok)
- else:
- if ok:
- start = first
- else:
- start = last
- line, col = get_line_col(start)
- res = self.search_forward(text, prog, line, col, wrap, ok)
- return res
-
- def search_forward(self, text, prog, line, col, wrap, ok=0):
- wrapped = 0
- startline = line
- chars = text.get("%d.0" % line, "%d.0" % (line+1))
- while chars:
- m = prog.search(chars[:-1], col)
- if m:
- if ok or m.end() > col:
- return line, m
- line = line + 1
- if wrapped and line > startline:
- break
- col = 0
- ok = 1
- chars = text.get("%d.0" % line, "%d.0" % (line+1))
- if not chars and wrap:
- wrapped = 1
- wrap = 0
- line = 1
- chars = text.get("1.0", "2.0")
- return None
-
- def search_backward(self, text, prog, line, col, wrap, ok=0):
- wrapped = 0
- startline = line
- chars = text.get("%d.0" % line, "%d.0" % (line+1))
- while 1:
- m = search_reverse(prog, chars[:-1], col)
- if m:
- if ok or m.start() < col:
- return line, m
- line = line - 1
- if wrapped and line < startline:
- break
- ok = 1
- if line <= 0:
- if not wrap:
- break
- wrapped = 1
- wrap = 0
- pos = text.index("end-1c")
- line, col = map(int, pos.split("."))
- chars = text.get("%d.0" % line, "%d.0" % (line+1))
- col = len(chars) - 1
- return None
-
-# Helper to search backwards in a string.
-# (Optimized for the case where the pattern isn't found.)
-
-def search_reverse(prog, chars, col):
- m = prog.search(chars)
- if not m:
- return None
- found = None
- i, j = m.span()
- while i < col and j <= col:
- found = m
- if i == j:
- j = j+1
- m = prog.search(chars, j)
- if not m:
- break
- i, j = m.span()
- return found
-
-# Helper to get selection end points, defaulting to insert mark.
-# Return a tuple of indices ("line.col" strings).
-
-def get_selection(text):
- try:
- first = text.index("sel.first")
- last = text.index("sel.last")
- except TclError:
- first = last = None
- if not first:
- first = text.index("insert")
- if not last:
- last = first
- return first, last
-
-# Helper to parse a text index into a (line, col) tuple.
-
-def get_line_col(index):
- line, col = map(int, index.split(".")) # Fails on invalid index
- return line, col
diff --git a/sys/lib/python/idlelib/StackViewer.py b/sys/lib/python/idlelib/StackViewer.py
deleted file mode 100644
index 6b7730bcd..000000000
--- a/sys/lib/python/idlelib/StackViewer.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import os
-import sys
-import linecache
-
-from TreeWidget import TreeNode, TreeItem, ScrolledCanvas
-from ObjectBrowser import ObjectTreeItem, make_objecttreeitem
-
-def StackBrowser(root, flist=None, tb=None, top=None):
- if top is None:
- from Tkinter import Toplevel
- top = Toplevel(root)
- sc = ScrolledCanvas(top, bg="white", highlightthickness=0)
- sc.frame.pack(expand=1, fill="both")
- item = StackTreeItem(flist, tb)
- node = TreeNode(sc.canvas, None, item)
- node.expand()
-
-class StackTreeItem(TreeItem):
-
- def __init__(self, flist=None, tb=None):
- self.flist = flist
- self.stack = self.get_stack(tb)
- self.text = self.get_exception()
-
- def get_stack(self, tb):
- if tb is None:
- tb = sys.last_traceback
- stack = []
- if tb and tb.tb_frame is None:
- tb = tb.tb_next
- while tb is not None:
- stack.append((tb.tb_frame, tb.tb_lineno))
- tb = tb.tb_next
- return stack
-
- def get_exception(self):
- type = sys.last_type
- value = sys.last_value
- if hasattr(type, "__name__"):
- type = type.__name__
- s = str(type)
- if value is not None:
- s = s + ": " + str(value)
- return s
-
- def GetText(self):
- return self.text
-
- def GetSubList(self):
- sublist = []
- for info in self.stack:
- item = FrameTreeItem(info, self.flist)
- sublist.append(item)
- return sublist
-
-class FrameTreeItem(TreeItem):
-
- def __init__(self, info, flist):
- self.info = info
- self.flist = flist
-
- def GetText(self):
- frame, lineno = self.info
- try:
- modname = frame.f_globals["__name__"]
- except:
- modname = "?"
- code = frame.f_code
- filename = code.co_filename
- funcname = code.co_name
- sourceline = linecache.getline(filename, lineno)
- sourceline = sourceline.strip()
- if funcname in ("?", "", None):
- item = "%s, line %d: %s" % (modname, lineno, sourceline)
- else:
- item = "%s.%s(...), line %d: %s" % (modname, funcname,
- lineno, sourceline)
- return item
-
- def GetSubList(self):
- frame, lineno = self.info
- sublist = []
- if frame.f_globals is not frame.f_locals:
- item = VariablesTreeItem("<locals>", frame.f_locals, self.flist)
- sublist.append(item)
- item = VariablesTreeItem("<globals>", frame.f_globals, self.flist)
- sublist.append(item)
- return sublist
-
- def OnDoubleClick(self):
- if self.flist:
- frame, lineno = self.info
- filename = frame.f_code.co_filename
- if os.path.isfile(filename):
- self.flist.gotofileline(filename, lineno)
-
-class VariablesTreeItem(ObjectTreeItem):
-
- def GetText(self):
- return self.labeltext
-
- def GetLabelText(self):
- return None
-
- def IsExpandable(self):
- return len(self.object) > 0
-
- def keys(self):
- return self.object.keys()
-
- def GetSubList(self):
- sublist = []
- for key in self.keys():
- try:
- value = self.object[key]
- except KeyError:
- continue
- def setfunction(value, key=key, object=self.object):
- object[key] = value
- item = make_objecttreeitem(key + " =", value, setfunction)
- sublist.append(item)
- return sublist
-
-
-def _test():
- try:
- import testcode
- reload(testcode)
- except:
- sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
- from Tkinter import Tk
- root = Tk()
- StackBrowser(None, top=root)
- root.mainloop()
-
-if __name__ == "__main__":
- _test()
diff --git a/sys/lib/python/idlelib/TODO.txt b/sys/lib/python/idlelib/TODO.txt
deleted file mode 100644
index e2f1ac0f2..000000000
--- a/sys/lib/python/idlelib/TODO.txt
+++ /dev/null
@@ -1,210 +0,0 @@
-Original IDLE todo, much of it now outdated:
-============================================
-TO DO:
-
-- improve debugger:
- - manage breakpoints globally, allow bp deletion, tbreak, cbreak etc.
- - real object browser
- - help on how to use it (a simple help button will do wonders)
- - performance? (updates of large sets of locals are slow)
- - better integration of "debug module"
- - debugger should be global resource (attached to flist, not to shell)
- - fix the stupid bug where you need to step twice
- - display class name in stack viewer entries for methods
- - suppress tracing through IDLE internals (e.g. print) DONE
- - add a button to suppress through a specific module or class or method
- - more object inspection to stack viewer, e.g. to view all array items
-- insert the initial current directory into sys.path DONE
-- default directory attribute for each window instead of only for windows
- that have an associated filename
-- command expansion from keywords, module contents, other buffers, etc.
-- "Recent documents" menu item DONE
-- Filter region command
-- Optional horizontal scroll bar
-- more Emacsisms:
- - ^K should cut to buffer
- - M-[, M-] to move by paragraphs
- - incremental search?
-- search should indicate wrap-around in some way
-- restructure state sensitive code to avoid testing flags all the time
-- persistent user state (e.g. window and cursor positions, bindings)
-- make backups when saving
-- check file mtimes at various points
-- Pluggable interface with RCS/CVS/Perforce/Clearcase
-- better help?
-- don't open second class browser on same module (nor second path browser)
-- unify class and path browsers
-- Need to define a standard way whereby one can determine one is running
- inside IDLE (needed for Tk mainloop, also handy for $PYTHONSTARTUP)
-- Add more utility methods for use by extensions (a la get_selection)
-- Way to run command in totally separate interpreter (fork+os.system?) DONE
-- Way to find definition of fully-qualified name:
- In other words, select "UserDict.UserDict", hit some magic key and
- it loads up UserDict.py and finds the first def or class for UserDict.
-- need a way to force colorization on/off
-- need a way to force auto-indent on/off
-
-Details:
-
-- ^O (on Unix -- open-line) should honor autoindent
-- after paste, show end of pasted text
-- on Windows, should turn short filename to long filename (not only in argv!)
- (shouldn't this be done -- or undone -- by ntpath.normpath?)
-- new autoindent after colon even indents when the colon is in a comment!
-- sometimes forward slashes in pathname remain
-- sometimes star in window name remains in Windows menu
-- With unix bindings, ESC by itself is ignored
-- Sometimes for no apparent reason a selection from the cursor to the
- end of the command buffer appears, which is hard to get rid of
- because it stays when you are typing!
-- The Line/Col in the status bar can be wrong initially in PyShell DONE
-
-Structural problems:
-
-- too much knowledge in FileList about EditorWindow (for example)
-- should add some primitives for accessing the selection etc.
- to repeat cumbersome code over and over
-
-======================================================================
-
-Jeff Bauer suggests:
-
-- Open Module doesn't appear to handle hierarchical packages.
-- Class browser should also allow hierarchical packages.
-- Open and Open Module could benefit from a history, DONE
- either command line style, or Microsoft recent-file
- style.
-- Add a Smalltalk-style inspector (i.e. Tkinspect)
-
-The last suggestion is already a reality, but not yet
-integrated into IDLE. I use a module called inspector.py,
-that used to be available from python.org(?) It no longer
-appears to be in the contributed section, and the source
-has no author attribution.
-
-In any case, the code is useful for visually navigating
-an object's attributes, including its container hierarchy.
-
- >>> from inspector import Tkinspect
- >>> Tkinspect(None, myObject)
-
-Tkinspect could probably be extended and refined to
-integrate better into IDLE.
-
-======================================================================
-
-Comparison to PTUI
-------------------
-
-+ PTUI's help is better (HTML!)
-
-+ PTUI can attach a shell to any module
-
-+ PTUI has some more I/O commands:
- open multiple
- append
- examine (what's that?)
-
-======================================================================
-
-Notes after trying to run Grail
--------------------------------
-
-- Grail does stuff to sys.path based on sys.argv[0]; you must set
-sys.argv[0] to something decent first (it is normally set to the path of
-the idle script).
-
-- Grail must be exec'ed in __main__ because that's imported by some
-other parts of Grail.
-
-- Grail uses a module called History and so does idle :-(
-
-======================================================================
-
-Robin Friedrich's items:
-
-Things I'd like to see:
- - I'd like support for shift-click extending the selection. There's a
- bug now that it doesn't work the first time you try it.
- - Printing is needed. How hard can that be on Windows? FIRST CUT DONE
- - The python-mode trick of autoindenting a line with <tab> is neat and
- very handy.
- - (someday) a spellchecker for docstrings and comments.
- - a pagedown/up command key which moves to next class/def statement (top
- level)
- - split window capability
- - DnD text relocation/copying
-
-Things I don't want to see.
- - line numbers... will probably slow things down way too much.
- - Please use another icon for the tree browser leaf. The small snake
- isn't cutting it.
-
-----------------------------------------------------------------------
-
-- Customizable views (multi-window or multi-pane). (Markus Gritsch)
-
-- Being able to double click (maybe double right click) on a callable
-object in the editor which shows the source of the object, if
-possible. (Gerrit Holl)
-
-- Hooks into the guts, like in Emacs. (Mike Romberg)
-
-- Sharing the editor with a remote tutor. (Martijn Faassen)
-
-- Multiple views on the same file. (Tony J Ibbs)
-
-- Store breakpoints in a global (per-project) database (GvR); Dirk
-Heise adds: save some space-trimmed context and search around when
-reopening a file that might have been edited by someone else.
-
-- Capture menu events in extensions without changing the IDLE source.
-(Matthias Barmeier)
-
-- Use overlapping panels (a "notebook" in MFC terms I think) for info
-that doesn't need to be accessible simultaneously (e.g. HTML source
-and output). Use multi-pane windows for info that does need to be
-shown together (e.g. class browser and source). (Albert Brandl)
-
-- A project should invisibly track all symbols, for instant search,
-replace and cross-ref. Projects should be allowed to span multiple
-directories, hosts, etc. Project management files are placed in a
-directory you specify. A global mapping between project names and
-project directories should exist [not so sure --GvR]. (Tim Peters)
-
-- Merge attr-tips and auto-expand. (Mark Hammond, Tim Peters)
-
-- Python Shell should behave more like a "shell window" as users know
-it -- i.e. you can only edit the current command, and the cursor can't
-escape from the command area. (Albert Brandl)
-
-- Set X11 class to "idle/Idle", set icon and title to something
-beginning with "idle" -- for window manangers. (Randall Hopper)
-
-- Config files editable through a preferences dialog. (me) DONE
-
-- Config files still editable outside the preferences dialog.
-(Randall Hopper) DONE
-
-- When you're editing a command in PyShell, and there are only blank
-lines below the cursor, hitting Return should ignore or delete those
-blank lines rather than deciding you're not on the last line. (me)
-
-- Run command (F5 c.s.) should be more like Pythonwin's Run -- a
-dialog with options to give command line arguments, run the debugger,
-etc. (me)
-
-- Shouldn't be able to delete part of the prompt (or any text before
-it) in the PyShell. (Martijn Faassen) DONE
-
-- Emacs style auto-fill (also smart about comments and strings).
-(Jeremy Hylton)
-
-- Output of Run Script should go to a separate output window, not to
-the shell window. Output of separate runs should all go to the same
-window but clearly delimited. (David Scherer) REJECT FIRST, LATTER DONE
-
-- GUI form designer to kick VB's butt. (Robert Geiger) THAT'S NOT IDLE
-
-- Printing! Possibly via generation of PDF files which the user must
-then send to the printer separately. (Dinu Gherman) FIRST CUT
diff --git a/sys/lib/python/idlelib/ToolTip.py b/sys/lib/python/idlelib/ToolTip.py
deleted file mode 100644
index ce7a3d3ee..000000000
--- a/sys/lib/python/idlelib/ToolTip.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# general purpose 'tooltip' routines - currently unused in idlefork
-# (although the 'calltips' extension is partly based on this code)
-# may be useful for some purposes in (or almost in ;) the current project scope
-# Ideas gleaned from PySol
-
-from Tkinter import *
-
-class ToolTipBase:
-
- def __init__(self, button):
- self.button = button
- self.tipwindow = None
- self.id = None
- self.x = self.y = 0
- self._id1 = self.button.bind("<Enter>", self.enter)
- self._id2 = self.button.bind("<Leave>", self.leave)
- self._id3 = self.button.bind("<ButtonPress>", self.leave)
-
- def enter(self, event=None):
- self.schedule()
-
- def leave(self, event=None):
- self.unschedule()
- self.hidetip()
-
- def schedule(self):
- self.unschedule()
- self.id = self.button.after(1500, self.showtip)
-
- def unschedule(self):
- id = self.id
- self.id = None
- if id:
- self.button.after_cancel(id)
-
- def showtip(self):
- if self.tipwindow:
- return
- # The tip window must be completely outside the button;
- # otherwise when the mouse enters the tip window we get
- # a leave event and it disappears, and then we get an enter
- # event and it reappears, and so on forever :-(
- x = self.button.winfo_rootx() + 20
- y = self.button.winfo_rooty() + self.button.winfo_height() + 1
- self.tipwindow = tw = Toplevel(self.button)
- tw.wm_overrideredirect(1)
- tw.wm_geometry("+%d+%d" % (x, y))
- self.showcontents()
-
- def showcontents(self, text="Your text here"):
- # Override this in derived class
- label = Label(self.tipwindow, text=text, justify=LEFT,
- background="#ffffe0", relief=SOLID, borderwidth=1)
- label.pack()
-
- def hidetip(self):
- tw = self.tipwindow
- self.tipwindow = None
- if tw:
- tw.destroy()
-
-class ToolTip(ToolTipBase):
- def __init__(self, button, text):
- ToolTipBase.__init__(self, button)
- self.text = text
- def showcontents(self):
- ToolTipBase.showcontents(self, self.text)
-
-class ListboxToolTip(ToolTipBase):
- def __init__(self, button, items):
- ToolTipBase.__init__(self, button)
- self.items = items
- def showcontents(self):
- listbox = Listbox(self.tipwindow, background="#ffffe0")
- listbox.pack()
- for item in self.items:
- listbox.insert(END, item)
-
-def main():
- # Test code
- root = Tk()
- b = Button(root, text="Hello", command=root.destroy)
- b.pack()
- root.update()
- tip = ListboxToolTip(b, ["Hello", "world"])
- root.mainloop()
-
-if __name__ == '__main__':
- main()
diff --git a/sys/lib/python/idlelib/TreeWidget.py b/sys/lib/python/idlelib/TreeWidget.py
deleted file mode 100644
index c5c171fb8..000000000
--- a/sys/lib/python/idlelib/TreeWidget.py
+++ /dev/null
@@ -1,478 +0,0 @@
-# XXX TO DO:
-# - popup menu
-# - support partial or total redisplay
-# - key bindings (instead of quick-n-dirty bindings on Canvas):
-# - up/down arrow keys to move focus around
-# - ditto for page up/down, home/end
-# - left/right arrows to expand/collapse & move out/in
-# - more doc strings
-# - add icons for "file", "module", "class", "method"; better "python" icon
-# - callback for selection???
-# - multiple-item selection
-# - tooltips
-# - redo geometry without magic numbers
-# - keep track of object ids to allow more careful cleaning
-# - optimize tree redraw after expand of subnode
-
-import os
-import sys
-from Tkinter import *
-import imp
-
-import ZoomHeight
-from configHandler import idleConf
-
-ICONDIR = "Icons"
-
-# Look for Icons subdirectory in the same directory as this module
-try:
- _icondir = os.path.join(os.path.dirname(__file__), ICONDIR)
-except NameError:
- _icondir = ICONDIR
-if os.path.isdir(_icondir):
- ICONDIR = _icondir
-elif not os.path.isdir(ICONDIR):
- raise RuntimeError, "can't find icon directory (%r)" % (ICONDIR,)
-
-def listicons(icondir=ICONDIR):
- """Utility to display the available icons."""
- root = Tk()
- import glob
- list = glob.glob(os.path.join(icondir, "*.gif"))
- list.sort()
- images = []
- row = column = 0
- for file in list:
- name = os.path.splitext(os.path.basename(file))[0]
- image = PhotoImage(file=file, master=root)
- images.append(image)
- label = Label(root, image=image, bd=1, relief="raised")
- label.grid(row=row, column=column)
- label = Label(root, text=name)
- label.grid(row=row+1, column=column)
- column = column + 1
- if column >= 10:
- row = row+2
- column = 0
- root.images = images
-
-
-class TreeNode:
-
- def __init__(self, canvas, parent, item):
- self.canvas = canvas
- self.parent = parent
- self.item = item
- self.state = 'collapsed'
- self.selected = False
- self.children = []
- self.x = self.y = None
- self.iconimages = {} # cache of PhotoImage instances for icons
-
- def destroy(self):
- for c in self.children[:]:
- self.children.remove(c)
- c.destroy()
- self.parent = None
-
- def geticonimage(self, name):
- try:
- return self.iconimages[name]
- except KeyError:
- pass
- file, ext = os.path.splitext(name)
- ext = ext or ".gif"
- fullname = os.path.join(ICONDIR, file + ext)
- image = PhotoImage(master=self.canvas, file=fullname)
- self.iconimages[name] = image
- return image
-
- def select(self, event=None):
- if self.selected:
- return
- self.deselectall()
- self.selected = True
- self.canvas.delete(self.image_id)
- self.drawicon()
- self.drawtext()
-
- def deselect(self, event=None):
- if not self.selected:
- return
- self.selected = False
- self.canvas.delete(self.image_id)
- self.drawicon()
- self.drawtext()
-
- def deselectall(self):
- if self.parent:
- self.parent.deselectall()
- else:
- self.deselecttree()
-
- def deselecttree(self):
- if self.selected:
- self.deselect()
- for child in self.children:
- child.deselecttree()
-
- def flip(self, event=None):
- if self.state == 'expanded':
- self.collapse()
- else:
- self.expand()
- self.item.OnDoubleClick()
- return "break"
-
- def expand(self, event=None):
- if not self.item._IsExpandable():
- return
- if self.state != 'expanded':
- self.state = 'expanded'
- self.update()
- self.view()
-
- def collapse(self, event=None):
- if self.state != 'collapsed':
- self.state = 'collapsed'
- self.update()
-
- def view(self):
- top = self.y - 2
- bottom = self.lastvisiblechild().y + 17
- height = bottom - top
- visible_top = self.canvas.canvasy(0)
- visible_height = self.canvas.winfo_height()
- visible_bottom = self.canvas.canvasy(visible_height)
- if visible_top <= top and bottom <= visible_bottom:
- return
- x0, y0, x1, y1 = self.canvas._getints(self.canvas['scrollregion'])
- if top >= visible_top and height <= visible_height:
- fraction = top + height - visible_height
- else:
- fraction = top
- fraction = float(fraction) / y1
- self.canvas.yview_moveto(fraction)
-
- def lastvisiblechild(self):
- if self.children and self.state == 'expanded':
- return self.children[-1].lastvisiblechild()
- else:
- return self
-
- def update(self):
- if self.parent:
- self.parent.update()
- else:
- oldcursor = self.canvas['cursor']
- self.canvas['cursor'] = "watch"
- self.canvas.update()
- self.canvas.delete(ALL) # XXX could be more subtle
- self.draw(7, 2)
- x0, y0, x1, y1 = self.canvas.bbox(ALL)
- self.canvas.configure(scrollregion=(0, 0, x1, y1))
- self.canvas['cursor'] = oldcursor
-
- def draw(self, x, y):
- # XXX This hard-codes too many geometry constants!
- self.x, self.y = x, y
- self.drawicon()
- self.drawtext()
- if self.state != 'expanded':
- return y+17
- # draw children
- if not self.children:
- sublist = self.item._GetSubList()
- if not sublist:
- # _IsExpandable() was mistaken; that's allowed
- return y+17
- for item in sublist:
- child = self.__class__(self.canvas, self, item)
- self.children.append(child)
- cx = x+20
- cy = y+17
- cylast = 0
- for child in self.children:
- cylast = cy
- self.canvas.create_line(x+9, cy+7, cx, cy+7, fill="gray50")
- cy = child.draw(cx, cy)
- if child.item._IsExpandable():
- if child.state == 'expanded':
- iconname = "minusnode"
- callback = child.collapse
- else:
- iconname = "plusnode"
- callback = child.expand
- image = self.geticonimage(iconname)
- id = self.canvas.create_image(x+9, cylast+7, image=image)
- # XXX This leaks bindings until canvas is deleted:
- self.canvas.tag_bind(id, "<1>", callback)
- self.canvas.tag_bind(id, "<Double-1>", lambda x: None)
- id = self.canvas.create_line(x+9, y+10, x+9, cylast+7,
- ##stipple="gray50", # XXX Seems broken in Tk 8.0.x
- fill="gray50")
- self.canvas.tag_lower(id) # XXX .lower(id) before Python 1.5.2
- return cy
-
- def drawicon(self):
- if self.selected:
- imagename = (self.item.GetSelectedIconName() or
- self.item.GetIconName() or
- "openfolder")
- else:
- imagename = self.item.GetIconName() or "folder"
- image = self.geticonimage(imagename)
- id = self.canvas.create_image(self.x, self.y, anchor="nw", image=image)
- self.image_id = id
- self.canvas.tag_bind(id, "<1>", self.select)
- self.canvas.tag_bind(id, "<Double-1>", self.flip)
-
- def drawtext(self):
- textx = self.x+20-1
- texty = self.y-1
- labeltext = self.item.GetLabelText()
- if labeltext:
- id = self.canvas.create_text(textx, texty, anchor="nw",
- text=labeltext)
- self.canvas.tag_bind(id, "<1>", self.select)
- self.canvas.tag_bind(id, "<Double-1>", self.flip)
- x0, y0, x1, y1 = self.canvas.bbox(id)
- textx = max(x1, 200) + 10
- text = self.item.GetText() or "<no text>"
- try:
- self.entry
- except AttributeError:
- pass
- else:
- self.edit_finish()
- try:
- label = self.label
- except AttributeError:
- # padding carefully selected (on Windows) to match Entry widget:
- self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
- theme = idleConf.GetOption('main','Theme','name')
- if self.selected:
- self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
- else:
- self.label.configure(idleConf.GetHighlight(theme, 'normal'))
- id = self.canvas.create_window(textx, texty,
- anchor="nw", window=self.label)
- self.label.bind("<1>", self.select_or_edit)
- self.label.bind("<Double-1>", self.flip)
- self.text_id = id
-
- def select_or_edit(self, event=None):
- if self.selected and self.item.IsEditable():
- self.edit(event)
- else:
- self.select(event)
-
- def edit(self, event=None):
- self.entry = Entry(self.label, bd=0, highlightthickness=1, width=0)
- self.entry.insert(0, self.label['text'])
- self.entry.selection_range(0, END)
- self.entry.pack(ipadx=5)
- self.entry.focus_set()
- self.entry.bind("<Return>", self.edit_finish)
- self.entry.bind("<Escape>", self.edit_cancel)
-
- def edit_finish(self, event=None):
- try:
- entry = self.entry
- del self.entry
- except AttributeError:
- return
- text = entry.get()
- entry.destroy()
- if text and text != self.item.GetText():
- self.item.SetText(text)
- text = self.item.GetText()
- self.label['text'] = text
- self.drawtext()
- self.canvas.focus_set()
-
- def edit_cancel(self, event=None):
- try:
- entry = self.entry
- del self.entry
- except AttributeError:
- return
- entry.destroy()
- self.drawtext()
- self.canvas.focus_set()
-
-
-class TreeItem:
-
- """Abstract class representing tree items.
-
- Methods should typically be overridden, otherwise a default action
- is used.
-
- """
-
- def __init__(self):
- """Constructor. Do whatever you need to do."""
-
- def GetText(self):
- """Return text string to display."""
-
- def GetLabelText(self):
- """Return label text string to display in front of text (if any)."""
-
- expandable = None
-
- def _IsExpandable(self):
- """Do not override! Called by TreeNode."""
- if self.expandable is None:
- self.expandable = self.IsExpandable()
- return self.expandable
-
- def IsExpandable(self):
- """Return whether there are subitems."""
- return 1
-
- def _GetSubList(self):
- """Do not override! Called by TreeNode."""
- if not self.IsExpandable():
- return []
- sublist = self.GetSubList()
- if not sublist:
- self.expandable = 0
- return sublist
-
- def IsEditable(self):
- """Return whether the item's text may be edited."""
-
- def SetText(self, text):
- """Change the item's text (if it is editable)."""
-
- def GetIconName(self):
- """Return name of icon to be displayed normally."""
-
- def GetSelectedIconName(self):
- """Return name of icon to be displayed when selected."""
-
- def GetSubList(self):
- """Return list of items forming sublist."""
-
- def OnDoubleClick(self):
- """Called on a double-click on the item."""
-
-
-# Example application
-
-class FileTreeItem(TreeItem):
-
- """Example TreeItem subclass -- browse the file system."""
-
- def __init__(self, path):
- self.path = path
-
- def GetText(self):
- return os.path.basename(self.path) or self.path
-
- def IsEditable(self):
- return os.path.basename(self.path) != ""
-
- def SetText(self, text):
- newpath = os.path.dirname(self.path)
- newpath = os.path.join(newpath, text)
- if os.path.dirname(newpath) != os.path.dirname(self.path):
- return
- try:
- os.rename(self.path, newpath)
- self.path = newpath
- except os.error:
- pass
-
- def GetIconName(self):
- if not self.IsExpandable():
- return "python" # XXX wish there was a "file" icon
-
- def IsExpandable(self):
- return os.path.isdir(self.path)
-
- def GetSubList(self):
- try:
- names = os.listdir(self.path)
- except os.error:
- return []
- names.sort(lambda a, b: cmp(os.path.normcase(a), os.path.normcase(b)))
- sublist = []
- for name in names:
- item = FileTreeItem(os.path.join(self.path, name))
- sublist.append(item)
- return sublist
-
-
-# A canvas widget with scroll bars and some useful bindings
-
-class ScrolledCanvas:
- def __init__(self, master, **opts):
- if not opts.has_key('yscrollincrement'):
- opts['yscrollincrement'] = 17
- self.master = master
- self.frame = Frame(master)
- self.frame.rowconfigure(0, weight=1)
- self.frame.columnconfigure(0, weight=1)
- self.canvas = Canvas(self.frame, **opts)
- self.canvas.grid(row=0, column=0, sticky="nsew")
- self.vbar = Scrollbar(self.frame, name="vbar")
- self.vbar.grid(row=0, column=1, sticky="nse")
- self.hbar = Scrollbar(self.frame, name="hbar", orient="horizontal")
- self.hbar.grid(row=1, column=0, sticky="ews")
- self.canvas['yscrollcommand'] = self.vbar.set
- self.vbar['command'] = self.canvas.yview
- self.canvas['xscrollcommand'] = self.hbar.set
- self.hbar['command'] = self.canvas.xview
- self.canvas.bind("<Key-Prior>", self.page_up)
- self.canvas.bind("<Key-Next>", self.page_down)
- self.canvas.bind("<Key-Up>", self.unit_up)
- self.canvas.bind("<Key-Down>", self.unit_down)
- #if isinstance(master, Toplevel) or isinstance(master, Tk):
- self.canvas.bind("<Alt-Key-2>", self.zoom_height)
- self.canvas.focus_set()
- def page_up(self, event):
- self.canvas.yview_scroll(-1, "page")
- return "break"
- def page_down(self, event):
- self.canvas.yview_scroll(1, "page")
- return "break"
- def unit_up(self, event):
- self.canvas.yview_scroll(-1, "unit")
- return "break"
- def unit_down(self, event):
- self.canvas.yview_scroll(1, "unit")
- return "break"
- def zoom_height(self, event):
- ZoomHeight.zoom_height(self.master)
- return "break"
-
-
-# Testing functions
-
-def test():
- import PyShell
- root = Toplevel(PyShell.root)
- root.configure(bd=0, bg="yellow")
- root.focus_set()
- sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
- sc.frame.pack(expand=1, fill="both")
- item = FileTreeItem("C:/windows/desktop")
- node = TreeNode(sc.canvas, None, item)
- node.expand()
-
-def test2():
- # test w/o scrolling canvas
- root = Tk()
- root.configure(bd=0)
- canvas = Canvas(root, bg="white", highlightthickness=0)
- canvas.pack(expand=1, fill="both")
- item = FileTreeItem(os.curdir)
- node = TreeNode(canvas, None, item)
- node.update()
- canvas.focus_set()
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/idlelib/UndoDelegator.py b/sys/lib/python/idlelib/UndoDelegator.py
deleted file mode 100644
index 182a1170e..000000000
--- a/sys/lib/python/idlelib/UndoDelegator.py
+++ /dev/null
@@ -1,352 +0,0 @@
-import sys
-import string
-from Tkinter import *
-from Delegator import Delegator
-
-#$ event <<redo>>
-#$ win <Control-y>
-#$ unix <Alt-z>
-
-#$ event <<undo>>
-#$ win <Control-z>
-#$ unix <Control-z>
-
-#$ event <<dump-undo-state>>
-#$ win <Control-backslash>
-#$ unix <Control-backslash>
-
-
-class UndoDelegator(Delegator):
-
- max_undo = 1000
-
- def __init__(self):
- Delegator.__init__(self)
- self.reset_undo()
-
- def setdelegate(self, delegate):
- if self.delegate is not None:
- self.unbind("<<undo>>")
- self.unbind("<<redo>>")
- self.unbind("<<dump-undo-state>>")
- Delegator.setdelegate(self, delegate)
- if delegate is not None:
- self.bind("<<undo>>", self.undo_event)
- self.bind("<<redo>>", self.redo_event)
- self.bind("<<dump-undo-state>>", self.dump_event)
-
- def dump_event(self, event):
- from pprint import pprint
- pprint(self.undolist[:self.pointer])
- print "pointer:", self.pointer,
- print "saved:", self.saved,
- print "can_merge:", self.can_merge,
- print "get_saved():", self.get_saved()
- pprint(self.undolist[self.pointer:])
- return "break"
-
- def reset_undo(self):
- self.was_saved = -1
- self.pointer = 0
- self.undolist = []
- self.undoblock = 0 # or a CommandSequence instance
- self.set_saved(1)
-
- def set_saved(self, flag):
- if flag:
- self.saved = self.pointer
- else:
- self.saved = -1
- self.can_merge = False
- self.check_saved()
-
- def get_saved(self):
- return self.saved == self.pointer
-
- saved_change_hook = None
-
- def set_saved_change_hook(self, hook):
- self.saved_change_hook = hook
-
- was_saved = -1
-
- def check_saved(self):
- is_saved = self.get_saved()
- if is_saved != self.was_saved:
- self.was_saved = is_saved
- if self.saved_change_hook:
- self.saved_change_hook()
-
- def insert(self, index, chars, tags=None):
- self.addcmd(InsertCommand(index, chars, tags))
-
- def delete(self, index1, index2=None):
- self.addcmd(DeleteCommand(index1, index2))
-
- # Clients should call undo_block_start() and undo_block_stop()
- # around a sequence of editing cmds to be treated as a unit by
- # undo & redo. Nested matching calls are OK, and the inner calls
- # then act like nops. OK too if no editing cmds, or only one
- # editing cmd, is issued in between: if no cmds, the whole
- # sequence has no effect; and if only one cmd, that cmd is entered
- # directly into the undo list, as if undo_block_xxx hadn't been
- # called. The intent of all that is to make this scheme easy
- # to use: all the client has to worry about is making sure each
- # _start() call is matched by a _stop() call.
-
- def undo_block_start(self):
- if self.undoblock == 0:
- self.undoblock = CommandSequence()
- self.undoblock.bump_depth()
-
- def undo_block_stop(self):
- if self.undoblock.bump_depth(-1) == 0:
- cmd = self.undoblock
- self.undoblock = 0
- if len(cmd) > 0:
- if len(cmd) == 1:
- # no need to wrap a single cmd
- cmd = cmd.getcmd(0)
- # this blk of cmds, or single cmd, has already
- # been done, so don't execute it again
- self.addcmd(cmd, 0)
-
- def addcmd(self, cmd, execute=True):
- if execute:
- cmd.do(self.delegate)
- if self.undoblock != 0:
- self.undoblock.append(cmd)
- return
- if self.can_merge and self.pointer > 0:
- lastcmd = self.undolist[self.pointer-1]
- if lastcmd.merge(cmd):
- return
- self.undolist[self.pointer:] = [cmd]
- if self.saved > self.pointer:
- self.saved = -1
- self.pointer = self.pointer + 1
- if len(self.undolist) > self.max_undo:
- ##print "truncating undo list"
- del self.undolist[0]
- self.pointer = self.pointer - 1
- if self.saved >= 0:
- self.saved = self.saved - 1
- self.can_merge = True
- self.check_saved()
-
- def undo_event(self, event):
- if self.pointer == 0:
- self.bell()
- return "break"
- cmd = self.undolist[self.pointer - 1]
- cmd.undo(self.delegate)
- self.pointer = self.pointer - 1
- self.can_merge = False
- self.check_saved()
- return "break"
-
- def redo_event(self, event):
- if self.pointer >= len(self.undolist):
- self.bell()
- return "break"
- cmd = self.undolist[self.pointer]
- cmd.redo(self.delegate)
- self.pointer = self.pointer + 1
- self.can_merge = False
- self.check_saved()
- return "break"
-
-
-class Command:
-
- # Base class for Undoable commands
-
- tags = None
-
- def __init__(self, index1, index2, chars, tags=None):
- self.marks_before = {}
- self.marks_after = {}
- self.index1 = index1
- self.index2 = index2
- self.chars = chars
- if tags:
- self.tags = tags
-
- def __repr__(self):
- s = self.__class__.__name__
- t = (self.index1, self.index2, self.chars, self.tags)
- if self.tags is None:
- t = t[:-1]
- return s + repr(t)
-
- def do(self, text):
- pass
-
- def redo(self, text):
- pass
-
- def undo(self, text):
- pass
-
- def merge(self, cmd):
- return 0
-
- def save_marks(self, text):
- marks = {}
- for name in text.mark_names():
- if name != "insert" and name != "current":
- marks[name] = text.index(name)
- return marks
-
- def set_marks(self, text, marks):
- for name, index in marks.items():
- text.mark_set(name, index)
-
-
-class InsertCommand(Command):
-
- # Undoable insert command
-
- def __init__(self, index1, chars, tags=None):
- Command.__init__(self, index1, None, chars, tags)
-
- def do(self, text):
- self.marks_before = self.save_marks(text)
- self.index1 = text.index(self.index1)
- if text.compare(self.index1, ">", "end-1c"):
- # Insert before the final newline
- self.index1 = text.index("end-1c")
- text.insert(self.index1, self.chars, self.tags)
- self.index2 = text.index("%s+%dc" % (self.index1, len(self.chars)))
- self.marks_after = self.save_marks(text)
- ##sys.__stderr__.write("do: %s\n" % self)
-
- def redo(self, text):
- text.mark_set('insert', self.index1)
- text.insert(self.index1, self.chars, self.tags)
- self.set_marks(text, self.marks_after)
- text.see('insert')
- ##sys.__stderr__.write("redo: %s\n" % self)
-
- def undo(self, text):
- text.mark_set('insert', self.index1)
- text.delete(self.index1, self.index2)
- self.set_marks(text, self.marks_before)
- text.see('insert')
- ##sys.__stderr__.write("undo: %s\n" % self)
-
- def merge(self, cmd):
- if self.__class__ is not cmd.__class__:
- return False
- if self.index2 != cmd.index1:
- return False
- if self.tags != cmd.tags:
- return False
- if len(cmd.chars) != 1:
- return False
- if self.chars and \
- self.classify(self.chars[-1]) != self.classify(cmd.chars):
- return False
- self.index2 = cmd.index2
- self.chars = self.chars + cmd.chars
- return True
-
- alphanumeric = string.ascii_letters + string.digits + "_"
-
- def classify(self, c):
- if c in self.alphanumeric:
- return "alphanumeric"
- if c == "\n":
- return "newline"
- return "punctuation"
-
-
-class DeleteCommand(Command):
-
- # Undoable delete command
-
- def __init__(self, index1, index2=None):
- Command.__init__(self, index1, index2, None, None)
-
- def do(self, text):
- self.marks_before = self.save_marks(text)
- self.index1 = text.index(self.index1)
- if self.index2:
- self.index2 = text.index(self.index2)
- else:
- self.index2 = text.index(self.index1 + " +1c")
- if text.compare(self.index2, ">", "end-1c"):
- # Don't delete the final newline
- self.index2 = text.index("end-1c")
- self.chars = text.get(self.index1, self.index2)
- text.delete(self.index1, self.index2)
- self.marks_after = self.save_marks(text)
- ##sys.__stderr__.write("do: %s\n" % self)
-
- def redo(self, text):
- text.mark_set('insert', self.index1)
- text.delete(self.index1, self.index2)
- self.set_marks(text, self.marks_after)
- text.see('insert')
- ##sys.__stderr__.write("redo: %s\n" % self)
-
- def undo(self, text):
- text.mark_set('insert', self.index1)
- text.insert(self.index1, self.chars)
- self.set_marks(text, self.marks_before)
- text.see('insert')
- ##sys.__stderr__.write("undo: %s\n" % self)
-
-class CommandSequence(Command):
-
- # Wrapper for a sequence of undoable cmds to be undone/redone
- # as a unit
-
- def __init__(self):
- self.cmds = []
- self.depth = 0
-
- def __repr__(self):
- s = self.__class__.__name__
- strs = []
- for cmd in self.cmds:
- strs.append(" %r" % (cmd,))
- return s + "(\n" + ",\n".join(strs) + "\n)"
-
- def __len__(self):
- return len(self.cmds)
-
- def append(self, cmd):
- self.cmds.append(cmd)
-
- def getcmd(self, i):
- return self.cmds[i]
-
- def redo(self, text):
- for cmd in self.cmds:
- cmd.redo(text)
-
- def undo(self, text):
- cmds = self.cmds[:]
- cmds.reverse()
- for cmd in cmds:
- cmd.undo(text)
-
- def bump_depth(self, incr=1):
- self.depth = self.depth + incr
- return self.depth
-
-def main():
- from Percolator import Percolator
- root = Tk()
- root.wm_protocol("WM_DELETE_WINDOW", root.quit)
- text = Text()
- text.pack()
- text.focus_set()
- p = Percolator(text)
- d = UndoDelegator()
- p.insertfilter(d)
- root.mainloop()
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/idlelib/WidgetRedirector.py b/sys/lib/python/idlelib/WidgetRedirector.py
deleted file mode 100644
index df60cea4f..000000000
--- a/sys/lib/python/idlelib/WidgetRedirector.py
+++ /dev/null
@@ -1,92 +0,0 @@
-from Tkinter import *
-
-
-class WidgetRedirector:
-
- """Support for redirecting arbitrary widget subcommands."""
-
- def __init__(self, widget):
- self.dict = {}
- self.widget = widget
- self.tk = tk = widget.tk
- w = widget._w
- self.orig = w + "_orig"
- tk.call("rename", w, self.orig)
- tk.createcommand(w, self.dispatch)
-
- def __repr__(self):
- return "WidgetRedirector(%s<%s>)" % (self.widget.__class__.__name__,
- self.widget._w)
-
- def close(self):
- for name in self.dict.keys():
- self.unregister(name)
- widget = self.widget; del self.widget
- orig = self.orig; del self.orig
- tk = widget.tk
- w = widget._w
- tk.deletecommand(w)
- tk.call("rename", orig, w)
-
- def register(self, name, function):
- if self.dict.has_key(name):
- previous = dict[name]
- else:
- previous = OriginalCommand(self, name)
- self.dict[name] = function
- setattr(self.widget, name, function)
- return previous
-
- def unregister(self, name):
- if self.dict.has_key(name):
- function = self.dict[name]
- del self.dict[name]
- if hasattr(self.widget, name):
- delattr(self.widget, name)
- return function
- else:
- return None
-
- def dispatch(self, cmd, *args):
- m = self.dict.get(cmd)
- try:
- if m:
- return m(*args)
- else:
- return self.tk.call((self.orig, cmd) + args)
- except TclError:
- return ""
-
-
-class OriginalCommand:
-
- def __init__(self, redir, name):
- self.redir = redir
- self.name = name
- self.tk = redir.tk
- self.orig = redir.orig
- self.tk_call = self.tk.call
- self.orig_and_name = (self.orig, self.name)
-
- def __repr__(self):
- return "OriginalCommand(%r, %r)" % (self.redir, self.name)
-
- def __call__(self, *args):
- return self.tk_call(self.orig_and_name + args)
-
-
-def main():
- root = Tk()
- text = Text()
- text.pack()
- text.focus_set()
- redir = WidgetRedirector(text)
- global orig_insert
- def my_insert(*args):
- print "insert", args
- orig_insert(*args)
- orig_insert = redir.register("insert", my_insert)
- root.mainloop()
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/idlelib/WindowList.py b/sys/lib/python/idlelib/WindowList.py
deleted file mode 100644
index 658502b20..000000000
--- a/sys/lib/python/idlelib/WindowList.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from Tkinter import *
-
-class WindowList:
-
- def __init__(self):
- self.dict = {}
- self.callbacks = []
-
- def add(self, window):
- window.after_idle(self.call_callbacks)
- self.dict[str(window)] = window
-
- def delete(self, window):
- try:
- del self.dict[str(window)]
- except KeyError:
- # Sometimes, destroy() is called twice
- pass
- self.call_callbacks()
-
- def add_windows_to_menu(self, menu):
- list = []
- for key in self.dict.keys():
- window = self.dict[key]
- try:
- title = window.get_title()
- except TclError:
- continue
- list.append((title, window))
- list.sort()
- for title, window in list:
- menu.add_command(label=title, command=window.wakeup)
-
- def register_callback(self, callback):
- self.callbacks.append(callback)
-
- def unregister_callback(self, callback):
- try:
- self.callbacks.remove(callback)
- except ValueError:
- pass
-
- def call_callbacks(self):
- for callback in self.callbacks:
- try:
- callback()
- except:
- print "warning: callback failed in WindowList", \
- sys.exc_type, ":", sys.exc_value
-
-registry = WindowList()
-
-add_windows_to_menu = registry.add_windows_to_menu
-register_callback = registry.register_callback
-unregister_callback = registry.unregister_callback
-
-
-class ListedToplevel(Toplevel):
-
- def __init__(self, master, **kw):
- Toplevel.__init__(self, master, kw)
- registry.add(self)
- self.focused_widget = self
-
- def destroy(self):
- registry.delete(self)
- Toplevel.destroy(self)
- # If this is Idle's last window then quit the mainloop
- # (Needed for clean exit on Windows 98)
- if not registry.dict:
- self.quit()
-
- def update_windowlist_registry(self, window):
- registry.call_callbacks()
-
- def get_title(self):
- # Subclass can override
- return self.wm_title()
-
- def wakeup(self):
- try:
- if self.wm_state() == "iconic":
- self.wm_withdraw()
- self.wm_deiconify()
- self.tkraise()
- self.focused_widget.focus_set()
- except TclError:
- # This can happen when the window menu was torn off.
- # Simply ignore it.
- pass
diff --git a/sys/lib/python/idlelib/ZoomHeight.py b/sys/lib/python/idlelib/ZoomHeight.py
deleted file mode 100644
index 83ca3a697..000000000
--- a/sys/lib/python/idlelib/ZoomHeight.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Sample extension: zoom a window to maximum height
-
-import re
-import sys
-import macosxSupport
-
-class ZoomHeight:
-
- menudefs = [
- ('windows', [
- ('_Zoom Height', '<<zoom-height>>'),
- ])
- ]
-
- def __init__(self, editwin):
- self.editwin = editwin
-
- def zoom_height_event(self, event):
- top = self.editwin.top
- zoom_height(top)
-
-def zoom_height(top):
- geom = top.wm_geometry()
- m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
- if not m:
- top.bell()
- return
- width, height, x, y = map(int, m.groups())
- newheight = top.winfo_screenheight()
- if sys.platform == 'win32':
- newy = 0
- newheight = newheight - 72
-
- elif macosxSupport.runningAsOSXApp():
- # The '88' below is a magic number that avoids placing the bottom
- # of the window below the panel on my machine. I don't know how
- # to calculate the correct value for this with tkinter.
- newy = 22
- newheight = newheight - newy - 88
-
- else:
- #newy = 24
- newy = 0
- #newheight = newheight - 96
- newheight = newheight - 88
- if height >= newheight:
- newgeom = ""
- else:
- newgeom = "%dx%d+%d+%d" % (width, newheight, x, newy)
- top.wm_geometry(newgeom)
diff --git a/sys/lib/python/idlelib/__init__.py b/sys/lib/python/idlelib/__init__.py
deleted file mode 100644
index 7a83ddea7..000000000
--- a/sys/lib/python/idlelib/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Dummy file to make this a package.
diff --git a/sys/lib/python/idlelib/aboutDialog.py b/sys/lib/python/idlelib/aboutDialog.py
deleted file mode 100644
index c1210612e..000000000
--- a/sys/lib/python/idlelib/aboutDialog.py
+++ /dev/null
@@ -1,163 +0,0 @@
-"""About Dialog for IDLE
-
-"""
-
-from Tkinter import *
-import string, os
-import textView
-import idlever
-
-class AboutDialog(Toplevel):
- """Modal about dialog for idle
-
- """
- def __init__(self,parent,title):
- Toplevel.__init__(self, parent)
- self.configure(borderwidth=5)
- self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
- parent.winfo_rooty()+30))
- self.bg = "#707070"
- self.fg = "#ffffff"
- self.CreateWidgets()
- self.resizable(height=FALSE, width=FALSE)
- self.title(title)
- self.transient(parent)
- self.grab_set()
- self.protocol("WM_DELETE_WINDOW", self.Ok)
- self.parent = parent
- self.buttonOk.focus_set()
- self.bind('<Return>',self.Ok) #dismiss dialog
- self.bind('<Escape>',self.Ok) #dismiss dialog
- self.wait_window()
-
- def CreateWidgets(self):
- frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
- frameButtons = Frame(self)
- frameButtons.pack(side=BOTTOM, fill=X)
- frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
- self.buttonOk = Button(frameButtons, text='Close',
- command=self.Ok)
- self.buttonOk.pack(padx=5, pady=5)
- #self.picture = Image('photo', data=self.pictureData)
- frameBg = Frame(frameMain, bg=self.bg)
- frameBg.pack(expand=TRUE, fill=BOTH)
- labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
- font=('courier', 24, 'bold'))
- labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
- #labelPicture = Label(frameBg, text='[picture]')
- #image=self.picture, bg=self.bg)
- #labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
- # padx=0, pady=3)
- byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
- labelDesc = Label(frameBg, text=byline, justify=LEFT,
- fg=self.fg, bg=self.bg)
- labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
- labelEmail = Label(frameBg, text='email: idle-dev@python.org',
- justify=LEFT, fg=self.fg, bg=self.bg)
- labelEmail.grid(row=6, column=0, columnspan=2,
- sticky=W, padx=10, pady=0)
- labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
- justify=LEFT, fg=self.fg, bg=self.bg)
- labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
- Frame(frameBg, borderwidth=1, relief=SUNKEN,
- height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
- columnspan=3, padx=5, pady=5)
- labelPythonVer = Label(frameBg, text='Python version: ' + \
- sys.version.split()[0], fg=self.fg, bg=self.bg)
- labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
- # handle weird tk version num in windoze python >= 1.6 (?!?)
- tkVer = repr(TkVersion).split('.')
- tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
- if tkVer[len(tkVer)-1] == '':
- tkVer[len(tkVer)-1] = '0'
- tkVer = string.join(tkVer,'.')
- labelTkVer = Label(frameBg, text='Tk version: '+
- tkVer, fg=self.fg, bg=self.bg)
- labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
- py_button_f = Frame(frameBg, bg=self.bg)
- py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
- buttonLicense = Button(py_button_f, text='License', width=8,
- highlightbackground=self.bg,
- command=self.ShowLicense)
- buttonLicense.pack(side=LEFT, padx=10, pady=10)
- buttonCopyright = Button(py_button_f, text='Copyright', width=8,
- highlightbackground=self.bg,
- command=self.ShowCopyright)
- buttonCopyright.pack(side=LEFT, padx=10, pady=10)
- buttonCredits = Button(py_button_f, text='Credits', width=8,
- highlightbackground=self.bg,
- command=self.ShowPythonCredits)
- buttonCredits.pack(side=LEFT, padx=10, pady=10)
- Frame(frameBg, borderwidth=1, relief=SUNKEN,
- height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
- columnspan=3, padx=5, pady=5)
- idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
- fg=self.fg, bg=self.bg)
- idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
- idle_button_f = Frame(frameBg, bg=self.bg)
- idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
- idle_about_b = Button(idle_button_f, text='README', width=8,
- highlightbackground=self.bg,
- command=self.ShowIDLEAbout)
- idle_about_b.pack(side=LEFT, padx=10, pady=10)
- idle_news_b = Button(idle_button_f, text='NEWS', width=8,
- highlightbackground=self.bg,
- command=self.ShowIDLENEWS)
- idle_news_b.pack(side=LEFT, padx=10, pady=10)
- idle_credits_b = Button(idle_button_f, text='Credits', width=8,
- highlightbackground=self.bg,
- command=self.ShowIDLECredits)
- idle_credits_b.pack(side=LEFT, padx=10, pady=10)
-
- def ShowLicense(self):
- self.display_printer_text(license, 'About - License')
-
- def ShowCopyright(self):
- self.display_printer_text(copyright, 'About - Copyright')
-
- def ShowPythonCredits(self):
- self.display_printer_text(credits, 'About - Python Credits')
-
- def ShowIDLECredits(self):
- self.ViewFile('About - Credits','CREDITS.txt', 'iso-8859-1')
-
- def ShowIDLEAbout(self):
- self.ViewFile('About - Readme', 'README.txt')
-
- def ShowIDLENEWS(self):
- self.ViewFile('About - NEWS', 'NEWS.txt')
-
- def display_printer_text(self, printer, title):
- printer._Printer__setup()
- data = '\n'.join(printer._Printer__lines)
- textView.TextViewer(self, title, None, data)
-
- def ViewFile(self, viewTitle, viewFile, encoding=None):
- fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), viewFile)
- if encoding:
- import codecs
- try:
- textFile = codecs.open(fn, 'r')
- except IOError:
- import tkMessageBox
- tkMessageBox.showerror(title='File Load Error',
- message='Unable to load file %r .' % (fn,),
- parent=self)
- return
- else:
- data = textFile.read()
- else:
- data = None
- textView.TextViewer(self, viewTitle, fn, data=data)
-
- def Ok(self, event=None):
- self.destroy()
-
-if __name__ == '__main__':
- # test the dialog
- root = Tk()
- def run():
- import aboutDialog
- aboutDialog.AboutDialog(root, 'About')
- Button(root, text='Dialog', command=run).pack()
- root.mainloop()
diff --git a/sys/lib/python/idlelib/config-extensions.def b/sys/lib/python/idlelib/config-extensions.def
deleted file mode 100644
index 2d5cf6822..000000000
--- a/sys/lib/python/idlelib/config-extensions.def
+++ /dev/null
@@ -1,88 +0,0 @@
-# config-extensions.def
-#
-# IDLE reads several config files to determine user preferences. This
-# file is the default configuration file for IDLE extensions settings.
-#
-# Each extension must have at least one section, named after the extension
-# module. This section must contain an 'enable' item (=1 to enable the
-# extension, =0 to disable it), it may contain 'enable_editor' or 'enable_shell'
-# items, to apply it only to editor/shell windows, and may also contain any
-# other general configuration items for the extension.
-#
-# Each extension must define at least one section named ExtensionName_bindings
-# or ExtensionName_cfgBindings. If present, ExtensionName_bindings defines
-# virtual event bindings for the extension that are not user re-configurable.
-# If present, ExtensionName_cfgBindings defines virtual event bindings for the
-# extension that may be sensibly re-configured.
-#
-# If there are no keybindings for a menus' virtual events, include lines like
-# <<toggle-code-context>>= (See [CodeContext], below.)
-#
-# Currently it is necessary to manually modify this file to change extension
-# key bindings and default values. To customize, create
-# ~/.idlerc/config-extensions.cfg and append the appropriate customized
-# section(s). Those sections will override the defaults in this file.
-#
-# Note: If a keybinding is already in use when the extension is
-# loaded, the extension's virtual event's keybinding will be set to ''.
-#
-# See config-keys.def for notes on specifying keys and extend.txt for
-# information on creating IDLE extensions.
-
-[FormatParagraph]
-enable=1
-[FormatParagraph_cfgBindings]
-format-paragraph=<Alt-Key-q>
-
-[AutoExpand]
-enable=1
-[AutoExpand_cfgBindings]
-expand-word=<Alt-Key-slash>
-
-[ZoomHeight]
-enable=1
-[ZoomHeight_cfgBindings]
-zoom-height=<Alt-Key-2>
-
-[ScriptBinding]
-enable=1
-[ScriptBinding_cfgBindings]
-run-module=<Key-F5>
-check-module=<Alt-Key-x>
-
-[CallTips]
-enable=1
-[CallTips_cfgBindings]
-force-open-calltip=<Control-Key-backslash>
-[CallTips_bindings]
-try-open-calltip=<KeyRelease-parenleft>
-refresh-calltip=<KeyRelease-parenright> <KeyRelease-0>
-
-[ParenMatch]
-enable=1
-style= expression
-flash-delay= 500
-bell= 1
-[ParenMatch_cfgBindings]
-flash-paren=<Control-Key-0>
-[ParenMatch_bindings]
-paren-closed=<KeyRelease-parenright> <KeyRelease-bracketright> <KeyRelease-braceright>
-
-[AutoComplete]
-enable=1
-popupwait=2000
-[AutoComplete_cfgBindings]
-force-open-completions=<Control-Key-space>
-[AutoComplete_bindings]
-autocomplete=<Key-Tab>
-try-open-completions=<KeyRelease-period> <KeyRelease-slash> <KeyRelease-backslash>
-
-[CodeContext]
-enable=1
-enable_shell=0
-numlines=3
-visible=0
-bgcolor=LightGray
-fgcolor=Black
-[CodeContext_bindings]
-toggle-code-context=
diff --git a/sys/lib/python/idlelib/config-highlight.def b/sys/lib/python/idlelib/config-highlight.def
deleted file mode 100644
index 7d20f7824..000000000
--- a/sys/lib/python/idlelib/config-highlight.def
+++ /dev/null
@@ -1,64 +0,0 @@
-# IDLE reads several config files to determine user preferences. This
-# file is the default config file for idle highlight theme settings.
-
-[IDLE Classic]
-normal-foreground= #000000
-normal-background= #ffffff
-keyword-foreground= #ff7700
-keyword-background= #ffffff
-builtin-foreground= #900090
-builtin-background= #ffffff
-comment-foreground= #dd0000
-comment-background= #ffffff
-string-foreground= #00aa00
-string-background= #ffffff
-definition-foreground= #0000ff
-definition-background= #ffffff
-hilite-foreground= #000000
-hilite-background= gray
-break-foreground= black
-break-background= #ffff55
-hit-foreground= #ffffff
-hit-background= #000000
-error-foreground= #000000
-error-background= #ff7777
-#cursor (only foreground can be set, restart IDLE)
-cursor-foreground= black
-#shell window
-stdout-foreground= blue
-stdout-background= #ffffff
-stderr-foreground= red
-stderr-background= #ffffff
-console-foreground= #770000
-console-background= #ffffff
-
-[IDLE New]
-normal-foreground= #000000
-normal-background= #ffffff
-keyword-foreground= #ff7700
-keyword-background= #ffffff
-builtin-foreground= #900090
-builtin-background= #ffffff
-comment-foreground= #dd0000
-comment-background= #ffffff
-string-foreground= #00aa00
-string-background= #ffffff
-definition-foreground= #0000ff
-definition-background= #ffffff
-hilite-foreground= #000000
-hilite-background= gray
-break-foreground= black
-break-background= #ffff55
-hit-foreground= #ffffff
-hit-background= #000000
-error-foreground= #000000
-error-background= #ff7777
-#cursor (only foreground can be set, restart IDLE)
-cursor-foreground= black
-#shell window
-stdout-foreground= blue
-stdout-background= #ffffff
-stderr-foreground= red
-stderr-background= #ffffff
-console-foreground= #770000
-console-background= #ffffff
diff --git a/sys/lib/python/idlelib/config-keys.def b/sys/lib/python/idlelib/config-keys.def
deleted file mode 100644
index fb0aaf4dc..000000000
--- a/sys/lib/python/idlelib/config-keys.def
+++ /dev/null
@@ -1,214 +0,0 @@
-# IDLE reads several config files to determine user preferences. This
-# file is the default config file for idle key binding settings.
-# Where multiple keys are specified for an action: if they are separated
-# by a space (eg. action=<key1> <key2>) then the keys are alternatives, if
-# there is no space (eg. action=<key1><key2>) then the keys comprise a
-# single 'emacs style' multi-keystoke binding. The tk event specifier 'Key'
-# is used in all cases, for consistency in auto key conflict checking in the
-# configuration gui.
-
-[IDLE Classic Windows]
-copy=<Control-Key-c> <Control-Key-C>
-cut=<Control-Key-x> <Control-Key-X>
-paste=<Control-Key-v> <Control-Key-V>
-beginning-of-line= <Key-Home>
-center-insert=<Control-Key-l> <Control-Key-L>
-close-all-windows=<Control-Key-q>
-close-window=<Alt-Key-F4> <Meta-Key-F4>
-do-nothing=<Control-Key-F12>
-end-of-file=<Control-Key-d> <Control-Key-D>
-python-docs=<Key-F1>
-python-context-help=<Shift-Key-F1>
-history-next=<Alt-Key-n> <Meta-Key-n>
-history-previous=<Alt-Key-p> <Meta-Key-p>
-interrupt-execution=<Control-Key-c> <Control-Key-C>
-view-restart=<Key-F6>
-restart-shell=<Control-Key-F6>
-open-class-browser=<Alt-Key-c> <Meta-Key-c> <Alt-Key-C>
-open-module=<Alt-Key-m> <Meta-Key-m> <Alt-Key-M>
-open-new-window=<Control-Key-n> <Control-Key-N>
-open-window-from-file=<Control-Key-o> <Control-Key-O>
-plain-newline-and-indent=<Control-Key-j> <Control-Key-J>
-print-window=<Control-Key-p> <Control-Key-P>
-redo=<Control-Shift-Key-Z>
-remove-selection=<Key-Escape>
-save-copy-of-window-as-file=<Alt-Shift-Key-S>
-save-window-as-file=<Control-Shift-Key-S>
-save-window=<Control-Key-s>
-select-all=<Control-Key-a>
-toggle-auto-coloring=<Control-Key-slash>
-undo=<Control-Key-z> <Control-Key-Z>
-find=<Control-Key-f> <Control-Key-F>
-find-again=<Control-Key-g> <Key-F3>
-find-in-files=<Alt-Key-F3> <Meta-Key-F3>
-find-selection=<Control-Key-F3>
-replace=<Control-Key-h> <Control-Key-H>
-goto-line=<Alt-Key-g> <Meta-Key-g>
-smart-backspace=<Key-BackSpace>
-newline-and-indent=<Key-Return> <Key-KP_Enter>
-smart-indent=<Key-Tab>
-indent-region=<Control-Key-bracketright>
-dedent-region=<Control-Key-bracketleft>
-comment-region=<Alt-Key-3> <Meta-Key-3>
-uncomment-region=<Alt-Key-4> <Meta-Key-4>
-tabify-region=<Alt-Key-5> <Meta-Key-5>
-untabify-region=<Alt-Key-6> <Meta-Key-6>
-toggle-tabs=<Alt-Key-t> <Meta-Key-t> <Alt-Key-T>
-change-indentwidth=<Alt-Key-u> <Meta-Key-u> <Alt-Key-U>
-del-word-left=<Control-Key-BackSpace>
-del-word-right=<Control-Key-Delete>
-
-[IDLE Classic Unix]
-copy=<Alt-Key-w> <Meta-Key-w>
-cut=<Control-Key-w>
-paste=<Control-Key-y>
-beginning-of-line=<Control-Key-a> <Key-Home>
-center-insert=<Control-Key-l>
-close-all-windows=<Control-Key-x><Control-Key-c>
-close-window=<Control-Key-x><Control-Key-0>
-do-nothing=<Control-Key-x>
-end-of-file=<Control-Key-d>
-history-next=<Alt-Key-n> <Meta-Key-n>
-history-previous=<Alt-Key-p> <Meta-Key-p>
-interrupt-execution=<Control-Key-c>
-view-restart=<Key-F6>
-restart-shell=<Control-Key-F6>
-open-class-browser=<Control-Key-x><Control-Key-b>
-open-module=<Control-Key-x><Control-Key-m>
-open-new-window=<Control-Key-x><Control-Key-n>
-open-window-from-file=<Control-Key-x><Control-Key-f>
-plain-newline-and-indent=<Control-Key-j>
-print-window=<Control-x><Control-Key-p>
-python-docs=<Control-Key-h>
-python-context-help=<Control-Shift-Key-H>
-redo=<Alt-Key-z> <Meta-Key-z>
-remove-selection=<Key-Escape>
-save-copy-of-window-as-file=<Control-Key-x><Control-Key-y>
-save-window-as-file=<Control-Key-x><Control-Key-w>
-save-window=<Control-Key-x><Control-Key-s>
-select-all=<Alt-Key-a> <Meta-Key-a>
-toggle-auto-coloring=<Control-Key-slash>
-undo=<Control-Key-z>
-find=<Control-Key-u><Control-Key-u><Control-Key-s>
-find-again=<Control-Key-u><Control-Key-s>
-find-in-files=<Alt-Key-s> <Meta-Key-s>
-find-selection=<Control-Key-s>
-replace=<Control-Key-r>
-goto-line=<Alt-Key-g> <Meta-Key-g>
-smart-backspace=<Key-BackSpace>
-newline-and-indent=<Key-Return> <Key-KP_Enter>
-smart-indent=<Key-Tab>
-indent-region=<Control-Key-bracketright>
-dedent-region=<Control-Key-bracketleft>
-comment-region=<Alt-Key-3>
-uncomment-region=<Alt-Key-4>
-tabify-region=<Alt-Key-5>
-untabify-region=<Alt-Key-6>
-toggle-tabs=<Alt-Key-t>
-change-indentwidth=<Alt-Key-u>
-del-word-left=<Alt-Key-BackSpace>
-del-word-right=<Alt-Key-d>
-
-[IDLE Classic Mac]
-copy=<Command-Key-c>
-cut=<Command-Key-x>
-paste=<Command-Key-v>
-beginning-of-line= <Key-Home>
-center-insert=<Control-Key-l>
-close-all-windows=<Command-Key-q>
-close-window=<Command-Key-w>
-do-nothing=<Control-Key-F12>
-end-of-file=<Control-Key-d>
-python-docs=<Key-F1>
-python-context-help=<Shift-Key-F1>
-history-next=<Control-Key-n>
-history-previous=<Control-Key-p>
-interrupt-execution=<Control-Key-c>
-view-restart=<Key-F6>
-restart-shell=<Control-Key-F6>
-open-class-browser=<Command-Key-b>
-open-module=<Command-Key-m>
-open-new-window=<Command-Key-n>
-open-window-from-file=<Command-Key-o>
-plain-newline-and-indent=<Control-Key-j>
-print-window=<Command-Key-p>
-redo=<Shift-Command-Key-Z>
-remove-selection=<Key-Escape>
-save-window-as-file=<Shift-Command-Key-S>
-save-window=<Command-Key-s>
-save-copy-of-window-as-file=<Option-Command-Key-s>
-select-all=<Command-Key-a>
-toggle-auto-coloring=<Control-Key-slash>
-undo=<Command-Key-z>
-find=<Command-Key-f>
-find-again=<Command-Key-g> <Key-F3>
-find-in-files=<Command-Key-F3>
-find-selection=<Shift-Command-Key-F3>
-replace=<Command-Key-r>
-goto-line=<Command-Key-j>
-smart-backspace=<Key-BackSpace>
-newline-and-indent=<Key-Return> <Key-KP_Enter>
-smart-indent=<Key-Tab>
-indent-region=<Command-Key-bracketright>
-dedent-region=<Command-Key-bracketleft>
-comment-region=<Control-Key-3>
-uncomment-region=<Control-Key-4>
-tabify-region=<Control-Key-5>
-untabify-region=<Control-Key-6>
-toggle-tabs=<Control-Key-t>
-change-indentwidth=<Control-Key-u>
-del-word-left=<Control-Key-BackSpace>
-del-word-right=<Control-Key-Delete>
-
-[IDLE Classic OSX]
-toggle-tabs = <Control-Key-t>
-interrupt-execution = <Control-Key-c>
-untabify-region = <Control-Key-6>
-remove-selection = <Key-Escape>
-print-window = <Command-Key-p>
-replace = <Command-Key-r>
-goto-line = <Command-Key-j>
-plain-newline-and-indent = <Control-Key-j>
-history-previous = <Control-Key-p>
-beginning-of-line = <Control-Key-Left>
-end-of-line = <Control-Key-Right>
-comment-region = <Control-Key-3>
-redo = <Shift-Command-Key-Z>
-close-window = <Command-Key-w>
-restart-shell = <Control-Key-F6>
-save-window-as-file = <Command-Key-S>
-close-all-windows = <Command-Key-q>
-view-restart = <Key-F6>
-tabify-region = <Control-Key-5>
-find-again = <Command-Key-g> <Key-F3>
-find = <Command-Key-f>
-toggle-auto-coloring = <Control-Key-slash>
-select-all = <Command-Key-a>
-smart-backspace = <Key-BackSpace>
-change-indentwidth = <Control-Key-u>
-do-nothing = <Control-Key-F12>
-smart-indent = <Key-Tab>
-center-insert = <Control-Key-l>
-history-next = <Control-Key-n>
-del-word-right = <Option-Key-Delete>
-undo = <Command-Key-z>
-save-window = <Command-Key-s>
-uncomment-region = <Control-Key-4>
-cut = <Command-Key-x>
-find-in-files = <Command-Key-F3>
-dedent-region = <Command-Key-bracketleft>
-copy = <Command-Key-c>
-paste = <Command-Key-v>
-indent-region = <Command-Key-bracketright>
-del-word-left = <Option-Key-BackSpace> <Option-Command-Key-BackSpace>
-newline-and-indent = <Key-Return> <Key-KP_Enter>
-end-of-file = <Control-Key-d>
-open-class-browser = <Command-Key-b>
-open-new-window = <Command-Key-n>
-open-module = <Command-Key-m>
-find-selection = <Shift-Command-Key-F3>
-python-context-help = <Shift-Key-F1>
-save-copy-of-window-as-file = <Shift-Command-Key-s>
-open-window-from-file = <Command-Key-o>
-python-docs = <Key-F1>
-
diff --git a/sys/lib/python/idlelib/config-main.def b/sys/lib/python/idlelib/config-main.def
deleted file mode 100644
index 5ddd098de..000000000
--- a/sys/lib/python/idlelib/config-main.def
+++ /dev/null
@@ -1,79 +0,0 @@
-# IDLE reads several config files to determine user preferences. This
-# file is the default config file for general idle settings.
-#
-# When IDLE starts, it will look in
-# the following two sets of files, in order:
-#
-# default configuration
-# ---------------------
-# config-main.def the default general config file
-# config-extensions.def the default extension config file
-# config-highlight.def the default highlighting config file
-# config-keys.def the default keybinding config file
-#
-# user configuration
-# -------------------
-# ~/.idlerc/config-main.cfg the user general config file
-# ~/.idlerc/config-extensions.cfg the user extension config file
-# ~/.idlerc/config-highlight.cfg the user highlighting config file
-# ~/.idlerc/config-keys.cfg the user keybinding config file
-#
-# On Windows2000 and Windows XP the .idlerc directory is at
-# Documents and Settings\<username>\.idlerc
-#
-# On Windows98 it is at c:\.idlerc
-#
-# Any options the user saves through the config dialog will be saved to
-# the relevant user config file. Reverting any general setting to the
-# default causes that entry to be wiped from the user file and re-read
-# from the default file. User highlighting themes or keybinding sets are
-# retained unless specifically deleted within the config dialog. Choosing
-# one of the default themes or keysets just applies the relevant settings
-# from the default file.
-#
-# Additional help sources are listed in the [HelpFiles] section and must be
-# viewable by a web browser (or the Windows Help viewer in the case of .chm
-# files). These sources will be listed on the Help menu. The pattern is
-# <sequence_number = menu item;/path/to/help/source>
-# You can't use a semi-colon in a menu item or path. The path will be platform
-# specific because of path separators, drive specs etc.
-#
-# It is best to use the Configuration GUI to set up additional help sources!
-# Example:
-#1 = My Extra Help Source;/usr/share/doc/foo/index.html
-#2 = Another Help Source;/path/to/another.pdf
-
-[General]
-editor-on-startup= 0
-autosave= 0
-print-command-posix=lpr %s
-print-command-win=start /min notepad /p %s
-delete-exitfunc= 1
-
-[EditorWindow]
-width= 80
-height= 40
-font= courier
-font-size= 10
-font-bold= 0
-encoding= none
-
-[FormatParagraph]
-paragraph=70
-
-[Indent]
-use-spaces= 1
-num-spaces= 4
-
-[Theme]
-default= 1
-name= IDLE Classic
-
-[Keys]
-default= 1
-name= IDLE Classic Windows
-
-[History]
-cyclic=1
-
-[HelpFiles]
diff --git a/sys/lib/python/idlelib/configDialog.py b/sys/lib/python/idlelib/configDialog.py
deleted file mode 100644
index 2d8835c2a..000000000
--- a/sys/lib/python/idlelib/configDialog.py
+++ /dev/null
@@ -1,1147 +0,0 @@
-"""IDLE Configuration Dialog: support user customization of IDLE by GUI
-
-Customize font faces, sizes, and colorization attributes. Set indentation
-defaults. Customize keybindings. Colorization and keybindings can be
-saved as user defined sets. Select startup options including shell/editor
-and default window size. Define additional help sources.
-
-Note that tab width in IDLE is currently fixed at eight due to Tk issues.
-Refer to comments in EditorWindow autoindent code for details.
-
-"""
-from Tkinter import *
-import tkMessageBox, tkColorChooser, tkFont
-import string, copy
-
-from configHandler import idleConf
-from dynOptionMenuWidget import DynOptionMenu
-from tabpage import TabPageSet
-from keybindingDialog import GetKeysDialog
-from configSectionNameDialog import GetCfgSectionNameDialog
-from configHelpSourceEdit import GetHelpSourceDialog
-
-class ConfigDialog(Toplevel):
-
- def __init__(self,parent,title):
- Toplevel.__init__(self, parent)
- self.configure(borderwidth=5)
- self.geometry("+%d+%d" % (parent.winfo_rootx()+20,
- parent.winfo_rooty()+30))
- #Theme Elements. Each theme element key is its display name.
- #The first value of the tuple is the sample area tag name.
- #The second value is the display name list sort index.
- self.themeElements={'Normal Text':('normal','00'),
- 'Python Keywords':('keyword','01'),
- 'Python Definitions':('definition','02'),
- 'Python Builtins':('builtin', '03'),
- 'Python Comments':('comment','04'),
- 'Python Strings':('string','05'),
- 'Selected Text':('hilite','06'),
- 'Found Text':('hit','07'),
- 'Cursor':('cursor','08'),
- 'Error Text':('error','09'),
- 'Shell Normal Text':('console','10'),
- 'Shell Stdout Text':('stdout','11'),
- 'Shell Stderr Text':('stderr','12'),
- }
- self.ResetChangedItems() #load initial values in changed items dict
- self.CreateWidgets()
- self.resizable(height=FALSE,width=FALSE)
- self.transient(parent)
- self.grab_set()
- self.protocol("WM_DELETE_WINDOW", self.Cancel)
- self.parent = parent
- self.tabPages.focus_set()
- #key bindings for this dialog
- #self.bind('<Escape>',self.Cancel) #dismiss dialog, no save
- #self.bind('<Alt-a>',self.Apply) #apply changes, save
- #self.bind('<F1>',self.Help) #context help
- self.LoadConfigs()
- self.AttachVarCallbacks() #avoid callbacks during LoadConfigs
- self.wait_window()
-
- def CreateWidgets(self):
- self.tabPages = TabPageSet(self,
- pageNames=['Fonts/Tabs','Highlighting','Keys','General'])
- self.tabPages.ChangePage()#activates default (first) page
- frameActionButtons = Frame(self)
- #action buttons
- self.buttonHelp = Button(frameActionButtons,text='Help',
- command=self.Help,takefocus=FALSE)
- self.buttonOk = Button(frameActionButtons,text='Ok',
- command=self.Ok,takefocus=FALSE)
- self.buttonApply = Button(frameActionButtons,text='Apply',
- command=self.Apply,takefocus=FALSE)
- self.buttonCancel = Button(frameActionButtons,text='Cancel',
- command=self.Cancel,takefocus=FALSE)
- self.CreatePageFontTab()
- self.CreatePageHighlight()
- self.CreatePageKeys()
- self.CreatePageGeneral()
- self.buttonHelp.pack(side=RIGHT,padx=5,pady=5)
- self.buttonOk.pack(side=LEFT,padx=5,pady=5)
- self.buttonApply.pack(side=LEFT,padx=5,pady=5)
- self.buttonCancel.pack(side=LEFT,padx=5,pady=5)
- frameActionButtons.pack(side=BOTTOM)
- self.tabPages.pack(side=TOP,expand=TRUE,fill=BOTH)
-
- def CreatePageFontTab(self):
- #tkVars
- self.fontSize=StringVar(self)
- self.fontBold=BooleanVar(self)
- self.fontName=StringVar(self)
- self.spaceNum=IntVar(self)
- self.editFont=tkFont.Font(self,('courier',10,'normal'))
- ##widget creation
- #body frame
- frame=self.tabPages.pages['Fonts/Tabs']['page']
- #body section frames
- frameFont=Frame(frame,borderwidth=2,relief=GROOVE)
- frameIndent=Frame(frame,borderwidth=2,relief=GROOVE)
- #frameFont
- labelFontTitle=Label(frameFont,text='Set Base Editor Font')
- frameFontName=Frame(frameFont)
- frameFontParam=Frame(frameFont)
- labelFontNameTitle=Label(frameFontName,justify=LEFT,
- text='Font :')
- self.listFontName=Listbox(frameFontName,height=5,takefocus=FALSE,
- exportselection=FALSE)
- self.listFontName.bind('<ButtonRelease-1>',self.OnListFontButtonRelease)
- scrollFont=Scrollbar(frameFontName)
- scrollFont.config(command=self.listFontName.yview)
- self.listFontName.config(yscrollcommand=scrollFont.set)
- labelFontSizeTitle=Label(frameFontParam,text='Size :')
- self.optMenuFontSize=DynOptionMenu(frameFontParam,self.fontSize,None,
- command=self.SetFontSample)
- checkFontBold=Checkbutton(frameFontParam,variable=self.fontBold,
- onvalue=1,offvalue=0,text='Bold',command=self.SetFontSample)
- frameFontSample=Frame(frameFont,relief=SOLID,borderwidth=1)
- self.labelFontSample=Label(frameFontSample,
- text='AaBbCcDdEe\nFfGgHhIiJjK\n1234567890\n#:+=(){}[]',
- justify=LEFT,font=self.editFont)
- #frameIndent
- frameIndentSize=Frame(frameIndent)
- labelSpaceNumTitle=Label(frameIndentSize, justify=LEFT,
- text='Python Standard: 4 Spaces!')
- self.scaleSpaceNum=Scale(frameIndentSize, variable=self.spaceNum,
- label='Indentation Width', orient='horizontal',
- tickinterval=2, from_=2, to=16)
- #widget packing
- #body
- frameFont.pack(side=LEFT,padx=5,pady=10,expand=TRUE,fill=BOTH)
- frameIndent.pack(side=LEFT,padx=5,pady=10,fill=Y)
- #frameFont
- labelFontTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- frameFontName.pack(side=TOP,padx=5,pady=5,fill=X)
- frameFontParam.pack(side=TOP,padx=5,pady=5,fill=X)
- labelFontNameTitle.pack(side=TOP,anchor=W)
- self.listFontName.pack(side=LEFT,expand=TRUE,fill=X)
- scrollFont.pack(side=LEFT,fill=Y)
- labelFontSizeTitle.pack(side=LEFT,anchor=W)
- self.optMenuFontSize.pack(side=LEFT,anchor=W)
- checkFontBold.pack(side=LEFT,anchor=W,padx=20)
- frameFontSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
- self.labelFontSample.pack(expand=TRUE,fill=BOTH)
- #frameIndent
- frameIndentSize.pack(side=TOP,padx=5,pady=5,fill=BOTH)
- labelSpaceNumTitle.pack(side=TOP,anchor=W,padx=5)
- self.scaleSpaceNum.pack(side=TOP,padx=5,fill=X)
- return frame
-
- def CreatePageHighlight(self):
- self.builtinTheme=StringVar(self)
- self.customTheme=StringVar(self)
- self.fgHilite=BooleanVar(self)
- self.colour=StringVar(self)
- self.fontName=StringVar(self)
- self.themeIsBuiltin=BooleanVar(self)
- self.highlightTarget=StringVar(self)
- ##widget creation
- #body frame
- frame=self.tabPages.pages['Highlighting']['page']
- #body section frames
- frameCustom=Frame(frame,borderwidth=2,relief=GROOVE)
- frameTheme=Frame(frame,borderwidth=2,relief=GROOVE)
- #frameCustom
- self.textHighlightSample=Text(frameCustom,relief=SOLID,borderwidth=1,
- font=('courier',12,''),cursor='hand2',width=21,height=10,
- takefocus=FALSE,highlightthickness=0,wrap=NONE)
- text=self.textHighlightSample
- text.bind('<Double-Button-1>',lambda e: 'break')
- text.bind('<B1-Motion>',lambda e: 'break')
- textAndTags=(('#you can click here','comment'),('\n','normal'),
- ('#to choose items','comment'),('\n','normal'),('def','keyword'),
- (' ','normal'),('func','definition'),('(param):','normal'),
- ('\n ','normal'),('"""string"""','string'),('\n var0 = ','normal'),
- ("'string'",'string'),('\n var1 = ','normal'),("'selected'",'hilite'),
- ('\n var2 = ','normal'),("'found'",'hit'),
- ('\n var3 = ','normal'),('list', 'builtin'), ('(','normal'),
- ('None', 'builtin'),(')\n\n','normal'),
- (' error ','error'),(' ','normal'),('cursor |','cursor'),
- ('\n ','normal'),('shell','console'),(' ','normal'),('stdout','stdout'),
- (' ','normal'),('stderr','stderr'),('\n','normal'))
- for txTa in textAndTags:
- text.insert(END,txTa[0],txTa[1])
- for element in self.themeElements.keys():
- text.tag_bind(self.themeElements[element][0],'<ButtonPress-1>',
- lambda event,elem=element: event.widget.winfo_toplevel()
- .highlightTarget.set(elem))
- text.config(state=DISABLED)
- self.frameColourSet=Frame(frameCustom,relief=SOLID,borderwidth=1)
- frameFgBg=Frame(frameCustom)
- labelCustomTitle=Label(frameCustom,text='Set Custom Highlighting')
- buttonSetColour=Button(self.frameColourSet,text='Choose Colour for :',
- command=self.GetColour,highlightthickness=0)
- self.optMenuHighlightTarget=DynOptionMenu(self.frameColourSet,
- self.highlightTarget,None,highlightthickness=0)#,command=self.SetHighlightTargetBinding
- self.radioFg=Radiobutton(frameFgBg,variable=self.fgHilite,
- value=1,text='Foreground',command=self.SetColourSampleBinding)
- self.radioBg=Radiobutton(frameFgBg,variable=self.fgHilite,
- value=0,text='Background',command=self.SetColourSampleBinding)
- self.fgHilite.set(1)
- buttonSaveCustomTheme=Button(frameCustom,
- text='Save as New Custom Theme',command=self.SaveAsNewTheme)
- #frameTheme
- labelThemeTitle=Label(frameTheme,text='Select a Highlighting Theme')
- labelTypeTitle=Label(frameTheme,text='Select : ')
- self.radioThemeBuiltin=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
- value=1,command=self.SetThemeType,text='a Built-in Theme')
- self.radioThemeCustom=Radiobutton(frameTheme,variable=self.themeIsBuiltin,
- value=0,command=self.SetThemeType,text='a Custom Theme')
- self.optMenuThemeBuiltin=DynOptionMenu(frameTheme,
- self.builtinTheme,None,command=None)
- self.optMenuThemeCustom=DynOptionMenu(frameTheme,
- self.customTheme,None,command=None)
- self.buttonDeleteCustomTheme=Button(frameTheme,text='Delete Custom Theme',
- command=self.DeleteCustomTheme)
- ##widget packing
- #body
- frameCustom.pack(side=LEFT,padx=5,pady=10,expand=TRUE,fill=BOTH)
- frameTheme.pack(side=LEFT,padx=5,pady=10,fill=Y)
- #frameCustom
- labelCustomTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- self.frameColourSet.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=X)
- frameFgBg.pack(side=TOP,padx=5,pady=0)
- self.textHighlightSample.pack(side=TOP,padx=5,pady=5,expand=TRUE,
- fill=BOTH)
- buttonSetColour.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=4)
- self.optMenuHighlightTarget.pack(side=TOP,expand=TRUE,fill=X,padx=8,pady=3)
- self.radioFg.pack(side=LEFT,anchor=E)
- self.radioBg.pack(side=RIGHT,anchor=W)
- buttonSaveCustomTheme.pack(side=BOTTOM,fill=X,padx=5,pady=5)
- #frameTheme
- labelThemeTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- labelTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- self.radioThemeBuiltin.pack(side=TOP,anchor=W,padx=5)
- self.radioThemeCustom.pack(side=TOP,anchor=W,padx=5,pady=2)
- self.optMenuThemeBuiltin.pack(side=TOP,fill=X,padx=5,pady=5)
- self.optMenuThemeCustom.pack(side=TOP,fill=X,anchor=W,padx=5,pady=5)
- self.buttonDeleteCustomTheme.pack(side=TOP,fill=X,padx=5,pady=5)
- return frame
-
- def CreatePageKeys(self):
- #tkVars
- self.bindingTarget=StringVar(self)
- self.builtinKeys=StringVar(self)
- self.customKeys=StringVar(self)
- self.keysAreBuiltin=BooleanVar(self)
- self.keyBinding=StringVar(self)
- ##widget creation
- #body frame
- frame=self.tabPages.pages['Keys']['page']
- #body section frames
- frameCustom=Frame(frame,borderwidth=2,relief=GROOVE)
- frameKeySets=Frame(frame,borderwidth=2,relief=GROOVE)
- #frameCustom
- frameTarget=Frame(frameCustom)
- labelCustomTitle=Label(frameCustom,text='Set Custom Key Bindings')
- labelTargetTitle=Label(frameTarget,text='Action - Key(s)')
- scrollTargetY=Scrollbar(frameTarget)
- scrollTargetX=Scrollbar(frameTarget,orient=HORIZONTAL)
- self.listBindings=Listbox(frameTarget,takefocus=FALSE,
- exportselection=FALSE)
- self.listBindings.bind('<ButtonRelease-1>',self.KeyBindingSelected)
- scrollTargetY.config(command=self.listBindings.yview)
- scrollTargetX.config(command=self.listBindings.xview)
- self.listBindings.config(yscrollcommand=scrollTargetY.set)
- self.listBindings.config(xscrollcommand=scrollTargetX.set)
- self.buttonNewKeys=Button(frameCustom,text='Get New Keys for Selection',
- command=self.GetNewKeys,state=DISABLED)
- buttonSaveCustomKeys=Button(frameCustom,
- text='Save as New Custom Key Set',command=self.SaveAsNewKeySet)
- #frameKeySets
- labelKeysTitle=Label(frameKeySets,text='Select a Key Set')
- labelTypeTitle=Label(frameKeySets,text='Select : ')
- self.radioKeysBuiltin=Radiobutton(frameKeySets,variable=self.keysAreBuiltin,
- value=1,command=self.SetKeysType,text='a Built-in Key Set')
- self.radioKeysCustom=Radiobutton(frameKeySets,variable=self.keysAreBuiltin,
- value=0,command=self.SetKeysType,text='a Custom Key Set')
- self.optMenuKeysBuiltin=DynOptionMenu(frameKeySets,
- self.builtinKeys,None,command=None)
- self.optMenuKeysCustom=DynOptionMenu(frameKeySets,
- self.customKeys,None,command=None)
- self.buttonDeleteCustomKeys=Button(frameKeySets,text='Delete Custom Key Set',
- command=self.DeleteCustomKeys)
- ##widget packing
- #body
- frameCustom.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
- frameKeySets.pack(side=LEFT,padx=5,pady=5,fill=Y)
- #frameCustom
- labelCustomTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- buttonSaveCustomKeys.pack(side=BOTTOM,fill=X,padx=5,pady=5)
- self.buttonNewKeys.pack(side=BOTTOM,fill=X,padx=5,pady=5)
- frameTarget.pack(side=LEFT,padx=5,pady=5,expand=TRUE,fill=BOTH)
- #frame target
- frameTarget.columnconfigure(0,weight=1)
- frameTarget.rowconfigure(1,weight=1)
- labelTargetTitle.grid(row=0,column=0,columnspan=2,sticky=W)
- self.listBindings.grid(row=1,column=0,sticky=NSEW)
- scrollTargetY.grid(row=1,column=1,sticky=NS)
- scrollTargetX.grid(row=2,column=0,sticky=EW)
- #frameKeySets
- labelKeysTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- labelTypeTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- self.radioKeysBuiltin.pack(side=TOP,anchor=W,padx=5)
- self.radioKeysCustom.pack(side=TOP,anchor=W,padx=5,pady=2)
- self.optMenuKeysBuiltin.pack(side=TOP,fill=X,padx=5,pady=5)
- self.optMenuKeysCustom.pack(side=TOP,fill=X,anchor=W,padx=5,pady=5)
- self.buttonDeleteCustomKeys.pack(side=TOP,fill=X,padx=5,pady=5)
- return frame
-
- def CreatePageGeneral(self):
- #tkVars
- self.winWidth=StringVar(self)
- self.winHeight=StringVar(self)
- self.paraWidth=StringVar(self)
- self.startupEdit=IntVar(self)
- self.autoSave=IntVar(self)
- self.encoding=StringVar(self)
- self.userHelpBrowser=BooleanVar(self)
- self.helpBrowser=StringVar(self)
- #widget creation
- #body
- frame=self.tabPages.pages['General']['page']
- #body section frames
- frameRun=Frame(frame,borderwidth=2,relief=GROOVE)
- frameSave=Frame(frame,borderwidth=2,relief=GROOVE)
- frameWinSize=Frame(frame,borderwidth=2,relief=GROOVE)
- frameParaSize=Frame(frame,borderwidth=2,relief=GROOVE)
- frameEncoding=Frame(frame,borderwidth=2,relief=GROOVE)
- frameHelp=Frame(frame,borderwidth=2,relief=GROOVE)
- #frameRun
- labelRunTitle=Label(frameRun,text='Startup Preferences')
- labelRunChoiceTitle=Label(frameRun,text='At Startup')
- radioStartupEdit=Radiobutton(frameRun,variable=self.startupEdit,
- value=1,command=self.SetKeysType,text="Open Edit Window")
- radioStartupShell=Radiobutton(frameRun,variable=self.startupEdit,
- value=0,command=self.SetKeysType,text='Open Shell Window')
- #frameSave
- labelSaveTitle=Label(frameSave,text='Autosave Preference')
- labelRunSaveTitle=Label(frameSave,text='At Start of Run (F5) ')
- radioSaveAsk=Radiobutton(frameSave,variable=self.autoSave,
- value=0,command=self.SetKeysType,text="Prompt to Save")
- radioSaveAuto=Radiobutton(frameSave,variable=self.autoSave,
- value=1,command=self.SetKeysType,text='No Prompt')
- #frameWinSize
- labelWinSizeTitle=Label(frameWinSize,text='Initial Window Size'+
- ' (in characters)')
- labelWinWidthTitle=Label(frameWinSize,text='Width')
- entryWinWidth=Entry(frameWinSize,textvariable=self.winWidth,
- width=3)
- labelWinHeightTitle=Label(frameWinSize,text='Height')
- entryWinHeight=Entry(frameWinSize,textvariable=self.winHeight,
- width=3)
- #paragraphFormatWidth
- labelParaWidthTitle=Label(frameParaSize,text='Paragraph reformat'+
- ' width (in characters)')
- entryParaWidth=Entry(frameParaSize,textvariable=self.paraWidth,
- width=3)
- #frameEncoding
- labelEncodingTitle=Label(frameEncoding,text="Default Source Encoding")
- radioEncLocale=Radiobutton(frameEncoding,variable=self.encoding,
- value="locale",text="Locale-defined")
- radioEncUTF8=Radiobutton(frameEncoding,variable=self.encoding,
- value="utf-8",text="UTF-8")
- radioEncNone=Radiobutton(frameEncoding,variable=self.encoding,
- value="none",text="None")
- #frameHelp
- frameHelpList=Frame(frameHelp)
- frameHelpListButtons=Frame(frameHelpList)
- labelHelpListTitle=Label(frameHelpList,text='Additional Help Sources:')
- scrollHelpList=Scrollbar(frameHelpList)
- self.listHelp=Listbox(frameHelpList,height=5,takefocus=FALSE,
- exportselection=FALSE)
- scrollHelpList.config(command=self.listHelp.yview)
- self.listHelp.config(yscrollcommand=scrollHelpList.set)
- self.listHelp.bind('<ButtonRelease-1>',self.HelpSourceSelected)
- self.buttonHelpListEdit=Button(frameHelpListButtons,text='Edit',
- state=DISABLED,width=8,command=self.HelpListItemEdit)
- self.buttonHelpListAdd=Button(frameHelpListButtons,text='Add',
- width=8,command=self.HelpListItemAdd)
- self.buttonHelpListRemove=Button(frameHelpListButtons,text='Remove',
- state=DISABLED,width=8,command=self.HelpListItemRemove)
- #widget packing
- #body
- frameRun.pack(side=TOP,padx=5,pady=5,fill=X)
- frameSave.pack(side=TOP,padx=5,pady=5,fill=X)
- frameWinSize.pack(side=TOP,padx=5,pady=5,fill=X)
- frameParaSize.pack(side=TOP,padx=5,pady=5,fill=X)
- frameEncoding.pack(side=TOP,padx=5,pady=5,fill=X)
- frameHelp.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
- #frameRun
- labelRunTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- labelRunChoiceTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
- radioStartupShell.pack(side=RIGHT,anchor=W,padx=5,pady=5)
- radioStartupEdit.pack(side=RIGHT,anchor=W,padx=5,pady=5)
- #frameSave
- labelSaveTitle.pack(side=TOP,anchor=W,padx=5,pady=5)
- labelRunSaveTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
- radioSaveAuto.pack(side=RIGHT,anchor=W,padx=5,pady=5)
- radioSaveAsk.pack(side=RIGHT,anchor=W,padx=5,pady=5)
- #frameWinSize
- labelWinSizeTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
- entryWinHeight.pack(side=RIGHT,anchor=E,padx=10,pady=5)
- labelWinHeightTitle.pack(side=RIGHT,anchor=E,pady=5)
- entryWinWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
- labelWinWidthTitle.pack(side=RIGHT,anchor=E,pady=5)
- #paragraphFormatWidth
- labelParaWidthTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
- entryParaWidth.pack(side=RIGHT,anchor=E,padx=10,pady=5)
- #frameEncoding
- labelEncodingTitle.pack(side=LEFT,anchor=W,padx=5,pady=5)
- radioEncNone.pack(side=RIGHT,anchor=E,pady=5)
- radioEncUTF8.pack(side=RIGHT,anchor=E,pady=5)
- radioEncLocale.pack(side=RIGHT,anchor=E,pady=5)
- #frameHelp
- frameHelpListButtons.pack(side=RIGHT,padx=5,pady=5,fill=Y)
- frameHelpList.pack(side=TOP,padx=5,pady=5,expand=TRUE,fill=BOTH)
- labelHelpListTitle.pack(side=TOP,anchor=W)
- scrollHelpList.pack(side=RIGHT,anchor=W,fill=Y)
- self.listHelp.pack(side=LEFT,anchor=E,expand=TRUE,fill=BOTH)
- self.buttonHelpListEdit.pack(side=TOP,anchor=W,pady=5)
- self.buttonHelpListAdd.pack(side=TOP,anchor=W)
- self.buttonHelpListRemove.pack(side=TOP,anchor=W,pady=5)
- return frame
-
- def AttachVarCallbacks(self):
- self.fontSize.trace_variable('w',self.VarChanged_fontSize)
- self.fontName.trace_variable('w',self.VarChanged_fontName)
- self.fontBold.trace_variable('w',self.VarChanged_fontBold)
- self.spaceNum.trace_variable('w',self.VarChanged_spaceNum)
- self.colour.trace_variable('w',self.VarChanged_colour)
- self.builtinTheme.trace_variable('w',self.VarChanged_builtinTheme)
- self.customTheme.trace_variable('w',self.VarChanged_customTheme)
- self.themeIsBuiltin.trace_variable('w',self.VarChanged_themeIsBuiltin)
- self.highlightTarget.trace_variable('w',self.VarChanged_highlightTarget)
- self.keyBinding.trace_variable('w',self.VarChanged_keyBinding)
- self.builtinKeys.trace_variable('w',self.VarChanged_builtinKeys)
- self.customKeys.trace_variable('w',self.VarChanged_customKeys)
- self.keysAreBuiltin.trace_variable('w',self.VarChanged_keysAreBuiltin)
- self.winWidth.trace_variable('w',self.VarChanged_winWidth)
- self.winHeight.trace_variable('w',self.VarChanged_winHeight)
- self.paraWidth.trace_variable('w',self.VarChanged_paraWidth)
- self.startupEdit.trace_variable('w',self.VarChanged_startupEdit)
- self.autoSave.trace_variable('w',self.VarChanged_autoSave)
- self.encoding.trace_variable('w',self.VarChanged_encoding)
-
- def VarChanged_fontSize(self,*params):
- value=self.fontSize.get()
- self.AddChangedItem('main','EditorWindow','font-size',value)
-
- def VarChanged_fontName(self,*params):
- value=self.fontName.get()
- self.AddChangedItem('main','EditorWindow','font',value)
-
- def VarChanged_fontBold(self,*params):
- value=self.fontBold.get()
- self.AddChangedItem('main','EditorWindow','font-bold',value)
-
- def VarChanged_spaceNum(self,*params):
- value=self.spaceNum.get()
- self.AddChangedItem('main','Indent','num-spaces',value)
-
- def VarChanged_colour(self,*params):
- self.OnNewColourSet()
-
- def VarChanged_builtinTheme(self,*params):
- value=self.builtinTheme.get()
- self.AddChangedItem('main','Theme','name',value)
- self.PaintThemeSample()
-
- def VarChanged_customTheme(self,*params):
- value=self.customTheme.get()
- if value != '- no custom themes -':
- self.AddChangedItem('main','Theme','name',value)
- self.PaintThemeSample()
-
- def VarChanged_themeIsBuiltin(self,*params):
- value=self.themeIsBuiltin.get()
- self.AddChangedItem('main','Theme','default',value)
- if value:
- self.VarChanged_builtinTheme()
- else:
- self.VarChanged_customTheme()
-
- def VarChanged_highlightTarget(self,*params):
- self.SetHighlightTarget()
-
- def VarChanged_keyBinding(self,*params):
- value=self.keyBinding.get()
- keySet=self.customKeys.get()
- event=self.listBindings.get(ANCHOR).split()[0]
- if idleConf.IsCoreBinding(event):
- #this is a core keybinding
- self.AddChangedItem('keys',keySet,event,value)
- else: #this is an extension key binding
- extName=idleConf.GetExtnNameForEvent(event)
- extKeybindSection=extName+'_cfgBindings'
- self.AddChangedItem('extensions',extKeybindSection,event,value)
-
- def VarChanged_builtinKeys(self,*params):
- value=self.builtinKeys.get()
- self.AddChangedItem('main','Keys','name',value)
- self.LoadKeysList(value)
-
- def VarChanged_customKeys(self,*params):
- value=self.customKeys.get()
- if value != '- no custom keys -':
- self.AddChangedItem('main','Keys','name',value)
- self.LoadKeysList(value)
-
- def VarChanged_keysAreBuiltin(self,*params):
- value=self.keysAreBuiltin.get()
- self.AddChangedItem('main','Keys','default',value)
- if value:
- self.VarChanged_builtinKeys()
- else:
- self.VarChanged_customKeys()
-
- def VarChanged_winWidth(self,*params):
- value=self.winWidth.get()
- self.AddChangedItem('main','EditorWindow','width',value)
-
- def VarChanged_winHeight(self,*params):
- value=self.winHeight.get()
- self.AddChangedItem('main','EditorWindow','height',value)
-
- def VarChanged_paraWidth(self,*params):
- value=self.paraWidth.get()
- self.AddChangedItem('main','FormatParagraph','paragraph',value)
-
- def VarChanged_startupEdit(self,*params):
- value=self.startupEdit.get()
- self.AddChangedItem('main','General','editor-on-startup',value)
-
- def VarChanged_autoSave(self,*params):
- value=self.autoSave.get()
- self.AddChangedItem('main','General','autosave',value)
-
- def VarChanged_encoding(self,*params):
- value=self.encoding.get()
- self.AddChangedItem('main','EditorWindow','encoding',value)
-
- def ResetChangedItems(self):
- #When any config item is changed in this dialog, an entry
- #should be made in the relevant section (config type) of this
- #dictionary. The key should be the config file section name and the
- #value a dictionary, whose key:value pairs are item=value pairs for
- #that config file section.
- self.changedItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
-
- def AddChangedItem(self,type,section,item,value):
- value=str(value) #make sure we use a string
- if not self.changedItems[type].has_key(section):
- self.changedItems[type][section]={}
- self.changedItems[type][section][item]=value
-
- def GetDefaultItems(self):
- dItems={'main':{},'highlight':{},'keys':{},'extensions':{}}
- for configType in dItems.keys():
- sections=idleConf.GetSectionList('default',configType)
- for section in sections:
- dItems[configType][section]={}
- options=idleConf.defaultCfg[configType].GetOptionList(section)
- for option in options:
- dItems[configType][section][option]=(
- idleConf.defaultCfg[configType].Get(section,option))
- return dItems
-
- def SetThemeType(self):
- if self.themeIsBuiltin.get():
- self.optMenuThemeBuiltin.config(state=NORMAL)
- self.optMenuThemeCustom.config(state=DISABLED)
- self.buttonDeleteCustomTheme.config(state=DISABLED)
- else:
- self.optMenuThemeBuiltin.config(state=DISABLED)
- self.radioThemeCustom.config(state=NORMAL)
- self.optMenuThemeCustom.config(state=NORMAL)
- self.buttonDeleteCustomTheme.config(state=NORMAL)
-
- def SetKeysType(self):
- if self.keysAreBuiltin.get():
- self.optMenuKeysBuiltin.config(state=NORMAL)
- self.optMenuKeysCustom.config(state=DISABLED)
- self.buttonDeleteCustomKeys.config(state=DISABLED)
- else:
- self.optMenuKeysBuiltin.config(state=DISABLED)
- self.radioKeysCustom.config(state=NORMAL)
- self.optMenuKeysCustom.config(state=NORMAL)
- self.buttonDeleteCustomKeys.config(state=NORMAL)
-
- def GetNewKeys(self):
- listIndex=self.listBindings.index(ANCHOR)
- binding=self.listBindings.get(listIndex)
- bindName=binding.split()[0] #first part, up to first space
- if self.keysAreBuiltin.get():
- currentKeySetName=self.builtinKeys.get()
- else:
- currentKeySetName=self.customKeys.get()
- currentBindings=idleConf.GetCurrentKeySet()
- if currentKeySetName in self.changedItems['keys'].keys(): #unsaved changes
- keySetChanges=self.changedItems['keys'][currentKeySetName]
- for event in keySetChanges.keys():
- currentBindings[event]=keySetChanges[event].split()
- currentKeySequences=currentBindings.values()
- newKeys=GetKeysDialog(self,'Get New Keys',bindName,
- currentKeySequences).result
- if newKeys: #new keys were specified
- if self.keysAreBuiltin.get(): #current key set is a built-in
- message=('Your changes will be saved as a new Custom Key Set. '+
- 'Enter a name for your new Custom Key Set below.')
- newKeySet=self.GetNewKeysName(message)
- if not newKeySet: #user cancelled custom key set creation
- self.listBindings.select_set(listIndex)
- self.listBindings.select_anchor(listIndex)
- return
- else: #create new custom key set based on previously active key set
- self.CreateNewKeySet(newKeySet)
- self.listBindings.delete(listIndex)
- self.listBindings.insert(listIndex,bindName+' - '+newKeys)
- self.listBindings.select_set(listIndex)
- self.listBindings.select_anchor(listIndex)
- self.keyBinding.set(newKeys)
- else:
- self.listBindings.select_set(listIndex)
- self.listBindings.select_anchor(listIndex)
-
- def GetNewKeysName(self,message):
- usedNames=(idleConf.GetSectionList('user','keys')+
- idleConf.GetSectionList('default','keys'))
- newKeySet=GetCfgSectionNameDialog(self,'New Custom Key Set',
- message,usedNames).result
- return newKeySet
-
- def SaveAsNewKeySet(self):
- newKeysName=self.GetNewKeysName('New Key Set Name:')
- if newKeysName:
- self.CreateNewKeySet(newKeysName)
-
- def KeyBindingSelected(self,event):
- self.buttonNewKeys.config(state=NORMAL)
-
- def CreateNewKeySet(self,newKeySetName):
- #creates new custom key set based on the previously active key set,
- #and makes the new key set active
- if self.keysAreBuiltin.get():
- prevKeySetName=self.builtinKeys.get()
- else:
- prevKeySetName=self.customKeys.get()
- prevKeys=idleConf.GetCoreKeys(prevKeySetName)
- newKeys={}
- for event in prevKeys.keys(): #add key set to changed items
- eventName=event[2:-2] #trim off the angle brackets
- binding=string.join(prevKeys[event])
- newKeys[eventName]=binding
- #handle any unsaved changes to prev key set
- if prevKeySetName in self.changedItems['keys'].keys():
- keySetChanges=self.changedItems['keys'][prevKeySetName]
- for event in keySetChanges.keys():
- newKeys[event]=keySetChanges[event]
- #save the new theme
- self.SaveNewKeySet(newKeySetName,newKeys)
- #change gui over to the new key set
- customKeyList=idleConf.GetSectionList('user','keys')
- customKeyList.sort()
- self.optMenuKeysCustom.SetMenu(customKeyList,newKeySetName)
- self.keysAreBuiltin.set(0)
- self.SetKeysType()
-
- def LoadKeysList(self,keySetName):
- reselect=0
- newKeySet=0
- if self.listBindings.curselection():
- reselect=1
- listIndex=self.listBindings.index(ANCHOR)
- keySet=idleConf.GetKeySet(keySetName)
- bindNames=keySet.keys()
- bindNames.sort()
- self.listBindings.delete(0,END)
- for bindName in bindNames:
- key=string.join(keySet[bindName]) #make key(s) into a string
- bindName=bindName[2:-2] #trim off the angle brackets
- if keySetName in self.changedItems['keys'].keys():
- #handle any unsaved changes to this key set
- if bindName in self.changedItems['keys'][keySetName].keys():
- key=self.changedItems['keys'][keySetName][bindName]
- self.listBindings.insert(END, bindName+' - '+key)
- if reselect:
- self.listBindings.see(listIndex)
- self.listBindings.select_set(listIndex)
- self.listBindings.select_anchor(listIndex)
-
- def DeleteCustomKeys(self):
- keySetName=self.customKeys.get()
- if not tkMessageBox.askyesno('Delete Key Set','Are you sure you wish '+
- 'to delete the key set %r ?' % (keySetName),
- parent=self):
- return
- #remove key set from config
- idleConf.userCfg['keys'].remove_section(keySetName)
- if self.changedItems['keys'].has_key(keySetName):
- del(self.changedItems['keys'][keySetName])
- #write changes
- idleConf.userCfg['keys'].Save()
- #reload user key set list
- itemList=idleConf.GetSectionList('user','keys')
- itemList.sort()
- if not itemList:
- self.radioKeysCustom.config(state=DISABLED)
- self.optMenuKeysCustom.SetMenu(itemList,'- no custom keys -')
- else:
- self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
- #revert to default key set
- self.keysAreBuiltin.set(idleConf.defaultCfg['main'].Get('Keys','default'))
- self.builtinKeys.set(idleConf.defaultCfg['main'].Get('Keys','name'))
- #user can't back out of these changes, they must be applied now
- self.Apply()
- self.SetKeysType()
-
- def DeleteCustomTheme(self):
- themeName=self.customTheme.get()
- if not tkMessageBox.askyesno('Delete Theme','Are you sure you wish '+
- 'to delete the theme %r ?' % (themeName,),
- parent=self):
- return
- #remove theme from config
- idleConf.userCfg['highlight'].remove_section(themeName)
- if self.changedItems['highlight'].has_key(themeName):
- del(self.changedItems['highlight'][themeName])
- #write changes
- idleConf.userCfg['highlight'].Save()
- #reload user theme list
- itemList=idleConf.GetSectionList('user','highlight')
- itemList.sort()
- if not itemList:
- self.radioThemeCustom.config(state=DISABLED)
- self.optMenuThemeCustom.SetMenu(itemList,'- no custom themes -')
- else:
- self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
- #revert to default theme
- self.themeIsBuiltin.set(idleConf.defaultCfg['main'].Get('Theme','default'))
- self.builtinTheme.set(idleConf.defaultCfg['main'].Get('Theme','name'))
- #user can't back out of these changes, they must be applied now
- self.Apply()
- self.SetThemeType()
-
- def GetColour(self):
- target=self.highlightTarget.get()
- prevColour=self.frameColourSet.cget('bg')
- rgbTuplet, colourString = tkColorChooser.askcolor(parent=self,
- title='Pick new colour for : '+target,initialcolor=prevColour)
- if colourString and (colourString!=prevColour):
- #user didn't cancel, and they chose a new colour
- if self.themeIsBuiltin.get(): #current theme is a built-in
- message=('Your changes will be saved as a new Custom Theme. '+
- 'Enter a name for your new Custom Theme below.')
- newTheme=self.GetNewThemeName(message)
- if not newTheme: #user cancelled custom theme creation
- return
- else: #create new custom theme based on previously active theme
- self.CreateNewTheme(newTheme)
- self.colour.set(colourString)
- else: #current theme is user defined
- self.colour.set(colourString)
-
- def OnNewColourSet(self):
- newColour=self.colour.get()
- self.frameColourSet.config(bg=newColour)#set sample
- if self.fgHilite.get(): plane='foreground'
- else: plane='background'
- sampleElement=self.themeElements[self.highlightTarget.get()][0]
- self.textHighlightSample.tag_config(sampleElement, **{plane:newColour})
- theme=self.customTheme.get()
- themeElement=sampleElement+'-'+plane
- self.AddChangedItem('highlight',theme,themeElement,newColour)
-
- def GetNewThemeName(self,message):
- usedNames=(idleConf.GetSectionList('user','highlight')+
- idleConf.GetSectionList('default','highlight'))
- newTheme=GetCfgSectionNameDialog(self,'New Custom Theme',
- message,usedNames).result
- return newTheme
-
- def SaveAsNewTheme(self):
- newThemeName=self.GetNewThemeName('New Theme Name:')
- if newThemeName:
- self.CreateNewTheme(newThemeName)
-
- def CreateNewTheme(self,newThemeName):
- #creates new custom theme based on the previously active theme,
- #and makes the new theme active
- if self.themeIsBuiltin.get():
- themeType='default'
- themeName=self.builtinTheme.get()
- else:
- themeType='user'
- themeName=self.customTheme.get()
- newTheme=idleConf.GetThemeDict(themeType,themeName)
- #apply any of the old theme's unsaved changes to the new theme
- if themeName in self.changedItems['highlight'].keys():
- themeChanges=self.changedItems['highlight'][themeName]
- for element in themeChanges.keys():
- newTheme[element]=themeChanges[element]
- #save the new theme
- self.SaveNewTheme(newThemeName,newTheme)
- #change gui over to the new theme
- customThemeList=idleConf.GetSectionList('user','highlight')
- customThemeList.sort()
- self.optMenuThemeCustom.SetMenu(customThemeList,newThemeName)
- self.themeIsBuiltin.set(0)
- self.SetThemeType()
-
- def OnListFontButtonRelease(self,event):
- font = self.listFontName.get(ANCHOR)
- self.fontName.set(font.lower())
- self.SetFontSample()
-
- def SetFontSample(self,event=None):
- fontName=self.fontName.get()
- if self.fontBold.get():
- fontWeight=tkFont.BOLD
- else:
- fontWeight=tkFont.NORMAL
- self.editFont.config(size=self.fontSize.get(),
- weight=fontWeight,family=fontName)
-
- def SetHighlightTarget(self):
- if self.highlightTarget.get()=='Cursor': #bg not possible
- self.radioFg.config(state=DISABLED)
- self.radioBg.config(state=DISABLED)
- self.fgHilite.set(1)
- else: #both fg and bg can be set
- self.radioFg.config(state=NORMAL)
- self.radioBg.config(state=NORMAL)
- self.fgHilite.set(1)
- self.SetColourSample()
-
- def SetColourSampleBinding(self,*args):
- self.SetColourSample()
-
- def SetColourSample(self):
- #set the colour smaple area
- tag=self.themeElements[self.highlightTarget.get()][0]
- if self.fgHilite.get(): plane='foreground'
- else: plane='background'
- colour=self.textHighlightSample.tag_cget(tag,plane)
- self.frameColourSet.config(bg=colour)
-
- def PaintThemeSample(self):
- if self.themeIsBuiltin.get(): #a default theme
- theme=self.builtinTheme.get()
- else: #a user theme
- theme=self.customTheme.get()
- for elementTitle in self.themeElements.keys():
- element=self.themeElements[elementTitle][0]
- colours=idleConf.GetHighlight(theme,element)
- if element=='cursor': #cursor sample needs special painting
- colours['background']=idleConf.GetHighlight(theme,
- 'normal', fgBg='bg')
- #handle any unsaved changes to this theme
- if theme in self.changedItems['highlight'].keys():
- themeDict=self.changedItems['highlight'][theme]
- if themeDict.has_key(element+'-foreground'):
- colours['foreground']=themeDict[element+'-foreground']
- if themeDict.has_key(element+'-background'):
- colours['background']=themeDict[element+'-background']
- self.textHighlightSample.tag_config(element, **colours)
- self.SetColourSample()
-
- def HelpSourceSelected(self,event):
- self.SetHelpListButtonStates()
-
- def SetHelpListButtonStates(self):
- if self.listHelp.size()<1: #no entries in list
- self.buttonHelpListEdit.config(state=DISABLED)
- self.buttonHelpListRemove.config(state=DISABLED)
- else: #there are some entries
- if self.listHelp.curselection(): #there currently is a selection
- self.buttonHelpListEdit.config(state=NORMAL)
- self.buttonHelpListRemove.config(state=NORMAL)
- else: #there currently is not a selection
- self.buttonHelpListEdit.config(state=DISABLED)
- self.buttonHelpListRemove.config(state=DISABLED)
-
- def HelpListItemAdd(self):
- helpSource=GetHelpSourceDialog(self,'New Help Source').result
- if helpSource:
- self.userHelpList.append( (helpSource[0],helpSource[1]) )
- self.listHelp.insert(END,helpSource[0])
- self.UpdateUserHelpChangedItems()
- self.SetHelpListButtonStates()
-
- def HelpListItemEdit(self):
- itemIndex=self.listHelp.index(ANCHOR)
- helpSource=self.userHelpList[itemIndex]
- newHelpSource=GetHelpSourceDialog(self,'Edit Help Source',
- menuItem=helpSource[0],filePath=helpSource[1]).result
- if (not newHelpSource) or (newHelpSource==helpSource):
- return #no changes
- self.userHelpList[itemIndex]=newHelpSource
- self.listHelp.delete(itemIndex)
- self.listHelp.insert(itemIndex,newHelpSource[0])
- self.UpdateUserHelpChangedItems()
- self.SetHelpListButtonStates()
-
- def HelpListItemRemove(self):
- itemIndex=self.listHelp.index(ANCHOR)
- del(self.userHelpList[itemIndex])
- self.listHelp.delete(itemIndex)
- self.UpdateUserHelpChangedItems()
- self.SetHelpListButtonStates()
-
- def UpdateUserHelpChangedItems(self):
- "Clear and rebuild the HelpFiles section in self.changedItems"
- self.changedItems['main']['HelpFiles'] = {}
- for num in range(1,len(self.userHelpList)+1):
- self.AddChangedItem('main','HelpFiles',str(num),
- string.join(self.userHelpList[num-1][:2],';'))
-
- def LoadFontCfg(self):
- ##base editor font selection list
- fonts=list(tkFont.families(self))
- fonts.sort()
- for font in fonts:
- self.listFontName.insert(END,font)
- configuredFont=idleConf.GetOption('main','EditorWindow','font',
- default='courier')
- lc_configuredFont = configuredFont.lower()
- self.fontName.set(lc_configuredFont)
- lc_fonts = [s.lower() for s in fonts]
- if lc_configuredFont in lc_fonts:
- currentFontIndex = lc_fonts.index(lc_configuredFont)
- self.listFontName.see(currentFontIndex)
- self.listFontName.select_set(currentFontIndex)
- self.listFontName.select_anchor(currentFontIndex)
- ##font size dropdown
- fontSize=idleConf.GetOption('main','EditorWindow','font-size',
- default='10')
- self.optMenuFontSize.SetMenu(('7','8','9','10','11','12','13','14',
- '16','18','20','22'),fontSize )
- ##fontWeight
- self.fontBold.set(idleConf.GetOption('main','EditorWindow',
- 'font-bold',default=0,type='bool'))
- ##font sample
- self.SetFontSample()
-
- def LoadTabCfg(self):
- ##indent sizes
- spaceNum=idleConf.GetOption('main','Indent','num-spaces',
- default=4,type='int')
- self.spaceNum.set(spaceNum)
-
- def LoadThemeCfg(self):
- ##current theme type radiobutton
- self.themeIsBuiltin.set(idleConf.GetOption('main','Theme','default',
- type='bool',default=1))
- ##currently set theme
- currentOption=idleConf.CurrentTheme()
- ##load available theme option menus
- if self.themeIsBuiltin.get(): #default theme selected
- itemList=idleConf.GetSectionList('default','highlight')
- itemList.sort()
- self.optMenuThemeBuiltin.SetMenu(itemList,currentOption)
- itemList=idleConf.GetSectionList('user','highlight')
- itemList.sort()
- if not itemList:
- self.radioThemeCustom.config(state=DISABLED)
- self.customTheme.set('- no custom themes -')
- else:
- self.optMenuThemeCustom.SetMenu(itemList,itemList[0])
- else: #user theme selected
- itemList=idleConf.GetSectionList('user','highlight')
- itemList.sort()
- self.optMenuThemeCustom.SetMenu(itemList,currentOption)
- itemList=idleConf.GetSectionList('default','highlight')
- itemList.sort()
- self.optMenuThemeBuiltin.SetMenu(itemList,itemList[0])
- self.SetThemeType()
- ##load theme element option menu
- themeNames=self.themeElements.keys()
- themeNames.sort(self.__ThemeNameIndexCompare)
- self.optMenuHighlightTarget.SetMenu(themeNames,themeNames[0])
- self.PaintThemeSample()
- self.SetHighlightTarget()
-
- def __ThemeNameIndexCompare(self,a,b):
- if self.themeElements[a][1]<self.themeElements[b][1]: return -1
- elif self.themeElements[a][1]==self.themeElements[b][1]: return 0
- else: return 1
-
- def LoadKeyCfg(self):
- ##current keys type radiobutton
- self.keysAreBuiltin.set(idleConf.GetOption('main','Keys','default',
- type='bool',default=1))
- ##currently set keys
- currentOption=idleConf.CurrentKeys()
- ##load available keyset option menus
- if self.keysAreBuiltin.get(): #default theme selected
- itemList=idleConf.GetSectionList('default','keys')
- itemList.sort()
- self.optMenuKeysBuiltin.SetMenu(itemList,currentOption)
- itemList=idleConf.GetSectionList('user','keys')
- itemList.sort()
- if not itemList:
- self.radioKeysCustom.config(state=DISABLED)
- self.customKeys.set('- no custom keys -')
- else:
- self.optMenuKeysCustom.SetMenu(itemList,itemList[0])
- else: #user key set selected
- itemList=idleConf.GetSectionList('user','keys')
- itemList.sort()
- self.optMenuKeysCustom.SetMenu(itemList,currentOption)
- itemList=idleConf.GetSectionList('default','keys')
- itemList.sort()
- self.optMenuKeysBuiltin.SetMenu(itemList,itemList[0])
- self.SetKeysType()
- ##load keyset element list
- keySetName=idleConf.CurrentKeys()
- self.LoadKeysList(keySetName)
-
- def LoadGeneralCfg(self):
- #startup state
- self.startupEdit.set(idleConf.GetOption('main','General',
- 'editor-on-startup',default=1,type='bool'))
- #autosave state
- self.autoSave.set(idleConf.GetOption('main', 'General', 'autosave',
- default=0, type='bool'))
- #initial window size
- self.winWidth.set(idleConf.GetOption('main','EditorWindow','width'))
- self.winHeight.set(idleConf.GetOption('main','EditorWindow','height'))
- #initial paragraph reformat size
- self.paraWidth.set(idleConf.GetOption('main','FormatParagraph','paragraph'))
- # default source encoding
- self.encoding.set(idleConf.GetOption('main', 'EditorWindow',
- 'encoding', default='none'))
- # additional help sources
- self.userHelpList = idleConf.GetAllExtraHelpSourcesList()
- for helpItem in self.userHelpList:
- self.listHelp.insert(END,helpItem[0])
- self.SetHelpListButtonStates()
-
- def LoadConfigs(self):
- """
- load configuration from default and user config files and populate
- the widgets on the config dialog pages.
- """
- ### fonts / tabs page
- self.LoadFontCfg()
- self.LoadTabCfg()
- ### highlighting page
- self.LoadThemeCfg()
- ### keys page
- self.LoadKeyCfg()
- ### general page
- self.LoadGeneralCfg()
-
- def SaveNewKeySet(self,keySetName,keySet):
- """
- save a newly created core key set.
- keySetName - string, the name of the new key set
- keySet - dictionary containing the new key set
- """
- if not idleConf.userCfg['keys'].has_section(keySetName):
- idleConf.userCfg['keys'].add_section(keySetName)
- for event in keySet.keys():
- value=keySet[event]
- idleConf.userCfg['keys'].SetOption(keySetName,event,value)
-
- def SaveNewTheme(self,themeName,theme):
- """
- save a newly created theme.
- themeName - string, the name of the new theme
- theme - dictionary containing the new theme
- """
- if not idleConf.userCfg['highlight'].has_section(themeName):
- idleConf.userCfg['highlight'].add_section(themeName)
- for element in theme.keys():
- value=theme[element]
- idleConf.userCfg['highlight'].SetOption(themeName,element,value)
-
- def SetUserValue(self,configType,section,item,value):
- if idleConf.defaultCfg[configType].has_option(section,item):
- if idleConf.defaultCfg[configType].Get(section,item)==value:
- #the setting equals a default setting, remove it from user cfg
- return idleConf.userCfg[configType].RemoveOption(section,item)
- #if we got here set the option
- return idleConf.userCfg[configType].SetOption(section,item,value)
-
- def SaveAllChangedConfigs(self):
- "Save configuration changes to the user config file."
- idleConf.userCfg['main'].Save()
- for configType in self.changedItems.keys():
- cfgTypeHasChanges = False
- for section in self.changedItems[configType].keys():
- if section == 'HelpFiles':
- #this section gets completely replaced
- idleConf.userCfg['main'].remove_section('HelpFiles')
- cfgTypeHasChanges = True
- for item in self.changedItems[configType][section].keys():
- value = self.changedItems[configType][section][item]
- if self.SetUserValue(configType,section,item,value):
- cfgTypeHasChanges = True
- if cfgTypeHasChanges:
- idleConf.userCfg[configType].Save()
- for configType in ['keys', 'highlight']:
- # save these even if unchanged!
- idleConf.userCfg[configType].Save()
- self.ResetChangedItems() #clear the changed items dict
-
- def DeactivateCurrentConfig(self):
- #Before a config is saved, some cleanup of current
- #config must be done - remove the previous keybindings
- winInstances=self.parent.instance_dict.keys()
- for instance in winInstances:
- instance.RemoveKeybindings()
-
- def ActivateConfigChanges(self):
- "Dynamically apply configuration changes"
- winInstances=self.parent.instance_dict.keys()
- for instance in winInstances:
- instance.ResetColorizer()
- instance.ResetFont()
- instance.set_notabs_indentwidth()
- instance.ApplyKeybindings()
- instance.reset_help_menu_entries()
-
- def Cancel(self):
- self.destroy()
-
- def Ok(self):
- self.Apply()
- self.destroy()
-
- def Apply(self):
- self.DeactivateCurrentConfig()
- self.SaveAllChangedConfigs()
- self.ActivateConfigChanges()
-
- def Help(self):
- pass
-
-if __name__ == '__main__':
- #test the dialog
- root=Tk()
- Button(root,text='Dialog',
- command=lambda:ConfigDialog(root,'Settings')).pack()
- root.instance_dict={}
- root.mainloop()
diff --git a/sys/lib/python/idlelib/configHandler.py b/sys/lib/python/idlelib/configHandler.py
deleted file mode 100644
index 826fb5dbb..000000000
--- a/sys/lib/python/idlelib/configHandler.py
+++ /dev/null
@@ -1,696 +0,0 @@
-"""Provides access to stored IDLE configuration information.
-
-Refer to the comments at the beginning of config-main.def for a description of
-the available configuration files and the design implemented to update user
-configuration information. In particular, user configuration choices which
-duplicate the defaults will be removed from the user's configuration files,
-and if a file becomes empty, it will be deleted.
-
-The contents of the user files may be altered using the Options/Configure IDLE
-menu to access the configuration GUI (configDialog.py), or manually.
-
-Throughout this module there is an emphasis on returning useable defaults
-when a problem occurs in returning a requested configuration value back to
-idle. This is to allow IDLE to continue to function in spite of errors in
-the retrieval of config information. When a default is returned instead of
-a requested config value, a message is printed to stderr to aid in
-configuration problem notification and resolution.
-
-"""
-import os
-import sys
-import string
-import macosxSupport
-from ConfigParser import ConfigParser, NoOptionError, NoSectionError
-
-class InvalidConfigType(Exception): pass
-class InvalidConfigSet(Exception): pass
-class InvalidFgBg(Exception): pass
-class InvalidTheme(Exception): pass
-
-class IdleConfParser(ConfigParser):
- """
- A ConfigParser specialised for idle configuration file handling
- """
- def __init__(self, cfgFile, cfgDefaults=None):
- """
- cfgFile - string, fully specified configuration file name
- """
- self.file=cfgFile
- ConfigParser.__init__(self,defaults=cfgDefaults)
-
- def Get(self, section, option, type=None, default=None):
- """
- Get an option value for given section/option or return default.
- If type is specified, return as type.
- """
- if type=='bool':
- getVal=self.getboolean
- elif type=='int':
- getVal=self.getint
- else:
- getVal=self.get
- if self.has_option(section,option):
- #return getVal(section, option, raw, vars, default)
- return getVal(section, option)
- else:
- return default
-
- def GetOptionList(self,section):
- """
- Get an option list for given section
- """
- if self.has_section(section):
- return self.options(section)
- else: #return a default value
- return []
-
- def Load(self):
- """
- Load the configuration file from disk
- """
- self.read(self.file)
-
-class IdleUserConfParser(IdleConfParser):
- """
- IdleConfigParser specialised for user configuration handling.
- """
-
- def AddSection(self,section):
- """
- if section doesn't exist, add it
- """
- if not self.has_section(section):
- self.add_section(section)
-
- def RemoveEmptySections(self):
- """
- remove any sections that have no options
- """
- for section in self.sections():
- if not self.GetOptionList(section):
- self.remove_section(section)
-
- def IsEmpty(self):
- """
- Remove empty sections and then return 1 if parser has no sections
- left, else return 0.
- """
- self.RemoveEmptySections()
- if self.sections():
- return 0
- else:
- return 1
-
- def RemoveOption(self,section,option):
- """
- If section/option exists, remove it.
- Returns 1 if option was removed, 0 otherwise.
- """
- if self.has_section(section):
- return self.remove_option(section,option)
-
- def SetOption(self,section,option,value):
- """
- Sets option to value, adding section if required.
- Returns 1 if option was added or changed, otherwise 0.
- """
- if self.has_option(section,option):
- if self.get(section,option)==value:
- return 0
- else:
- self.set(section,option,value)
- return 1
- else:
- if not self.has_section(section):
- self.add_section(section)
- self.set(section,option,value)
- return 1
-
- def RemoveFile(self):
- """
- Removes the user config file from disk if it exists.
- """
- if os.path.exists(self.file):
- os.remove(self.file)
-
- def Save(self):
- """Update user configuration file.
-
- Remove empty sections. If resulting config isn't empty, write the file
- to disk. If config is empty, remove the file from disk if it exists.
-
- """
- if not self.IsEmpty():
- cfgFile=open(self.file,'w')
- self.write(cfgFile)
- else:
- self.RemoveFile()
-
-class IdleConf:
- """
- holds config parsers for all idle config files:
- default config files
- (idle install dir)/config-main.def
- (idle install dir)/config-extensions.def
- (idle install dir)/config-highlight.def
- (idle install dir)/config-keys.def
- user config files
- (user home dir)/.idlerc/config-main.cfg
- (user home dir)/.idlerc/config-extensions.cfg
- (user home dir)/.idlerc/config-highlight.cfg
- (user home dir)/.idlerc/config-keys.cfg
- """
- def __init__(self):
- self.defaultCfg={}
- self.userCfg={}
- self.cfg={}
- self.CreateConfigHandlers()
- self.LoadCfgFiles()
- #self.LoadCfg()
-
- def CreateConfigHandlers(self):
- """
- set up a dictionary of config parsers for default and user
- configurations respectively
- """
- #build idle install path
- if __name__ != '__main__': # we were imported
- idleDir=os.path.dirname(__file__)
- else: # we were exec'ed (for testing only)
- idleDir=os.path.abspath(sys.path[0])
- userDir=self.GetUserCfgDir()
- configTypes=('main','extensions','highlight','keys')
- defCfgFiles={}
- usrCfgFiles={}
- for cfgType in configTypes: #build config file names
- defCfgFiles[cfgType]=os.path.join(idleDir,'config-'+cfgType+'.def')
- usrCfgFiles[cfgType]=os.path.join(userDir,'config-'+cfgType+'.cfg')
- for cfgType in configTypes: #create config parsers
- self.defaultCfg[cfgType]=IdleConfParser(defCfgFiles[cfgType])
- self.userCfg[cfgType]=IdleUserConfParser(usrCfgFiles[cfgType])
-
- def GetUserCfgDir(self):
- """
- Creates (if required) and returns a filesystem directory for storing
- user config files.
-
- """
- cfgDir = '.idlerc'
- userDir = os.path.expanduser('~')
- if userDir != '~': # expanduser() found user home dir
- if not os.path.exists(userDir):
- warn = ('\n Warning: os.path.expanduser("~") points to\n '+
- userDir+',\n but the path does not exist.\n')
- sys.stderr.write(warn)
- userDir = '~'
- if userDir == "~": # still no path to home!
- # traditionally IDLE has defaulted to os.getcwd(), is this adequate?
- userDir = os.getcwd()
- userDir = os.path.join(userDir, cfgDir)
- if not os.path.exists(userDir):
- try:
- os.mkdir(userDir)
- except (OSError, IOError):
- warn = ('\n Warning: unable to create user config directory\n'+
- userDir+'\n Check path and permissions.\n Exiting!\n\n')
- sys.stderr.write(warn)
- raise SystemExit
- return userDir
-
- def GetOption(self, configType, section, option, default=None, type=None,
- warn_on_default=True):
- """
- Get an option value for given config type and given general
- configuration section/option or return a default. If type is specified,
- return as type. Firstly the user configuration is checked, with a
- fallback to the default configuration, and a final 'catch all'
- fallback to a useable passed-in default if the option isn't present in
- either the user or the default configuration.
- configType must be one of ('main','extensions','highlight','keys')
- If a default is returned, and warn_on_default is True, a warning is
- printed to stderr.
-
- """
- if self.userCfg[configType].has_option(section,option):
- return self.userCfg[configType].Get(section, option, type=type)
- elif self.defaultCfg[configType].has_option(section,option):
- return self.defaultCfg[configType].Get(section, option, type=type)
- else: #returning default, print warning
- if warn_on_default:
- warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
- ' problem retrieving configration option %r\n'
- ' from section %r.\n'
- ' returning default value: %r\n' %
- (option, section, default))
- sys.stderr.write(warning)
- return default
-
- def SetOption(self, configType, section, option, value):
- """In user's config file, set section's option to value.
-
- """
- self.userCfg[configType].SetOption(section, option, value)
-
- def GetSectionList(self, configSet, configType):
- """
- Get a list of sections from either the user or default config for
- the given config type.
- configSet must be either 'user' or 'default'
- configType must be one of ('main','extensions','highlight','keys')
- """
- if not (configType in ('main','extensions','highlight','keys')):
- raise InvalidConfigType, 'Invalid configType specified'
- if configSet == 'user':
- cfgParser=self.userCfg[configType]
- elif configSet == 'default':
- cfgParser=self.defaultCfg[configType]
- else:
- raise InvalidConfigSet, 'Invalid configSet specified'
- return cfgParser.sections()
-
- def GetHighlight(self, theme, element, fgBg=None):
- """
- return individual highlighting theme elements.
- fgBg - string ('fg'or'bg') or None, if None return a dictionary
- containing fg and bg colours (appropriate for passing to Tkinter in,
- e.g., a tag_config call), otherwise fg or bg colour only as specified.
- """
- if self.defaultCfg['highlight'].has_section(theme):
- themeDict=self.GetThemeDict('default',theme)
- else:
- themeDict=self.GetThemeDict('user',theme)
- fore=themeDict[element+'-foreground']
- if element=='cursor': #there is no config value for cursor bg
- back=themeDict['normal-background']
- else:
- back=themeDict[element+'-background']
- highlight={"foreground": fore,"background": back}
- if not fgBg: #return dict of both colours
- return highlight
- else: #return specified colour only
- if fgBg == 'fg':
- return highlight["foreground"]
- if fgBg == 'bg':
- return highlight["background"]
- else:
- raise InvalidFgBg, 'Invalid fgBg specified'
-
- def GetThemeDict(self,type,themeName):
- """
- type - string, 'default' or 'user' theme type
- themeName - string, theme name
- Returns a dictionary which holds {option:value} for each element
- in the specified theme. Values are loaded over a set of ultimate last
- fallback defaults to guarantee that all theme elements are present in
- a newly created theme.
- """
- if type == 'user':
- cfgParser=self.userCfg['highlight']
- elif type == 'default':
- cfgParser=self.defaultCfg['highlight']
- else:
- raise InvalidTheme, 'Invalid theme type specified'
- #foreground and background values are provded for each theme element
- #(apart from cursor) even though all these values are not yet used
- #by idle, to allow for their use in the future. Default values are
- #generally black and white.
- theme={ 'normal-foreground':'#000000',
- 'normal-background':'#ffffff',
- 'keyword-foreground':'#000000',
- 'keyword-background':'#ffffff',
- 'builtin-foreground':'#000000',
- 'builtin-background':'#ffffff',
- 'comment-foreground':'#000000',
- 'comment-background':'#ffffff',
- 'string-foreground':'#000000',
- 'string-background':'#ffffff',
- 'definition-foreground':'#000000',
- 'definition-background':'#ffffff',
- 'hilite-foreground':'#000000',
- 'hilite-background':'gray',
- 'break-foreground':'#ffffff',
- 'break-background':'#000000',
- 'hit-foreground':'#ffffff',
- 'hit-background':'#000000',
- 'error-foreground':'#ffffff',
- 'error-background':'#000000',
- #cursor (only foreground can be set)
- 'cursor-foreground':'#000000',
- #shell window
- 'stdout-foreground':'#000000',
- 'stdout-background':'#ffffff',
- 'stderr-foreground':'#000000',
- 'stderr-background':'#ffffff',
- 'console-foreground':'#000000',
- 'console-background':'#ffffff' }
- for element in theme.keys():
- if not cfgParser.has_option(themeName,element):
- #we are going to return a default, print warning
- warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict'
- ' -\n problem retrieving theme element %r'
- '\n from theme %r.\n'
- ' returning default value: %r\n' %
- (element, themeName, theme[element]))
- sys.stderr.write(warning)
- colour=cfgParser.Get(themeName,element,default=theme[element])
- theme[element]=colour
- return theme
-
- def CurrentTheme(self):
- """
- Returns the name of the currently active theme
- """
- return self.GetOption('main','Theme','name',default='')
-
- def CurrentKeys(self):
- """
- Returns the name of the currently active key set
- """
- return self.GetOption('main','Keys','name',default='')
-
- def GetExtensions(self, active_only=True, editor_only=False, shell_only=False):
- """
- Gets a list of all idle extensions declared in the config files.
- active_only - boolean, if true only return active (enabled) extensions
- """
- extns=self.RemoveKeyBindNames(
- self.GetSectionList('default','extensions'))
- userExtns=self.RemoveKeyBindNames(
- self.GetSectionList('user','extensions'))
- for extn in userExtns:
- if extn not in extns: #user has added own extension
- extns.append(extn)
- if active_only:
- activeExtns=[]
- for extn in extns:
- if self.GetOption('extensions', extn, 'enable', default=True,
- type='bool'):
- #the extension is enabled
- if editor_only or shell_only:
- if editor_only:
- option = "enable_editor"
- else:
- option = "enable_shell"
- if self.GetOption('extensions', extn,option,
- default=True, type='bool',
- warn_on_default=False):
- activeExtns.append(extn)
- else:
- activeExtns.append(extn)
- return activeExtns
- else:
- return extns
-
- def RemoveKeyBindNames(self,extnNameList):
- #get rid of keybinding section names
- names=extnNameList
- kbNameIndicies=[]
- for name in names:
- if name.endswith(('_bindings', '_cfgBindings')):
- kbNameIndicies.append(names.index(name))
- kbNameIndicies.sort()
- kbNameIndicies.reverse()
- for index in kbNameIndicies: #delete each keybinding section name
- del(names[index])
- return names
-
- def GetExtnNameForEvent(self,virtualEvent):
- """
- Returns the name of the extension that virtualEvent is bound in, or
- None if not bound in any extension.
- virtualEvent - string, name of the virtual event to test for, without
- the enclosing '<< >>'
- """
- extName=None
- vEvent='<<'+virtualEvent+'>>'
- for extn in self.GetExtensions(active_only=0):
- for event in self.GetExtensionKeys(extn).keys():
- if event == vEvent:
- extName=extn
- return extName
-
- def GetExtensionKeys(self,extensionName):
- """
- returns a dictionary of the configurable keybindings for a particular
- extension,as they exist in the dictionary returned by GetCurrentKeySet;
- that is, where previously used bindings are disabled.
- """
- keysName=extensionName+'_cfgBindings'
- activeKeys=self.GetCurrentKeySet()
- extKeys={}
- if self.defaultCfg['extensions'].has_section(keysName):
- eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
- for eventName in eventNames:
- event='<<'+eventName+'>>'
- binding=activeKeys[event]
- extKeys[event]=binding
- return extKeys
-
- def __GetRawExtensionKeys(self,extensionName):
- """
- returns a dictionary of the configurable keybindings for a particular
- extension, as defined in the configuration files, or an empty dictionary
- if no bindings are found
- """
- keysName=extensionName+'_cfgBindings'
- extKeys={}
- if self.defaultCfg['extensions'].has_section(keysName):
- eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
- for eventName in eventNames:
- binding=self.GetOption('extensions',keysName,
- eventName,default='').split()
- event='<<'+eventName+'>>'
- extKeys[event]=binding
- return extKeys
-
- def GetExtensionBindings(self,extensionName):
- """
- Returns a dictionary of all the event bindings for a particular
- extension. The configurable keybindings are returned as they exist in
- the dictionary returned by GetCurrentKeySet; that is, where re-used
- keybindings are disabled.
- """
- bindsName=extensionName+'_bindings'
- extBinds=self.GetExtensionKeys(extensionName)
- #add the non-configurable bindings
- if self.defaultCfg['extensions'].has_section(bindsName):
- eventNames=self.defaultCfg['extensions'].GetOptionList(bindsName)
- for eventName in eventNames:
- binding=self.GetOption('extensions',bindsName,
- eventName,default='').split()
- event='<<'+eventName+'>>'
- extBinds[event]=binding
-
- return extBinds
-
- def GetKeyBinding(self, keySetName, eventStr):
- """
- returns the keybinding for a specific event.
- keySetName - string, name of key binding set
- eventStr - string, the virtual event we want the binding for,
- represented as a string, eg. '<<event>>'
- """
- eventName=eventStr[2:-2] #trim off the angle brackets
- binding=self.GetOption('keys',keySetName,eventName,default='').split()
- return binding
-
- def GetCurrentKeySet(self):
- result = self.GetKeySet(self.CurrentKeys())
-
- if macosxSupport.runningAsOSXApp():
- # We're using AquaTk, replace all keybingings that use the
- # Alt key by ones that use the Option key because the former
- # don't work reliably.
- for k, v in result.items():
- v2 = [ x.replace('<Alt-', '<Option-') for x in v ]
- if v != v2:
- result[k] = v2
-
- return result
-
- def GetKeySet(self,keySetName):
- """
- Returns a dictionary of: all requested core keybindings, plus the
- keybindings for all currently active extensions. If a binding defined
- in an extension is already in use, that binding is disabled.
- """
- keySet=self.GetCoreKeys(keySetName)
- activeExtns=self.GetExtensions(active_only=1)
- for extn in activeExtns:
- extKeys=self.__GetRawExtensionKeys(extn)
- if extKeys: #the extension defines keybindings
- for event in extKeys.keys():
- if extKeys[event] in keySet.values():
- #the binding is already in use
- extKeys[event]='' #disable this binding
- keySet[event]=extKeys[event] #add binding
- return keySet
-
- def IsCoreBinding(self,virtualEvent):
- """
- returns true if the virtual event is bound in the core idle keybindings.
- virtualEvent - string, name of the virtual event to test for, without
- the enclosing '<< >>'
- """
- return ('<<'+virtualEvent+'>>') in self.GetCoreKeys().keys()
-
- def GetCoreKeys(self, keySetName=None):
- """
- returns the requested set of core keybindings, with fallbacks if
- required.
- Keybindings loaded from the config file(s) are loaded _over_ these
- defaults, so if there is a problem getting any core binding there will
- be an 'ultimate last resort fallback' to the CUA-ish bindings
- defined here.
- """
- keyBindings={
- '<<copy>>': ['<Control-c>', '<Control-C>'],
- '<<cut>>': ['<Control-x>', '<Control-X>'],
- '<<paste>>': ['<Control-v>', '<Control-V>'],
- '<<beginning-of-line>>': ['<Control-a>', '<Home>'],
- '<<center-insert>>': ['<Control-l>'],
- '<<close-all-windows>>': ['<Control-q>'],
- '<<close-window>>': ['<Alt-F4>'],
- '<<do-nothing>>': ['<Control-x>'],
- '<<end-of-file>>': ['<Control-d>'],
- '<<python-docs>>': ['<F1>'],
- '<<python-context-help>>': ['<Shift-F1>'],
- '<<history-next>>': ['<Alt-n>'],
- '<<history-previous>>': ['<Alt-p>'],
- '<<interrupt-execution>>': ['<Control-c>'],
- '<<view-restart>>': ['<F6>'],
- '<<restart-shell>>': ['<Control-F6>'],
- '<<open-class-browser>>': ['<Alt-c>'],
- '<<open-module>>': ['<Alt-m>'],
- '<<open-new-window>>': ['<Control-n>'],
- '<<open-window-from-file>>': ['<Control-o>'],
- '<<plain-newline-and-indent>>': ['<Control-j>'],
- '<<print-window>>': ['<Control-p>'],
- '<<redo>>': ['<Control-y>'],
- '<<remove-selection>>': ['<Escape>'],
- '<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
- '<<save-window-as-file>>': ['<Alt-s>'],
- '<<save-window>>': ['<Control-s>'],
- '<<select-all>>': ['<Alt-a>'],
- '<<toggle-auto-coloring>>': ['<Control-slash>'],
- '<<undo>>': ['<Control-z>'],
- '<<find-again>>': ['<Control-g>', '<F3>'],
- '<<find-in-files>>': ['<Alt-F3>'],
- '<<find-selection>>': ['<Control-F3>'],
- '<<find>>': ['<Control-f>'],
- '<<replace>>': ['<Control-h>'],
- '<<goto-line>>': ['<Alt-g>'],
- '<<smart-backspace>>': ['<Key-BackSpace>'],
- '<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
- '<<smart-indent>>': ['<Key-Tab>'],
- '<<indent-region>>': ['<Control-Key-bracketright>'],
- '<<dedent-region>>': ['<Control-Key-bracketleft>'],
- '<<comment-region>>': ['<Alt-Key-3>'],
- '<<uncomment-region>>': ['<Alt-Key-4>'],
- '<<tabify-region>>': ['<Alt-Key-5>'],
- '<<untabify-region>>': ['<Alt-Key-6>'],
- '<<toggle-tabs>>': ['<Alt-Key-t>'],
- '<<change-indentwidth>>': ['<Alt-Key-u>'],
- '<<del-word-left>>': ['<Control-Key-BackSpace>'],
- '<<del-word-right>>': ['<Control-Key-Delete>']
- }
- if keySetName:
- for event in keyBindings.keys():
- binding=self.GetKeyBinding(keySetName,event)
- if binding:
- keyBindings[event]=binding
- else: #we are going to return a default, print warning
- warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys'
- ' -\n problem retrieving key binding for event %r'
- '\n from key set %r.\n'
- ' returning default value: %r\n' %
- (event, keySetName, keyBindings[event]))
- sys.stderr.write(warning)
- return keyBindings
-
- def GetExtraHelpSourceList(self,configSet):
- """Fetch list of extra help sources from a given configSet.
-
- Valid configSets are 'user' or 'default'. Return a list of tuples of
- the form (menu_item , path_to_help_file , option), or return the empty
- list. 'option' is the sequence number of the help resource. 'option'
- values determine the position of the menu items on the Help menu,
- therefore the returned list must be sorted by 'option'.
-
- """
- helpSources=[]
- if configSet=='user':
- cfgParser=self.userCfg['main']
- elif configSet=='default':
- cfgParser=self.defaultCfg['main']
- else:
- raise InvalidConfigSet, 'Invalid configSet specified'
- options=cfgParser.GetOptionList('HelpFiles')
- for option in options:
- value=cfgParser.Get('HelpFiles',option,default=';')
- if value.find(';')==-1: #malformed config entry with no ';'
- menuItem='' #make these empty
- helpPath='' #so value won't be added to list
- else: #config entry contains ';' as expected
- value=string.split(value,';')
- menuItem=value[0].strip()
- helpPath=value[1].strip()
- if menuItem and helpPath: #neither are empty strings
- helpSources.append( (menuItem,helpPath,option) )
- helpSources.sort(self.__helpsort)
- return helpSources
-
- def __helpsort(self, h1, h2):
- if int(h1[2]) < int(h2[2]):
- return -1
- elif int(h1[2]) > int(h2[2]):
- return 1
- else:
- return 0
-
- def GetAllExtraHelpSourcesList(self):
- """
- Returns a list of tuples containing the details of all additional help
- sources configured, or an empty list if there are none. Tuples are of
- the format returned by GetExtraHelpSourceList.
- """
- allHelpSources=( self.GetExtraHelpSourceList('default')+
- self.GetExtraHelpSourceList('user') )
- return allHelpSources
-
- def LoadCfgFiles(self):
- """
- load all configuration files.
- """
- for key in self.defaultCfg.keys():
- self.defaultCfg[key].Load()
- self.userCfg[key].Load() #same keys
-
- def SaveUserCfgFiles(self):
- """
- write all loaded user configuration files back to disk
- """
- for key in self.userCfg.keys():
- self.userCfg[key].Save()
-
-idleConf=IdleConf()
-
-### module test
-if __name__ == '__main__':
- def dumpCfg(cfg):
- print '\n',cfg,'\n'
- for key in cfg.keys():
- sections=cfg[key].sections()
- print key
- print sections
- for section in sections:
- options=cfg[key].options(section)
- print section
- print options
- for option in options:
- print option, '=', cfg[key].Get(section,option)
- dumpCfg(idleConf.defaultCfg)
- dumpCfg(idleConf.userCfg)
- print idleConf.userCfg['main'].Get('Theme','name')
- #print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
diff --git a/sys/lib/python/idlelib/configHelpSourceEdit.py b/sys/lib/python/idlelib/configHelpSourceEdit.py
deleted file mode 100644
index 661162196..000000000
--- a/sys/lib/python/idlelib/configHelpSourceEdit.py
+++ /dev/null
@@ -1,169 +0,0 @@
-"Dialog to specify or edit the parameters for a user configured help source."
-
-import os
-import sys
-
-from Tkinter import *
-import tkMessageBox
-import tkFileDialog
-
-class GetHelpSourceDialog(Toplevel):
- def __init__(self, parent, title, menuItem='', filePath=''):
- """Get menu entry and url/ local file location for Additional Help
-
- User selects a name for the Help resource and provides a web url
- or a local file as its source. The user can enter a url or browse
- for the file.
-
- """
- Toplevel.__init__(self, parent)
- self.configure(borderwidth=5)
- self.resizable(height=FALSE, width=FALSE)
- self.title(title)
- self.transient(parent)
- self.grab_set()
- self.protocol("WM_DELETE_WINDOW", self.Cancel)
- self.parent = parent
- self.result = None
- self.CreateWidgets()
- self.menu.set(menuItem)
- self.path.set(filePath)
- self.withdraw() #hide while setting geometry
- #needs to be done here so that the winfo_reqwidth is valid
- self.update_idletasks()
- #centre dialog over parent:
- self.geometry("+%d+%d" %
- ((parent.winfo_rootx() + ((parent.winfo_width()/2)
- -(self.winfo_reqwidth()/2)),
- parent.winfo_rooty() + ((parent.winfo_height()/2)
- -(self.winfo_reqheight()/2)))))
- self.deiconify() #geometry set, unhide
- self.bind('<Return>', self.Ok)
- self.wait_window()
-
- def CreateWidgets(self):
- self.menu = StringVar(self)
- self.path = StringVar(self)
- self.fontSize = StringVar(self)
- self.frameMain = Frame(self, borderwidth=2, relief=GROOVE)
- self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
- labelMenu = Label(self.frameMain, anchor=W, justify=LEFT,
- text='Menu Item:')
- self.entryMenu = Entry(self.frameMain, textvariable=self.menu,
- width=30)
- self.entryMenu.focus_set()
- labelPath = Label(self.frameMain, anchor=W, justify=LEFT,
- text='Help File Path: Enter URL or browse for file')
- self.entryPath = Entry(self.frameMain, textvariable=self.path,
- width=40)
- self.entryMenu.focus_set()
- labelMenu.pack(anchor=W, padx=5, pady=3)
- self.entryMenu.pack(anchor=W, padx=5, pady=3)
- labelPath.pack(anchor=W, padx=5, pady=3)
- self.entryPath.pack(anchor=W, padx=5, pady=3)
- browseButton = Button(self.frameMain, text='Browse', width=8,
- command=self.browseFile)
- browseButton.pack(pady=3)
- frameButtons = Frame(self)
- frameButtons.pack(side=BOTTOM, fill=X)
- self.buttonOk = Button(frameButtons, text='OK',
- width=8, default=ACTIVE, command=self.Ok)
- self.buttonOk.grid(row=0, column=0, padx=5,pady=5)
- self.buttonCancel = Button(frameButtons, text='Cancel',
- width=8, command=self.Cancel)
- self.buttonCancel.grid(row=0, column=1, padx=5, pady=5)
-
- def browseFile(self):
- filetypes = [
- ("HTML Files", "*.htm *.html", "TEXT"),
- ("PDF Files", "*.pdf", "TEXT"),
- ("Windows Help Files", "*.chm"),
- ("Text Files", "*.txt", "TEXT"),
- ("All Files", "*")]
- path = self.path.get()
- if path:
- dir, base = os.path.split(path)
- else:
- base = None
- if sys.platform[:3] == 'win':
- dir = os.path.join(os.path.dirname(sys.executable), 'Doc')
- if not os.path.isdir(dir):
- dir = os.getcwd()
- else:
- dir = os.getcwd()
- opendialog = tkFileDialog.Open(parent=self, filetypes=filetypes)
- file = opendialog.show(initialdir=dir, initialfile=base)
- if file:
- self.path.set(file)
-
- def MenuOk(self):
- "Simple validity check for a sensible menu item name"
- menuOk = True
- menu = self.menu.get()
- menu.strip()
- if not menu:
- tkMessageBox.showerror(title='Menu Item Error',
- message='No menu item specified',
- parent=self)
- self.entryMenu.focus_set()
- menuOk = False
- elif len(menu) > 30:
- tkMessageBox.showerror(title='Menu Item Error',
- message='Menu item too long:'
- '\nLimit 30 characters.',
- parent=self)
- self.entryMenu.focus_set()
- menuOk = False
- return menuOk
-
- def PathOk(self):
- "Simple validity check for menu file path"
- pathOk = True
- path = self.path.get()
- path.strip()
- if not path: #no path specified
- tkMessageBox.showerror(title='File Path Error',
- message='No help file path specified.',
- parent=self)
- self.entryPath.focus_set()
- pathOk = False
- elif path.startswith(('www.', 'http')):
- pass
- else:
- if path[:5] == 'file:':
- path = path[5:]
- if not os.path.exists(path):
- tkMessageBox.showerror(title='File Path Error',
- message='Help file path does not exist.',
- parent=self)
- self.entryPath.focus_set()
- pathOk = False
- return pathOk
-
- def Ok(self, event=None):
- if self.MenuOk() and self.PathOk():
- self.result = (self.menu.get().strip(),
- self.path.get().strip())
- if sys.platform == 'darwin':
- path = self.result[1]
- if path.startswith(('www', 'file:', 'http:')):
- pass
- else:
- # Mac Safari insists on using the URI form for local files
- self.result = list(self.result)
- self.result[1] = "file://" + path
- self.destroy()
-
- def Cancel(self, event=None):
- self.result = None
- self.destroy()
-
-if __name__ == '__main__':
- #test the dialog
- root = Tk()
- def run():
- keySeq = ''
- dlg = GetHelpSourceDialog(root, 'Get Help Source')
- print dlg.result
- Button(root,text='Dialog', command=run).pack()
- root.mainloop()
diff --git a/sys/lib/python/idlelib/configSectionNameDialog.py b/sys/lib/python/idlelib/configSectionNameDialog.py
deleted file mode 100644
index 4f1b002af..000000000
--- a/sys/lib/python/idlelib/configSectionNameDialog.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Dialog that allows user to specify a new config file section name.
-Used to get new highlight theme and keybinding set names.
-"""
-from Tkinter import *
-import tkMessageBox
-
-class GetCfgSectionNameDialog(Toplevel):
- def __init__(self,parent,title,message,usedNames):
- """
- message - string, informational message to display
- usedNames - list, list of names already in use for validity check
- """
- Toplevel.__init__(self, parent)
- self.configure(borderwidth=5)
- self.resizable(height=FALSE,width=FALSE)
- self.title(title)
- self.transient(parent)
- self.grab_set()
- self.protocol("WM_DELETE_WINDOW", self.Cancel)
- self.parent = parent
- self.message=message
- self.usedNames=usedNames
- self.result=''
- self.CreateWidgets()
- self.withdraw() #hide while setting geometry
- self.update_idletasks()
- #needs to be done here so that the winfo_reqwidth is valid
- self.messageInfo.config(width=self.frameMain.winfo_reqwidth())
- self.geometry("+%d+%d" %
- ((parent.winfo_rootx()+((parent.winfo_width()/2)
- -(self.winfo_reqwidth()/2)),
- parent.winfo_rooty()+((parent.winfo_height()/2)
- -(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
- self.deiconify() #geometry set, unhide
- self.wait_window()
-
- def CreateWidgets(self):
- self.name=StringVar(self)
- self.fontSize=StringVar(self)
- self.frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
- self.frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
- self.messageInfo=Message(self.frameMain,anchor=W,justify=LEFT,padx=5,pady=5,
- text=self.message)#,aspect=200)
- entryName=Entry(self.frameMain,textvariable=self.name,width=30)
- entryName.focus_set()
- self.messageInfo.pack(padx=5,pady=5)#,expand=TRUE,fill=BOTH)
- entryName.pack(padx=5,pady=5)
- frameButtons=Frame(self)
- frameButtons.pack(side=BOTTOM,fill=X)
- self.buttonOk = Button(frameButtons,text='Ok',
- width=8,command=self.Ok)
- self.buttonOk.grid(row=0,column=0,padx=5,pady=5)
- self.buttonCancel = Button(frameButtons,text='Cancel',
- width=8,command=self.Cancel)
- self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
-
- def NameOk(self):
- #simple validity check for a sensible
- #ConfigParser file section name
- nameOk=1
- name=self.name.get()
- name.strip()
- if not name: #no name specified
- tkMessageBox.showerror(title='Name Error',
- message='No name specified.', parent=self)
- nameOk=0
- elif len(name)>30: #name too long
- tkMessageBox.showerror(title='Name Error',
- message='Name too long. It should be no more than '+
- '30 characters.', parent=self)
- nameOk=0
- elif name in self.usedNames:
- tkMessageBox.showerror(title='Name Error',
- message='This name is already in use.', parent=self)
- nameOk=0
- return nameOk
-
- def Ok(self, event=None):
- if self.NameOk():
- self.result=self.name.get().strip()
- self.destroy()
-
- def Cancel(self, event=None):
- self.result=''
- self.destroy()
-
-if __name__ == '__main__':
- #test the dialog
- root=Tk()
- def run():
- keySeq=''
- dlg=GetCfgSectionNameDialog(root,'Get Name',
- 'The information here should need to be word wrapped. Test.')
- print dlg.result
- Button(root,text='Dialog',command=run).pack()
- root.mainloop()
diff --git a/sys/lib/python/idlelib/dynOptionMenuWidget.py b/sys/lib/python/idlelib/dynOptionMenuWidget.py
deleted file mode 100644
index e81f7babe..000000000
--- a/sys/lib/python/idlelib/dynOptionMenuWidget.py
+++ /dev/null
@@ -1,35 +0,0 @@
-"""
-OptionMenu widget modified to allow dynamic menu reconfiguration
-and setting of highlightthickness
-"""
-from Tkinter import OptionMenu
-from Tkinter import _setit
-import copy
-
-class DynOptionMenu(OptionMenu):
- """
- unlike OptionMenu, our kwargs can include highlightthickness
- """
- def __init__(self, master, variable, value, *values, **kwargs):
- #get a copy of kwargs before OptionMenu.__init__ munges them
- kwargsCopy=copy.copy(kwargs)
- if 'highlightthickness' in kwargs.keys():
- del(kwargs['highlightthickness'])
- OptionMenu.__init__(self, master, variable, value, *values, **kwargs)
- self.config(highlightthickness=kwargsCopy.get('highlightthickness'))
- #self.menu=self['menu']
- self.variable=variable
- self.command=kwargs.get('command')
-
- def SetMenu(self,valueList,value=None):
- """
- clear and reload the menu with a new set of options.
- valueList - list of new options
- value - initial value to set the optionmenu's menubutton to
- """
- self['menu'].delete(0,'end')
- for item in valueList:
- self['menu'].add_command(label=item,
- command=_setit(self.variable,item,self.command))
- if value:
- self.variable.set(value)
diff --git a/sys/lib/python/idlelib/extend.txt b/sys/lib/python/idlelib/extend.txt
deleted file mode 100644
index f5fb3e040..000000000
--- a/sys/lib/python/idlelib/extend.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Writing an IDLE extension
-=========================
-
-An IDLE extension can define new key bindings and menu entries for IDLE
-edit windows. There is a simple mechanism to load extensions when IDLE
-starts up and to attach them to each edit window. (It is also possible
-to make other changes to IDLE, but this must be done by editing the IDLE
-source code.)
-
-The list of extensions loaded at startup time is configured by editing
-the file config-extensions.def. See below for details.
-
-An IDLE extension is defined by a class. Methods of the class define
-actions that are invoked by event bindings or menu entries. Class (or
-instance) variables define the bindings and menu additions; these are
-automatically applied by IDLE when the extension is linked to an edit
-window.
-
-An IDLE extension class is instantiated with a single argument,
-`editwin', an EditorWindow instance. The extension cannot assume much
-about this argument, but it is guarateed to have the following instance
-variables:
-
- text a Text instance (a widget)
- io an IOBinding instance (more about this later)
- flist the FileList instance (shared by all edit windows)
-
-(There are a few more, but they are rarely useful.)
-
-The extension class must not directly bind Window Manager (e.g. X) events.
-Rather, it must define one or more virtual events, e.g. <<zoom-height>>, and
-corresponding methods, e.g. zoom_height_event(). The virtual events will be
-bound to the corresponding methods, and Window Manager events can then be bound
-to the virtual events. (This indirection is done so that the key bindings can
-easily be changed, and so that other sources of virtual events can exist, such
-as menu entries.)
-
-An extension can define menu entries. This is done with a class or instance
-variable named menudefs; it should be a list of pairs, where each pair is a
-menu name (lowercase) and a list of menu entries. Each menu entry is either
-None (to insert a separator entry) or a pair of strings (menu_label,
-virtual_event). Here, menu_label is the label of the menu entry, and
-virtual_event is the virtual event to be generated when the entry is selected.
-An underscore in the menu label is removed; the character following the
-underscore is displayed underlined, to indicate the shortcut character (for
-Windows).
-
-At the moment, extensions cannot define whole new menus; they must define
-entries in existing menus. Some menus are not present on some windows; such
-entry definitions are then ignored, but key bindings are still applied. (This
-should probably be refined in the future.)
-
-Extensions are not required to define menu entries for all the events they
-implement. (They are also not required to create keybindings, but in that
-case there must be empty bindings in cofig-extensions.def)
-
-Here is a complete example example:
-
-class ZoomHeight:
-
- menudefs = [
- ('edit', [
- None, # Separator
- ('_Zoom Height', '<<zoom-height>>'),
- ])
- ]
-
- def __init__(self, editwin):
- self.editwin = editwin
-
- def zoom_height_event(self, event):
- "...Do what you want here..."
-
-The final piece of the puzzle is the file "config-extensions.def", which is
-used to to configure the loading of extensions and to establish key (or, more
-generally, event) bindings to the virtual events defined in the extensions.
-
-See the comments at the top of config-extensions.def for information. It's
-currently necessary to manually modify that file to change IDLE's extension
-loading or extension key bindings.
-
-For further information on binding refer to the Tkinter Resources web page at
-python.org and to the Tk Command "bind" man page.
diff --git a/sys/lib/python/idlelib/help.txt b/sys/lib/python/idlelib/help.txt
deleted file mode 100644
index 6d2ba2fff..000000000
--- a/sys/lib/python/idlelib/help.txt
+++ /dev/null
@@ -1,213 +0,0 @@
-[See the end of this file for ** TIPS ** on using IDLE !!]
-
-Click on the dotted line at the top of a menu to "tear it off": a
-separate window containing the menu is created.
-
-File Menu:
-
- New Window -- Create a new editing window
- Open... -- Open an existing file
- Recent Files... -- Open a list of recent files
- Open Module... -- Open an existing module (searches sys.path)
- Class Browser -- Show classes and methods in current file
- Path Browser -- Show sys.path directories, modules, classes
- and methods
- ---
- Save -- Save current window to the associated file (unsaved
- windows have a * before and after the window title)
-
- Save As... -- Save current window to new file, which becomes
- the associated file
- Save Copy As... -- Save current window to different file
- without changing the associated file
- ---
- Print Window -- Print the current window
- ---
- Close -- Close current window (asks to save if unsaved)
- Exit -- Close all windows, quit (asks to save if unsaved)
-
-Edit Menu:
-
- Undo -- Undo last change to current window
- (A maximum of 1000 changes may be undone)
- Redo -- Redo last undone change to current window
- ---
- Cut -- Copy a selection into system-wide clipboard,
- then delete the selection
- Copy -- Copy selection into system-wide clipboard
- Paste -- Insert system-wide clipboard into window
- Select All -- Select the entire contents of the edit buffer
- ---
- Find... -- Open a search dialog box with many options
- Find Again -- Repeat last search
- Find Selection -- Search for the string in the selection
- Find in Files... -- Open a search dialog box for searching files
- Replace... -- Open a search-and-replace dialog box
- Go to Line -- Ask for a line number and show that line
- Expand Word -- Expand the word you have typed to match another
- word in the same buffer; repeat to get a
- different expansion
-
-Format Menu (only in Edit window):
-
- Indent Region -- Shift selected lines right 4 spaces
- Dedent Region -- Shift selected lines left 4 spaces
- Comment Out Region -- Insert ## in front of selected lines
- Uncomment Region -- Remove leading # or ## from selected lines
- Tabify Region -- Turns *leading* stretches of spaces into tabs
- (Note: We recommend using 4 space blocks to indent Python code.)
- Untabify Region -- Turn *all* tabs into the right number of spaces
- New Indent Width... -- Open dialog to change indent width
- Format Paragraph -- Reformat the current blank-line-separated
- paragraph
-
-Run Menu (only in Edit window):
-
- Python Shell -- Open or wake up the Python shell window
- ---
- Check Module -- Run a syntax check on the module
- Run Module -- Execute the current file in the __main__ namespace
-
-Shell Menu (only in Shell window):
-
- View Last Restart -- Scroll the shell window to the last restart
- Restart Shell -- Restart the interpreter with a fresh environment
-
-Debug Menu (only in Shell window):
-
- Go to File/Line -- look around the insert point for a filename
- and linenumber, open the file, and show the line
- Debugger (toggle) -- Run commands in the shell under the debugger
- Stack Viewer -- Show the stack traceback of the last exception
- Auto-open Stack Viewer (toggle) -- Open stack viewer on traceback
-
-Options Menu:
-
- Configure IDLE -- Open a configuration dialog. Fonts, indentation,
- keybindings, and color themes may be altered.
- Startup Preferences may be set, and Additional Help
- Souces can be specified.
- ---
- Code Context -- Open a pane at the top of the edit window which
- shows the block context of the section of code
- which is scrolling off the top or the window.
-
-Windows Menu:
-
- Zoom Height -- toggles the window between configured size
- and maximum height.
- ---
- The rest of this menu lists the names of all open windows;
- select one to bring it to the foreground (deiconifying it if
- necessary).
-
-Help Menu:
-
- About IDLE -- Version, copyright, license, credits
- IDLE Readme -- Background discussion and change details
- ---
- IDLE Help -- Display this file
- Python Docs -- Access local Python documentation, if
- installed. Otherwise, access www.python.org.
- ---
- (Additional Help Sources may be added here)
-
-
-** TIPS **
-==========
-
-Additional Help Sources:
-
- Windows users can Google on zopeshelf.chm to access Zope help files in
- the Windows help format. The Additional Help Sources feature of the
- configuration GUI supports .chm, along with any other filetypes
- supported by your browser. Supply a Menu Item title, and enter the
- location in the Help File Path slot of the New Help Source dialog. Use
- http:// and/or www. to identify external URLs, or download the file and
- browse for its path on your machine using the Browse button.
-
- All users can access the extensive sources of help, including
- tutorials, available at www.python.org/doc. Selected URLs can be added
- or removed from the Help menu at any time using Configure IDLE.
-
-Basic editing and navigation:
-
- Backspace deletes char to the left; DEL deletes char to the right.
- Control-backspace deletes word left, Control-DEL deletes word right.
- Arrow keys and Page Up/Down move around.
- Control-left/right Arrow moves by words in a strange but useful way.
- Home/End go to begin/end of line.
- Control-Home/End go to begin/end of file.
- Some useful Emacs bindings (Control-a, Control-e, Control-k, etc.)
- are inherited from Tcl/Tk.
- Standard Windows bindings may work on that platform.
- Keybindings are selected in the Settings Dialog, look there.
-
-Automatic indentation:
-
- After a block-opening statement, the next line is indented by 4 spaces
- (in the Python Shell window by one tab). After certain keywords
- (break, return etc.) the next line is dedented. In leading
- indentation, Backspace deletes up to 4 spaces if they are there. Tab
- inserts spaces (in the Python Shell window one tab), number depends on
- Indent Width. (N.B. Currently tabs are restricted to four spaces due
- to Tcl/Tk issues.)
-
- See also the indent/dedent region commands in the edit menu.
-
-Python Shell window:
-
- Control-c interrupts executing command.
- Control-d sends end-of-file; closes window if typed at >>> prompt
- (this is Control-z on Windows).
-
- Command history:
-
- Alt-p retrieves previous command matching what you have typed.
- Alt-n retrieves next.
- (These are Control-p, Control-n on the Mac)
- Return while cursor is on a previous command retrieves that command.
- Expand word is also useful to reduce typing.
-
- Syntax colors:
-
- The coloring is applied in a background "thread", so you may
- occasionally see uncolorized text. To change the color
- scheme, use the Configure IDLE / Highlighting dialog.
-
- Python default syntax colors:
-
- Keywords orange
- Builtins royal purple
- Strings green
- Comments red
- Definitions blue
-
- Shell default colors:
-
- Console output brown
- stdout blue
- stderr red
- stdin black
-
-Other preferences:
-
- The font preferences, keybinding, and startup preferences can
- be changed using the Settings dialog.
-
-Command line usage:
-
- Enter idle -h at the command prompt to get a usage message.
-
-Running without a subprocess:
-
- If IDLE is started with the -n command line switch it will run in a
- single process and will not create the subprocess which runs the RPC
- Python execution server. This can be useful if Python cannot create
- the subprocess or the RPC socket interface on your platform. However,
- in this mode user code is not isolated from IDLE itself. Also, the
- environment is not restarted when Run/Run Module (F5) is selected. If
- your code has been modified, you must reload() the affected modules and
- re-import any specific items (e.g. from foo import baz) if the changes
- are to take effect. For these reasons, it is preferable to run IDLE
- with the default subprocess if at all possible.
diff --git a/sys/lib/python/idlelib/idle.bat b/sys/lib/python/idlelib/idle.bat
deleted file mode 100755
index c1b5fd28a..000000000
--- a/sys/lib/python/idlelib/idle.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-@echo off
-rem Working IDLE bat for Windows - uses start instead of absolute pathname
-start idle.pyw %1 %2 %3 %4 %5 %6 %7 %8 %9
diff --git a/sys/lib/python/idlelib/idle.py b/sys/lib/python/idlelib/idle.py
deleted file mode 100644
index 537dd5a9a..000000000
--- a/sys/lib/python/idlelib/idle.py
+++ /dev/null
@@ -1,21 +0,0 @@
-try:
- import idlelib.PyShell
-except ImportError:
- # IDLE is not installed, but maybe PyShell is on sys.path:
- try:
- import PyShell
- except ImportError:
- raise
- else:
- import os
- idledir = os.path.dirname(os.path.abspath(PyShell.__file__))
- if idledir != os.getcwd():
- # We're not in the IDLE directory, help the subprocess find run.py
- pypath = os.environ.get('PYTHONPATH', '')
- if pypath:
- os.environ['PYTHONPATH'] = pypath + ':' + idledir
- else:
- os.environ['PYTHONPATH'] = idledir
- PyShell.main()
-else:
- idlelib.PyShell.main()
diff --git a/sys/lib/python/idlelib/idle.pyw b/sys/lib/python/idlelib/idle.pyw
deleted file mode 100644
index 537dd5a9a..000000000
--- a/sys/lib/python/idlelib/idle.pyw
+++ /dev/null
@@ -1,21 +0,0 @@
-try:
- import idlelib.PyShell
-except ImportError:
- # IDLE is not installed, but maybe PyShell is on sys.path:
- try:
- import PyShell
- except ImportError:
- raise
- else:
- import os
- idledir = os.path.dirname(os.path.abspath(PyShell.__file__))
- if idledir != os.getcwd():
- # We're not in the IDLE directory, help the subprocess find run.py
- pypath = os.environ.get('PYTHONPATH', '')
- if pypath:
- os.environ['PYTHONPATH'] = pypath + ':' + idledir
- else:
- os.environ['PYTHONPATH'] = idledir
- PyShell.main()
-else:
- idlelib.PyShell.main()
diff --git a/sys/lib/python/idlelib/idlever.py b/sys/lib/python/idlelib/idlever.py
deleted file mode 100644
index 9f7fb5a5b..000000000
--- a/sys/lib/python/idlelib/idlever.py
+++ /dev/null
@@ -1 +0,0 @@
-IDLE_VERSION = "1.2.1"
diff --git a/sys/lib/python/idlelib/keybindingDialog.py b/sys/lib/python/idlelib/keybindingDialog.py
deleted file mode 100644
index aff9cac58..000000000
--- a/sys/lib/python/idlelib/keybindingDialog.py
+++ /dev/null
@@ -1,268 +0,0 @@
-"""
-Dialog for building Tkinter accelerator key bindings
-"""
-from Tkinter import *
-import tkMessageBox
-import string, os
-
-class GetKeysDialog(Toplevel):
- def __init__(self,parent,title,action,currentKeySequences):
- """
- action - string, the name of the virtual event these keys will be
- mapped to
- currentKeys - list, a list of all key sequence lists currently mapped
- to virtual events, for overlap checking
- """
- Toplevel.__init__(self, parent)
- self.configure(borderwidth=5)
- self.resizable(height=FALSE,width=FALSE)
- self.title(title)
- self.transient(parent)
- self.grab_set()
- self.protocol("WM_DELETE_WINDOW", self.Cancel)
- self.parent = parent
- self.action=action
- self.currentKeySequences=currentKeySequences
- self.result=''
- self.keyString=StringVar(self)
- self.keyString.set('')
- self.SetModifiersForPlatform() # set self.modifiers, self.modifier_label
- self.modifier_vars = []
- for modifier in self.modifiers:
- variable = StringVar(self)
- variable.set('')
- self.modifier_vars.append(variable)
- self.advanced = False
- self.CreateWidgets()
- self.LoadFinalKeyList()
- self.withdraw() #hide while setting geometry
- self.update_idletasks()
- self.geometry("+%d+%d" %
- ((parent.winfo_rootx()+((parent.winfo_width()/2)
- -(self.winfo_reqwidth()/2)),
- parent.winfo_rooty()+((parent.winfo_height()/2)
- -(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
- self.deiconify() #geometry set, unhide
- self.wait_window()
-
- def CreateWidgets(self):
- frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
- frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
- frameButtons=Frame(self)
- frameButtons.pack(side=BOTTOM,fill=X)
- self.buttonOK = Button(frameButtons,text='OK',
- width=8,command=self.OK)
- self.buttonOK.grid(row=0,column=0,padx=5,pady=5)
- self.buttonCancel = Button(frameButtons,text='Cancel',
- width=8,command=self.Cancel)
- self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
- self.frameKeySeqBasic = Frame(frameMain)
- self.frameKeySeqAdvanced = Frame(frameMain)
- self.frameControlsBasic = Frame(frameMain)
- self.frameHelpAdvanced = Frame(frameMain)
- self.frameKeySeqAdvanced.grid(row=0,column=0,sticky=NSEW,padx=5,pady=5)
- self.frameKeySeqBasic.grid(row=0,column=0,sticky=NSEW,padx=5,pady=5)
- self.frameKeySeqBasic.lift()
- self.frameHelpAdvanced.grid(row=1,column=0,sticky=NSEW,padx=5)
- self.frameControlsBasic.grid(row=1,column=0,sticky=NSEW,padx=5)
- self.frameControlsBasic.lift()
- self.buttonLevel = Button(frameMain,command=self.ToggleLevel,
- text='Advanced Key Binding Entry >>')
- self.buttonLevel.grid(row=2,column=0,stick=EW,padx=5,pady=5)
- labelTitleBasic = Label(self.frameKeySeqBasic,
- text="New keys for '"+self.action+"' :")
- labelTitleBasic.pack(anchor=W)
- labelKeysBasic = Label(self.frameKeySeqBasic,justify=LEFT,
- textvariable=self.keyString,relief=GROOVE,borderwidth=2)
- labelKeysBasic.pack(ipadx=5,ipady=5,fill=X)
- self.modifier_checkbuttons = {}
- column = 0
- for modifier, variable in zip(self.modifiers, self.modifier_vars):
- label = self.modifier_label.get(modifier, modifier)
- check=Checkbutton(self.frameControlsBasic,
- command=self.BuildKeyString,
- text=label,variable=variable,onvalue=modifier,offvalue='')
- check.grid(row=0,column=column,padx=2,sticky=W)
- self.modifier_checkbuttons[modifier] = check
- column += 1
- labelFnAdvice=Label(self.frameControlsBasic,justify=LEFT,
- text=\
- "Select the desired modifier keys\n"+
- "above, and the final key from the\n"+
- "list on the right.\n\n" +
- "Use upper case Symbols when using\n" +
- "the Shift modifier. (Letters will be\n" +
- "converted automatically.)")
- labelFnAdvice.grid(row=1,column=0,columnspan=4,padx=2,sticky=W)
- self.listKeysFinal=Listbox(self.frameControlsBasic,width=15,height=10,
- selectmode=SINGLE)
- self.listKeysFinal.bind('<ButtonRelease-1>',self.FinalKeySelected)
- self.listKeysFinal.grid(row=0,column=4,rowspan=4,sticky=NS)
- scrollKeysFinal=Scrollbar(self.frameControlsBasic,orient=VERTICAL,
- command=self.listKeysFinal.yview)
- self.listKeysFinal.config(yscrollcommand=scrollKeysFinal.set)
- scrollKeysFinal.grid(row=0,column=5,rowspan=4,sticky=NS)
- self.buttonClear=Button(self.frameControlsBasic,
- text='Clear Keys',command=self.ClearKeySeq)
- self.buttonClear.grid(row=2,column=0,columnspan=4)
- labelTitleAdvanced = Label(self.frameKeySeqAdvanced,justify=LEFT,
- text="Enter new binding(s) for '"+self.action+"' :\n"+
- "(These bindings will not be checked for validity!)")
- labelTitleAdvanced.pack(anchor=W)
- self.entryKeysAdvanced=Entry(self.frameKeySeqAdvanced,
- textvariable=self.keyString)
- self.entryKeysAdvanced.pack(fill=X)
- labelHelpAdvanced=Label(self.frameHelpAdvanced,justify=LEFT,
- text="Key bindings are specified using Tkinter keysyms as\n"+
- "in these samples: <Control-f>, <Shift-F2>, <F12>,\n"
- "<Control-space>, <Meta-less>, <Control-Alt-Shift-X>.\n"
- "Upper case is used when the Shift modifier is present!\n\n" +
- "'Emacs style' multi-keystroke bindings are specified as\n" +
- "follows: <Control-x><Control-y>, where the first key\n" +
- "is the 'do-nothing' keybinding.\n\n" +
- "Multiple separate bindings for one action should be\n"+
- "separated by a space, eg., <Alt-v> <Meta-v>." )
- labelHelpAdvanced.grid(row=0,column=0,sticky=NSEW)
-
- def SetModifiersForPlatform(self):
- """Determine list of names of key modifiers for this platform.
-
- The names are used to build Tk bindings -- it doesn't matter if the
- keyboard has these keys, it matters if Tk understands them. The
- order is also important: key binding equality depends on it, so
- config-keys.def must use the same ordering.
- """
- import sys
- if sys.platform == 'darwin' and sys.argv[0].count('.app'):
- self.modifiers = ['Shift', 'Control', 'Option', 'Command']
- else:
- self.modifiers = ['Control', 'Alt', 'Shift']
- self.modifier_label = {'Control': 'Ctrl'} # short name
-
- def ToggleLevel(self):
- if self.buttonLevel.cget('text')[:8]=='Advanced':
- self.ClearKeySeq()
- self.buttonLevel.config(text='<< Basic Key Binding Entry')
- self.frameKeySeqAdvanced.lift()
- self.frameHelpAdvanced.lift()
- self.entryKeysAdvanced.focus_set()
- self.advanced = True
- else:
- self.ClearKeySeq()
- self.buttonLevel.config(text='Advanced Key Binding Entry >>')
- self.frameKeySeqBasic.lift()
- self.frameControlsBasic.lift()
- self.advanced = False
-
- def FinalKeySelected(self,event):
- self.BuildKeyString()
-
- def BuildKeyString(self):
- keyList = modifiers = self.GetModifiers()
- finalKey = self.listKeysFinal.get(ANCHOR)
- if finalKey:
- finalKey = self.TranslateKey(finalKey, modifiers)
- keyList.append(finalKey)
- self.keyString.set('<' + string.join(keyList,'-') + '>')
-
- def GetModifiers(self):
- modList = [variable.get() for variable in self.modifier_vars]
- return filter(None, modList)
-
- def ClearKeySeq(self):
- self.listKeysFinal.select_clear(0,END)
- self.listKeysFinal.yview(MOVETO, '0.0')
- for variable in self.modifier_vars:
- variable.set('')
- self.keyString.set('')
-
- def LoadFinalKeyList(self):
- #these tuples are also available for use in validity checks
- self.functionKeys=('F1','F2','F2','F4','F5','F6','F7','F8','F9',
- 'F10','F11','F12')
- self.alphanumKeys=tuple(string.ascii_lowercase+string.digits)
- self.punctuationKeys=tuple('~!@#%^&*()_-+={}[]|;:,.<>/?')
- self.whitespaceKeys=('Tab','Space','Return')
- self.editKeys=('BackSpace','Delete','Insert')
- self.moveKeys=('Home','End','Page Up','Page Down','Left Arrow',
- 'Right Arrow','Up Arrow','Down Arrow')
- #make a tuple of most of the useful common 'final' keys
- keys=(self.alphanumKeys+self.punctuationKeys+self.functionKeys+
- self.whitespaceKeys+self.editKeys+self.moveKeys)
- self.listKeysFinal.insert(END, *keys)
-
- def TranslateKey(self, key, modifiers):
- "Translate from keycap symbol to the Tkinter keysym"
- translateDict = {'Space':'space',
- '~':'asciitilde','!':'exclam','@':'at','#':'numbersign',
- '%':'percent','^':'asciicircum','&':'ampersand','*':'asterisk',
- '(':'parenleft',')':'parenright','_':'underscore','-':'minus',
- '+':'plus','=':'equal','{':'braceleft','}':'braceright',
- '[':'bracketleft',']':'bracketright','|':'bar',';':'semicolon',
- ':':'colon',',':'comma','.':'period','<':'less','>':'greater',
- '/':'slash','?':'question','Page Up':'Prior','Page Down':'Next',
- 'Left Arrow':'Left','Right Arrow':'Right','Up Arrow':'Up',
- 'Down Arrow': 'Down', 'Tab':'Tab'}
- if key in translateDict.keys():
- key = translateDict[key]
- if 'Shift' in modifiers and key in string.ascii_lowercase:
- key = key.upper()
- key = 'Key-' + key
- return key
-
- def OK(self, event=None):
- if self.advanced or self.KeysOK(): # doesn't check advanced string yet
- self.result=self.keyString.get()
- self.destroy()
-
- def Cancel(self, event=None):
- self.result=''
- self.destroy()
-
- def KeysOK(self):
- '''Validity check on user's 'basic' keybinding selection.
-
- Doesn't check the string produced by the advanced dialog because
- 'modifiers' isn't set.
-
- '''
- keys = self.keyString.get()
- keys.strip()
- finalKey = self.listKeysFinal.get(ANCHOR)
- modifiers = self.GetModifiers()
- # create a key sequence list for overlap check:
- keySequence = keys.split()
- keysOK = False
- title = 'Key Sequence Error'
- if not keys:
- tkMessageBox.showerror(title=title, parent=self,
- message='No keys specified.')
- elif not keys.endswith('>'):
- tkMessageBox.showerror(title=title, parent=self,
- message='Missing the final Key')
- elif (not modifiers
- and finalKey not in self.functionKeys + self.moveKeys):
- tkMessageBox.showerror(title=title, parent=self,
- message='No modifier key(s) specified.')
- elif (modifiers == ['Shift']) \
- and (finalKey not in
- self.functionKeys + self.moveKeys + ('Tab', 'Space')):
- msg = 'The shift modifier by itself may not be used with'\
- ' this key symbol.'
- tkMessageBox.showerror(title=title, parent=self, message=msg)
- elif keySequence in self.currentKeySequences:
- msg = 'This key combination is already in use.'
- tkMessageBox.showerror(title=title, parent=self, message=msg)
- else:
- keysOK = True
- return keysOK
-
-if __name__ == '__main__':
- #test the dialog
- root=Tk()
- def run():
- keySeq=''
- dlg=GetKeysDialog(root,'Get Keys','find-again',[])
- print dlg.result
- Button(root,text='Dialog',command=run).pack()
- root.mainloop()
diff --git a/sys/lib/python/idlelib/macosxSupport.py b/sys/lib/python/idlelib/macosxSupport.py
deleted file mode 100644
index ad61fff46..000000000
--- a/sys/lib/python/idlelib/macosxSupport.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""
-A number of function that enhance IDLE on MacOSX when it used as a normal
-GUI application (as opposed to an X11 application).
-"""
-import sys
-
-def runningAsOSXApp():
- """ Returns True iff running from the IDLE.app bundle on OSX """
- return (sys.platform == 'darwin' and 'IDLE.app' in sys.argv[0])
-
-def addOpenEventSupport(root, flist):
- """
- This ensures that the application will respont to open AppleEvents, which
- makes is feaseable to use IDLE as the default application for python files.
- """
- def doOpenFile(*args):
- for fn in args:
- flist.open(fn)
-
- # The command below is a hook in aquatk that is called whenever the app
- # receives a file open event. The callback can have multiple arguments,
- # one for every file that should be opened.
- root.createcommand("::tk::mac::OpenDocument", doOpenFile)
-
-def hideTkConsole(root):
- root.tk.call('console', 'hide')
-
-def overrideRootMenu(root, flist):
- """
- Replace the Tk root menu by something that's more appropriate for
- IDLE.
- """
- # The menu that is attached to the Tk root (".") is also used by AquaTk for
- # all windows that don't specify a menu of their own. The default menubar
- # contains a number of menus, none of which are appropriate for IDLE. The
- # Most annoying of those is an 'About Tck/Tk...' menu in the application
- # menu.
- #
- # This function replaces the default menubar by a mostly empty one, it
- # should only contain the correct application menu and the window menu.
- #
- # Due to a (mis-)feature of TkAqua the user will also see an empty Help
- # menu.
- from Tkinter import Menu, Text, Text
- from EditorWindow import prepstr, get_accelerator
- import Bindings
- import WindowList
- from MultiCall import MultiCallCreator
-
- menubar = Menu(root)
- root.configure(menu=menubar)
- menudict = {}
-
- menudict['windows'] = menu = Menu(menubar, name='windows')
- menubar.add_cascade(label='Window', menu=menu, underline=0)
-
- def postwindowsmenu(menu=menu):
- end = menu.index('end')
- if end is None:
- end = -1
-
- if end > 0:
- menu.delete(0, end)
- WindowList.add_windows_to_menu(menu)
- WindowList.register_callback(postwindowsmenu)
-
- menudict['application'] = menu = Menu(menubar, name='apple')
- menubar.add_cascade(label='IDLE', menu=menu)
-
- def about_dialog(event=None):
- import aboutDialog
- aboutDialog.AboutDialog(root, 'About IDLE')
-
- def config_dialog(event=None):
- import configDialog
- configDialog.ConfigDialog(root, 'Settings')
-
- root.bind('<<about-idle>>', about_dialog)
- root.bind('<<open-config-dialog>>', config_dialog)
- if flist:
- root.bind('<<close-all-windows>>', flist.close_all_callback)
-
- for mname, entrylist in Bindings.menudefs:
- menu = menudict.get(mname)
- if not menu:
- continue
- for entry in entrylist:
- if not entry:
- menu.add_separator()
- else:
- label, eventname = entry
- underline, label = prepstr(label)
- accelerator = get_accelerator(Bindings.default_keydefs,
- eventname)
- def command(text=root, eventname=eventname):
- text.event_generate(eventname)
- menu.add_command(label=label, underline=underline,
- command=command, accelerator=accelerator)
-
-
-
-
-
-def setupApp(root, flist):
- """
- Perform setup for the OSX application bundle.
- """
- if not runningAsOSXApp(): return
-
- hideTkConsole(root)
- overrideRootMenu(root, flist)
- addOpenEventSupport(root, flist)
diff --git a/sys/lib/python/idlelib/rpc.py b/sys/lib/python/idlelib/rpc.py
deleted file mode 100644
index 3bac6a30b..000000000
--- a/sys/lib/python/idlelib/rpc.py
+++ /dev/null
@@ -1,602 +0,0 @@
-"""RPC Implemention, originally written for the Python Idle IDE
-
-For security reasons, GvR requested that Idle's Python execution server process
-connect to the Idle process, which listens for the connection. Since Idle has
-has only one client per server, this was not a limitation.
-
- +---------------------------------+ +-------------+
- | SocketServer.BaseRequestHandler | | SocketIO |
- +---------------------------------+ +-------------+
- ^ | register() |
- | | unregister()|
- | +-------------+
- | ^ ^
- | | |
- | + -------------------+ |
- | | |
- +-------------------------+ +-----------------+
- | RPCHandler | | RPCClient |
- | [attribute of RPCServer]| | |
- +-------------------------+ +-----------------+
-
-The RPCServer handler class is expected to provide register/unregister methods.
-RPCHandler inherits the mix-in class SocketIO, which provides these methods.
-
-See the Idle run.main() docstring for further information on how this was
-accomplished in Idle.
-
-"""
-
-import sys
-import os
-import socket
-import select
-import SocketServer
-import struct
-import cPickle as pickle
-import threading
-import Queue
-import traceback
-import copy_reg
-import types
-import marshal
-
-
-def unpickle_code(ms):
- co = marshal.loads(ms)
- assert isinstance(co, types.CodeType)
- return co
-
-def pickle_code(co):
- assert isinstance(co, types.CodeType)
- ms = marshal.dumps(co)
- return unpickle_code, (ms,)
-
-# XXX KBK 24Aug02 function pickling capability not used in Idle
-# def unpickle_function(ms):
-# return ms
-
-# def pickle_function(fn):
-# assert isinstance(fn, type.FunctionType)
-# return repr(fn)
-
-copy_reg.pickle(types.CodeType, pickle_code, unpickle_code)
-# copy_reg.pickle(types.FunctionType, pickle_function, unpickle_function)
-
-BUFSIZE = 8*1024
-LOCALHOST = '127.0.0.1'
-
-class RPCServer(SocketServer.TCPServer):
-
- def __init__(self, addr, handlerclass=None):
- if handlerclass is None:
- handlerclass = RPCHandler
- SocketServer.TCPServer.__init__(self, addr, handlerclass)
-
- def server_bind(self):
- "Override TCPServer method, no bind() phase for connecting entity"
- pass
-
- def server_activate(self):
- """Override TCPServer method, connect() instead of listen()
-
- Due to the reversed connection, self.server_address is actually the
- address of the Idle Client to which we are connecting.
-
- """
- self.socket.connect(self.server_address)
-
- def get_request(self):
- "Override TCPServer method, return already connected socket"
- return self.socket, self.server_address
-
- def handle_error(self, request, client_address):
- """Override TCPServer method
-
- Error message goes to __stderr__. No error message if exiting
- normally or socket raised EOF. Other exceptions not handled in
- server code will cause os._exit.
-
- """
- try:
- raise
- except SystemExit:
- raise
- except:
- erf = sys.__stderr__
- print>>erf, '\n' + '-'*40
- print>>erf, 'Unhandled server exception!'
- print>>erf, 'Thread: %s' % threading.currentThread().getName()
- print>>erf, 'Client Address: ', client_address
- print>>erf, 'Request: ', repr(request)
- traceback.print_exc(file=erf)
- print>>erf, '\n*** Unrecoverable, server exiting!'
- print>>erf, '-'*40
- os._exit(0)
-
-#----------------- end class RPCServer --------------------
-
-objecttable = {}
-request_queue = Queue.Queue(0)
-response_queue = Queue.Queue(0)
-
-
-class SocketIO(object):
-
- nextseq = 0
-
- def __init__(self, sock, objtable=None, debugging=None):
- self.sockthread = threading.currentThread()
- if debugging is not None:
- self.debugging = debugging
- self.sock = sock
- if objtable is None:
- objtable = objecttable
- self.objtable = objtable
- self.responses = {}
- self.cvars = {}
-
- def close(self):
- sock = self.sock
- self.sock = None
- if sock is not None:
- sock.close()
-
- def exithook(self):
- "override for specific exit action"
- os._exit()
-
- def debug(self, *args):
- if not self.debugging:
- return
- s = self.location + " " + str(threading.currentThread().getName())
- for a in args:
- s = s + " " + str(a)
- print>>sys.__stderr__, s
-
- def register(self, oid, object):
- self.objtable[oid] = object
-
- def unregister(self, oid):
- try:
- del self.objtable[oid]
- except KeyError:
- pass
-
- def localcall(self, seq, request):
- self.debug("localcall:", request)
- try:
- how, (oid, methodname, args, kwargs) = request
- except TypeError:
- return ("ERROR", "Bad request format")
- if not self.objtable.has_key(oid):
- return ("ERROR", "Unknown object id: %r" % (oid,))
- obj = self.objtable[oid]
- if methodname == "__methods__":
- methods = {}
- _getmethods(obj, methods)
- return ("OK", methods)
- if methodname == "__attributes__":
- attributes = {}
- _getattributes(obj, attributes)
- return ("OK", attributes)
- if not hasattr(obj, methodname):
- return ("ERROR", "Unsupported method name: %r" % (methodname,))
- method = getattr(obj, methodname)
- try:
- if how == 'CALL':
- ret = method(*args, **kwargs)
- if isinstance(ret, RemoteObject):
- ret = remoteref(ret)
- return ("OK", ret)
- elif how == 'QUEUE':
- request_queue.put((seq, (method, args, kwargs)))
- return("QUEUED", None)
- else:
- return ("ERROR", "Unsupported message type: %s" % how)
- except SystemExit:
- raise
- except socket.error:
- raise
- except:
- msg = "*** Internal Error: rpc.py:SocketIO.localcall()\n\n"\
- " Object: %s \n Method: %s \n Args: %s\n"
- print>>sys.__stderr__, msg % (oid, method, args)
- traceback.print_exc(file=sys.__stderr__)
- return ("EXCEPTION", None)
-
- def remotecall(self, oid, methodname, args, kwargs):
- self.debug("remotecall:asynccall: ", oid, methodname)
- seq = self.asynccall(oid, methodname, args, kwargs)
- return self.asyncreturn(seq)
-
- def remotequeue(self, oid, methodname, args, kwargs):
- self.debug("remotequeue:asyncqueue: ", oid, methodname)
- seq = self.asyncqueue(oid, methodname, args, kwargs)
- return self.asyncreturn(seq)
-
- def asynccall(self, oid, methodname, args, kwargs):
- request = ("CALL", (oid, methodname, args, kwargs))
- seq = self.newseq()
- if threading.currentThread() != self.sockthread:
- cvar = threading.Condition()
- self.cvars[seq] = cvar
- self.debug(("asynccall:%d:" % seq), oid, methodname, args, kwargs)
- self.putmessage((seq, request))
- return seq
-
- def asyncqueue(self, oid, methodname, args, kwargs):
- request = ("QUEUE", (oid, methodname, args, kwargs))
- seq = self.newseq()
- if threading.currentThread() != self.sockthread:
- cvar = threading.Condition()
- self.cvars[seq] = cvar
- self.debug(("asyncqueue:%d:" % seq), oid, methodname, args, kwargs)
- self.putmessage((seq, request))
- return seq
-
- def asyncreturn(self, seq):
- self.debug("asyncreturn:%d:call getresponse(): " % seq)
- response = self.getresponse(seq, wait=0.05)
- self.debug(("asyncreturn:%d:response: " % seq), response)
- return self.decoderesponse(response)
-
- def decoderesponse(self, response):
- how, what = response
- if how == "OK":
- return what
- if how == "QUEUED":
- return None
- if how == "EXCEPTION":
- self.debug("decoderesponse: EXCEPTION")
- return None
- if how == "EOF":
- self.debug("decoderesponse: EOF")
- self.decode_interrupthook()
- return None
- if how == "ERROR":
- self.debug("decoderesponse: Internal ERROR:", what)
- raise RuntimeError, what
- raise SystemError, (how, what)
-
- def decode_interrupthook(self):
- ""
- raise EOFError
-
- def mainloop(self):
- """Listen on socket until I/O not ready or EOF
-
- pollresponse() will loop looking for seq number None, which
- never comes, and exit on EOFError.
-
- """
- try:
- self.getresponse(myseq=None, wait=0.05)
- except EOFError:
- self.debug("mainloop:return")
- return
-
- def getresponse(self, myseq, wait):
- response = self._getresponse(myseq, wait)
- if response is not None:
- how, what = response
- if how == "OK":
- response = how, self._proxify(what)
- return response
-
- def _proxify(self, obj):
- if isinstance(obj, RemoteProxy):
- return RPCProxy(self, obj.oid)
- if isinstance(obj, types.ListType):
- return map(self._proxify, obj)
- # XXX Check for other types -- not currently needed
- return obj
-
- def _getresponse(self, myseq, wait):
- self.debug("_getresponse:myseq:", myseq)
- if threading.currentThread() is self.sockthread:
- # this thread does all reading of requests or responses
- while 1:
- response = self.pollresponse(myseq, wait)
- if response is not None:
- return response
- else:
- # wait for notification from socket handling thread
- cvar = self.cvars[myseq]
- cvar.acquire()
- while not self.responses.has_key(myseq):
- cvar.wait()
- response = self.responses[myseq]
- self.debug("_getresponse:%s: thread woke up: response: %s" %
- (myseq, response))
- del self.responses[myseq]
- del self.cvars[myseq]
- cvar.release()
- return response
-
- def newseq(self):
- self.nextseq = seq = self.nextseq + 2
- return seq
-
- def putmessage(self, message):
- self.debug("putmessage:%d:" % message[0])
- try:
- s = pickle.dumps(message)
- except pickle.PicklingError:
- print >>sys.__stderr__, "Cannot pickle:", repr(message)
- raise
- s = struct.pack("<i", len(s)) + s
- while len(s) > 0:
- try:
- r, w, x = select.select([], [self.sock], [])
- n = self.sock.send(s[:BUFSIZE])
- except (AttributeError, TypeError):
- raise IOError, "socket no longer exists"
- except socket.error:
- raise
- else:
- s = s[n:]
-
- buffer = ""
- bufneed = 4
- bufstate = 0 # meaning: 0 => reading count; 1 => reading data
-
- def pollpacket(self, wait):
- self._stage0()
- if len(self.buffer) < self.bufneed:
- r, w, x = select.select([self.sock.fileno()], [], [], wait)
- if len(r) == 0:
- return None
- try:
- s = self.sock.recv(BUFSIZE)
- except socket.error:
- raise EOFError
- if len(s) == 0:
- raise EOFError
- self.buffer += s
- self._stage0()
- return self._stage1()
-
- def _stage0(self):
- if self.bufstate == 0 and len(self.buffer) >= 4:
- s = self.buffer[:4]
- self.buffer = self.buffer[4:]
- self.bufneed = struct.unpack("<i", s)[0]
- self.bufstate = 1
-
- def _stage1(self):
- if self.bufstate == 1 and len(self.buffer) >= self.bufneed:
- packet = self.buffer[:self.bufneed]
- self.buffer = self.buffer[self.bufneed:]
- self.bufneed = 4
- self.bufstate = 0
- return packet
-
- def pollmessage(self, wait):
- packet = self.pollpacket(wait)
- if packet is None:
- return None
- try:
- message = pickle.loads(packet)
- except pickle.UnpicklingError:
- print >>sys.__stderr__, "-----------------------"
- print >>sys.__stderr__, "cannot unpickle packet:", repr(packet)
- traceback.print_stack(file=sys.__stderr__)
- print >>sys.__stderr__, "-----------------------"
- raise
- return message
-
- def pollresponse(self, myseq, wait):
- """Handle messages received on the socket.
-
- Some messages received may be asynchronous 'call' or 'queue' requests,
- and some may be responses for other threads.
-
- 'call' requests are passed to self.localcall() with the expectation of
- immediate execution, during which time the socket is not serviced.
-
- 'queue' requests are used for tasks (which may block or hang) to be
- processed in a different thread. These requests are fed into
- request_queue by self.localcall(). Responses to queued requests are
- taken from response_queue and sent across the link with the associated
- sequence numbers. Messages in the queues are (sequence_number,
- request/response) tuples and code using this module removing messages
- from the request_queue is responsible for returning the correct
- sequence number in the response_queue.
-
- pollresponse() will loop until a response message with the myseq
- sequence number is received, and will save other responses in
- self.responses and notify the owning thread.
-
- """
- while 1:
- # send queued response if there is one available
- try:
- qmsg = response_queue.get(0)
- except Queue.Empty:
- pass
- else:
- seq, response = qmsg
- message = (seq, ('OK', response))
- self.putmessage(message)
- # poll for message on link
- try:
- message = self.pollmessage(wait)
- if message is None: # socket not ready
- return None
- except EOFError:
- self.handle_EOF()
- return None
- except AttributeError:
- return None
- seq, resq = message
- how = resq[0]
- self.debug("pollresponse:%d:myseq:%s" % (seq, myseq))
- # process or queue a request
- if how in ("CALL", "QUEUE"):
- self.debug("pollresponse:%d:localcall:call:" % seq)
- response = self.localcall(seq, resq)
- self.debug("pollresponse:%d:localcall:response:%s"
- % (seq, response))
- if how == "CALL":
- self.putmessage((seq, response))
- elif how == "QUEUE":
- # don't acknowledge the 'queue' request!
- pass
- continue
- # return if completed message transaction
- elif seq == myseq:
- return resq
- # must be a response for a different thread:
- else:
- cv = self.cvars.get(seq, None)
- # response involving unknown sequence number is discarded,
- # probably intended for prior incarnation of server
- if cv is not None:
- cv.acquire()
- self.responses[seq] = resq
- cv.notify()
- cv.release()
- continue
-
- def handle_EOF(self):
- "action taken upon link being closed by peer"
- self.EOFhook()
- self.debug("handle_EOF")
- for key in self.cvars:
- cv = self.cvars[key]
- cv.acquire()
- self.responses[key] = ('EOF', None)
- cv.notify()
- cv.release()
- # call our (possibly overridden) exit function
- self.exithook()
-
- def EOFhook(self):
- "Classes using rpc client/server can override to augment EOF action"
- pass
-
-#----------------- end class SocketIO --------------------
-
-class RemoteObject(object):
- # Token mix-in class
- pass
-
-def remoteref(obj):
- oid = id(obj)
- objecttable[oid] = obj
- return RemoteProxy(oid)
-
-class RemoteProxy(object):
-
- def __init__(self, oid):
- self.oid = oid
-
-class RPCHandler(SocketServer.BaseRequestHandler, SocketIO):
-
- debugging = False
- location = "#S" # Server
-
- def __init__(self, sock, addr, svr):
- svr.current_handler = self ## cgt xxx
- SocketIO.__init__(self, sock)
- SocketServer.BaseRequestHandler.__init__(self, sock, addr, svr)
-
- def handle(self):
- "handle() method required by SocketServer"
- self.mainloop()
-
- def get_remote_proxy(self, oid):
- return RPCProxy(self, oid)
-
-class RPCClient(SocketIO):
-
- debugging = False
- location = "#C" # Client
-
- nextseq = 1 # Requests coming from the client are odd numbered
-
- def __init__(self, address, family=socket.AF_INET, type=socket.SOCK_STREAM):
- self.listening_sock = socket.socket(family, type)
- self.listening_sock.setsockopt(socket.SOL_SOCKET,
- socket.SO_REUSEADDR, 1)
- self.listening_sock.bind(address)
- self.listening_sock.listen(1)
-
- def accept(self):
- working_sock, address = self.listening_sock.accept()
- if self.debugging:
- print>>sys.__stderr__, "****** Connection request from ", address
- if address[0] == LOCALHOST:
- SocketIO.__init__(self, working_sock)
- else:
- print>>sys.__stderr__, "** Invalid host: ", address
- raise socket.error
-
- def get_remote_proxy(self, oid):
- return RPCProxy(self, oid)
-
-class RPCProxy(object):
-
- __methods = None
- __attributes = None
-
- def __init__(self, sockio, oid):
- self.sockio = sockio
- self.oid = oid
-
- def __getattr__(self, name):
- if self.__methods is None:
- self.__getmethods()
- if self.__methods.get(name):
- return MethodProxy(self.sockio, self.oid, name)
- if self.__attributes is None:
- self.__getattributes()
- if self.__attributes.has_key(name):
- value = self.sockio.remotecall(self.oid, '__getattribute__',
- (name,), {})
- return value
- else:
- raise AttributeError, name
-
- def __getattributes(self):
- self.__attributes = self.sockio.remotecall(self.oid,
- "__attributes__", (), {})
-
- def __getmethods(self):
- self.__methods = self.sockio.remotecall(self.oid,
- "__methods__", (), {})
-
-def _getmethods(obj, methods):
- # Helper to get a list of methods from an object
- # Adds names to dictionary argument 'methods'
- for name in dir(obj):
- attr = getattr(obj, name)
- if callable(attr):
- methods[name] = 1
- if type(obj) == types.InstanceType:
- _getmethods(obj.__class__, methods)
- if type(obj) == types.ClassType:
- for super in obj.__bases__:
- _getmethods(super, methods)
-
-def _getattributes(obj, attributes):
- for name in dir(obj):
- attr = getattr(obj, name)
- if not callable(attr):
- attributes[name] = 1
-
-class MethodProxy(object):
-
- def __init__(self, sockio, oid, name):
- self.sockio = sockio
- self.oid = oid
- self.name = name
-
- def __call__(self, *args, **kwargs):
- value = self.sockio.remotecall(self.oid, self.name, args, kwargs)
- return value
-
-
-# XXX KBK 09Sep03 We need a proper unit test for this module. Previously
-# existing test code was removed at Rev 1.27.
diff --git a/sys/lib/python/idlelib/run.py b/sys/lib/python/idlelib/run.py
deleted file mode 100644
index ae810c4ec..000000000
--- a/sys/lib/python/idlelib/run.py
+++ /dev/null
@@ -1,327 +0,0 @@
-import sys
-import os
-import linecache
-import time
-import socket
-import traceback
-import thread
-import threading
-import Queue
-
-import CallTips
-import AutoComplete
-
-import RemoteDebugger
-import RemoteObjectBrowser
-import StackViewer
-import rpc
-
-import __main__
-
-LOCALHOST = '127.0.0.1'
-
-try:
- import warnings
-except ImportError:
- pass
-else:
- def idle_formatwarning_subproc(message, category, filename, lineno):
- """Format warnings the IDLE way"""
- s = "\nWarning (from warnings module):\n"
- s += ' File \"%s\", line %s\n' % (filename, lineno)
- line = linecache.getline(filename, lineno).strip()
- if line:
- s += " %s\n" % line
- s += "%s: %s\n" % (category.__name__, message)
- return s
- warnings.formatwarning = idle_formatwarning_subproc
-
-# Thread shared globals: Establish a queue between a subthread (which handles
-# the socket) and the main thread (which runs user code), plus global
-# completion and exit flags:
-
-exit_now = False
-quitting = False
-
-def main(del_exitfunc=False):
- """Start the Python execution server in a subprocess
-
- In the Python subprocess, RPCServer is instantiated with handlerclass
- MyHandler, which inherits register/unregister methods from RPCHandler via
- the mix-in class SocketIO.
-
- When the RPCServer 'server' is instantiated, the TCPServer initialization
- creates an instance of run.MyHandler and calls its handle() method.
- handle() instantiates a run.Executive object, passing it a reference to the
- MyHandler object. That reference is saved as attribute rpchandler of the
- Executive instance. The Executive methods have access to the reference and
- can pass it on to entities that they command
- (e.g. RemoteDebugger.Debugger.start_debugger()). The latter, in turn, can
- call MyHandler(SocketIO) register/unregister methods via the reference to
- register and unregister themselves.
-
- """
- global exit_now
- global quitting
- global no_exitfunc
- no_exitfunc = del_exitfunc
- port = 8833
- #time.sleep(15) # test subprocess not responding
- if sys.argv[1:]:
- port = int(sys.argv[1])
- sys.argv[:] = [""]
- sockthread = threading.Thread(target=manage_socket,
- name='SockThread',
- args=((LOCALHOST, port),))
- sockthread.setDaemon(True)
- sockthread.start()
- while 1:
- try:
- if exit_now:
- try:
- exit()
- except KeyboardInterrupt:
- # exiting but got an extra KBI? Try again!
- continue
- try:
- seq, request = rpc.request_queue.get(block=True, timeout=0.05)
- except Queue.Empty:
- continue
- method, args, kwargs = request
- ret = method(*args, **kwargs)
- rpc.response_queue.put((seq, ret))
- except KeyboardInterrupt:
- if quitting:
- exit_now = True
- continue
- except SystemExit:
- raise
- except:
- type, value, tb = sys.exc_info()
- try:
- print_exception()
- rpc.response_queue.put((seq, None))
- except:
- # Link didn't work, print same exception to __stderr__
- traceback.print_exception(type, value, tb, file=sys.__stderr__)
- exit()
- else:
- continue
-
-def manage_socket(address):
- for i in range(3):
- time.sleep(i)
- try:
- server = MyRPCServer(address, MyHandler)
- break
- except socket.error, err:
- print>>sys.__stderr__,"IDLE Subprocess: socket error: "\
- + err[1] + ", retrying...."
- else:
- print>>sys.__stderr__, "IDLE Subprocess: Connection to "\
- "IDLE GUI failed, exiting."
- show_socket_error(err, address)
- global exit_now
- exit_now = True
- return
- server.handle_request() # A single request only
-
-def show_socket_error(err, address):
- import Tkinter
- import tkMessageBox
- root = Tkinter.Tk()
- root.withdraw()
- if err[0] == 61: # connection refused
- msg = "IDLE's subprocess can't connect to %s:%d. This may be due "\
- "to your personal firewall configuration. It is safe to "\
- "allow this internal connection because no data is visible on "\
- "external ports." % address
- tkMessageBox.showerror("IDLE Subprocess Error", msg, parent=root)
- else:
- tkMessageBox.showerror("IDLE Subprocess Error", "Socket Error: %s" % err[1])
- root.destroy()
-
-def print_exception():
- import linecache
- linecache.checkcache()
- flush_stdout()
- efile = sys.stderr
- typ, val, tb = excinfo = sys.exc_info()
- sys.last_type, sys.last_value, sys.last_traceback = excinfo
- tbe = traceback.extract_tb(tb)
- print>>efile, '\nTraceback (most recent call last):'
- exclude = ("run.py", "rpc.py", "threading.py", "Queue.py",
- "RemoteDebugger.py", "bdb.py")
- cleanup_traceback(tbe, exclude)
- traceback.print_list(tbe, file=efile)
- lines = traceback.format_exception_only(typ, val)
- for line in lines:
- print>>efile, line,
-
-def cleanup_traceback(tb, exclude):
- "Remove excluded traces from beginning/end of tb; get cached lines"
- orig_tb = tb[:]
- while tb:
- for rpcfile in exclude:
- if tb[0][0].count(rpcfile):
- break # found an exclude, break for: and delete tb[0]
- else:
- break # no excludes, have left RPC code, break while:
- del tb[0]
- while tb:
- for rpcfile in exclude:
- if tb[-1][0].count(rpcfile):
- break
- else:
- break
- del tb[-1]
- if len(tb) == 0:
- # exception was in IDLE internals, don't prune!
- tb[:] = orig_tb[:]
- print>>sys.stderr, "** IDLE Internal Exception: "
- rpchandler = rpc.objecttable['exec'].rpchandler
- for i in range(len(tb)):
- fn, ln, nm, line = tb[i]
- if nm == '?':
- nm = "-toplevel-"
- if not line and fn.startswith("<pyshell#"):
- line = rpchandler.remotecall('linecache', 'getline',
- (fn, ln), {})
- tb[i] = fn, ln, nm, line
-
-def flush_stdout():
- try:
- if sys.stdout.softspace:
- sys.stdout.softspace = 0
- sys.stdout.write("\n")
- except (AttributeError, EOFError):
- pass
-
-def exit():
- """Exit subprocess, possibly after first deleting sys.exitfunc
-
- If config-main.cfg/.def 'General' 'delete-exitfunc' is True, then any
- sys.exitfunc will be removed before exiting. (VPython support)
-
- """
- if no_exitfunc:
- del sys.exitfunc
- sys.exit(0)
-
-class MyRPCServer(rpc.RPCServer):
-
- def handle_error(self, request, client_address):
- """Override RPCServer method for IDLE
-
- Interrupt the MainThread and exit server if link is dropped.
-
- """
- global quitting
- try:
- raise
- except SystemExit:
- raise
- except EOFError:
- global exit_now
- exit_now = True
- thread.interrupt_main()
- except:
- erf = sys.__stderr__
- print>>erf, '\n' + '-'*40
- print>>erf, 'Unhandled server exception!'
- print>>erf, 'Thread: %s' % threading.currentThread().getName()
- print>>erf, 'Client Address: ', client_address
- print>>erf, 'Request: ', repr(request)
- traceback.print_exc(file=erf)
- print>>erf, '\n*** Unrecoverable, server exiting!'
- print>>erf, '-'*40
- quitting = True
- thread.interrupt_main()
-
-
-class MyHandler(rpc.RPCHandler):
-
- def handle(self):
- """Override base method"""
- executive = Executive(self)
- self.register("exec", executive)
- sys.stdin = self.console = self.get_remote_proxy("stdin")
- sys.stdout = self.get_remote_proxy("stdout")
- sys.stderr = self.get_remote_proxy("stderr")
- import IOBinding
- sys.stdin.encoding = sys.stdout.encoding = \
- sys.stderr.encoding = IOBinding.encoding
- self.interp = self.get_remote_proxy("interp")
- rpc.RPCHandler.getresponse(self, myseq=None, wait=0.05)
-
- def exithook(self):
- "override SocketIO method - wait for MainThread to shut us down"
- time.sleep(10)
-
- def EOFhook(self):
- "Override SocketIO method - terminate wait on callback and exit thread"
- global quitting
- quitting = True
- thread.interrupt_main()
-
- def decode_interrupthook(self):
- "interrupt awakened thread"
- global quitting
- quitting = True
- thread.interrupt_main()
-
-
-class Executive(object):
-
- def __init__(self, rpchandler):
- self.rpchandler = rpchandler
- self.locals = __main__.__dict__
- self.calltip = CallTips.CallTips()
- self.autocomplete = AutoComplete.AutoComplete()
-
- def runcode(self, code):
- try:
- self.usr_exc_info = None
- exec code in self.locals
- except:
- self.usr_exc_info = sys.exc_info()
- if quitting:
- exit()
- # even print a user code SystemExit exception, continue
- print_exception()
- jit = self.rpchandler.console.getvar("<<toggle-jit-stack-viewer>>")
- if jit:
- self.rpchandler.interp.open_remote_stack_viewer()
- else:
- flush_stdout()
-
- def interrupt_the_server(self):
- thread.interrupt_main()
-
- def start_the_debugger(self, gui_adap_oid):
- return RemoteDebugger.start_debugger(self.rpchandler, gui_adap_oid)
-
- def stop_the_debugger(self, idb_adap_oid):
- "Unregister the Idb Adapter. Link objects and Idb then subject to GC"
- self.rpchandler.unregister(idb_adap_oid)
-
- def get_the_calltip(self, name):
- return self.calltip.fetch_tip(name)
-
- def get_the_completion_list(self, what, mode):
- return self.autocomplete.fetch_completions(what, mode)
-
- def stackviewer(self, flist_oid=None):
- if self.usr_exc_info:
- typ, val, tb = self.usr_exc_info
- else:
- return None
- flist = None
- if flist_oid is not None:
- flist = self.rpchandler.get_remote_proxy(flist_oid)
- while tb and tb.tb_frame.f_globals["__name__"] in ["rpc", "run"]:
- tb = tb.tb_next
- sys.last_type = typ
- sys.last_value = val
- item = StackViewer.StackTreeItem(flist, tb)
- return RemoteObjectBrowser.remote_object_tree_item(item)
diff --git a/sys/lib/python/idlelib/tabpage.py b/sys/lib/python/idlelib/tabpage.py
deleted file mode 100644
index 12f89291d..000000000
--- a/sys/lib/python/idlelib/tabpage.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""
-a couple of classes for implementing partial tabbed-page like behaviour
-"""
-
-from Tkinter import *
-
-class InvalidTabPage(Exception): pass
-class AlreadyExists(Exception): pass
-
-class PageTab(Frame):
- """
- a 'page tab' like framed button
- """
- def __init__(self,parent):
- Frame.__init__(self, parent,borderwidth=2,relief=RIDGE)
- self.button=Radiobutton(self,padx=5,pady=5,takefocus=FALSE,
- indicatoron=FALSE,highlightthickness=0,
- borderwidth=0,selectcolor=self.cget('bg'))
- self.button.pack()
-
-class TabPageSet(Frame):
- """
- a set of 'pages' with TabButtons for controlling their display
- """
- def __init__(self,parent,pageNames=[],**kw):
- """
- pageNames - a list of strings, each string will be the dictionary key
- to a page's data, and the name displayed on the page's tab. Should be
- specified in desired page order. The first page will be the default
- and first active page.
- """
- Frame.__init__(self, parent, kw)
- self.grid_location(0,0)
- self.columnconfigure(0,weight=1)
- self.rowconfigure(1,weight=1)
- self.tabBar=Frame(self)
- self.tabBar.grid(row=0,column=0,sticky=EW)
- self.activePage=StringVar(self)
- self.defaultPage=''
- self.pages={}
- for name in pageNames:
- self.AddPage(name)
-
- def ChangePage(self,pageName=None):
- if pageName:
- if pageName in self.pages.keys():
- self.activePage.set(pageName)
- else:
- raise InvalidTabPage, 'Invalid TabPage Name'
- ## pop up the active 'tab' only
- for page in self.pages.keys():
- self.pages[page]['tab'].config(relief=RIDGE)
- self.pages[self.GetActivePage()]['tab'].config(relief=RAISED)
- ## switch page
- self.pages[self.GetActivePage()]['page'].lift()
-
- def GetActivePage(self):
- return self.activePage.get()
-
- def AddPage(self,pageName):
- if pageName in self.pages.keys():
- raise AlreadyExists, 'TabPage Name Already Exists'
- self.pages[pageName]={'tab':PageTab(self.tabBar),
- 'page':Frame(self,borderwidth=2,relief=RAISED)}
- self.pages[pageName]['tab'].button.config(text=pageName,
- command=self.ChangePage,variable=self.activePage,
- value=pageName)
- self.pages[pageName]['tab'].pack(side=LEFT)
- self.pages[pageName]['page'].grid(row=1,column=0,sticky=NSEW)
- if len(self.pages)==1: # adding first page
- self.defaultPage=pageName
- self.activePage.set(self.defaultPage)
- self.ChangePage()
-
- def RemovePage(self,pageName):
- if not pageName in self.pages.keys():
- raise InvalidTabPage, 'Invalid TabPage Name'
- self.pages[pageName]['tab'].pack_forget()
- self.pages[pageName]['page'].grid_forget()
- self.pages[pageName]['tab'].destroy()
- self.pages[pageName]['page'].destroy()
- del(self.pages[pageName])
- # handle removing last remaining, or default, or active page
- if not self.pages: # removed last remaining page
- self.defaultPage=''
- return
- if pageName==self.defaultPage: # set a new default page
- self.defaultPage=\
- self.tabBar.winfo_children()[0].button.cget('text')
- if pageName==self.GetActivePage(): # set a new active page
- self.activePage.set(self.defaultPage)
- self.ChangePage()
-
-if __name__ == '__main__':
- #test dialog
- root=Tk()
- tabPage=TabPageSet(root,pageNames=['Foobar','Baz'])
- tabPage.pack(expand=TRUE,fill=BOTH)
- Label(tabPage.pages['Foobar']['page'],text='Foo',pady=20).pack()
- Label(tabPage.pages['Foobar']['page'],text='Bar',pady=20).pack()
- Label(tabPage.pages['Baz']['page'],text='Baz').pack()
- entryPgName=Entry(root)
- buttonAdd=Button(root,text='Add Page',
- command=lambda:tabPage.AddPage(entryPgName.get()))
- buttonRemove=Button(root,text='Remove Page',
- command=lambda:tabPage.RemovePage(entryPgName.get()))
- labelPgName=Label(root,text='name of page to add/remove:')
- buttonAdd.pack(padx=5,pady=5)
- buttonRemove.pack(padx=5,pady=5)
- labelPgName.pack(padx=5)
- entryPgName.pack(padx=5)
- tabPage.ChangePage()
- root.mainloop()
diff --git a/sys/lib/python/idlelib/testcode.py b/sys/lib/python/idlelib/testcode.py
deleted file mode 100644
index 05eaa562c..000000000
--- a/sys/lib/python/idlelib/testcode.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import string
-
-def f():
- a = 0
- b = 1
- c = 2
- d = 3
- e = 4
- g()
-
-def g():
- h()
-
-def h():
- i()
-
-def i():
- j()
-
-def j():
- k()
-
-def k():
- l()
-
-l = lambda: test()
-
-def test():
- string.capwords(1)
-
-f()
diff --git a/sys/lib/python/idlelib/textView.py b/sys/lib/python/idlelib/textView.py
deleted file mode 100644
index 917a6cc0c..000000000
--- a/sys/lib/python/idlelib/textView.py
+++ /dev/null
@@ -1,78 +0,0 @@
-"""Simple text browser for IDLE
-
-"""
-
-from Tkinter import *
-import tkMessageBox
-
-class TextViewer(Toplevel):
- """
- simple text viewer dialog for idle
- """
- def __init__(self, parent, title, fileName, data=None):
- """If data exists, load it into viewer, otherwise try to load file.
-
- fileName - string, should be an absoulute filename
- """
- Toplevel.__init__(self, parent)
- self.configure(borderwidth=5)
- self.geometry("=%dx%d+%d+%d" % (625, 500,
- parent.winfo_rootx() + 10,
- parent.winfo_rooty() + 10))
- #elguavas - config placeholders til config stuff completed
- self.bg = '#ffffff'
- self.fg = '#000000'
-
- self.CreateWidgets()
- self.title(title)
- self.transient(parent)
- self.grab_set()
- self.protocol("WM_DELETE_WINDOW", self.Ok)
- self.parent = parent
- self.textView.focus_set()
- #key bindings for this dialog
- self.bind('<Return>',self.Ok) #dismiss dialog
- self.bind('<Escape>',self.Ok) #dismiss dialog
- if data:
- self.textView.insert(0.0, data)
- else:
- self.LoadTextFile(fileName)
- self.textView.config(state=DISABLED)
- self.wait_window()
-
- def LoadTextFile(self, fileName):
- textFile = None
- try:
- textFile = open(fileName, 'r')
- except IOError:
- tkMessageBox.showerror(title='File Load Error',
- message='Unable to load file %r .' % (fileName,))
- else:
- self.textView.insert(0.0,textFile.read())
-
- def CreateWidgets(self):
- frameText = Frame(self, relief=SUNKEN, height=700)
- frameButtons = Frame(self)
- self.buttonOk = Button(frameButtons, text='Close',
- command=self.Ok, takefocus=FALSE)
- self.scrollbarView = Scrollbar(frameText, orient=VERTICAL,
- takefocus=FALSE, highlightthickness=0)
- self.textView = Text(frameText, wrap=WORD, highlightthickness=0,
- fg=self.fg, bg=self.bg)
- self.scrollbarView.config(command=self.textView.yview)
- self.textView.config(yscrollcommand=self.scrollbarView.set)
- self.buttonOk.pack()
- self.scrollbarView.pack(side=RIGHT,fill=Y)
- self.textView.pack(side=LEFT,expand=TRUE,fill=BOTH)
- frameButtons.pack(side=BOTTOM,fill=X)
- frameText.pack(side=TOP,expand=TRUE,fill=BOTH)
-
- def Ok(self, event=None):
- self.destroy()
-
-if __name__ == '__main__':
- #test the dialog
- root=Tk()
- Button(root,text='View',
- command=lambda:TextViewer(root,'Text','./textView.py')).pack()
- root.mainloop()
diff --git a/sys/lib/python/ihooks.py b/sys/lib/python/ihooks.py
deleted file mode 100644
index f5b93ab9c..000000000
--- a/sys/lib/python/ihooks.py
+++ /dev/null
@@ -1,520 +0,0 @@
-"""Import hook support.
-
-Consistent use of this module will make it possible to change the
-different mechanisms involved in loading modules independently.
-
-While the built-in module imp exports interfaces to the built-in
-module searching and loading algorithm, and it is possible to replace
-the built-in function __import__ in order to change the semantics of
-the import statement, until now it has been difficult to combine the
-effect of different __import__ hacks, like loading modules from URLs
-by rimport.py, or restricted execution by rexec.py.
-
-This module defines three new concepts:
-
-1) A "file system hooks" class provides an interface to a filesystem.
-
-One hooks class is defined (Hooks), which uses the interface provided
-by standard modules os and os.path. It should be used as the base
-class for other hooks classes.
-
-2) A "module loader" class provides an interface to search for a
-module in a search path and to load it. It defines a method which
-searches for a module in a single directory; by overriding this method
-one can redefine the details of the search. If the directory is None,
-built-in and frozen modules are searched instead.
-
-Two module loader class are defined, both implementing the search
-strategy used by the built-in __import__ function: ModuleLoader uses
-the imp module's find_module interface, while HookableModuleLoader
-uses a file system hooks class to interact with the file system. Both
-use the imp module's load_* interfaces to actually load the module.
-
-3) A "module importer" class provides an interface to import a
-module, as well as interfaces to reload and unload a module. It also
-provides interfaces to install and uninstall itself instead of the
-default __import__ and reload (and unload) functions.
-
-One module importer class is defined (ModuleImporter), which uses a
-module loader instance passed in (by default HookableModuleLoader is
-instantiated).
-
-The classes defined here should be used as base classes for extended
-functionality along those lines.
-
-If a module importer class supports dotted names, its import_module()
-must return a different value depending on whether it is called on
-behalf of a "from ... import ..." statement or not. (This is caused
-by the way the __import__ hook is used by the Python interpreter.) It
-would also do wise to install a different version of reload().
-
-"""
-
-
-import __builtin__
-import imp
-import os
-import sys
-
-__all__ = ["BasicModuleLoader","Hooks","ModuleLoader","FancyModuleLoader",
- "BasicModuleImporter","ModuleImporter","install","uninstall"]
-
-VERBOSE = 0
-
-
-from imp import C_EXTENSION, PY_SOURCE, PY_COMPILED
-from imp import C_BUILTIN, PY_FROZEN, PKG_DIRECTORY
-BUILTIN_MODULE = C_BUILTIN
-FROZEN_MODULE = PY_FROZEN
-
-
-class _Verbose:
-
- def __init__(self, verbose = VERBOSE):
- self.verbose = verbose
-
- def get_verbose(self):
- return self.verbose
-
- def set_verbose(self, verbose):
- self.verbose = verbose
-
- # XXX The following is an experimental interface
-
- def note(self, *args):
- if self.verbose:
- self.message(*args)
-
- def message(self, format, *args):
- if args:
- print format%args
- else:
- print format
-
-
-class BasicModuleLoader(_Verbose):
-
- """Basic module loader.
-
- This provides the same functionality as built-in import. It
- doesn't deal with checking sys.modules -- all it provides is
- find_module() and a load_module(), as well as find_module_in_dir()
- which searches just one directory, and can be overridden by a
- derived class to change the module search algorithm when the basic
- dependency on sys.path is unchanged.
-
- The interface is a little more convenient than imp's:
- find_module(name, [path]) returns None or 'stuff', and
- load_module(name, stuff) loads the module.
-
- """
-
- def find_module(self, name, path = None):
- if path is None:
- path = [None] + self.default_path()
- for dir in path:
- stuff = self.find_module_in_dir(name, dir)
- if stuff: return stuff
- return None
-
- def default_path(self):
- return sys.path
-
- def find_module_in_dir(self, name, dir):
- if dir is None:
- return self.find_builtin_module(name)
- else:
- try:
- return imp.find_module(name, [dir])
- except ImportError:
- return None
-
- def find_builtin_module(self, name):
- # XXX frozen packages?
- if imp.is_builtin(name):
- return None, '', ('', '', BUILTIN_MODULE)
- if imp.is_frozen(name):
- return None, '', ('', '', FROZEN_MODULE)
- return None
-
- def load_module(self, name, stuff):
- file, filename, info = stuff
- try:
- return imp.load_module(name, file, filename, info)
- finally:
- if file: file.close()
-
-
-class Hooks(_Verbose):
-
- """Hooks into the filesystem and interpreter.
-
- By deriving a subclass you can redefine your filesystem interface,
- e.g. to merge it with the URL space.
-
- This base class behaves just like the native filesystem.
-
- """
-
- # imp interface
- def get_suffixes(self): return imp.get_suffixes()
- def new_module(self, name): return imp.new_module(name)
- def is_builtin(self, name): return imp.is_builtin(name)
- def init_builtin(self, name): return imp.init_builtin(name)
- def is_frozen(self, name): return imp.is_frozen(name)
- def init_frozen(self, name): return imp.init_frozen(name)
- def get_frozen_object(self, name): return imp.get_frozen_object(name)
- def load_source(self, name, filename, file=None):
- return imp.load_source(name, filename, file)
- def load_compiled(self, name, filename, file=None):
- return imp.load_compiled(name, filename, file)
- def load_dynamic(self, name, filename, file=None):
- return imp.load_dynamic(name, filename, file)
- def load_package(self, name, filename, file=None):
- return imp.load_module(name, file, filename, ("", "", PKG_DIRECTORY))
-
- def add_module(self, name):
- d = self.modules_dict()
- if name in d: return d[name]
- d[name] = m = self.new_module(name)
- return m
-
- # sys interface
- def modules_dict(self): return sys.modules
- def default_path(self): return sys.path
-
- def path_split(self, x): return os.path.split(x)
- def path_join(self, x, y): return os.path.join(x, y)
- def path_isabs(self, x): return os.path.isabs(x)
- # etc.
-
- def path_exists(self, x): return os.path.exists(x)
- def path_isdir(self, x): return os.path.isdir(x)
- def path_isfile(self, x): return os.path.isfile(x)
- def path_islink(self, x): return os.path.islink(x)
- # etc.
-
- def openfile(self, *x): return open(*x)
- openfile_error = IOError
- def listdir(self, x): return os.listdir(x)
- listdir_error = os.error
- # etc.
-
-
-class ModuleLoader(BasicModuleLoader):
-
- """Default module loader; uses file system hooks.
-
- By defining suitable hooks, you might be able to load modules from
- other sources than the file system, e.g. from compressed or
- encrypted files, tar files or (if you're brave!) URLs.
-
- """
-
- def __init__(self, hooks = None, verbose = VERBOSE):
- BasicModuleLoader.__init__(self, verbose)
- self.hooks = hooks or Hooks(verbose)
-
- def default_path(self):
- return self.hooks.default_path()
-
- def modules_dict(self):
- return self.hooks.modules_dict()
-
- def get_hooks(self):
- return self.hooks
-
- def set_hooks(self, hooks):
- self.hooks = hooks
-
- def find_builtin_module(self, name):
- # XXX frozen packages?
- if self.hooks.is_builtin(name):
- return None, '', ('', '', BUILTIN_MODULE)
- if self.hooks.is_frozen(name):
- return None, '', ('', '', FROZEN_MODULE)
- return None
-
- def find_module_in_dir(self, name, dir, allow_packages=1):
- if dir is None:
- return self.find_builtin_module(name)
- if allow_packages:
- fullname = self.hooks.path_join(dir, name)
- if self.hooks.path_isdir(fullname):
- stuff = self.find_module_in_dir("__init__", fullname, 0)
- if stuff:
- file = stuff[0]
- if file: file.close()
- return None, fullname, ('', '', PKG_DIRECTORY)
- for info in self.hooks.get_suffixes():
- suff, mode, type = info
- fullname = self.hooks.path_join(dir, name+suff)
- try:
- fp = self.hooks.openfile(fullname, mode)
- return fp, fullname, info
- except self.hooks.openfile_error:
- pass
- return None
-
- def load_module(self, name, stuff):
- file, filename, info = stuff
- (suff, mode, type) = info
- try:
- if type == BUILTIN_MODULE:
- return self.hooks.init_builtin(name)
- if type == FROZEN_MODULE:
- return self.hooks.init_frozen(name)
- if type == C_EXTENSION:
- m = self.hooks.load_dynamic(name, filename, file)
- elif type == PY_SOURCE:
- m = self.hooks.load_source(name, filename, file)
- elif type == PY_COMPILED:
- m = self.hooks.load_compiled(name, filename, file)
- elif type == PKG_DIRECTORY:
- m = self.hooks.load_package(name, filename, file)
- else:
- raise ImportError, "Unrecognized module type (%r) for %s" % \
- (type, name)
- finally:
- if file: file.close()
- m.__file__ = filename
- return m
-
-
-class FancyModuleLoader(ModuleLoader):
-
- """Fancy module loader -- parses and execs the code itself."""
-
- def load_module(self, name, stuff):
- file, filename, (suff, mode, type) = stuff
- realfilename = filename
- path = None
-
- if type == PKG_DIRECTORY:
- initstuff = self.find_module_in_dir("__init__", filename, 0)
- if not initstuff:
- raise ImportError, "No __init__ module in package %s" % name
- initfile, initfilename, initinfo = initstuff
- initsuff, initmode, inittype = initinfo
- if inittype not in (PY_COMPILED, PY_SOURCE):
- if initfile: initfile.close()
- raise ImportError, \
- "Bad type (%r) for __init__ module in package %s" % (
- inittype, name)
- path = [filename]
- file = initfile
- realfilename = initfilename
- type = inittype
-
- if type == FROZEN_MODULE:
- code = self.hooks.get_frozen_object(name)
- elif type == PY_COMPILED:
- import marshal
- file.seek(8)
- code = marshal.load(file)
- elif type == PY_SOURCE:
- data = file.read()
- code = compile(data, realfilename, 'exec')
- else:
- return ModuleLoader.load_module(self, name, stuff)
-
- m = self.hooks.add_module(name)
- if path:
- m.__path__ = path
- m.__file__ = filename
- try:
- exec code in m.__dict__
- except:
- d = self.hooks.modules_dict()
- if name in d:
- del d[name]
- raise
- return m
-
-
-class BasicModuleImporter(_Verbose):
-
- """Basic module importer; uses module loader.
-
- This provides basic import facilities but no package imports.
-
- """
-
- def __init__(self, loader = None, verbose = VERBOSE):
- _Verbose.__init__(self, verbose)
- self.loader = loader or ModuleLoader(None, verbose)
- self.modules = self.loader.modules_dict()
-
- def get_loader(self):
- return self.loader
-
- def set_loader(self, loader):
- self.loader = loader
-
- def get_hooks(self):
- return self.loader.get_hooks()
-
- def set_hooks(self, hooks):
- return self.loader.set_hooks(hooks)
-
- def import_module(self, name, globals={}, locals={}, fromlist=[]):
- name = str(name)
- if name in self.modules:
- return self.modules[name] # Fast path
- stuff = self.loader.find_module(name)
- if not stuff:
- raise ImportError, "No module named %s" % name
- return self.loader.load_module(name, stuff)
-
- def reload(self, module, path = None):
- name = str(module.__name__)
- stuff = self.loader.find_module(name, path)
- if not stuff:
- raise ImportError, "Module %s not found for reload" % name
- return self.loader.load_module(name, stuff)
-
- def unload(self, module):
- del self.modules[str(module.__name__)]
- # XXX Should this try to clear the module's namespace?
-
- def install(self):
- self.save_import_module = __builtin__.__import__
- self.save_reload = __builtin__.reload
- if not hasattr(__builtin__, 'unload'):
- __builtin__.unload = None
- self.save_unload = __builtin__.unload
- __builtin__.__import__ = self.import_module
- __builtin__.reload = self.reload
- __builtin__.unload = self.unload
-
- def uninstall(self):
- __builtin__.__import__ = self.save_import_module
- __builtin__.reload = self.save_reload
- __builtin__.unload = self.save_unload
- if not __builtin__.unload:
- del __builtin__.unload
-
-
-class ModuleImporter(BasicModuleImporter):
-
- """A module importer that supports packages."""
-
- def import_module(self, name, globals=None, locals=None, fromlist=None):
- parent = self.determine_parent(globals)
- q, tail = self.find_head_package(parent, str(name))
- m = self.load_tail(q, tail)
- if not fromlist:
- return q
- if hasattr(m, "__path__"):
- self.ensure_fromlist(m, fromlist)
- return m
-
- def determine_parent(self, globals):
- if not globals or not "__name__" in globals:
- return None
- pname = globals['__name__']
- if "__path__" in globals:
- parent = self.modules[pname]
- assert globals is parent.__dict__
- return parent
- if '.' in pname:
- i = pname.rfind('.')
- pname = pname[:i]
- parent = self.modules[pname]
- assert parent.__name__ == pname
- return parent
- return None
-
- def find_head_package(self, parent, name):
- if '.' in name:
- i = name.find('.')
- head = name[:i]
- tail = name[i+1:]
- else:
- head = name
- tail = ""
- if parent:
- qname = "%s.%s" % (parent.__name__, head)
- else:
- qname = head
- q = self.import_it(head, qname, parent)
- if q: return q, tail
- if parent:
- qname = head
- parent = None
- q = self.import_it(head, qname, parent)
- if q: return q, tail
- raise ImportError, "No module named " + qname
-
- def load_tail(self, q, tail):
- m = q
- while tail:
- i = tail.find('.')
- if i < 0: i = len(tail)
- head, tail = tail[:i], tail[i+1:]
- mname = "%s.%s" % (m.__name__, head)
- m = self.import_it(head, mname, m)
- if not m:
- raise ImportError, "No module named " + mname
- return m
-
- def ensure_fromlist(self, m, fromlist, recursive=0):
- for sub in fromlist:
- if sub == "*":
- if not recursive:
- try:
- all = m.__all__
- except AttributeError:
- pass
- else:
- self.ensure_fromlist(m, all, 1)
- continue
- if sub != "*" and not hasattr(m, sub):
- subname = "%s.%s" % (m.__name__, sub)
- submod = self.import_it(sub, subname, m)
- if not submod:
- raise ImportError, "No module named " + subname
-
- def import_it(self, partname, fqname, parent, force_load=0):
- if not partname:
- raise ValueError, "Empty module name"
- if not force_load:
- try:
- return self.modules[fqname]
- except KeyError:
- pass
- try:
- path = parent and parent.__path__
- except AttributeError:
- return None
- partname = str(partname)
- stuff = self.loader.find_module(partname, path)
- if not stuff:
- return None
- fqname = str(fqname)
- m = self.loader.load_module(fqname, stuff)
- if parent:
- setattr(parent, partname, m)
- return m
-
- def reload(self, module):
- name = str(module.__name__)
- if '.' not in name:
- return self.import_it(name, name, None, force_load=1)
- i = name.rfind('.')
- pname = name[:i]
- parent = self.modules[pname]
- return self.import_it(name[i+1:], name, parent, force_load=1)
-
-
-default_importer = None
-current_importer = None
-
-def install(importer = None):
- global current_importer
- current_importer = importer or default_importer or ModuleImporter()
- current_importer.install()
-
-def uninstall():
- global current_importer
- current_importer.uninstall()
diff --git a/sys/lib/python/imaplib.py b/sys/lib/python/imaplib.py
deleted file mode 100644
index 08e15207a..000000000
--- a/sys/lib/python/imaplib.py
+++ /dev/null
@@ -1,1499 +0,0 @@
-"""IMAP4 client.
-
-Based on RFC 2060.
-
-Public class: IMAP4
-Public variable: Debug
-Public functions: Internaldate2tuple
- Int2AP
- ParseFlags
- Time2Internaldate
-"""
-
-# Author: Piers Lauder <piers@cs.su.oz.au> December 1997.
-#
-# Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
-# String method conversion by ESR, February 2001.
-# GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
-# IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
-# GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
-# PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
-# GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005.
-
-__version__ = "2.58"
-
-import binascii, os, random, re, socket, sys, time
-
-__all__ = ["IMAP4", "IMAP4_SSL", "IMAP4_stream", "Internaldate2tuple",
- "Int2AP", "ParseFlags", "Time2Internaldate"]
-
-# Globals
-
-CRLF = '\r\n'
-Debug = 0
-IMAP4_PORT = 143
-IMAP4_SSL_PORT = 993
-AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first
-
-# Commands
-
-Commands = {
- # name valid states
- 'APPEND': ('AUTH', 'SELECTED'),
- 'AUTHENTICATE': ('NONAUTH',),
- 'CAPABILITY': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
- 'CHECK': ('SELECTED',),
- 'CLOSE': ('SELECTED',),
- 'COPY': ('SELECTED',),
- 'CREATE': ('AUTH', 'SELECTED'),
- 'DELETE': ('AUTH', 'SELECTED'),
- 'DELETEACL': ('AUTH', 'SELECTED'),
- 'EXAMINE': ('AUTH', 'SELECTED'),
- 'EXPUNGE': ('SELECTED',),
- 'FETCH': ('SELECTED',),
- 'GETACL': ('AUTH', 'SELECTED'),
- 'GETANNOTATION':('AUTH', 'SELECTED'),
- 'GETQUOTA': ('AUTH', 'SELECTED'),
- 'GETQUOTAROOT': ('AUTH', 'SELECTED'),
- 'MYRIGHTS': ('AUTH', 'SELECTED'),
- 'LIST': ('AUTH', 'SELECTED'),
- 'LOGIN': ('NONAUTH',),
- 'LOGOUT': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
- 'LSUB': ('AUTH', 'SELECTED'),
- 'NAMESPACE': ('AUTH', 'SELECTED'),
- 'NOOP': ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
- 'PARTIAL': ('SELECTED',), # NB: obsolete
- 'PROXYAUTH': ('AUTH',),
- 'RENAME': ('AUTH', 'SELECTED'),
- 'SEARCH': ('SELECTED',),
- 'SELECT': ('AUTH', 'SELECTED'),
- 'SETACL': ('AUTH', 'SELECTED'),
- 'SETANNOTATION':('AUTH', 'SELECTED'),
- 'SETQUOTA': ('AUTH', 'SELECTED'),
- 'SORT': ('SELECTED',),
- 'STATUS': ('AUTH', 'SELECTED'),
- 'STORE': ('SELECTED',),
- 'SUBSCRIBE': ('AUTH', 'SELECTED'),
- 'THREAD': ('SELECTED',),
- 'UID': ('SELECTED',),
- 'UNSUBSCRIBE': ('AUTH', 'SELECTED'),
- }
-
-# Patterns to match server responses
-
-Continuation = re.compile(r'\+( (?P<data>.*))?')
-Flags = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)')
-InternalDate = re.compile(r'.*INTERNALDATE "'
- r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
- r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
- r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
- r'"')
-Literal = re.compile(r'.*{(?P<size>\d+)}$')
-MapCRLF = re.compile(r'\r\n|\r|\n')
-Response_code = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
-Untagged_response = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
-Untagged_status = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?')
-
-
-
-class IMAP4:
-
- """IMAP4 client class.
-
- Instantiate with: IMAP4([host[, port]])
-
- host - host's name (default: localhost);
- port - port number (default: standard IMAP4 port).
-
- All IMAP4rev1 commands are supported by methods of the same
- name (in lower-case).
-
- All arguments to commands are converted to strings, except for
- AUTHENTICATE, and the last argument to APPEND which is passed as
- an IMAP4 literal. If necessary (the string contains any
- non-printing characters or white-space and isn't enclosed with
- either parentheses or double quotes) each string is quoted.
- However, the 'password' argument to the LOGIN command is always
- quoted. If you want to avoid having an argument string quoted
- (eg: the 'flags' argument to STORE) then enclose the string in
- parentheses (eg: "(\Deleted)").
-
- Each command returns a tuple: (type, [data, ...]) where 'type'
- is usually 'OK' or 'NO', and 'data' is either the text from the
- tagged response, or untagged results from command. Each 'data'
- is either a string, or a tuple. If a tuple, then the first part
- is the header of the response, and the second part contains
- the data (ie: 'literal' value).
-
- Errors raise the exception class <instance>.error("<reason>").
- IMAP4 server errors raise <instance>.abort("<reason>"),
- which is a sub-class of 'error'. Mailbox status changes
- from READ-WRITE to READ-ONLY raise the exception class
- <instance>.readonly("<reason>"), which is a sub-class of 'abort'.
-
- "error" exceptions imply a program error.
- "abort" exceptions imply the connection should be reset, and
- the command re-tried.
- "readonly" exceptions imply the command should be re-tried.
-
- Note: to use this module, you must read the RFCs pertaining to the
- IMAP4 protocol, as the semantics of the arguments to each IMAP4
- command are left to the invoker, not to mention the results. Also,
- most IMAP servers implement a sub-set of the commands available here.
- """
-
- class error(Exception): pass # Logical errors - debug required
- class abort(error): pass # Service errors - close and retry
- class readonly(abort): pass # Mailbox status changed to READ-ONLY
-
- mustquote = re.compile(r"[^\w!#$%&'*+,.:;<=>?^`|~-]")
-
- def __init__(self, host = '', port = IMAP4_PORT):
- self.debug = Debug
- self.state = 'LOGOUT'
- self.literal = None # A literal argument to a command
- self.tagged_commands = {} # Tagged commands awaiting response
- self.untagged_responses = {} # {typ: [data, ...], ...}
- self.continuation_response = '' # Last continuation response
- self.is_readonly = False # READ-ONLY desired state
- self.tagnum = 0
-
- # Open socket to server.
-
- self.open(host, port)
-
- # Create unique tag for this session,
- # and compile tagged response matcher.
-
- self.tagpre = Int2AP(random.randint(4096, 65535))
- self.tagre = re.compile(r'(?P<tag>'
- + self.tagpre
- + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)')
-
- # Get server welcome message,
- # request and store CAPABILITY response.
-
- if __debug__:
- self._cmd_log_len = 10
- self._cmd_log_idx = 0
- self._cmd_log = {} # Last `_cmd_log_len' interactions
- if self.debug >= 1:
- self._mesg('imaplib version %s' % __version__)
- self._mesg('new IMAP4 connection, tag=%s' % self.tagpre)
-
- self.welcome = self._get_response()
- if 'PREAUTH' in self.untagged_responses:
- self.state = 'AUTH'
- elif 'OK' in self.untagged_responses:
- self.state = 'NONAUTH'
- else:
- raise self.error(self.welcome)
-
- typ, dat = self.capability()
- if dat == [None]:
- raise self.error('no CAPABILITY response from server')
- self.capabilities = tuple(dat[-1].upper().split())
-
- if __debug__:
- if self.debug >= 3:
- self._mesg('CAPABILITIES: %r' % (self.capabilities,))
-
- for version in AllowedVersions:
- if not version in self.capabilities:
- continue
- self.PROTOCOL_VERSION = version
- return
-
- raise self.error('server not IMAP4 compliant')
-
-
- def __getattr__(self, attr):
- # Allow UPPERCASE variants of IMAP4 command methods.
- if attr in Commands:
- return getattr(self, attr.lower())
- raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
-
-
-
- # Overridable methods
-
-
- def open(self, host = '', port = IMAP4_PORT):
- """Setup connection to remote server on "host:port"
- (default: localhost:standard IMAP4 port).
- This connection will be used by the routines:
- read, readline, send, shutdown.
- """
- self.host = host
- self.port = port
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.sock.connect((host, port))
- self.file = self.sock.makefile('rb')
-
-
- def read(self, size):
- """Read 'size' bytes from remote."""
- return self.file.read(size)
-
-
- def readline(self):
- """Read line from remote."""
- return self.file.readline()
-
-
- def send(self, data):
- """Send data to remote."""
- self.sock.sendall(data)
-
-
- def shutdown(self):
- """Close I/O established in "open"."""
- self.file.close()
- self.sock.close()
-
-
- def socket(self):
- """Return socket instance used to connect to IMAP4 server.
-
- socket = <instance>.socket()
- """
- return self.sock
-
-
-
- # Utility methods
-
-
- def recent(self):
- """Return most recent 'RECENT' responses if any exist,
- else prompt server for an update using the 'NOOP' command.
-
- (typ, [data]) = <instance>.recent()
-
- 'data' is None if no new messages,
- else list of RECENT responses, most recent last.
- """
- name = 'RECENT'
- typ, dat = self._untagged_response('OK', [None], name)
- if dat[-1]:
- return typ, dat
- typ, dat = self.noop() # Prod server for response
- return self._untagged_response(typ, dat, name)
-
-
- def response(self, code):
- """Return data for response 'code' if received, or None.
-
- Old value for response 'code' is cleared.
-
- (code, [data]) = <instance>.response(code)
- """
- return self._untagged_response(code, [None], code.upper())
-
-
-
- # IMAP4 commands
-
-
- def append(self, mailbox, flags, date_time, message):
- """Append message to named mailbox.
-
- (typ, [data]) = <instance>.append(mailbox, flags, date_time, message)
-
- All args except `message' can be None.
- """
- name = 'APPEND'
- if not mailbox:
- mailbox = 'INBOX'
- if flags:
- if (flags[0],flags[-1]) != ('(',')'):
- flags = '(%s)' % flags
- else:
- flags = None
- if date_time:
- date_time = Time2Internaldate(date_time)
- else:
- date_time = None
- self.literal = MapCRLF.sub(CRLF, message)
- return self._simple_command(name, mailbox, flags, date_time)
-
-
- def authenticate(self, mechanism, authobject):
- """Authenticate command - requires response processing.
-
- 'mechanism' specifies which authentication mechanism is to
- be used - it must appear in <instance>.capabilities in the
- form AUTH=<mechanism>.
-
- 'authobject' must be a callable object:
-
- data = authobject(response)
-
- It will be called to process server continuation responses.
- It should return data that will be encoded and sent to server.
- It should return None if the client abort response '*' should
- be sent instead.
- """
- mech = mechanism.upper()
- # XXX: shouldn't this code be removed, not commented out?
- #cap = 'AUTH=%s' % mech
- #if not cap in self.capabilities: # Let the server decide!
- # raise self.error("Server doesn't allow %s authentication." % mech)
- self.literal = _Authenticator(authobject).process
- typ, dat = self._simple_command('AUTHENTICATE', mech)
- if typ != 'OK':
- raise self.error(dat[-1])
- self.state = 'AUTH'
- return typ, dat
-
-
- def capability(self):
- """(typ, [data]) = <instance>.capability()
- Fetch capabilities list from server."""
-
- name = 'CAPABILITY'
- typ, dat = self._simple_command(name)
- return self._untagged_response(typ, dat, name)
-
-
- def check(self):
- """Checkpoint mailbox on server.
-
- (typ, [data]) = <instance>.check()
- """
- return self._simple_command('CHECK')
-
-
- def close(self):
- """Close currently selected mailbox.
-
- Deleted messages are removed from writable mailbox.
- This is the recommended command before 'LOGOUT'.
-
- (typ, [data]) = <instance>.close()
- """
- try:
- typ, dat = self._simple_command('CLOSE')
- finally:
- self.state = 'AUTH'
- return typ, dat
-
-
- def copy(self, message_set, new_mailbox):
- """Copy 'message_set' messages onto end of 'new_mailbox'.
-
- (typ, [data]) = <instance>.copy(message_set, new_mailbox)
- """
- return self._simple_command('COPY', message_set, new_mailbox)
-
-
- def create(self, mailbox):
- """Create new mailbox.
-
- (typ, [data]) = <instance>.create(mailbox)
- """
- return self._simple_command('CREATE', mailbox)
-
-
- def delete(self, mailbox):
- """Delete old mailbox.
-
- (typ, [data]) = <instance>.delete(mailbox)
- """
- return self._simple_command('DELETE', mailbox)
-
- def deleteacl(self, mailbox, who):
- """Delete the ACLs (remove any rights) set for who on mailbox.
-
- (typ, [data]) = <instance>.deleteacl(mailbox, who)
- """
- return self._simple_command('DELETEACL', mailbox, who)
-
- def expunge(self):
- """Permanently remove deleted items from selected mailbox.
-
- Generates 'EXPUNGE' response for each deleted message.
-
- (typ, [data]) = <instance>.expunge()
-
- 'data' is list of 'EXPUNGE'd message numbers in order received.
- """
- name = 'EXPUNGE'
- typ, dat = self._simple_command(name)
- return self._untagged_response(typ, dat, name)
-
-
- def fetch(self, message_set, message_parts):
- """Fetch (parts of) messages.
-
- (typ, [data, ...]) = <instance>.fetch(message_set, message_parts)
-
- 'message_parts' should be a string of selected parts
- enclosed in parentheses, eg: "(UID BODY[TEXT])".
-
- 'data' are tuples of message part envelope and data.
- """
- name = 'FETCH'
- typ, dat = self._simple_command(name, message_set, message_parts)
- return self._untagged_response(typ, dat, name)
-
-
- def getacl(self, mailbox):
- """Get the ACLs for a mailbox.
-
- (typ, [data]) = <instance>.getacl(mailbox)
- """
- typ, dat = self._simple_command('GETACL', mailbox)
- return self._untagged_response(typ, dat, 'ACL')
-
-
- def getannotation(self, mailbox, entry, attribute):
- """(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute)
- Retrieve ANNOTATIONs."""
-
- typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute)
- return self._untagged_response(typ, dat, 'ANNOTATION')
-
-
- def getquota(self, root):
- """Get the quota root's resource usage and limits.
-
- Part of the IMAP4 QUOTA extension defined in rfc2087.
-
- (typ, [data]) = <instance>.getquota(root)
- """
- typ, dat = self._simple_command('GETQUOTA', root)
- return self._untagged_response(typ, dat, 'QUOTA')
-
-
- def getquotaroot(self, mailbox):
- """Get the list of quota roots for the named mailbox.
-
- (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox)
- """
- typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
- typ, quota = self._untagged_response(typ, dat, 'QUOTA')
- typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
- return typ, [quotaroot, quota]
-
-
- def list(self, directory='""', pattern='*'):
- """List mailbox names in directory matching pattern.
-
- (typ, [data]) = <instance>.list(directory='""', pattern='*')
-
- 'data' is list of LIST responses.
- """
- name = 'LIST'
- typ, dat = self._simple_command(name, directory, pattern)
- return self._untagged_response(typ, dat, name)
-
-
- def login(self, user, password):
- """Identify client using plaintext password.
-
- (typ, [data]) = <instance>.login(user, password)
-
- NB: 'password' will be quoted.
- """
- typ, dat = self._simple_command('LOGIN', user, self._quote(password))
- if typ != 'OK':
- raise self.error(dat[-1])
- self.state = 'AUTH'
- return typ, dat
-
-
- def login_cram_md5(self, user, password):
- """ Force use of CRAM-MD5 authentication.
-
- (typ, [data]) = <instance>.login_cram_md5(user, password)
- """
- self.user, self.password = user, password
- return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH)
-
-
- def _CRAM_MD5_AUTH(self, challenge):
- """ Authobject to use with CRAM-MD5 authentication. """
- import hmac
- return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest()
-
-
- def logout(self):
- """Shutdown connection to server.
-
- (typ, [data]) = <instance>.logout()
-
- Returns server 'BYE' response.
- """
- self.state = 'LOGOUT'
- try: typ, dat = self._simple_command('LOGOUT')
- except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
- self.shutdown()
- if 'BYE' in self.untagged_responses:
- return 'BYE', self.untagged_responses['BYE']
- return typ, dat
-
-
- def lsub(self, directory='""', pattern='*'):
- """List 'subscribed' mailbox names in directory matching pattern.
-
- (typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*')
-
- 'data' are tuples of message part envelope and data.
- """
- name = 'LSUB'
- typ, dat = self._simple_command(name, directory, pattern)
- return self._untagged_response(typ, dat, name)
-
- def myrights(self, mailbox):
- """Show my ACLs for a mailbox (i.e. the rights that I have on mailbox).
-
- (typ, [data]) = <instance>.myrights(mailbox)
- """
- typ,dat = self._simple_command('MYRIGHTS', mailbox)
- return self._untagged_response(typ, dat, 'MYRIGHTS')
-
- def namespace(self):
- """ Returns IMAP namespaces ala rfc2342
-
- (typ, [data, ...]) = <instance>.namespace()
- """
- name = 'NAMESPACE'
- typ, dat = self._simple_command(name)
- return self._untagged_response(typ, dat, name)
-
-
- def noop(self):
- """Send NOOP command.
-
- (typ, [data]) = <instance>.noop()
- """
- if __debug__:
- if self.debug >= 3:
- self._dump_ur(self.untagged_responses)
- return self._simple_command('NOOP')
-
-
- def partial(self, message_num, message_part, start, length):
- """Fetch truncated part of a message.
-
- (typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length)
-
- 'data' is tuple of message part envelope and data.
- """
- name = 'PARTIAL'
- typ, dat = self._simple_command(name, message_num, message_part, start, length)
- return self._untagged_response(typ, dat, 'FETCH')
-
-
- def proxyauth(self, user):
- """Assume authentication as "user".
-
- Allows an authorised administrator to proxy into any user's
- mailbox.
-
- (typ, [data]) = <instance>.proxyauth(user)
- """
-
- name = 'PROXYAUTH'
- return self._simple_command('PROXYAUTH', user)
-
-
- def rename(self, oldmailbox, newmailbox):
- """Rename old mailbox name to new.
-
- (typ, [data]) = <instance>.rename(oldmailbox, newmailbox)
- """
- return self._simple_command('RENAME', oldmailbox, newmailbox)
-
-
- def search(self, charset, *criteria):
- """Search mailbox for matching messages.
-
- (typ, [data]) = <instance>.search(charset, criterion, ...)
-
- 'data' is space separated list of matching message numbers.
- """
- name = 'SEARCH'
- if charset:
- typ, dat = self._simple_command(name, 'CHARSET', charset, *criteria)
- else:
- typ, dat = self._simple_command(name, *criteria)
- return self._untagged_response(typ, dat, name)
-
-
- def select(self, mailbox='INBOX', readonly=False):
- """Select a mailbox.
-
- Flush all untagged responses.
-
- (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=False)
-
- 'data' is count of messages in mailbox ('EXISTS' response).
-
- Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
- other responses should be obtained via <instance>.response('FLAGS') etc.
- """
- self.untagged_responses = {} # Flush old responses.
- self.is_readonly = readonly
- if readonly:
- name = 'EXAMINE'
- else:
- name = 'SELECT'
- typ, dat = self._simple_command(name, mailbox)
- if typ != 'OK':
- self.state = 'AUTH' # Might have been 'SELECTED'
- return typ, dat
- self.state = 'SELECTED'
- if 'READ-ONLY' in self.untagged_responses \
- and not readonly:
- if __debug__:
- if self.debug >= 1:
- self._dump_ur(self.untagged_responses)
- raise self.readonly('%s is not writable' % mailbox)
- return typ, self.untagged_responses.get('EXISTS', [None])
-
-
- def setacl(self, mailbox, who, what):
- """Set a mailbox acl.
-
- (typ, [data]) = <instance>.setacl(mailbox, who, what)
- """
- return self._simple_command('SETACL', mailbox, who, what)
-
-
- def setannotation(self, *args):
- """(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+)
- Set ANNOTATIONs."""
-
- typ, dat = self._simple_command('SETANNOTATION', *args)
- return self._untagged_response(typ, dat, 'ANNOTATION')
-
-
- def setquota(self, root, limits):
- """Set the quota root's resource limits.
-
- (typ, [data]) = <instance>.setquota(root, limits)
- """
- typ, dat = self._simple_command('SETQUOTA', root, limits)
- return self._untagged_response(typ, dat, 'QUOTA')
-
-
- def sort(self, sort_criteria, charset, *search_criteria):
- """IMAP4rev1 extension SORT command.
-
- (typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...)
- """
- name = 'SORT'
- #if not name in self.capabilities: # Let the server decide!
- # raise self.error('unimplemented extension command: %s' % name)
- if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
- sort_criteria = '(%s)' % sort_criteria
- typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria)
- return self._untagged_response(typ, dat, name)
-
-
- def status(self, mailbox, names):
- """Request named status conditions for mailbox.
-
- (typ, [data]) = <instance>.status(mailbox, names)
- """
- name = 'STATUS'
- #if self.PROTOCOL_VERSION == 'IMAP4': # Let the server decide!
- # raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name)
- typ, dat = self._simple_command(name, mailbox, names)
- return self._untagged_response(typ, dat, name)
-
-
- def store(self, message_set, command, flags):
- """Alters flag dispositions for messages in mailbox.
-
- (typ, [data]) = <instance>.store(message_set, command, flags)
- """
- if (flags[0],flags[-1]) != ('(',')'):
- flags = '(%s)' % flags # Avoid quoting the flags
- typ, dat = self._simple_command('STORE', message_set, command, flags)
- return self._untagged_response(typ, dat, 'FETCH')
-
-
- def subscribe(self, mailbox):
- """Subscribe to new mailbox.
-
- (typ, [data]) = <instance>.subscribe(mailbox)
- """
- return self._simple_command('SUBSCRIBE', mailbox)
-
-
- def thread(self, threading_algorithm, charset, *search_criteria):
- """IMAPrev1 extension THREAD command.
-
- (type, [data]) = <instance>.thread(threading_alogrithm, charset, search_criteria, ...)
- """
- name = 'THREAD'
- typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria)
- return self._untagged_response(typ, dat, name)
-
-
- def uid(self, command, *args):
- """Execute "command arg ..." with messages identified by UID,
- rather than message number.
-
- (typ, [data]) = <instance>.uid(command, arg1, arg2, ...)
-
- Returns response appropriate to 'command'.
- """
- command = command.upper()
- if not command in Commands:
- raise self.error("Unknown IMAP4 UID command: %s" % command)
- if self.state not in Commands[command]:
- raise self.error('command %s illegal in state %s'
- % (command, self.state))
- name = 'UID'
- typ, dat = self._simple_command(name, command, *args)
- if command in ('SEARCH', 'SORT'):
- name = command
- else:
- name = 'FETCH'
- return self._untagged_response(typ, dat, name)
-
-
- def unsubscribe(self, mailbox):
- """Unsubscribe from old mailbox.
-
- (typ, [data]) = <instance>.unsubscribe(mailbox)
- """
- return self._simple_command('UNSUBSCRIBE', mailbox)
-
-
- def xatom(self, name, *args):
- """Allow simple extension commands
- notified by server in CAPABILITY response.
-
- Assumes command is legal in current state.
-
- (typ, [data]) = <instance>.xatom(name, arg, ...)
-
- Returns response appropriate to extension command `name'.
- """
- name = name.upper()
- #if not name in self.capabilities: # Let the server decide!
- # raise self.error('unknown extension command: %s' % name)
- if not name in Commands:
- Commands[name] = (self.state,)
- return self._simple_command(name, *args)
-
-
-
- # Private methods
-
-
- def _append_untagged(self, typ, dat):
-
- if dat is None: dat = ''
- ur = self.untagged_responses
- if __debug__:
- if self.debug >= 5:
- self._mesg('untagged_responses[%s] %s += ["%s"]' %
- (typ, len(ur.get(typ,'')), dat))
- if typ in ur:
- ur[typ].append(dat)
- else:
- ur[typ] = [dat]
-
-
- def _check_bye(self):
- bye = self.untagged_responses.get('BYE')
- if bye:
- raise self.abort(bye[-1])
-
-
- def _command(self, name, *args):
-
- if self.state not in Commands[name]:
- self.literal = None
- raise self.error(
- 'command %s illegal in state %s' % (name, self.state))
-
- for typ in ('OK', 'NO', 'BAD'):
- if typ in self.untagged_responses:
- del self.untagged_responses[typ]
-
- if 'READ-ONLY' in self.untagged_responses \
- and not self.is_readonly:
- raise self.readonly('mailbox status changed to READ-ONLY')
-
- tag = self._new_tag()
- data = '%s %s' % (tag, name)
- for arg in args:
- if arg is None: continue
- data = '%s %s' % (data, self._checkquote(arg))
-
- literal = self.literal
- if literal is not None:
- self.literal = None
- if type(literal) is type(self._command):
- literator = literal
- else:
- literator = None
- data = '%s {%s}' % (data, len(literal))
-
- if __debug__:
- if self.debug >= 4:
- self._mesg('> %s' % data)
- else:
- self._log('> %s' % data)
-
- try:
- self.send('%s%s' % (data, CRLF))
- except (socket.error, OSError), val:
- raise self.abort('socket error: %s' % val)
-
- if literal is None:
- return tag
-
- while 1:
- # Wait for continuation response
-
- while self._get_response():
- if self.tagged_commands[tag]: # BAD/NO?
- return tag
-
- # Send literal
-
- if literator:
- literal = literator(self.continuation_response)
-
- if __debug__:
- if self.debug >= 4:
- self._mesg('write literal size %s' % len(literal))
-
- try:
- self.send(literal)
- self.send(CRLF)
- except (socket.error, OSError), val:
- raise self.abort('socket error: %s' % val)
-
- if not literator:
- break
-
- return tag
-
-
- def _command_complete(self, name, tag):
- self._check_bye()
- try:
- typ, data = self._get_tagged_response(tag)
- except self.abort, val:
- raise self.abort('command: %s => %s' % (name, val))
- except self.error, val:
- raise self.error('command: %s => %s' % (name, val))
- self._check_bye()
- if typ == 'BAD':
- raise self.error('%s command error: %s %s' % (name, typ, data))
- return typ, data
-
-
- def _get_response(self):
-
- # Read response and store.
- #
- # Returns None for continuation responses,
- # otherwise first response line received.
-
- resp = self._get_line()
-
- # Command completion response?
-
- if self._match(self.tagre, resp):
- tag = self.mo.group('tag')
- if not tag in self.tagged_commands:
- raise self.abort('unexpected tagged response: %s' % resp)
-
- typ = self.mo.group('type')
- dat = self.mo.group('data')
- self.tagged_commands[tag] = (typ, [dat])
- else:
- dat2 = None
-
- # '*' (untagged) responses?
-
- if not self._match(Untagged_response, resp):
- if self._match(Untagged_status, resp):
- dat2 = self.mo.group('data2')
-
- if self.mo is None:
- # Only other possibility is '+' (continuation) response...
-
- if self._match(Continuation, resp):
- self.continuation_response = self.mo.group('data')
- return None # NB: indicates continuation
-
- raise self.abort("unexpected response: '%s'" % resp)
-
- typ = self.mo.group('type')
- dat = self.mo.group('data')
- if dat is None: dat = '' # Null untagged response
- if dat2: dat = dat + ' ' + dat2
-
- # Is there a literal to come?
-
- while self._match(Literal, dat):
-
- # Read literal direct from connection.
-
- size = int(self.mo.group('size'))
- if __debug__:
- if self.debug >= 4:
- self._mesg('read literal size %s' % size)
- data = self.read(size)
-
- # Store response with literal as tuple
-
- self._append_untagged(typ, (dat, data))
-
- # Read trailer - possibly containing another literal
-
- dat = self._get_line()
-
- self._append_untagged(typ, dat)
-
- # Bracketed response information?
-
- if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat):
- self._append_untagged(self.mo.group('type'), self.mo.group('data'))
-
- if __debug__:
- if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'):
- self._mesg('%s response: %s' % (typ, dat))
-
- return resp
-
-
- def _get_tagged_response(self, tag):
-
- while 1:
- result = self.tagged_commands[tag]
- if result is not None:
- del self.tagged_commands[tag]
- return result
-
- # Some have reported "unexpected response" exceptions.
- # Note that ignoring them here causes loops.
- # Instead, send me details of the unexpected response and
- # I'll update the code in `_get_response()'.
-
- try:
- self._get_response()
- except self.abort, val:
- if __debug__:
- if self.debug >= 1:
- self.print_log()
- raise
-
-
- def _get_line(self):
-
- line = self.readline()
- if not line:
- raise self.abort('socket error: EOF')
-
- # Protocol mandates all lines terminated by CRLF
-
- line = line[:-2]
- if __debug__:
- if self.debug >= 4:
- self._mesg('< %s' % line)
- else:
- self._log('< %s' % line)
- return line
-
-
- def _match(self, cre, s):
-
- # Run compiled regular expression match method on 's'.
- # Save result, return success.
-
- self.mo = cre.match(s)
- if __debug__:
- if self.mo is not None and self.debug >= 5:
- self._mesg("\tmatched r'%s' => %r" % (cre.pattern, self.mo.groups()))
- return self.mo is not None
-
-
- def _new_tag(self):
-
- tag = '%s%s' % (self.tagpre, self.tagnum)
- self.tagnum = self.tagnum + 1
- self.tagged_commands[tag] = None
- return tag
-
-
- def _checkquote(self, arg):
-
- # Must quote command args if non-alphanumeric chars present,
- # and not already quoted.
-
- if type(arg) is not type(''):
- return arg
- if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')):
- return arg
- if arg and self.mustquote.search(arg) is None:
- return arg
- return self._quote(arg)
-
-
- def _quote(self, arg):
-
- arg = arg.replace('\\', '\\\\')
- arg = arg.replace('"', '\\"')
-
- return '"%s"' % arg
-
-
- def _simple_command(self, name, *args):
-
- return self._command_complete(name, self._command(name, *args))
-
-
- def _untagged_response(self, typ, dat, name):
-
- if typ == 'NO':
- return typ, dat
- if not name in self.untagged_responses:
- return typ, [None]
- data = self.untagged_responses.pop(name)
- if __debug__:
- if self.debug >= 5:
- self._mesg('untagged_responses[%s] => %s' % (name, data))
- return typ, data
-
-
- if __debug__:
-
- def _mesg(self, s, secs=None):
- if secs is None:
- secs = time.time()
- tm = time.strftime('%M:%S', time.localtime(secs))
- sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s))
- sys.stderr.flush()
-
- def _dump_ur(self, dict):
- # Dump untagged responses (in `dict').
- l = dict.items()
- if not l: return
- t = '\n\t\t'
- l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
- self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))
-
- def _log(self, line):
- # Keep log of last `_cmd_log_len' interactions for debugging.
- self._cmd_log[self._cmd_log_idx] = (line, time.time())
- self._cmd_log_idx += 1
- if self._cmd_log_idx >= self._cmd_log_len:
- self._cmd_log_idx = 0
-
- def print_log(self):
- self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log))
- i, n = self._cmd_log_idx, self._cmd_log_len
- while n:
- try:
- self._mesg(*self._cmd_log[i])
- except:
- pass
- i += 1
- if i >= self._cmd_log_len:
- i = 0
- n -= 1
-
-
-
-class IMAP4_SSL(IMAP4):
-
- """IMAP4 client class over SSL connection
-
- Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile]]]])
-
- host - host's name (default: localhost);
- port - port number (default: standard IMAP4 SSL port).
- keyfile - PEM formatted file that contains your private key (default: None);
- certfile - PEM formatted certificate chain file (default: None);
-
- for more documentation see the docstring of the parent class IMAP4.
- """
-
-
- def __init__(self, host = '', port = IMAP4_SSL_PORT, keyfile = None, certfile = None):
- self.keyfile = keyfile
- self.certfile = certfile
- IMAP4.__init__(self, host, port)
-
-
- def open(self, host = '', port = IMAP4_SSL_PORT):
- """Setup connection to remote server on "host:port".
- (default: localhost:standard IMAP4 SSL port).
- This connection will be used by the routines:
- read, readline, send, shutdown.
- """
- self.host = host
- self.port = port
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.sock.connect((host, port))
- self.sslobj = socket.ssl(self.sock, self.keyfile, self.certfile)
-
-
- def read(self, size):
- """Read 'size' bytes from remote."""
- # sslobj.read() sometimes returns < size bytes
- chunks = []
- read = 0
- while read < size:
- data = self.sslobj.read(size-read)
- read += len(data)
- chunks.append(data)
-
- return ''.join(chunks)
-
-
- def readline(self):
- """Read line from remote."""
- # NB: socket.ssl needs a "readline" method, or perhaps a "makefile" method.
- line = []
- while 1:
- char = self.sslobj.read(1)
- line.append(char)
- if char == "\n": return ''.join(line)
-
-
- def send(self, data):
- """Send data to remote."""
- # NB: socket.ssl needs a "sendall" method to match socket objects.
- bytes = len(data)
- while bytes > 0:
- sent = self.sslobj.write(data)
- if sent == bytes:
- break # avoid copy
- data = data[sent:]
- bytes = bytes - sent
-
-
- def shutdown(self):
- """Close I/O established in "open"."""
- self.sock.close()
-
-
- def socket(self):
- """Return socket instance used to connect to IMAP4 server.
-
- socket = <instance>.socket()
- """
- return self.sock
-
-
- def ssl(self):
- """Return SSLObject instance used to communicate with the IMAP4 server.
-
- ssl = <instance>.socket.ssl()
- """
- return self.sslobj
-
-
-
-class IMAP4_stream(IMAP4):
-
- """IMAP4 client class over a stream
-
- Instantiate with: IMAP4_stream(command)
-
- where "command" is a string that can be passed to os.popen2()
-
- for more documentation see the docstring of the parent class IMAP4.
- """
-
-
- def __init__(self, command):
- self.command = command
- IMAP4.__init__(self)
-
-
- def open(self, host = None, port = None):
- """Setup a stream connection.
- This connection will be used by the routines:
- read, readline, send, shutdown.
- """
- self.host = None # For compatibility with parent class
- self.port = None
- self.sock = None
- self.file = None
- self.writefile, self.readfile = os.popen2(self.command)
-
-
- def read(self, size):
- """Read 'size' bytes from remote."""
- return self.readfile.read(size)
-
-
- def readline(self):
- """Read line from remote."""
- return self.readfile.readline()
-
-
- def send(self, data):
- """Send data to remote."""
- self.writefile.write(data)
- self.writefile.flush()
-
-
- def shutdown(self):
- """Close I/O established in "open"."""
- self.readfile.close()
- self.writefile.close()
-
-
-
-class _Authenticator:
-
- """Private class to provide en/decoding
- for base64-based authentication conversation.
- """
-
- def __init__(self, mechinst):
- self.mech = mechinst # Callable object to provide/process data
-
- def process(self, data):
- ret = self.mech(self.decode(data))
- if ret is None:
- return '*' # Abort conversation
- return self.encode(ret)
-
- def encode(self, inp):
- #
- # Invoke binascii.b2a_base64 iteratively with
- # short even length buffers, strip the trailing
- # line feed from the result and append. "Even"
- # means a number that factors to both 6 and 8,
- # so when it gets to the end of the 8-bit input
- # there's no partial 6-bit output.
- #
- oup = ''
- while inp:
- if len(inp) > 48:
- t = inp[:48]
- inp = inp[48:]
- else:
- t = inp
- inp = ''
- e = binascii.b2a_base64(t)
- if e:
- oup = oup + e[:-1]
- return oup
-
- def decode(self, inp):
- if not inp:
- return ''
- return binascii.a2b_base64(inp)
-
-
-
-Mon2num = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
- 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
-
-def Internaldate2tuple(resp):
- """Convert IMAP4 INTERNALDATE to UT.
-
- Returns Python time module tuple.
- """
-
- mo = InternalDate.match(resp)
- if not mo:
- return None
-
- mon = Mon2num[mo.group('mon')]
- zonen = mo.group('zonen')
-
- day = int(mo.group('day'))
- year = int(mo.group('year'))
- hour = int(mo.group('hour'))
- min = int(mo.group('min'))
- sec = int(mo.group('sec'))
- zoneh = int(mo.group('zoneh'))
- zonem = int(mo.group('zonem'))
-
- # INTERNALDATE timezone must be subtracted to get UT
-
- zone = (zoneh*60 + zonem)*60
- if zonen == '-':
- zone = -zone
-
- tt = (year, mon, day, hour, min, sec, -1, -1, -1)
-
- utc = time.mktime(tt)
-
- # Following is necessary because the time module has no 'mkgmtime'.
- # 'mktime' assumes arg in local timezone, so adds timezone/altzone.
-
- lt = time.localtime(utc)
- if time.daylight and lt[-1]:
- zone = zone + time.altzone
- else:
- zone = zone + time.timezone
-
- return time.localtime(utc - zone)
-
-
-
-def Int2AP(num):
-
- """Convert integer to A-P string representation."""
-
- val = ''; AP = 'ABCDEFGHIJKLMNOP'
- num = int(abs(num))
- while num:
- num, mod = divmod(num, 16)
- val = AP[mod] + val
- return val
-
-
-
-def ParseFlags(resp):
-
- """Convert IMAP4 flags response to python tuple."""
-
- mo = Flags.match(resp)
- if not mo:
- return ()
-
- return tuple(mo.group('flags').split())
-
-
-def Time2Internaldate(date_time):
-
- """Convert 'date_time' to IMAP4 INTERNALDATE representation.
-
- Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'
- """
-
- if isinstance(date_time, (int, float)):
- tt = time.localtime(date_time)
- elif isinstance(date_time, (tuple, time.struct_time)):
- tt = date_time
- elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
- return date_time # Assume in correct format
- else:
- raise ValueError("date_time not of a known type")
-
- dt = time.strftime("%d-%b-%Y %H:%M:%S", tt)
- if dt[0] == '0':
- dt = ' ' + dt[1:]
- if time.daylight and tt[-1]:
- zone = -time.altzone
- else:
- zone = -time.timezone
- return '"' + dt + " %+03d%02d" % divmod(zone//60, 60) + '"'
-
-
-
-if __name__ == '__main__':
-
- # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]'
- # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"'
- # to test the IMAP4_stream class
-
- import getopt, getpass
-
- try:
- optlist, args = getopt.getopt(sys.argv[1:], 'd:s:')
- except getopt.error, val:
- optlist, args = (), ()
-
- stream_command = None
- for opt,val in optlist:
- if opt == '-d':
- Debug = int(val)
- elif opt == '-s':
- stream_command = val
- if not args: args = (stream_command,)
-
- if not args: args = ('',)
-
- host = args[0]
-
- USER = getpass.getuser()
- PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost"))
-
- test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'}
- test_seq1 = (
- ('login', (USER, PASSWD)),
- ('create', ('/tmp/xxx 1',)),
- ('rename', ('/tmp/xxx 1', '/tmp/yyy')),
- ('CREATE', ('/tmp/yyz 2',)),
- ('append', ('/tmp/yyz 2', None, None, test_mesg)),
- ('list', ('/tmp', 'yy*')),
- ('select', ('/tmp/yyz 2',)),
- ('search', (None, 'SUBJECT', 'test')),
- ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')),
- ('store', ('1', 'FLAGS', '(\Deleted)')),
- ('namespace', ()),
- ('expunge', ()),
- ('recent', ()),
- ('close', ()),
- )
-
- test_seq2 = (
- ('select', ()),
- ('response',('UIDVALIDITY',)),
- ('uid', ('SEARCH', 'ALL')),
- ('response', ('EXISTS',)),
- ('append', (None, None, None, test_mesg)),
- ('recent', ()),
- ('logout', ()),
- )
-
- def run(cmd, args):
- M._mesg('%s %s' % (cmd, args))
- typ, dat = getattr(M, cmd)(*args)
- M._mesg('%s => %s %s' % (cmd, typ, dat))
- if typ == 'NO': raise dat[0]
- return dat
-
- try:
- if stream_command:
- M = IMAP4_stream(stream_command)
- else:
- M = IMAP4(host)
- if M.state == 'AUTH':
- test_seq1 = test_seq1[1:] # Login not needed
- M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION)
- M._mesg('CAPABILITIES = %r' % (M.capabilities,))
-
- for cmd,args in test_seq1:
- run(cmd, args)
-
- for ml in run('list', ('/tmp/', 'yy%')):
- mo = re.match(r'.*"([^"]+)"$', ml)
- if mo: path = mo.group(1)
- else: path = ml.split()[-1]
- run('delete', (path,))
-
- for cmd,args in test_seq2:
- dat = run(cmd, args)
-
- if (cmd,args) != ('uid', ('SEARCH', 'ALL')):
- continue
-
- uid = dat[-1].split()
- if not uid: continue
- run('uid', ('FETCH', '%s' % uid[-1],
- '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)'))
-
- print '\nAll tests OK.'
-
- except:
- print '\nTests failed.'
-
- if not Debug:
- print '''
-If you would like to see debugging output,
-try: %s -d5
-''' % sys.argv[0]
-
- raise
diff --git a/sys/lib/python/imghdr.py b/sys/lib/python/imghdr.py
deleted file mode 100644
index 2fbc9661e..000000000
--- a/sys/lib/python/imghdr.py
+++ /dev/null
@@ -1,161 +0,0 @@
-"""Recognize image file formats based on their first few bytes."""
-
-__all__ = ["what"]
-
-#-------------------------#
-# Recognize image headers #
-#-------------------------#
-
-def what(file, h=None):
- if h is None:
- if type(file) == type(''):
- f = open(file, 'rb')
- h = f.read(32)
- else:
- location = file.tell()
- h = file.read(32)
- file.seek(location)
- f = None
- else:
- f = None
- try:
- for tf in tests:
- res = tf(h, f)
- if res:
- return res
- finally:
- if f: f.close()
- return None
-
-
-#---------------------------------#
-# Subroutines per image file type #
-#---------------------------------#
-
-tests = []
-
-def test_rgb(h, f):
- """SGI image library"""
- if h[:2] == '\001\332':
- return 'rgb'
-
-tests.append(test_rgb)
-
-def test_gif(h, f):
- """GIF ('87 and '89 variants)"""
- if h[:6] in ('GIF87a', 'GIF89a'):
- return 'gif'
-
-tests.append(test_gif)
-
-def test_pbm(h, f):
- """PBM (portable bitmap)"""
- if len(h) >= 3 and \
- h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
- return 'pbm'
-
-tests.append(test_pbm)
-
-def test_pgm(h, f):
- """PGM (portable graymap)"""
- if len(h) >= 3 and \
- h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
- return 'pgm'
-
-tests.append(test_pgm)
-
-def test_ppm(h, f):
- """PPM (portable pixmap)"""
- if len(h) >= 3 and \
- h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
- return 'ppm'
-
-tests.append(test_ppm)
-
-def test_tiff(h, f):
- """TIFF (can be in Motorola or Intel byte order)"""
- if h[:2] in ('MM', 'II'):
- return 'tiff'
-
-tests.append(test_tiff)
-
-def test_rast(h, f):
- """Sun raster file"""
- if h[:4] == '\x59\xA6\x6A\x95':
- return 'rast'
-
-tests.append(test_rast)
-
-def test_xbm(h, f):
- """X bitmap (X10 or X11)"""
- s = '#define '
- if h[:len(s)] == s:
- return 'xbm'
-
-tests.append(test_xbm)
-
-def test_jpeg(h, f):
- """JPEG data in JFIF format"""
- if h[6:10] == 'JFIF':
- return 'jpeg'
-
-tests.append(test_jpeg)
-
-def test_exif(h, f):
- """JPEG data in Exif format"""
- if h[6:10] == 'Exif':
- return 'jpeg'
-
-tests.append(test_exif)
-
-def test_bmp(h, f):
- if h[:2] == 'BM':
- return 'bmp'
-
-tests.append(test_bmp)
-
-def test_png(h, f):
- if h[:8] == "\211PNG\r\n\032\n":
- return 'png'
-
-tests.append(test_png)
-
-#--------------------#
-# Small test program #
-#--------------------#
-
-def test():
- import sys
- recursive = 0
- if sys.argv[1:] and sys.argv[1] == '-r':
- del sys.argv[1:2]
- recursive = 1
- try:
- if sys.argv[1:]:
- testall(sys.argv[1:], recursive, 1)
- else:
- testall(['.'], recursive, 1)
- except KeyboardInterrupt:
- sys.stderr.write('\n[Interrupted]\n')
- sys.exit(1)
-
-def testall(list, recursive, toplevel):
- import sys
- import os
- for filename in list:
- if os.path.isdir(filename):
- print filename + '/:',
- if recursive or toplevel:
- print 'recursing down:'
- import glob
- names = glob.glob(os.path.join(filename, '*'))
- testall(names, recursive, 0)
- else:
- print '*** directory (use -r) ***'
- else:
- print filename + ':',
- sys.stdout.flush()
- try:
- print what(filename)
- except IOError:
- print '*** not found ***'
diff --git a/sys/lib/python/imputil.py b/sys/lib/python/imputil.py
deleted file mode 100644
index 675a634db..000000000
--- a/sys/lib/python/imputil.py
+++ /dev/null
@@ -1,731 +0,0 @@
-"""
-Import utilities
-
-Exported classes:
- ImportManager Manage the import process
-
- Importer Base class for replacing standard import functions
- BuiltinImporter Emulate the import mechanism for builtin and frozen modules
-
- DynLoadSuffixImporter
-"""
-
-# note: avoid importing non-builtin modules
-import imp ### not available in JPython?
-import sys
-import __builtin__
-
-# for the DirectoryImporter
-import struct
-import marshal
-
-__all__ = ["ImportManager","Importer","BuiltinImporter"]
-
-_StringType = type('')
-_ModuleType = type(sys) ### doesn't work in JPython...
-
-class ImportManager:
- "Manage the import process."
-
- def install(self, namespace=vars(__builtin__)):
- "Install this ImportManager into the specified namespace."
-
- if isinstance(namespace, _ModuleType):
- namespace = vars(namespace)
-
- # Note: we have no notion of "chaining"
-
- # Record the previous import hook, then install our own.
- self.previous_importer = namespace['__import__']
- self.namespace = namespace
- namespace['__import__'] = self._import_hook
-
- ### fix this
- #namespace['reload'] = self._reload_hook
-
- def uninstall(self):
- "Restore the previous import mechanism."
- self.namespace['__import__'] = self.previous_importer
-
- def add_suffix(self, suffix, importFunc):
- assert callable(importFunc)
- self.fs_imp.add_suffix(suffix, importFunc)
-
- ######################################################################
- #
- # PRIVATE METHODS
- #
-
- clsFilesystemImporter = None
-
- def __init__(self, fs_imp=None):
- # we're definitely going to be importing something in the future,
- # so let's just load the OS-related facilities.
- if not _os_stat:
- _os_bootstrap()
-
- # This is the Importer that we use for grabbing stuff from the
- # filesystem. It defines one more method (import_from_dir) for our use.
- if fs_imp is None:
- cls = self.clsFilesystemImporter or _FilesystemImporter
- fs_imp = cls()
- self.fs_imp = fs_imp
-
- # Initialize the set of suffixes that we recognize and import.
- # The default will import dynamic-load modules first, followed by
- # .py files (or a .py file's cached bytecode)
- for desc in imp.get_suffixes():
- if desc[2] == imp.C_EXTENSION:
- self.add_suffix(desc[0],
- DynLoadSuffixImporter(desc).import_file)
- self.add_suffix('.py', py_suffix_importer)
-
- def _import_hook(self, fqname, globals=None, locals=None, fromlist=None):
- """Python calls this hook to locate and import a module."""
-
- parts = fqname.split('.')
-
- # determine the context of this import
- parent = self._determine_import_context(globals)
-
- # if there is a parent, then its importer should manage this import
- if parent:
- module = parent.__importer__._do_import(parent, parts, fromlist)
- if module:
- return module
-
- # has the top module already been imported?
- try:
- top_module = sys.modules[parts[0]]
- except KeyError:
-
- # look for the topmost module
- top_module = self._import_top_module(parts[0])
- if not top_module:
- # the topmost module wasn't found at all.
- raise ImportError, 'No module named ' + fqname
-
- # fast-path simple imports
- if len(parts) == 1:
- if not fromlist:
- return top_module
-
- if not top_module.__dict__.get('__ispkg__'):
- # __ispkg__ isn't defined (the module was not imported by us),
- # or it is zero.
- #
- # In the former case, there is no way that we could import
- # sub-modules that occur in the fromlist (but we can't raise an
- # error because it may just be names) because we don't know how
- # to deal with packages that were imported by other systems.
- #
- # In the latter case (__ispkg__ == 0), there can't be any sub-
- # modules present, so we can just return.
- #
- # In both cases, since len(parts) == 1, the top_module is also
- # the "bottom" which is the defined return when a fromlist
- # exists.
- return top_module
-
- importer = top_module.__dict__.get('__importer__')
- if importer:
- return importer._finish_import(top_module, parts[1:], fromlist)
-
- # Grrr, some people "import os.path" or do "from os.path import ..."
- if len(parts) == 2 and hasattr(top_module, parts[1]):
- if fromlist:
- return getattr(top_module, parts[1])
- else:
- return top_module
-
- # If the importer does not exist, then we have to bail. A missing
- # importer means that something else imported the module, and we have
- # no knowledge of how to get sub-modules out of the thing.
- raise ImportError, 'No module named ' + fqname
-
- def _determine_import_context(self, globals):
- """Returns the context in which a module should be imported.
-
- The context could be a loaded (package) module and the imported module
- will be looked for within that package. The context could also be None,
- meaning there is no context -- the module should be looked for as a
- "top-level" module.
- """
-
- if not globals or not globals.get('__importer__'):
- # globals does not refer to one of our modules or packages. That
- # implies there is no relative import context (as far as we are
- # concerned), and it should just pick it off the standard path.
- return None
-
- # The globals refer to a module or package of ours. It will define
- # the context of the new import. Get the module/package fqname.
- parent_fqname = globals['__name__']
-
- # if a package is performing the import, then return itself (imports
- # refer to pkg contents)
- if globals['__ispkg__']:
- parent = sys.modules[parent_fqname]
- assert globals is parent.__dict__
- return parent
-
- i = parent_fqname.rfind('.')
-
- # a module outside of a package has no particular import context
- if i == -1:
- return None
-
- # if a module in a package is performing the import, then return the
- # package (imports refer to siblings)
- parent_fqname = parent_fqname[:i]
- parent = sys.modules[parent_fqname]
- assert parent.__name__ == parent_fqname
- return parent
-
- def _import_top_module(self, name):
- # scan sys.path looking for a location in the filesystem that contains
- # the module, or an Importer object that can import the module.
- for item in sys.path:
- if isinstance(item, _StringType):
- module = self.fs_imp.import_from_dir(item, name)
- else:
- module = item.import_top(name)
- if module:
- return module
- return None
-
- def _reload_hook(self, module):
- "Python calls this hook to reload a module."
-
- # reloading of a module may or may not be possible (depending on the
- # importer), but at least we can validate that it's ours to reload
- importer = module.__dict__.get('__importer__')
- if not importer:
- ### oops. now what...
- pass
-
- # okay. it is using the imputil system, and we must delegate it, but
- # we don't know what to do (yet)
- ### we should blast the module dict and do another get_code(). need to
- ### flesh this out and add proper docco...
- raise SystemError, "reload not yet implemented"
-
-
-class Importer:
- "Base class for replacing standard import functions."
-
- def import_top(self, name):
- "Import a top-level module."
- return self._import_one(None, name, name)
-
- ######################################################################
- #
- # PRIVATE METHODS
- #
- def _finish_import(self, top, parts, fromlist):
- # if "a.b.c" was provided, then load the ".b.c" portion down from
- # below the top-level module.
- bottom = self._load_tail(top, parts)
-
- # if the form is "import a.b.c", then return "a"
- if not fromlist:
- # no fromlist: return the top of the import tree
- return top
-
- # the top module was imported by self.
- #
- # this means that the bottom module was also imported by self (just
- # now, or in the past and we fetched it from sys.modules).
- #
- # since we imported/handled the bottom module, this means that we can
- # also handle its fromlist (and reliably use __ispkg__).
-
- # if the bottom node is a package, then (potentially) import some
- # modules.
- #
- # note: if it is not a package, then "fromlist" refers to names in
- # the bottom module rather than modules.
- # note: for a mix of names and modules in the fromlist, we will
- # import all modules and insert those into the namespace of
- # the package module. Python will pick up all fromlist names
- # from the bottom (package) module; some will be modules that
- # we imported and stored in the namespace, others are expected
- # to be present already.
- if bottom.__ispkg__:
- self._import_fromlist(bottom, fromlist)
-
- # if the form is "from a.b import c, d" then return "b"
- return bottom
-
- def _import_one(self, parent, modname, fqname):
- "Import a single module."
-
- # has the module already been imported?
- try:
- return sys.modules[fqname]
- except KeyError:
- pass
-
- # load the module's code, or fetch the module itself
- result = self.get_code(parent, modname, fqname)
- if result is None:
- return None
-
- module = self._process_result(result, fqname)
-
- # insert the module into its parent
- if parent:
- setattr(parent, modname, module)
- return module
-
- def _process_result(self, (ispkg, code, values), fqname):
- # did get_code() return an actual module? (rather than a code object)
- is_module = isinstance(code, _ModuleType)
-
- # use the returned module, or create a new one to exec code into
- if is_module:
- module = code
- else:
- module = imp.new_module(fqname)
-
- ### record packages a bit differently??
- module.__importer__ = self
- module.__ispkg__ = ispkg
-
- # insert additional values into the module (before executing the code)
- module.__dict__.update(values)
-
- # the module is almost ready... make it visible
- sys.modules[fqname] = module
-
- # execute the code within the module's namespace
- if not is_module:
- try:
- exec code in module.__dict__
- except:
- if fqname in sys.modules:
- del sys.modules[fqname]
- raise
-
- # fetch from sys.modules instead of returning module directly.
- # also make module's __name__ agree with fqname, in case
- # the "exec code in module.__dict__" played games on us.
- module = sys.modules[fqname]
- module.__name__ = fqname
- return module
-
- def _load_tail(self, m, parts):
- """Import the rest of the modules, down from the top-level module.
-
- Returns the last module in the dotted list of modules.
- """
- for part in parts:
- fqname = "%s.%s" % (m.__name__, part)
- m = self._import_one(m, part, fqname)
- if not m:
- raise ImportError, "No module named " + fqname
- return m
-
- def _import_fromlist(self, package, fromlist):
- 'Import any sub-modules in the "from" list.'
-
- # if '*' is present in the fromlist, then look for the '__all__'
- # variable to find additional items (modules) to import.
- if '*' in fromlist:
- fromlist = list(fromlist) + \
- list(package.__dict__.get('__all__', []))
-
- for sub in fromlist:
- # if the name is already present, then don't try to import it (it
- # might not be a module!).
- if sub != '*' and not hasattr(package, sub):
- subname = "%s.%s" % (package.__name__, sub)
- submod = self._import_one(package, sub, subname)
- if not submod:
- raise ImportError, "cannot import name " + subname
-
- def _do_import(self, parent, parts, fromlist):
- """Attempt to import the module relative to parent.
-
- This method is used when the import context specifies that <self>
- imported the parent module.
- """
- top_name = parts[0]
- top_fqname = parent.__name__ + '.' + top_name
- top_module = self._import_one(parent, top_name, top_fqname)
- if not top_module:
- # this importer and parent could not find the module (relatively)
- return None
-
- return self._finish_import(top_module, parts[1:], fromlist)
-
- ######################################################################
- #
- # METHODS TO OVERRIDE
- #
- def get_code(self, parent, modname, fqname):
- """Find and retrieve the code for the given module.
-
- parent specifies a parent module to define a context for importing. It
- may be None, indicating no particular context for the search.
-
- modname specifies a single module (not dotted) within the parent.
-
- fqname specifies the fully-qualified module name. This is a
- (potentially) dotted name from the "root" of the module namespace
- down to the modname.
- If there is no parent, then modname==fqname.
-
- This method should return None, or a 3-tuple.
-
- * If the module was not found, then None should be returned.
-
- * The first item of the 2- or 3-tuple should be the integer 0 or 1,
- specifying whether the module that was found is a package or not.
-
- * The second item is the code object for the module (it will be
- executed within the new module's namespace). This item can also
- be a fully-loaded module object (e.g. loaded from a shared lib).
-
- * The third item is a dictionary of name/value pairs that will be
- inserted into new module before the code object is executed. This
- is provided in case the module's code expects certain values (such
- as where the module was found). When the second item is a module
- object, then these names/values will be inserted *after* the module
- has been loaded/initialized.
- """
- raise RuntimeError, "get_code not implemented"
-
-
-######################################################################
-#
-# Some handy stuff for the Importers
-#
-
-# byte-compiled file suffix character
-_suffix_char = __debug__ and 'c' or 'o'
-
-# byte-compiled file suffix
-_suffix = '.py' + _suffix_char
-
-def _compile(pathname, timestamp):
- """Compile (and cache) a Python source file.
-
- The file specified by <pathname> is compiled to a code object and
- returned.
-
- Presuming the appropriate privileges exist, the bytecodes will be
- saved back to the filesystem for future imports. The source file's
- modification timestamp must be provided as a Long value.
- """
- codestring = open(pathname, 'rU').read()
- if codestring and codestring[-1] != '\n':
- codestring = codestring + '\n'
- code = __builtin__.compile(codestring, pathname, 'exec')
-
- # try to cache the compiled code
- try:
- f = open(pathname + _suffix_char, 'wb')
- except IOError:
- pass
- else:
- f.write('\0\0\0\0')
- f.write(struct.pack('<I', timestamp))
- marshal.dump(code, f)
- f.flush()
- f.seek(0, 0)
- f.write(imp.get_magic())
- f.close()
-
- return code
-
-_os_stat = _os_path_join = None
-def _os_bootstrap():
- "Set up 'os' module replacement functions for use during import bootstrap."
-
- names = sys.builtin_module_names
-
- join = None
- if 'posix' in names:
- sep = '/'
- from posix import stat
- elif 'nt' in names:
- sep = '\\'
- from nt import stat
- elif 'dos' in names:
- sep = '\\'
- from dos import stat
- elif 'os2' in names:
- sep = '\\'
- from os2 import stat
- elif 'mac' in names:
- from mac import stat
- def join(a, b):
- if a == '':
- return b
- if ':' not in a:
- a = ':' + a
- if a[-1:] != ':':
- a = a + ':'
- return a + b
- else:
- raise ImportError, 'no os specific module found'
-
- if join is None:
- def join(a, b, sep=sep):
- if a == '':
- return b
- lastchar = a[-1:]
- if lastchar == '/' or lastchar == sep:
- return a + b
- return a + sep + b
-
- global _os_stat
- _os_stat = stat
-
- global _os_path_join
- _os_path_join = join
-
-def _os_path_isdir(pathname):
- "Local replacement for os.path.isdir()."
- try:
- s = _os_stat(pathname)
- except OSError:
- return None
- return (s.st_mode & 0170000) == 0040000
-
-def _timestamp(pathname):
- "Return the file modification time as a Long."
- try:
- s = _os_stat(pathname)
- except OSError:
- return None
- return long(s.st_mtime)
-
-
-######################################################################
-#
-# Emulate the import mechanism for builtin and frozen modules
-#
-class BuiltinImporter(Importer):
- def get_code(self, parent, modname, fqname):
- if parent:
- # these modules definitely do not occur within a package context
- return None
-
- # look for the module
- if imp.is_builtin(modname):
- type = imp.C_BUILTIN
- elif imp.is_frozen(modname):
- type = imp.PY_FROZEN
- else:
- # not found
- return None
-
- # got it. now load and return it.
- module = imp.load_module(modname, None, modname, ('', '', type))
- return 0, module, { }
-
-
-######################################################################
-#
-# Internal importer used for importing from the filesystem
-#
-class _FilesystemImporter(Importer):
- def __init__(self):
- self.suffixes = [ ]
-
- def add_suffix(self, suffix, importFunc):
- assert callable(importFunc)
- self.suffixes.append((suffix, importFunc))
-
- def import_from_dir(self, dir, fqname):
- result = self._import_pathname(_os_path_join(dir, fqname), fqname)
- if result:
- return self._process_result(result, fqname)
- return None
-
- def get_code(self, parent, modname, fqname):
- # This importer is never used with an empty parent. Its existence is
- # private to the ImportManager. The ImportManager uses the
- # import_from_dir() method to import top-level modules/packages.
- # This method is only used when we look for a module within a package.
- assert parent
-
- for submodule_path in parent.__path__:
- code = self._import_pathname(_os_path_join(submodule_path, modname), fqname)
- if code is not None:
- return code
- return self._import_pathname(_os_path_join(parent.__pkgdir__, modname),
- fqname)
-
- def _import_pathname(self, pathname, fqname):
- if _os_path_isdir(pathname):
- result = self._import_pathname(_os_path_join(pathname, '__init__'),
- fqname)
- if result:
- values = result[2]
- values['__pkgdir__'] = pathname
- values['__path__'] = [ pathname ]
- return 1, result[1], values
- return None
-
- for suffix, importFunc in self.suffixes:
- filename = pathname + suffix
- try:
- finfo = _os_stat(filename)
- except OSError:
- pass
- else:
- return importFunc(filename, finfo, fqname)
- return None
-
-######################################################################
-#
-# SUFFIX-BASED IMPORTERS
-#
-
-def py_suffix_importer(filename, finfo, fqname):
- file = filename[:-3] + _suffix
- t_py = long(finfo[8])
- t_pyc = _timestamp(file)
-
- code = None
- if t_pyc is not None and t_pyc >= t_py:
- f = open(file, 'rb')
- if f.read(4) == imp.get_magic():
- t = struct.unpack('<I', f.read(4))[0]
- if t == t_py:
- code = marshal.load(f)
- f.close()
- if code is None:
- file = filename
- code = _compile(file, t_py)
-
- return 0, code, { '__file__' : file }
-
-class DynLoadSuffixImporter:
- def __init__(self, desc):
- self.desc = desc
-
- def import_file(self, filename, finfo, fqname):
- fp = open(filename, self.desc[1])
- module = imp.load_module(fqname, fp, filename, self.desc)
- module.__file__ = filename
- return 0, module, { }
-
-
-######################################################################
-
-def _print_importers():
- items = sys.modules.items()
- items.sort()
- for name, module in items:
- if module:
- print name, module.__dict__.get('__importer__', '-- no importer')
- else:
- print name, '-- non-existent module'
-
-def _test_revamp():
- ImportManager().install()
- sys.path.insert(0, BuiltinImporter())
-
-######################################################################
-
-#
-# TODO
-#
-# from Finn Bock:
-# type(sys) is not a module in JPython. what to use instead?
-# imp.C_EXTENSION is not in JPython. same for get_suffixes and new_module
-#
-# given foo.py of:
-# import sys
-# sys.modules['foo'] = sys
-#
-# ---- standard import mechanism
-# >>> import foo
-# >>> foo
-# <module 'sys' (built-in)>
-#
-# ---- revamped import mechanism
-# >>> import imputil
-# >>> imputil._test_revamp()
-# >>> import foo
-# >>> foo
-# <module 'foo' from 'foo.py'>
-#
-#
-# from MAL:
-# should BuiltinImporter exist in sys.path or hard-wired in ImportManager?
-# need __path__ processing
-# performance
-# move chaining to a subclass [gjs: it's been nuked]
-# deinstall should be possible
-# query mechanism needed: is a specific Importer installed?
-# py/pyc/pyo piping hooks to filter/process these files
-# wish list:
-# distutils importer hooked to list of standard Internet repositories
-# module->file location mapper to speed FS-based imports
-# relative imports
-# keep chaining so that it can play nice with other import hooks
-#
-# from Gordon:
-# push MAL's mapper into sys.path[0] as a cache (hard-coded for apps)
-#
-# from Guido:
-# need to change sys.* references for rexec environs
-# need hook for MAL's walk-me-up import strategy, or Tim's absolute strategy
-# watch out for sys.modules[...] is None
-# flag to force absolute imports? (speeds _determine_import_context and
-# checking for a relative module)
-# insert names of archives into sys.path (see quote below)
-# note: reload does NOT blast module dict
-# shift import mechanisms and policies around; provide for hooks, overrides
-# (see quote below)
-# add get_source stuff
-# get_topcode and get_subcode
-# CRLF handling in _compile
-# race condition in _compile
-# refactoring of os.py to deal with _os_bootstrap problem
-# any special handling to do for importing a module with a SyntaxError?
-# (e.g. clean up the traceback)
-# implement "domain" for path-type functionality using pkg namespace
-# (rather than FS-names like __path__)
-# don't use the word "private"... maybe "internal"
-#
-#
-# Guido's comments on sys.path caching:
-#
-# We could cache this in a dictionary: the ImportManager can have a
-# cache dict mapping pathnames to importer objects, and a separate
-# method for coming up with an importer given a pathname that's not yet
-# in the cache. The method should do a stat and/or look at the
-# extension to decide which importer class to use; you can register new
-# importer classes by registering a suffix or a Boolean function, plus a
-# class. If you register a new importer class, the cache is zapped.
-# The cache is independent from sys.path (but maintained per
-# ImportManager instance) so that rearrangements of sys.path do the
-# right thing. If a path is dropped from sys.path the corresponding
-# cache entry is simply no longer used.
-#
-# My/Guido's comments on factoring ImportManager and Importer:
-#
-# > However, we still have a tension occurring here:
-# >
-# > 1) implementing policy in ImportManager assists in single-point policy
-# > changes for app/rexec situations
-# > 2) implementing policy in Importer assists in package-private policy
-# > changes for normal, operating conditions
-# >
-# > I'll see if I can sort out a way to do this. Maybe the Importer class will
-# > implement the methods (which can be overridden to change policy) by
-# > delegating to ImportManager.
-#
-# Maybe also think about what kind of policies an Importer would be
-# likely to want to change. I have a feeling that a lot of the code
-# there is actually not so much policy but a *necessity* to get things
-# working given the calling conventions for the __import__ hook: whether
-# to return the head or tail of a dotted name, or when to do the "finish
-# fromlist" stuff.
-#
diff --git a/sys/lib/python/inspect.py b/sys/lib/python/inspect.py
deleted file mode 100644
index 986a415e2..000000000
--- a/sys/lib/python/inspect.py
+++ /dev/null
@@ -1,889 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""Get useful information from live Python objects.
-
-This module encapsulates the interface provided by the internal special
-attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
-It also provides some help for examining source code and class layout.
-
-Here are some of the useful functions provided by this module:
-
- ismodule(), isclass(), ismethod(), isfunction(), istraceback(),
- isframe(), iscode(), isbuiltin(), isroutine() - check object types
- getmembers() - get members of an object that satisfy a given condition
-
- getfile(), getsourcefile(), getsource() - find an object's source code
- getdoc(), getcomments() - get documentation on an object
- getmodule() - determine the module that an object came from
- getclasstree() - arrange classes so as to represent their hierarchy
-
- getargspec(), getargvalues() - get info about function arguments
- formatargspec(), formatargvalues() - format an argument spec
- getouterframes(), getinnerframes() - get info about frames
- currentframe() - get the current stack frame
- stack(), trace() - get info about frames on the stack or in a traceback
-"""
-
-# This module is in the public domain. No warranties.
-
-__author__ = 'Ka-Ping Yee <ping@lfw.org>'
-__date__ = '1 Jan 2001'
-
-import sys, os, types, string, re, dis, imp, tokenize, linecache
-from operator import attrgetter
-
-# ----------------------------------------------------------- type-checking
-def ismodule(object):
- """Return true if the object is a module.
-
- Module objects provide these attributes:
- __doc__ documentation string
- __file__ filename (missing for built-in modules)"""
- return isinstance(object, types.ModuleType)
-
-def isclass(object):
- """Return true if the object is a class.
-
- Class objects provide these attributes:
- __doc__ documentation string
- __module__ name of module in which this class was defined"""
- return isinstance(object, types.ClassType) or hasattr(object, '__bases__')
-
-def ismethod(object):
- """Return true if the object is an instance method.
-
- Instance method objects provide these attributes:
- __doc__ documentation string
- __name__ name with which this method was defined
- im_class class object in which this method belongs
- im_func function object containing implementation of method
- im_self instance to which this method is bound, or None"""
- return isinstance(object, types.MethodType)
-
-def ismethoddescriptor(object):
- """Return true if the object is a method descriptor.
-
- But not if ismethod() or isclass() or isfunction() are true.
-
- This is new in Python 2.2, and, for example, is true of int.__add__.
- An object passing this test has a __get__ attribute but not a __set__
- attribute, but beyond that the set of attributes varies. __name__ is
- usually sensible, and __doc__ often is.
-
- Methods implemented via descriptors that also pass one of the other
- tests return false from the ismethoddescriptor() test, simply because
- the other tests promise more -- you can, e.g., count on having the
- im_func attribute (etc) when an object passes ismethod()."""
- return (hasattr(object, "__get__")
- and not hasattr(object, "__set__") # else it's a data descriptor
- and not ismethod(object) # mutual exclusion
- and not isfunction(object)
- and not isclass(object))
-
-def isdatadescriptor(object):
- """Return true if the object is a data descriptor.
-
- Data descriptors have both a __get__ and a __set__ attribute. Examples are
- properties (defined in Python) and getsets and members (defined in C).
- Typically, data descriptors will also have __name__ and __doc__ attributes
- (properties, getsets, and members have both of these attributes), but this
- is not guaranteed."""
- return (hasattr(object, "__set__") and hasattr(object, "__get__"))
-
-if hasattr(types, 'MemberDescriptorType'):
- # CPython and equivalent
- def ismemberdescriptor(object):
- """Return true if the object is a member descriptor.
-
- Member descriptors are specialized descriptors defined in extension
- modules."""
- return isinstance(object, types.MemberDescriptorType)
-else:
- # Other implementations
- def ismemberdescriptor(object):
- """Return true if the object is a member descriptor.
-
- Member descriptors are specialized descriptors defined in extension
- modules."""
- return False
-
-if hasattr(types, 'GetSetDescriptorType'):
- # CPython and equivalent
- def isgetsetdescriptor(object):
- """Return true if the object is a getset descriptor.
-
- getset descriptors are specialized descriptors defined in extension
- modules."""
- return isinstance(object, types.GetSetDescriptorType)
-else:
- # Other implementations
- def isgetsetdescriptor(object):
- """Return true if the object is a getset descriptor.
-
- getset descriptors are specialized descriptors defined in extension
- modules."""
- return False
-
-def isfunction(object):
- """Return true if the object is a user-defined function.
-
- Function objects provide these attributes:
- __doc__ documentation string
- __name__ name with which this function was defined
- func_code code object containing compiled function bytecode
- func_defaults tuple of any default values for arguments
- func_doc (same as __doc__)
- func_globals global namespace in which this function was defined
- func_name (same as __name__)"""
- return isinstance(object, types.FunctionType)
-
-def istraceback(object):
- """Return true if the object is a traceback.
-
- Traceback objects provide these attributes:
- tb_frame frame object at this level
- tb_lasti index of last attempted instruction in bytecode
- tb_lineno current line number in Python source code
- tb_next next inner traceback object (called by this level)"""
- return isinstance(object, types.TracebackType)
-
-def isframe(object):
- """Return true if the object is a frame object.
-
- Frame objects provide these attributes:
- f_back next outer frame object (this frame's caller)
- f_builtins built-in namespace seen by this frame
- f_code code object being executed in this frame
- f_exc_traceback traceback if raised in this frame, or None
- f_exc_type exception type if raised in this frame, or None
- f_exc_value exception value if raised in this frame, or None
- f_globals global namespace seen by this frame
- f_lasti index of last attempted instruction in bytecode
- f_lineno current line number in Python source code
- f_locals local namespace seen by this frame
- f_restricted 0 or 1 if frame is in restricted execution mode
- f_trace tracing function for this frame, or None"""
- return isinstance(object, types.FrameType)
-
-def iscode(object):
- """Return true if the object is a code object.
-
- Code objects provide these attributes:
- co_argcount number of arguments (not including * or ** args)
- co_code string of raw compiled bytecode
- co_consts tuple of constants used in the bytecode
- co_filename name of file in which this code object was created
- co_firstlineno number of first line in Python source code
- co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
- co_lnotab encoded mapping of line numbers to bytecode indices
- co_name name with which this code object was defined
- co_names tuple of names of local variables
- co_nlocals number of local variables
- co_stacksize virtual machine stack space required
- co_varnames tuple of names of arguments and local variables"""
- return isinstance(object, types.CodeType)
-
-def isbuiltin(object):
- """Return true if the object is a built-in function or method.
-
- Built-in functions and methods provide these attributes:
- __doc__ documentation string
- __name__ original name of this function or method
- __self__ instance to which a method is bound, or None"""
- return isinstance(object, types.BuiltinFunctionType)
-
-def isroutine(object):
- """Return true if the object is any kind of function or method."""
- return (isbuiltin(object)
- or isfunction(object)
- or ismethod(object)
- or ismethoddescriptor(object))
-
-def getmembers(object, predicate=None):
- """Return all members of an object as (name, value) pairs sorted by name.
- Optionally, only return members that satisfy a given predicate."""
- results = []
- for key in dir(object):
- value = getattr(object, key)
- if not predicate or predicate(value):
- results.append((key, value))
- results.sort()
- return results
-
-def classify_class_attrs(cls):
- """Return list of attribute-descriptor tuples.
-
- For each name in dir(cls), the return list contains a 4-tuple
- with these elements:
-
- 0. The name (a string).
-
- 1. The kind of attribute this is, one of these strings:
- 'class method' created via classmethod()
- 'static method' created via staticmethod()
- 'property' created via property()
- 'method' any other flavor of method
- 'data' not a method
-
- 2. The class which defined this attribute (a class).
-
- 3. The object as obtained directly from the defining class's
- __dict__, not via getattr. This is especially important for
- data attributes: C.data is just a data object, but
- C.__dict__['data'] may be a data descriptor with additional
- info, like a __doc__ string.
- """
-
- mro = getmro(cls)
- names = dir(cls)
- result = []
- for name in names:
- # Get the object associated with the name.
- # Getting an obj from the __dict__ sometimes reveals more than
- # using getattr. Static and class methods are dramatic examples.
- if name in cls.__dict__:
- obj = cls.__dict__[name]
- else:
- obj = getattr(cls, name)
-
- # Figure out where it was defined.
- homecls = getattr(obj, "__objclass__", None)
- if homecls is None:
- # search the dicts.
- for base in mro:
- if name in base.__dict__:
- homecls = base
- break
-
- # Get the object again, in order to get it from the defining
- # __dict__ instead of via getattr (if possible).
- if homecls is not None and name in homecls.__dict__:
- obj = homecls.__dict__[name]
-
- # Also get the object via getattr.
- obj_via_getattr = getattr(cls, name)
-
- # Classify the object.
- if isinstance(obj, staticmethod):
- kind = "static method"
- elif isinstance(obj, classmethod):
- kind = "class method"
- elif isinstance(obj, property):
- kind = "property"
- elif (ismethod(obj_via_getattr) or
- ismethoddescriptor(obj_via_getattr)):
- kind = "method"
- else:
- kind = "data"
-
- result.append((name, kind, homecls, obj))
-
- return result
-
-# ----------------------------------------------------------- class helpers
-def _searchbases(cls, accum):
- # Simulate the "classic class" search order.
- if cls in accum:
- return
- accum.append(cls)
- for base in cls.__bases__:
- _searchbases(base, accum)
-
-def getmro(cls):
- "Return tuple of base classes (including cls) in method resolution order."
- if hasattr(cls, "__mro__"):
- return cls.__mro__
- else:
- result = []
- _searchbases(cls, result)
- return tuple(result)
-
-# -------------------------------------------------- source code extraction
-def indentsize(line):
- """Return the indent size, in spaces, at the start of a line of text."""
- expline = string.expandtabs(line)
- return len(expline) - len(string.lstrip(expline))
-
-def getdoc(object):
- """Get the documentation string for an object.
-
- All tabs are expanded to spaces. To clean up docstrings that are
- indented to line up with blocks of code, any whitespace than can be
- uniformly removed from the second line onwards is removed."""
- try:
- doc = object.__doc__
- except AttributeError:
- return None
- if not isinstance(doc, types.StringTypes):
- return None
- try:
- lines = string.split(string.expandtabs(doc), '\n')
- except UnicodeError:
- return None
- else:
- # Find minimum indentation of any non-blank lines after first line.
- margin = sys.maxint
- for line in lines[1:]:
- content = len(string.lstrip(line))
- if content:
- indent = len(line) - content
- margin = min(margin, indent)
- # Remove indentation.
- if lines:
- lines[0] = lines[0].lstrip()
- if margin < sys.maxint:
- for i in range(1, len(lines)): lines[i] = lines[i][margin:]
- # Remove any trailing or leading blank lines.
- while lines and not lines[-1]:
- lines.pop()
- while lines and not lines[0]:
- lines.pop(0)
- return string.join(lines, '\n')
-
-def getfile(object):
- """Work out which source or compiled file an object was defined in."""
- if ismodule(object):
- if hasattr(object, '__file__'):
- return object.__file__
- raise TypeError('arg is a built-in module')
- if isclass(object):
- object = sys.modules.get(object.__module__)
- if hasattr(object, '__file__'):
- return object.__file__
- raise TypeError('arg is a built-in class')
- if ismethod(object):
- object = object.im_func
- if isfunction(object):
- object = object.func_code
- if istraceback(object):
- object = object.tb_frame
- if isframe(object):
- object = object.f_code
- if iscode(object):
- return object.co_filename
- raise TypeError('arg is not a module, class, method, '
- 'function, traceback, frame, or code object')
-
-def getmoduleinfo(path):
- """Get the module name, suffix, mode, and module type for a given file."""
- filename = os.path.basename(path)
- suffixes = map(lambda (suffix, mode, mtype):
- (-len(suffix), suffix, mode, mtype), imp.get_suffixes())
- suffixes.sort() # try longest suffixes first, in case they overlap
- for neglen, suffix, mode, mtype in suffixes:
- if filename[neglen:] == suffix:
- return filename[:neglen], suffix, mode, mtype
-
-def getmodulename(path):
- """Return the module name for a given file, or None."""
- info = getmoduleinfo(path)
- if info: return info[0]
-
-def getsourcefile(object):
- """Return the Python source file an object was defined in, if it exists."""
- filename = getfile(object)
- if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
- filename = filename[:-4] + '.py'
- for suffix, mode, kind in imp.get_suffixes():
- if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
- # Looks like a binary file. We want to only return a text file.
- return None
- if os.path.exists(filename):
- return filename
- # only return a non-existent filename if the module has a PEP 302 loader
- if hasattr(getmodule(object, filename), '__loader__'):
- return filename
-
-def getabsfile(object, _filename=None):
- """Return an absolute path to the source or compiled file for an object.
-
- The idea is for each object to have a unique origin, so this routine
- normalizes the result as much as possible."""
- if _filename is None:
- _filename = getsourcefile(object) or getfile(object)
- return os.path.normcase(os.path.abspath(_filename))
-
-modulesbyfile = {}
-_filesbymodname = {}
-
-def getmodule(object, _filename=None):
- """Return the module an object was defined in, or None if not found."""
- if ismodule(object):
- return object
- if hasattr(object, '__module__'):
- return sys.modules.get(object.__module__)
- # Try the filename to modulename cache
- if _filename is not None and _filename in modulesbyfile:
- return sys.modules.get(modulesbyfile[_filename])
- # Try the cache again with the absolute file name
- try:
- file = getabsfile(object, _filename)
- except TypeError:
- return None
- if file in modulesbyfile:
- return sys.modules.get(modulesbyfile[file])
- # Update the filename to module name cache and check yet again
- # Copy sys.modules in order to cope with changes while iterating
- for modname, module in sys.modules.items():
- if ismodule(module) and hasattr(module, '__file__'):
- f = module.__file__
- if f == _filesbymodname.get(modname, None):
- # Have already mapped this module, so skip it
- continue
- _filesbymodname[modname] = f
- f = getabsfile(module)
- # Always map to the name the module knows itself by
- modulesbyfile[f] = modulesbyfile[
- os.path.realpath(f)] = module.__name__
- if file in modulesbyfile:
- return sys.modules.get(modulesbyfile[file])
- # Check the main module
- main = sys.modules['__main__']
- if not hasattr(object, '__name__'):
- return None
- if hasattr(main, object.__name__):
- mainobject = getattr(main, object.__name__)
- if mainobject is object:
- return main
- # Check builtins
- builtin = sys.modules['__builtin__']
- if hasattr(builtin, object.__name__):
- builtinobject = getattr(builtin, object.__name__)
- if builtinobject is object:
- return builtin
-
-def findsource(object):
- """Return the entire source file and starting line number for an object.
-
- The argument may be a module, class, method, function, traceback, frame,
- or code object. The source code is returned as a list of all the lines
- in the file and the line number indexes a line in that list. An IOError
- is raised if the source code cannot be retrieved."""
- file = getsourcefile(object) or getfile(object)
- module = getmodule(object, file)
- if module:
- lines = linecache.getlines(file, module.__dict__)
- else:
- lines = linecache.getlines(file)
- if not lines:
- raise IOError('could not get source code')
-
- if ismodule(object):
- return lines, 0
-
- if isclass(object):
- name = object.__name__
- pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
- # make some effort to find the best matching class definition:
- # use the one with the least indentation, which is the one
- # that's most probably not inside a function definition.
- candidates = []
- for i in range(len(lines)):
- match = pat.match(lines[i])
- if match:
- # if it's at toplevel, it's already the best one
- if lines[i][0] == 'c':
- return lines, i
- # else add whitespace to candidate list
- candidates.append((match.group(1), i))
- if candidates:
- # this will sort by whitespace, and by line number,
- # less whitespace first
- candidates.sort()
- return lines, candidates[0][1]
- else:
- raise IOError('could not find class definition')
-
- if ismethod(object):
- object = object.im_func
- if isfunction(object):
- object = object.func_code
- if istraceback(object):
- object = object.tb_frame
- if isframe(object):
- object = object.f_code
- if iscode(object):
- if not hasattr(object, 'co_firstlineno'):
- raise IOError('could not find function definition')
- lnum = object.co_firstlineno - 1
- pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
- while lnum > 0:
- if pat.match(lines[lnum]): break
- lnum = lnum - 1
- return lines, lnum
- raise IOError('could not find code object')
-
-def getcomments(object):
- """Get lines of comments immediately preceding an object's source code.
-
- Returns None when source can't be found.
- """
- try:
- lines, lnum = findsource(object)
- except (IOError, TypeError):
- return None
-
- if ismodule(object):
- # Look for a comment block at the top of the file.
- start = 0
- if lines and lines[0][:2] == '#!': start = 1
- while start < len(lines) and string.strip(lines[start]) in ('', '#'):
- start = start + 1
- if start < len(lines) and lines[start][:1] == '#':
- comments = []
- end = start
- while end < len(lines) and lines[end][:1] == '#':
- comments.append(string.expandtabs(lines[end]))
- end = end + 1
- return string.join(comments, '')
-
- # Look for a preceding block of comments at the same indentation.
- elif lnum > 0:
- indent = indentsize(lines[lnum])
- end = lnum - 1
- if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
- indentsize(lines[end]) == indent:
- comments = [string.lstrip(string.expandtabs(lines[end]))]
- if end > 0:
- end = end - 1
- comment = string.lstrip(string.expandtabs(lines[end]))
- while comment[:1] == '#' and indentsize(lines[end]) == indent:
- comments[:0] = [comment]
- end = end - 1
- if end < 0: break
- comment = string.lstrip(string.expandtabs(lines[end]))
- while comments and string.strip(comments[0]) == '#':
- comments[:1] = []
- while comments and string.strip(comments[-1]) == '#':
- comments[-1:] = []
- return string.join(comments, '')
-
-class EndOfBlock(Exception): pass
-
-class BlockFinder:
- """Provide a tokeneater() method to detect the end of a code block."""
- def __init__(self):
- self.indent = 0
- self.islambda = False
- self.started = False
- self.passline = False
- self.last = 1
-
- def tokeneater(self, type, token, (srow, scol), (erow, ecol), line):
- if not self.started:
- # look for the first "def", "class" or "lambda"
- if token in ("def", "class", "lambda"):
- if token == "lambda":
- self.islambda = True
- self.started = True
- self.passline = True # skip to the end of the line
- elif type == tokenize.NEWLINE:
- self.passline = False # stop skipping when a NEWLINE is seen
- self.last = srow
- if self.islambda: # lambdas always end at the first NEWLINE
- raise EndOfBlock
- elif self.passline:
- pass
- elif type == tokenize.INDENT:
- self.indent = self.indent + 1
- self.passline = True
- elif type == tokenize.DEDENT:
- self.indent = self.indent - 1
- # the end of matching indent/dedent pairs end a block
- # (note that this only works for "def"/"class" blocks,
- # not e.g. for "if: else:" or "try: finally:" blocks)
- if self.indent <= 0:
- raise EndOfBlock
- elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
- # any other token on the same indentation level end the previous
- # block as well, except the pseudo-tokens COMMENT and NL.
- raise EndOfBlock
-
-def getblock(lines):
- """Extract the block of code at the top of the given list of lines."""
- blockfinder = BlockFinder()
- try:
- tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
- except (EndOfBlock, IndentationError):
- pass
- return lines[:blockfinder.last]
-
-def getsourcelines(object):
- """Return a list of source lines and starting line number for an object.
-
- The argument may be a module, class, method, function, traceback, frame,
- or code object. The source code is returned as a list of the lines
- corresponding to the object and the line number indicates where in the
- original source file the first line of code was found. An IOError is
- raised if the source code cannot be retrieved."""
- lines, lnum = findsource(object)
-
- if ismodule(object): return lines, 0
- else: return getblock(lines[lnum:]), lnum + 1
-
-def getsource(object):
- """Return the text of the source code for an object.
-
- The argument may be a module, class, method, function, traceback, frame,
- or code object. The source code is returned as a single string. An
- IOError is raised if the source code cannot be retrieved."""
- lines, lnum = getsourcelines(object)
- return string.join(lines, '')
-
-# --------------------------------------------------- class tree extraction
-def walktree(classes, children, parent):
- """Recursive helper function for getclasstree()."""
- results = []
- classes.sort(key=attrgetter('__module__', '__name__'))
- for c in classes:
- results.append((c, c.__bases__))
- if c in children:
- results.append(walktree(children[c], children, c))
- return results
-
-def getclasstree(classes, unique=0):
- """Arrange the given list of classes into a hierarchy of nested lists.
-
- Where a nested list appears, it contains classes derived from the class
- whose entry immediately precedes the list. Each entry is a 2-tuple
- containing a class and a tuple of its base classes. If the 'unique'
- argument is true, exactly one entry appears in the returned structure
- for each class in the given list. Otherwise, classes using multiple
- inheritance and their descendants will appear multiple times."""
- children = {}
- roots = []
- for c in classes:
- if c.__bases__:
- for parent in c.__bases__:
- if not parent in children:
- children[parent] = []
- children[parent].append(c)
- if unique and parent in classes: break
- elif c not in roots:
- roots.append(c)
- for parent in children:
- if parent not in classes:
- roots.append(parent)
- return walktree(roots, children, None)
-
-# ------------------------------------------------ argument list extraction
-# These constants are from Python's compile.h.
-CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
-
-def getargs(co):
- """Get information about the arguments accepted by a code object.
-
- Three things are returned: (args, varargs, varkw), where 'args' is
- a list of argument names (possibly containing nested lists), and
- 'varargs' and 'varkw' are the names of the * and ** arguments or None."""
-
- if not iscode(co):
- raise TypeError('arg is not a code object')
-
- code = co.co_code
- nargs = co.co_argcount
- names = co.co_varnames
- args = list(names[:nargs])
- step = 0
-
- # The following acrobatics are for anonymous (tuple) arguments.
- for i in range(nargs):
- if args[i][:1] in ('', '.'):
- stack, remain, count = [], [], []
- while step < len(code):
- op = ord(code[step])
- step = step + 1
- if op >= dis.HAVE_ARGUMENT:
- opname = dis.opname[op]
- value = ord(code[step]) + ord(code[step+1])*256
- step = step + 2
- if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
- remain.append(value)
- count.append(value)
- elif opname == 'STORE_FAST':
- stack.append(names[value])
-
- # Special case for sublists of length 1: def foo((bar))
- # doesn't generate the UNPACK_TUPLE bytecode, so if
- # `remain` is empty here, we have such a sublist.
- if not remain:
- stack[0] = [stack[0]]
- break
- else:
- remain[-1] = remain[-1] - 1
- while remain[-1] == 0:
- remain.pop()
- size = count.pop()
- stack[-size:] = [stack[-size:]]
- if not remain: break
- remain[-1] = remain[-1] - 1
- if not remain: break
- args[i] = stack[0]
-
- varargs = None
- if co.co_flags & CO_VARARGS:
- varargs = co.co_varnames[nargs]
- nargs = nargs + 1
- varkw = None
- if co.co_flags & CO_VARKEYWORDS:
- varkw = co.co_varnames[nargs]
- return args, varargs, varkw
-
-def getargspec(func):
- """Get the names and default values of a function's arguments.
-
- A tuple of four things is returned: (args, varargs, varkw, defaults).
- 'args' is a list of the argument names (it may contain nested lists).
- 'varargs' and 'varkw' are the names of the * and ** arguments or None.
- 'defaults' is an n-tuple of the default values of the last n arguments.
- """
-
- if ismethod(func):
- func = func.im_func
- if not isfunction(func):
- raise TypeError('arg is not a Python function')
- args, varargs, varkw = getargs(func.func_code)
- return args, varargs, varkw, func.func_defaults
-
-def getargvalues(frame):
- """Get information about arguments passed into a particular frame.
-
- A tuple of four things is returned: (args, varargs, varkw, locals).
- 'args' is a list of the argument names (it may contain nested lists).
- 'varargs' and 'varkw' are the names of the * and ** arguments or None.
- 'locals' is the locals dictionary of the given frame."""
- args, varargs, varkw = getargs(frame.f_code)
- return args, varargs, varkw, frame.f_locals
-
-def joinseq(seq):
- if len(seq) == 1:
- return '(' + seq[0] + ',)'
- else:
- return '(' + string.join(seq, ', ') + ')'
-
-def strseq(object, convert, join=joinseq):
- """Recursively walk a sequence, stringifying each element."""
- if type(object) in (list, tuple):
- return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
- else:
- return convert(object)
-
-def formatargspec(args, varargs=None, varkw=None, defaults=None,
- formatarg=str,
- formatvarargs=lambda name: '*' + name,
- formatvarkw=lambda name: '**' + name,
- formatvalue=lambda value: '=' + repr(value),
- join=joinseq):
- """Format an argument spec from the 4 values returned by getargspec.
-
- The first four arguments are (args, varargs, varkw, defaults). The
- other four arguments are the corresponding optional formatting functions
- that are called to turn names and values into strings. The ninth
- argument is an optional function to format the sequence of arguments."""
- specs = []
- if defaults:
- firstdefault = len(args) - len(defaults)
- for i in range(len(args)):
- spec = strseq(args[i], formatarg, join)
- if defaults and i >= firstdefault:
- spec = spec + formatvalue(defaults[i - firstdefault])
- specs.append(spec)
- if varargs is not None:
- specs.append(formatvarargs(varargs))
- if varkw is not None:
- specs.append(formatvarkw(varkw))
- return '(' + string.join(specs, ', ') + ')'
-
-def formatargvalues(args, varargs, varkw, locals,
- formatarg=str,
- formatvarargs=lambda name: '*' + name,
- formatvarkw=lambda name: '**' + name,
- formatvalue=lambda value: '=' + repr(value),
- join=joinseq):
- """Format an argument spec from the 4 values returned by getargvalues.
-
- The first four arguments are (args, varargs, varkw, locals). The
- next four arguments are the corresponding optional formatting functions
- that are called to turn names and values into strings. The ninth
- argument is an optional function to format the sequence of arguments."""
- def convert(name, locals=locals,
- formatarg=formatarg, formatvalue=formatvalue):
- return formatarg(name) + formatvalue(locals[name])
- specs = []
- for i in range(len(args)):
- specs.append(strseq(args[i], convert, join))
- if varargs:
- specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
- if varkw:
- specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
- return '(' + string.join(specs, ', ') + ')'
-
-# -------------------------------------------------- stack frame extraction
-def getframeinfo(frame, context=1):
- """Get information about a frame or traceback object.
-
- A tuple of five things is returned: the filename, the line number of
- the current line, the function name, a list of lines of context from
- the source code, and the index of the current line within that list.
- The optional second argument specifies the number of lines of context
- to return, which are centered around the current line."""
- if istraceback(frame):
- lineno = frame.tb_lineno
- frame = frame.tb_frame
- else:
- lineno = frame.f_lineno
- if not isframe(frame):
- raise TypeError('arg is not a frame or traceback object')
-
- filename = getsourcefile(frame) or getfile(frame)
- if context > 0:
- start = lineno - 1 - context//2
- try:
- lines, lnum = findsource(frame)
- except IOError:
- lines = index = None
- else:
- start = max(start, 1)
- start = max(0, min(start, len(lines) - context))
- lines = lines[start:start+context]
- index = lineno - 1 - start
- else:
- lines = index = None
-
- return (filename, lineno, frame.f_code.co_name, lines, index)
-
-def getlineno(frame):
- """Get the line number from a frame object, allowing for optimization."""
- # FrameType.f_lineno is now a descriptor that grovels co_lnotab
- return frame.f_lineno
-
-def getouterframes(frame, context=1):
- """Get a list of records for a frame and all higher (calling) frames.
-
- Each record contains a frame object, filename, line number, function
- name, a list of lines of context, and index within the context."""
- framelist = []
- while frame:
- framelist.append((frame,) + getframeinfo(frame, context))
- frame = frame.f_back
- return framelist
-
-def getinnerframes(tb, context=1):
- """Get a list of records for a traceback's frame and all lower frames.
-
- Each record contains a frame object, filename, line number, function
- name, a list of lines of context, and index within the context."""
- framelist = []
- while tb:
- framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
- tb = tb.tb_next
- return framelist
-
-currentframe = sys._getframe
-
-def stack(context=1):
- """Return a list of records for the stack above the caller's frame."""
- return getouterframes(sys._getframe(1), context)
-
-def trace(context=1):
- """Return a list of records for the stack below the current exception."""
- return getinnerframes(sys.exc_info()[2], context)
diff --git a/sys/lib/python/keyword.py b/sys/lib/python/keyword.py
deleted file mode 100755
index cd1d55e8a..000000000
--- a/sys/lib/python/keyword.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#! /usr/bin/env python
-
-"""Keywords (from "graminit.c")
-
-This file is automatically generated; please don't muck it up!
-
-To update the symbols in this file, 'cd' to the top directory of
-the python source tree after building the interpreter and run:
-
- python Lib/keyword.py
-"""
-
-__all__ = ["iskeyword", "kwlist"]
-
-kwlist = [
-#--start keywords--
- 'and',
- 'as',
- 'assert',
- 'break',
- 'class',
- 'continue',
- 'def',
- 'del',
- 'elif',
- 'else',
- 'except',
- 'exec',
- 'finally',
- 'for',
- 'from',
- 'global',
- 'if',
- 'import',
- 'in',
- 'is',
- 'lambda',
- 'not',
- 'or',
- 'pass',
- 'print',
- 'raise',
- 'return',
- 'try',
- 'while',
- 'with',
- 'yield',
-#--end keywords--
- ]
-
-iskeyword = frozenset(kwlist).__contains__
-
-def main():
- import sys, re
-
- args = sys.argv[1:]
- iptfile = args and args[0] or "Python/graminit.c"
- if len(args) > 1: optfile = args[1]
- else: optfile = "Lib/keyword.py"
-
- # scan the source file for keywords
- fp = open(iptfile)
- strprog = re.compile('"([^"]+)"')
- lines = []
- while 1:
- line = fp.readline()
- if not line: break
- if '{1, "' in line:
- match = strprog.search(line)
- if match:
- lines.append(" '" + match.group(1) + "',\n")
- fp.close()
- lines.sort()
-
- # load the output skeleton from the target
- fp = open(optfile)
- format = fp.readlines()
- fp.close()
-
- # insert the lines of keywords
- try:
- start = format.index("#--start keywords--\n") + 1
- end = format.index("#--end keywords--\n")
- format[start:end] = lines
- except ValueError:
- sys.stderr.write("target does not contain format markers\n")
- sys.exit(1)
-
- # write the output file
- fp = open(optfile, 'w')
- fp.write(''.join(format))
- fp.close()
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/lib-tk/Canvas.py b/sys/lib/python/lib-tk/Canvas.py
deleted file mode 100644
index 83d7bba74..000000000
--- a/sys/lib/python/lib-tk/Canvas.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# This module exports classes for the various canvas item types
-
-# NOTE: This module was an experiment and is now obsolete.
-# It's best to use the Tkinter.Canvas class directly.
-
-from Tkinter import Canvas, _cnfmerge, _flatten
-
-
-class CanvasItem:
- def __init__(self, canvas, itemType, *args, **kw):
- self.canvas = canvas
- self.id = canvas._create(itemType, args, kw)
- if not hasattr(canvas, 'items'):
- canvas.items = {}
- canvas.items[self.id] = self
- def __str__(self):
- return str(self.id)
- def __repr__(self):
- return '<%s, id=%d>' % (self.__class__.__name__, self.id)
- def delete(self):
- del self.canvas.items[self.id]
- self.canvas.delete(self.id)
- def __getitem__(self, key):
- v = self.canvas.tk.split(self.canvas.tk.call(
- self.canvas._w, 'itemconfigure',
- self.id, '-' + key))
- return v[4]
- cget = __getitem__
- def __setitem__(self, key, value):
- self.canvas.itemconfig(self.id, {key: value})
- def keys(self):
- if not hasattr(self, '_keys'):
- self._keys = map(lambda x, tk=self.canvas.tk:
- tk.splitlist(x)[0][1:],
- self.canvas.tk.splitlist(
- self.canvas._do(
- 'itemconfigure',
- (self.id,))))
- return self._keys
- def has_key(self, key):
- return key in self.keys()
- def __contains__(self, key):
- return key in self.keys()
- def addtag(self, tag, option='withtag'):
- self.canvas.addtag(tag, option, self.id)
- def bbox(self):
- x1, y1, x2, y2 = self.canvas.bbox(self.id)
- return (x1, y1), (x2, y2)
- def bind(self, sequence=None, command=None, add=None):
- return self.canvas.tag_bind(self.id, sequence, command, add)
- def unbind(self, sequence, funcid=None):
- self.canvas.tag_unbind(self.id, sequence, funcid)
- def config(self, cnf={}, **kw):
- return self.canvas.itemconfig(self.id, _cnfmerge((cnf, kw)))
- def coords(self, pts = ()):
- flat = ()
- for x, y in pts: flat = flat + (x, y)
- return self.canvas.coords(self.id, *flat)
- def dchars(self, first, last=None):
- self.canvas.dchars(self.id, first, last)
- def dtag(self, ttd):
- self.canvas.dtag(self.id, ttd)
- def focus(self):
- self.canvas.focus(self.id)
- def gettags(self):
- return self.canvas.gettags(self.id)
- def icursor(self, index):
- self.canvas.icursor(self.id, index)
- def index(self, index):
- return self.canvas.index(self.id, index)
- def insert(self, beforethis, string):
- self.canvas.insert(self.id, beforethis, string)
- def lower(self, belowthis=None):
- self.canvas.tag_lower(self.id, belowthis)
- def move(self, xamount, yamount):
- self.canvas.move(self.id, xamount, yamount)
- def tkraise(self, abovethis=None):
- self.canvas.tag_raise(self.id, abovethis)
- raise_ = tkraise # BW compat
- def scale(self, xorigin, yorigin, xscale, yscale):
- self.canvas.scale(self.id, xorigin, yorigin, xscale, yscale)
- def type(self):
- return self.canvas.type(self.id)
-
-class Arc(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'arc', *args, **kw)
-
-class Bitmap(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'bitmap', *args, **kw)
-
-class ImageItem(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'image', *args, **kw)
-
-class Line(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'line', *args, **kw)
-
-class Oval(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'oval', *args, **kw)
-
-class Polygon(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'polygon', *args, **kw)
-
-class Rectangle(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'rectangle', *args, **kw)
-
-# XXX "Text" is taken by the Text widget...
-class CanvasText(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'text', *args, **kw)
-
-class Window(CanvasItem):
- def __init__(self, canvas, *args, **kw):
- CanvasItem.__init__(self, canvas, 'window', *args, **kw)
-
-class Group:
- def __init__(self, canvas, tag=None):
- if not tag:
- tag = 'Group%d' % id(self)
- self.tag = self.id = tag
- self.canvas = canvas
- self.canvas.dtag(self.tag)
- def str(self):
- return self.tag
- __str__ = str
- def _do(self, cmd, *args):
- return self.canvas._do(cmd, (self.tag,) + _flatten(args))
- def addtag_above(self, tagOrId):
- self._do('addtag', 'above', tagOrId)
- def addtag_all(self):
- self._do('addtag', 'all')
- def addtag_below(self, tagOrId):
- self._do('addtag', 'below', tagOrId)
- def addtag_closest(self, x, y, halo=None, start=None):
- self._do('addtag', 'closest', x, y, halo, start)
- def addtag_enclosed(self, x1, y1, x2, y2):
- self._do('addtag', 'enclosed', x1, y1, x2, y2)
- def addtag_overlapping(self, x1, y1, x2, y2):
- self._do('addtag', 'overlapping', x1, y1, x2, y2)
- def addtag_withtag(self, tagOrId):
- self._do('addtag', 'withtag', tagOrId)
- def bbox(self):
- return self.canvas._getints(self._do('bbox'))
- def bind(self, sequence=None, command=None, add=None):
- return self.canvas.tag_bind(self.id, sequence, command, add)
- def unbind(self, sequence, funcid=None):
- self.canvas.tag_unbind(self.id, sequence, funcid)
- def coords(self, *pts):
- return self._do('coords', pts)
- def dchars(self, first, last=None):
- self._do('dchars', first, last)
- def delete(self):
- self._do('delete')
- def dtag(self, tagToDelete=None):
- self._do('dtag', tagToDelete)
- def focus(self):
- self._do('focus')
- def gettags(self):
- return self.canvas.tk.splitlist(self._do('gettags', self.tag))
- def icursor(self, index):
- return self._do('icursor', index)
- def index(self, index):
- return self.canvas.tk.getint(self._do('index', index))
- def insert(self, beforeThis, string):
- self._do('insert', beforeThis, string)
- def config(self, cnf={}, **kw):
- return self.canvas.itemconfigure(self.tag, _cnfmerge((cnf,kw)))
- def lower(self, belowThis=None):
- self._do('lower', belowThis)
- def move(self, xAmount, yAmount):
- self._do('move', xAmount, yAmount)
- def tkraise(self, aboveThis=None):
- self._do('raise', aboveThis)
- lift = tkraise
- def scale(self, xOrigin, yOrigin, xScale, yScale):
- self._do('scale', xOrigin, yOrigin, xScale, yScale)
- def select_adjust(self, index):
- self.canvas._do('select', ('adjust', self.tag, index))
- def select_from(self, index):
- self.canvas._do('select', ('from', self.tag, index))
- def select_to(self, index):
- self.canvas._do('select', ('to', self.tag, index))
- def type(self):
- return self._do('type')
diff --git a/sys/lib/python/lib-tk/Dialog.py b/sys/lib/python/lib-tk/Dialog.py
deleted file mode 100644
index b52e5b49d..000000000
--- a/sys/lib/python/lib-tk/Dialog.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Dialog.py -- Tkinter interface to the tk_dialog script.
-
-from Tkinter import *
-from Tkinter import _cnfmerge
-
-if TkVersion <= 3.6:
- DIALOG_ICON = 'warning'
-else:
- DIALOG_ICON = 'questhead'
-
-
-class Dialog(Widget):
- def __init__(self, master=None, cnf={}, **kw):
- cnf = _cnfmerge((cnf, kw))
- self.widgetName = '__dialog__'
- Widget._setup(self, master, cnf)
- self.num = self.tk.getint(
- self.tk.call(
- 'tk_dialog', self._w,
- cnf['title'], cnf['text'],
- cnf['bitmap'], cnf['default'],
- *cnf['strings']))
- try: Widget.destroy(self)
- except TclError: pass
- def destroy(self): pass
-
-def _test():
- d = Dialog(None, {'title': 'File Modified',
- 'text':
- 'File "Python.h" has been modified'
- ' since the last time it was saved.'
- ' Do you want to save it before'
- ' exiting the application.',
- 'bitmap': DIALOG_ICON,
- 'default': 0,
- 'strings': ('Save File',
- 'Discard Changes',
- 'Return to Editor')})
- print d.num
-
-
-if __name__ == '__main__':
- t = Button(None, {'text': 'Test',
- 'command': _test,
- Pack: {}})
- q = Button(None, {'text': 'Quit',
- 'command': t.quit,
- Pack: {}})
- t.mainloop()
diff --git a/sys/lib/python/lib-tk/FileDialog.py b/sys/lib/python/lib-tk/FileDialog.py
deleted file mode 100644
index b08d3a84f..000000000
--- a/sys/lib/python/lib-tk/FileDialog.py
+++ /dev/null
@@ -1,274 +0,0 @@
-"""File selection dialog classes.
-
-Classes:
-
-- FileDialog
-- LoadFileDialog
-- SaveFileDialog
-
-"""
-
-from Tkinter import *
-from Dialog import Dialog
-
-import os
-import fnmatch
-
-
-dialogstates = {}
-
-
-class FileDialog:
-
- """Standard file selection dialog -- no checks on selected file.
-
- Usage:
-
- d = FileDialog(master)
- fname = d.go(dir_or_file, pattern, default, key)
- if fname is None: ...canceled...
- else: ...open file...
-
- All arguments to go() are optional.
-
- The 'key' argument specifies a key in the global dictionary
- 'dialogstates', which keeps track of the values for the directory
- and pattern arguments, overriding the values passed in (it does
- not keep track of the default argument!). If no key is specified,
- the dialog keeps no memory of previous state. Note that memory is
- kept even when the dialog is canceled. (All this emulates the
- behavior of the Macintosh file selection dialogs.)
-
- """
-
- title = "File Selection Dialog"
-
- def __init__(self, master, title=None):
- if title is None: title = self.title
- self.master = master
- self.directory = None
-
- self.top = Toplevel(master)
- self.top.title(title)
- self.top.iconname(title)
-
- self.botframe = Frame(self.top)
- self.botframe.pack(side=BOTTOM, fill=X)
-
- self.selection = Entry(self.top)
- self.selection.pack(side=BOTTOM, fill=X)
- self.selection.bind('<Return>', self.ok_event)
-
- self.filter = Entry(self.top)
- self.filter.pack(side=TOP, fill=X)
- self.filter.bind('<Return>', self.filter_command)
-
- self.midframe = Frame(self.top)
- self.midframe.pack(expand=YES, fill=BOTH)
-
- self.filesbar = Scrollbar(self.midframe)
- self.filesbar.pack(side=RIGHT, fill=Y)
- self.files = Listbox(self.midframe, exportselection=0,
- yscrollcommand=(self.filesbar, 'set'))
- self.files.pack(side=RIGHT, expand=YES, fill=BOTH)
- btags = self.files.bindtags()
- self.files.bindtags(btags[1:] + btags[:1])
- self.files.bind('<ButtonRelease-1>', self.files_select_event)
- self.files.bind('<Double-ButtonRelease-1>', self.files_double_event)
- self.filesbar.config(command=(self.files, 'yview'))
-
- self.dirsbar = Scrollbar(self.midframe)
- self.dirsbar.pack(side=LEFT, fill=Y)
- self.dirs = Listbox(self.midframe, exportselection=0,
- yscrollcommand=(self.dirsbar, 'set'))
- self.dirs.pack(side=LEFT, expand=YES, fill=BOTH)
- self.dirsbar.config(command=(self.dirs, 'yview'))
- btags = self.dirs.bindtags()
- self.dirs.bindtags(btags[1:] + btags[:1])
- self.dirs.bind('<ButtonRelease-1>', self.dirs_select_event)
- self.dirs.bind('<Double-ButtonRelease-1>', self.dirs_double_event)
-
- self.ok_button = Button(self.botframe,
- text="OK",
- command=self.ok_command)
- self.ok_button.pack(side=LEFT)
- self.filter_button = Button(self.botframe,
- text="Filter",
- command=self.filter_command)
- self.filter_button.pack(side=LEFT, expand=YES)
- self.cancel_button = Button(self.botframe,
- text="Cancel",
- command=self.cancel_command)
- self.cancel_button.pack(side=RIGHT)
-
- self.top.protocol('WM_DELETE_WINDOW', self.cancel_command)
- # XXX Are the following okay for a general audience?
- self.top.bind('<Alt-w>', self.cancel_command)
- self.top.bind('<Alt-W>', self.cancel_command)
-
- def go(self, dir_or_file=os.curdir, pattern="*", default="", key=None):
- if key and dialogstates.has_key(key):
- self.directory, pattern = dialogstates[key]
- else:
- dir_or_file = os.path.expanduser(dir_or_file)
- if os.path.isdir(dir_or_file):
- self.directory = dir_or_file
- else:
- self.directory, default = os.path.split(dir_or_file)
- self.set_filter(self.directory, pattern)
- self.set_selection(default)
- self.filter_command()
- self.selection.focus_set()
- self.top.wait_visibility() # window needs to be visible for the grab
- self.top.grab_set()
- self.how = None
- self.master.mainloop() # Exited by self.quit(how)
- if key:
- directory, pattern = self.get_filter()
- if self.how:
- directory = os.path.dirname(self.how)
- dialogstates[key] = directory, pattern
- self.top.destroy()
- return self.how
-
- def quit(self, how=None):
- self.how = how
- self.master.quit() # Exit mainloop()
-
- def dirs_double_event(self, event):
- self.filter_command()
-
- def dirs_select_event(self, event):
- dir, pat = self.get_filter()
- subdir = self.dirs.get('active')
- dir = os.path.normpath(os.path.join(self.directory, subdir))
- self.set_filter(dir, pat)
-
- def files_double_event(self, event):
- self.ok_command()
-
- def files_select_event(self, event):
- file = self.files.get('active')
- self.set_selection(file)
-
- def ok_event(self, event):
- self.ok_command()
-
- def ok_command(self):
- self.quit(self.get_selection())
-
- def filter_command(self, event=None):
- dir, pat = self.get_filter()
- try:
- names = os.listdir(dir)
- except os.error:
- self.master.bell()
- return
- self.directory = dir
- self.set_filter(dir, pat)
- names.sort()
- subdirs = [os.pardir]
- matchingfiles = []
- for name in names:
- fullname = os.path.join(dir, name)
- if os.path.isdir(fullname):
- subdirs.append(name)
- elif fnmatch.fnmatch(name, pat):
- matchingfiles.append(name)
- self.dirs.delete(0, END)
- for name in subdirs:
- self.dirs.insert(END, name)
- self.files.delete(0, END)
- for name in matchingfiles:
- self.files.insert(END, name)
- head, tail = os.path.split(self.get_selection())
- if tail == os.curdir: tail = ''
- self.set_selection(tail)
-
- def get_filter(self):
- filter = self.filter.get()
- filter = os.path.expanduser(filter)
- if filter[-1:] == os.sep or os.path.isdir(filter):
- filter = os.path.join(filter, "*")
- return os.path.split(filter)
-
- def get_selection(self):
- file = self.selection.get()
- file = os.path.expanduser(file)
- return file
-
- def cancel_command(self, event=None):
- self.quit()
-
- def set_filter(self, dir, pat):
- if not os.path.isabs(dir):
- try:
- pwd = os.getcwd()
- except os.error:
- pwd = None
- if pwd:
- dir = os.path.join(pwd, dir)
- dir = os.path.normpath(dir)
- self.filter.delete(0, END)
- self.filter.insert(END, os.path.join(dir or os.curdir, pat or "*"))
-
- def set_selection(self, file):
- self.selection.delete(0, END)
- self.selection.insert(END, os.path.join(self.directory, file))
-
-
-class LoadFileDialog(FileDialog):
-
- """File selection dialog which checks that the file exists."""
-
- title = "Load File Selection Dialog"
-
- def ok_command(self):
- file = self.get_selection()
- if not os.path.isfile(file):
- self.master.bell()
- else:
- self.quit(file)
-
-
-class SaveFileDialog(FileDialog):
-
- """File selection dialog which checks that the file may be created."""
-
- title = "Save File Selection Dialog"
-
- def ok_command(self):
- file = self.get_selection()
- if os.path.exists(file):
- if os.path.isdir(file):
- self.master.bell()
- return
- d = Dialog(self.top,
- title="Overwrite Existing File Question",
- text="Overwrite existing file %r?" % (file,),
- bitmap='questhead',
- default=1,
- strings=("Yes", "Cancel"))
- if d.num != 0:
- return
- else:
- head, tail = os.path.split(file)
- if not os.path.isdir(head):
- self.master.bell()
- return
- self.quit(file)
-
-
-def test():
- """Simple test program."""
- root = Tk()
- root.withdraw()
- fd = LoadFileDialog(root)
- loadfile = fd.go(key="test")
- fd = SaveFileDialog(root)
- savefile = fd.go(key="test")
- print loadfile, savefile
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/lib-tk/FixTk.py b/sys/lib/python/lib-tk/FixTk.py
deleted file mode 100644
index a7801a250..000000000
--- a/sys/lib/python/lib-tk/FixTk.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import sys, os
-
-# Delay import _tkinter until we have set TCL_LIBRARY,
-# so that Tcl_FindExecutable has a chance to locate its
-# encoding directory.
-
-# Unfortunately, we cannot know the TCL_LIBRARY directory
-# if we don't know the tcl version, which we cannot find out
-# without import Tcl. Fortunately, Tcl will itself look in
-# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
-# the real Tcl library will do.
-
-prefix = os.path.join(sys.prefix,"tcl")
-# if this does not exist, no further search is needed
-if os.path.exists(prefix):
- if not os.environ.has_key("TCL_LIBRARY"):
- for name in os.listdir(prefix):
- if name.startswith("tcl"):
- tcldir = os.path.join(prefix,name)
- if os.path.isdir(tcldir):
- os.environ["TCL_LIBRARY"] = tcldir
- # Compute TK_LIBRARY, knowing that it has the same version
- # as Tcl
- import _tkinter
- ver = str(_tkinter.TCL_VERSION)
- if not os.environ.has_key("TK_LIBRARY"):
- v = os.path.join(prefix, 'tk'+ver)
- if os.path.exists(os.path.join(v, "tclIndex")):
- os.environ['TK_LIBRARY'] = v
- # We don't know the Tix version, so we must search the entire
- # directory
- if not os.environ.has_key("TIX_LIBRARY"):
- for name in os.listdir(prefix):
- if name.startswith("tix"):
- tixdir = os.path.join(prefix,name)
- if os.path.isdir(tixdir):
- os.environ["TIX_LIBRARY"] = tixdir
diff --git a/sys/lib/python/lib-tk/ScrolledText.py b/sys/lib/python/lib-tk/ScrolledText.py
deleted file mode 100644
index 367aa89ca..000000000
--- a/sys/lib/python/lib-tk/ScrolledText.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# A ScrolledText widget feels like a text widget but also has a
-# vertical scroll bar on its right. (Later, options may be added to
-# add a horizontal bar as well, to make the bars disappear
-# automatically when not needed, to move them to the other side of the
-# window, etc.)
-#
-# Configuration options are passed to the Text widget.
-# A Frame widget is inserted between the master and the text, to hold
-# the Scrollbar widget.
-# Most methods calls are inherited from the Text widget; Pack methods
-# are redirected to the Frame widget however.
-
-from Tkinter import *
-from Tkinter import _cnfmerge
-
-class ScrolledText(Text):
- def __init__(self, master=None, cnf=None, **kw):
- if cnf is None:
- cnf = {}
- if kw:
- cnf = _cnfmerge((cnf, kw))
- fcnf = {}
- for k in cnf.keys():
- if type(k) == ClassType or k == 'name':
- fcnf[k] = cnf[k]
- del cnf[k]
- self.frame = Frame(master, **fcnf)
- self.vbar = Scrollbar(self.frame, name='vbar')
- self.vbar.pack(side=RIGHT, fill=Y)
- cnf['name'] = 'text'
- Text.__init__(self, self.frame, **cnf)
- self.pack(side=LEFT, fill=BOTH, expand=1)
- self['yscrollcommand'] = self.vbar.set
- self.vbar['command'] = self.yview
-
- # Copy geometry methods of self.frame -- hack!
- methods = Pack.__dict__.keys()
- methods = methods + Grid.__dict__.keys()
- methods = methods + Place.__dict__.keys()
-
- for m in methods:
- if m[0] != '_' and m != 'config' and m != 'configure':
- setattr(self, m, getattr(self.frame, m))
diff --git a/sys/lib/python/lib-tk/SimpleDialog.py b/sys/lib/python/lib-tk/SimpleDialog.py
deleted file mode 100644
index cb08318db..000000000
--- a/sys/lib/python/lib-tk/SimpleDialog.py
+++ /dev/null
@@ -1,112 +0,0 @@
-"""A simple but flexible modal dialog box."""
-
-
-from Tkinter import *
-
-
-class SimpleDialog:
-
- def __init__(self, master,
- text='', buttons=[], default=None, cancel=None,
- title=None, class_=None):
- if class_:
- self.root = Toplevel(master, class_=class_)
- else:
- self.root = Toplevel(master)
- if title:
- self.root.title(title)
- self.root.iconname(title)
- self.message = Message(self.root, text=text, aspect=400)
- self.message.pack(expand=1, fill=BOTH)
- self.frame = Frame(self.root)
- self.frame.pack()
- self.num = default
- self.cancel = cancel
- self.default = default
- self.root.bind('<Return>', self.return_event)
- for num in range(len(buttons)):
- s = buttons[num]
- b = Button(self.frame, text=s,
- command=(lambda self=self, num=num: self.done(num)))
- if num == default:
- b.config(relief=RIDGE, borderwidth=8)
- b.pack(side=LEFT, fill=BOTH, expand=1)
- self.root.protocol('WM_DELETE_WINDOW', self.wm_delete_window)
- self._set_transient(master)
-
- def _set_transient(self, master, relx=0.5, rely=0.3):
- widget = self.root
- widget.withdraw() # Remain invisible while we figure out the geometry
- widget.transient(master)
- widget.update_idletasks() # Actualize geometry information
- if master.winfo_ismapped():
- m_width = master.winfo_width()
- m_height = master.winfo_height()
- m_x = master.winfo_rootx()
- m_y = master.winfo_rooty()
- else:
- m_width = master.winfo_screenwidth()
- m_height = master.winfo_screenheight()
- m_x = m_y = 0
- w_width = widget.winfo_reqwidth()
- w_height = widget.winfo_reqheight()
- x = m_x + (m_width - w_width) * relx
- y = m_y + (m_height - w_height) * rely
- if x+w_width > master.winfo_screenwidth():
- x = master.winfo_screenwidth() - w_width
- elif x < 0:
- x = 0
- if y+w_height > master.winfo_screenheight():
- y = master.winfo_screenheight() - w_height
- elif y < 0:
- y = 0
- widget.geometry("+%d+%d" % (x, y))
- widget.deiconify() # Become visible at the desired location
-
- def go(self):
- self.root.wait_visibility()
- self.root.grab_set()
- self.root.mainloop()
- self.root.destroy()
- return self.num
-
- def return_event(self, event):
- if self.default is None:
- self.root.bell()
- else:
- self.done(self.default)
-
- def wm_delete_window(self):
- if self.cancel is None:
- self.root.bell()
- else:
- self.done(self.cancel)
-
- def done(self, num):
- self.num = num
- self.root.quit()
-
-
-if __name__ == '__main__':
-
- def test():
- root = Tk()
- def doit(root=root):
- d = SimpleDialog(root,
- text="This is a test dialog. "
- "Would this have been an actual dialog, "
- "the buttons below would have been glowing "
- "in soft pink light.\n"
- "Do you believe this?",
- buttons=["Yes", "No", "Cancel"],
- default=0,
- cancel=2,
- title="Test Dialog")
- print d.go()
- t = Button(root, text='Test', command=doit)
- t.pack()
- q = Button(root, text='Quit', command=t.quit)
- q.pack()
- t.mainloop()
-
- test()
diff --git a/sys/lib/python/lib-tk/Tix.py b/sys/lib/python/lib-tk/Tix.py
deleted file mode 100755
index 347037903..000000000
--- a/sys/lib/python/lib-tk/Tix.py
+++ /dev/null
@@ -1,1891 +0,0 @@
-# -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
-#
-# $Id: Tix.py 52785 2006-11-18 18:42:22Z martin.v.loewis $
-#
-# Tix.py -- Tix widget wrappers.
-#
-# For Tix, see http://tix.sourceforge.net
-#
-# - Sudhir Shenoy (sshenoy@gol.com), Dec. 1995.
-# based on an idea of Jean-Marc Lugrin (lugrin@ms.com)
-#
-# NOTE: In order to minimize changes to Tkinter.py, some of the code here
-# (TixWidget.__init__) has been taken from Tkinter (Widget.__init__)
-# and will break if there are major changes in Tkinter.
-#
-# The Tix widgets are represented by a class hierarchy in python with proper
-# inheritance of base classes.
-#
-# As a result after creating a 'w = StdButtonBox', I can write
-# w.ok['text'] = 'Who Cares'
-# or w.ok['bg'] = w['bg']
-# or even w.ok.invoke()
-# etc.
-#
-# Compare the demo tixwidgets.py to the original Tcl program and you will
-# appreciate the advantages.
-#
-
-from Tkinter import *
-from Tkinter import _flatten, _cnfmerge, _default_root
-
-# WARNING - TkVersion is a limited precision floating point number
-if TkVersion < 3.999:
- raise ImportError, "This version of Tix.py requires Tk 4.0 or higher"
-
-import _tkinter # If this fails your Python may not be configured for Tk
-
-# Some more constants (for consistency with Tkinter)
-WINDOW = 'window'
-TEXT = 'text'
-STATUS = 'status'
-IMMEDIATE = 'immediate'
-IMAGE = 'image'
-IMAGETEXT = 'imagetext'
-BALLOON = 'balloon'
-AUTO = 'auto'
-ACROSSTOP = 'acrosstop'
-
-# Some constants used by Tkinter dooneevent()
-TCL_DONT_WAIT = 1 << 1
-TCL_WINDOW_EVENTS = 1 << 2
-TCL_FILE_EVENTS = 1 << 3
-TCL_TIMER_EVENTS = 1 << 4
-TCL_IDLE_EVENTS = 1 << 5
-TCL_ALL_EVENTS = 0
-
-# BEWARE - this is implemented by copying some code from the Widget class
-# in Tkinter (to override Widget initialization) and is therefore
-# liable to break.
-import Tkinter, os
-
-# Could probably add this to Tkinter.Misc
-class tixCommand:
- """The tix commands provide access to miscellaneous elements
- of Tix's internal state and the Tix application context.
- Most of the information manipulated by these commands pertains
- to the application as a whole, or to a screen or
- display, rather than to a particular window.
-
- This is a mixin class, assumed to be mixed to Tkinter.Tk
- that supports the self.tk.call method.
- """
-
- def tix_addbitmapdir(self, directory):
- """Tix maintains a list of directories under which
- the tix_getimage and tix_getbitmap commands will
- search for image files. The standard bitmap directory
- is $TIX_LIBRARY/bitmaps. The addbitmapdir command
- adds directory into this list. By using this
- command, the image files of an applications can
- also be located using the tix_getimage or tix_getbitmap
- command.
- """
- return self.tk.call('tix', 'addbitmapdir', directory)
-
- def tix_cget(self, option):
- """Returns the current value of the configuration
- option given by option. Option may be any of the
- options described in the CONFIGURATION OPTIONS section.
- """
- return self.tk.call('tix', 'cget', option)
-
- def tix_configure(self, cnf=None, **kw):
- """Query or modify the configuration options of the Tix application
- context. If no option is specified, returns a dictionary all of the
- available options. If option is specified with no value, then the
- command returns a list describing the one named option (this list
- will be identical to the corresponding sublist of the value
- returned if no option is specified). If one or more option-value
- pairs are specified, then the command modifies the given option(s)
- to have the given value(s); in this case the command returns an
- empty string. Option may be any of the configuration options.
- """
- # Copied from Tkinter.py
- if kw:
- cnf = _cnfmerge((cnf, kw))
- elif cnf:
- cnf = _cnfmerge(cnf)
- if cnf is None:
- cnf = {}
- for x in self.tk.split(self.tk.call('tix', 'configure')):
- cnf[x[0][1:]] = (x[0][1:],) + x[1:]
- return cnf
- if isinstance(cnf, StringType):
- x = self.tk.split(self.tk.call('tix', 'configure', '-'+cnf))
- return (x[0][1:],) + x[1:]
- return self.tk.call(('tix', 'configure') + self._options(cnf))
-
- def tix_filedialog(self, dlgclass=None):
- """Returns the file selection dialog that may be shared among
- different calls from this application. This command will create a
- file selection dialog widget when it is called the first time. This
- dialog will be returned by all subsequent calls to tix_filedialog.
- An optional dlgclass parameter can be passed to specified what type
- of file selection dialog widget is desired. Possible options are
- tix FileSelectDialog or tixExFileSelectDialog.
- """
- if dlgclass is not None:
- return self.tk.call('tix', 'filedialog', dlgclass)
- else:
- return self.tk.call('tix', 'filedialog')
-
- def tix_getbitmap(self, name):
- """Locates a bitmap file of the name name.xpm or name in one of the
- bitmap directories (see the tix_addbitmapdir command above). By
- using tix_getbitmap, you can avoid hard coding the pathnames of the
- bitmap files in your application. When successful, it returns the
- complete pathname of the bitmap file, prefixed with the character
- '@'. The returned value can be used to configure the -bitmap
- option of the TK and Tix widgets.
- """
- return self.tk.call('tix', 'getbitmap', name)
-
- def tix_getimage(self, name):
- """Locates an image file of the name name.xpm, name.xbm or name.ppm
- in one of the bitmap directories (see the addbitmapdir command
- above). If more than one file with the same name (but different
- extensions) exist, then the image type is chosen according to the
- depth of the X display: xbm images are chosen on monochrome
- displays and color images are chosen on color displays. By using
- tix_ getimage, you can advoid hard coding the pathnames of the
- image files in your application. When successful, this command
- returns the name of the newly created image, which can be used to
- configure the -image option of the Tk and Tix widgets.
- """
- return self.tk.call('tix', 'getimage', name)
-
- def tix_option_get(self, name):
- """Gets the options manitained by the Tix
- scheme mechanism. Available options include:
-
- active_bg active_fg bg
- bold_font dark1_bg dark1_fg
- dark2_bg dark2_fg disabled_fg
- fg fixed_font font
- inactive_bg inactive_fg input1_bg
- input2_bg italic_font light1_bg
- light1_fg light2_bg light2_fg
- menu_font output1_bg output2_bg
- select_bg select_fg selector
- """
- # could use self.tk.globalgetvar('tixOption', name)
- return self.tk.call('tix', 'option', 'get', name)
-
- def tix_resetoptions(self, newScheme, newFontSet, newScmPrio=None):
- """Resets the scheme and fontset of the Tix application to
- newScheme and newFontSet, respectively. This affects only those
- widgets created after this call. Therefore, it is best to call the
- resetoptions command before the creation of any widgets in a Tix
- application.
-
- The optional parameter newScmPrio can be given to reset the
- priority level of the Tk options set by the Tix schemes.
-
- Because of the way Tk handles the X option database, after Tix has
- been has imported and inited, it is not possible to reset the color
- schemes and font sets using the tix config command. Instead, the
- tix_resetoptions command must be used.
- """
- if newScmPrio is not None:
- return self.tk.call('tix', 'resetoptions', newScheme, newFontSet, newScmPrio)
- else:
- return self.tk.call('tix', 'resetoptions', newScheme, newFontSet)
-
-class Tk(Tkinter.Tk, tixCommand):
- """Toplevel widget of Tix which represents mostly the main window
- of an application. It has an associated Tcl interpreter."""
- def __init__(self, screenName=None, baseName=None, className='Tix'):
- Tkinter.Tk.__init__(self, screenName, baseName, className)
- tixlib = os.environ.get('TIX_LIBRARY')
- self.tk.eval('global auto_path; lappend auto_path [file dir [info nameof]]')
- if tixlib is not None:
- self.tk.eval('global auto_path; lappend auto_path {%s}' % tixlib)
- self.tk.eval('global tcl_pkgPath; lappend tcl_pkgPath {%s}' % tixlib)
- # Load Tix - this should work dynamically or statically
- # If it's static, tcl/tix8.1/pkgIndex.tcl should have
- # 'load {} Tix'
- # If it's dynamic under Unix, tcl/tix8.1/pkgIndex.tcl should have
- # 'load libtix8.1.8.3.so Tix'
- self.tk.eval('package require Tix')
-
- def destroy(self):
- # For safety, remove an delete_window binding before destroy
- self.protocol("WM_DELETE_WINDOW", "")
- Tkinter.Tk.destroy(self)
-
-# The Tix 'tixForm' geometry manager
-class Form:
- """The Tix Form geometry manager
-
- Widgets can be arranged by specifying attachments to other widgets.
- See Tix documentation for complete details"""
-
- def config(self, cnf={}, **kw):
- self.tk.call('tixForm', self._w, *self._options(cnf, kw))
-
- form = config
-
- def __setitem__(self, key, value):
- Form.form(self, {key: value})
-
- def check(self):
- return self.tk.call('tixForm', 'check', self._w)
-
- def forget(self):
- self.tk.call('tixForm', 'forget', self._w)
-
- def grid(self, xsize=0, ysize=0):
- if (not xsize) and (not ysize):
- x = self.tk.call('tixForm', 'grid', self._w)
- y = self.tk.splitlist(x)
- z = ()
- for x in y:
- z = z + (self.tk.getint(x),)
- return z
- return self.tk.call('tixForm', 'grid', self._w, xsize, ysize)
-
- def info(self, option=None):
- if not option:
- return self.tk.call('tixForm', 'info', self._w)
- if option[0] != '-':
- option = '-' + option
- return self.tk.call('tixForm', 'info', self._w, option)
-
- def slaves(self):
- return map(self._nametowidget,
- self.tk.splitlist(
- self.tk.call(
- 'tixForm', 'slaves', self._w)))
-
-
-
-Tkinter.Widget.__bases__ = Tkinter.Widget.__bases__ + (Form,)
-
-class TixWidget(Tkinter.Widget):
- """A TixWidget class is used to package all (or most) Tix widgets.
-
- Widget initialization is extended in two ways:
- 1) It is possible to give a list of options which must be part of
- the creation command (so called Tix 'static' options). These cannot be
- given as a 'config' command later.
- 2) It is possible to give the name of an existing TK widget. These are
- child widgets created automatically by a Tix mega-widget. The Tk call
- to create these widgets is therefore bypassed in TixWidget.__init__
-
- Both options are for use by subclasses only.
- """
- def __init__ (self, master=None, widgetName=None,
- static_options=None, cnf={}, kw={}):
- # Merge keywords and dictionary arguments
- if kw:
- cnf = _cnfmerge((cnf, kw))
- else:
- cnf = _cnfmerge(cnf)
-
- # Move static options into extra. static_options must be
- # a list of keywords (or None).
- extra=()
-
- # 'options' is always a static option
- if static_options:
- static_options.append('options')
- else:
- static_options = ['options']
-
- for k,v in cnf.items()[:]:
- if k in static_options:
- extra = extra + ('-' + k, v)
- del cnf[k]
-
- self.widgetName = widgetName
- Widget._setup(self, master, cnf)
-
- # If widgetName is None, this is a dummy creation call where the
- # corresponding Tk widget has already been created by Tix
- if widgetName:
- self.tk.call(widgetName, self._w, *extra)
-
- # Non-static options - to be done via a 'config' command
- if cnf:
- Widget.config(self, cnf)
-
- # Dictionary to hold subwidget names for easier access. We can't
- # use the children list because the public Tix names may not be the
- # same as the pathname component
- self.subwidget_list = {}
-
- # We set up an attribute access function so that it is possible to
- # do w.ok['text'] = 'Hello' rather than w.subwidget('ok')['text'] = 'Hello'
- # when w is a StdButtonBox.
- # We can even do w.ok.invoke() because w.ok is subclassed from the
- # Button class if you go through the proper constructors
- def __getattr__(self, name):
- if self.subwidget_list.has_key(name):
- return self.subwidget_list[name]
- raise AttributeError, name
-
- def set_silent(self, value):
- """Set a variable without calling its action routine"""
- self.tk.call('tixSetSilent', self._w, value)
-
- def subwidget(self, name):
- """Return the named subwidget (which must have been created by
- the sub-class)."""
- n = self._subwidget_name(name)
- if not n:
- raise TclError, "Subwidget " + name + " not child of " + self._name
- # Remove header of name and leading dot
- n = n[len(self._w)+1:]
- return self._nametowidget(n)
-
- def subwidgets_all(self):
- """Return all subwidgets."""
- names = self._subwidget_names()
- if not names:
- return []
- retlist = []
- for name in names:
- name = name[len(self._w)+1:]
- try:
- retlist.append(self._nametowidget(name))
- except:
- # some of the widgets are unknown e.g. border in LabelFrame
- pass
- return retlist
-
- def _subwidget_name(self,name):
- """Get a subwidget name (returns a String, not a Widget !)"""
- try:
- return self.tk.call(self._w, 'subwidget', name)
- except TclError:
- return None
-
- def _subwidget_names(self):
- """Return the name of all subwidgets."""
- try:
- x = self.tk.call(self._w, 'subwidgets', '-all')
- return self.tk.split(x)
- except TclError:
- return None
-
- def config_all(self, option, value):
- """Set configuration options for all subwidgets (and self)."""
- if option == '':
- return
- elif not isinstance(option, StringType):
- option = repr(option)
- if not isinstance(value, StringType):
- value = repr(value)
- names = self._subwidget_names()
- for name in names:
- self.tk.call(name, 'configure', '-' + option, value)
- # These are missing from Tkinter
- def image_create(self, imgtype, cnf={}, master=None, **kw):
- if not master:
- master = Tkinter._default_root
- if not master:
- raise RuntimeError, 'Too early to create image'
- if kw and cnf: cnf = _cnfmerge((cnf, kw))
- elif kw: cnf = kw
- options = ()
- for k, v in cnf.items():
- if callable(v):
- v = self._register(v)
- options = options + ('-'+k, v)
- return master.tk.call(('image', 'create', imgtype,) + options)
- def image_delete(self, imgname):
- try:
- self.tk.call('image', 'delete', imgname)
- except TclError:
- # May happen if the root was destroyed
- pass
-
-# Subwidgets are child widgets created automatically by mega-widgets.
-# In python, we have to create these subwidgets manually to mirror their
-# existence in Tk/Tix.
-class TixSubWidget(TixWidget):
- """Subwidget class.
-
- This is used to mirror child widgets automatically created
- by Tix/Tk as part of a mega-widget in Python (which is not informed
- of this)"""
-
- def __init__(self, master, name,
- destroy_physically=1, check_intermediate=1):
- if check_intermediate:
- path = master._subwidget_name(name)
- try:
- path = path[len(master._w)+1:]
- plist = path.split('.')
- except:
- plist = []
-
- if not check_intermediate:
- # immediate descendant
- TixWidget.__init__(self, master, None, None, {'name' : name})
- else:
- # Ensure that the intermediate widgets exist
- parent = master
- for i in range(len(plist) - 1):
- n = '.'.join(plist[:i+1])
- try:
- w = master._nametowidget(n)
- parent = w
- except KeyError:
- # Create the intermediate widget
- parent = TixSubWidget(parent, plist[i],
- destroy_physically=0,
- check_intermediate=0)
- # The Tk widget name is in plist, not in name
- if plist:
- name = plist[-1]
- TixWidget.__init__(self, parent, None, None, {'name' : name})
- self.destroy_physically = destroy_physically
-
- def destroy(self):
- # For some widgets e.g., a NoteBook, when we call destructors,
- # we must be careful not to destroy the frame widget since this
- # also destroys the parent NoteBook thus leading to an exception
- # in Tkinter when it finally calls Tcl to destroy the NoteBook
- for c in self.children.values(): c.destroy()
- if self.master.children.has_key(self._name):
- del self.master.children[self._name]
- if self.master.subwidget_list.has_key(self._name):
- del self.master.subwidget_list[self._name]
- if self.destroy_physically:
- # This is bypassed only for a few widgets
- self.tk.call('destroy', self._w)
-
-
-# Useful func. to split Tcl lists and return as a dict. From Tkinter.py
-def _lst2dict(lst):
- dict = {}
- for x in lst:
- dict[x[0][1:]] = (x[0][1:],) + x[1:]
- return dict
-
-# Useful class to create a display style - later shared by many items.
-# Contributed by Steffen Kremser
-class DisplayStyle:
- """DisplayStyle - handle configuration options shared by
- (multiple) Display Items"""
-
- def __init__(self, itemtype, cnf={}, **kw):
- master = _default_root # global from Tkinter
- if not master and cnf.has_key('refwindow'): master=cnf['refwindow']
- elif not master and kw.has_key('refwindow'): master= kw['refwindow']
- elif not master: raise RuntimeError, "Too early to create display style: no root window"
- self.tk = master.tk
- self.stylename = self.tk.call('tixDisplayStyle', itemtype,
- *self._options(cnf,kw) )
-
- def __str__(self):
- return self.stylename
-
- def _options(self, cnf, kw):
- if kw and cnf:
- cnf = _cnfmerge((cnf, kw))
- elif kw:
- cnf = kw
- opts = ()
- for k, v in cnf.items():
- opts = opts + ('-'+k, v)
- return opts
-
- def delete(self):
- self.tk.call(self.stylename, 'delete')
-
- def __setitem__(self,key,value):
- self.tk.call(self.stylename, 'configure', '-%s'%key, value)
-
- def config(self, cnf={}, **kw):
- return _lst2dict(
- self.tk.split(
- self.tk.call(
- self.stylename, 'configure', *self._options(cnf,kw))))
-
- def __getitem__(self,key):
- return self.tk.call(self.stylename, 'cget', '-%s'%key)
-
-
-######################################################
-### The Tix Widget classes - in alphabetical order ###
-######################################################
-
-class Balloon(TixWidget):
- """Balloon help widget.
-
- Subwidget Class
- --------- -----
- label Label
- message Message"""
-
- # FIXME: It should inherit -superclass tixShell
- def __init__(self, master=None, cnf={}, **kw):
- # static seem to be -installcolormap -initwait -statusbar -cursor
- static = ['options', 'installcolormap', 'initwait', 'statusbar',
- 'cursor']
- TixWidget.__init__(self, master, 'tixBalloon', static, cnf, kw)
- self.subwidget_list['label'] = _dummyLabel(self, 'label',
- destroy_physically=0)
- self.subwidget_list['message'] = _dummyLabel(self, 'message',
- destroy_physically=0)
-
- def bind_widget(self, widget, cnf={}, **kw):
- """Bind balloon widget to another.
- One balloon widget may be bound to several widgets at the same time"""
- self.tk.call(self._w, 'bind', widget._w, *self._options(cnf, kw))
-
- def unbind_widget(self, widget):
- self.tk.call(self._w, 'unbind', widget._w)
-
-class ButtonBox(TixWidget):
- """ButtonBox - A container for pushbuttons.
- Subwidgets are the buttons added with the add method.
- """
- def __init__(self, master=None, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixButtonBox',
- ['orientation', 'options'], cnf, kw)
-
- def add(self, name, cnf={}, **kw):
- """Add a button with given name to box."""
-
- btn = self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
- self.subwidget_list[name] = _dummyButton(self, name)
- return btn
-
- def invoke(self, name):
- if self.subwidget_list.has_key(name):
- self.tk.call(self._w, 'invoke', name)
-
-class ComboBox(TixWidget):
- """ComboBox - an Entry field with a dropdown menu. The user can select a
- choice by either typing in the entry subwdget or selecting from the
- listbox subwidget.
-
- Subwidget Class
- --------- -----
- entry Entry
- arrow Button
- slistbox ScrolledListBox
- tick Button
- cross Button : present if created with the fancy option"""
-
- # FIXME: It should inherit -superclass tixLabelWidget
- def __init__ (self, master=None, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixComboBox',
- ['editable', 'dropdown', 'fancy', 'options'],
- cnf, kw)
- self.subwidget_list['label'] = _dummyLabel(self, 'label')
- self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
- self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
- self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
- 'slistbox')
- try:
- self.subwidget_list['tick'] = _dummyButton(self, 'tick')
- self.subwidget_list['cross'] = _dummyButton(self, 'cross')
- except TypeError:
- # unavailable when -fancy not specified
- pass
-
- # align
-
- def add_history(self, str):
- self.tk.call(self._w, 'addhistory', str)
-
- def append_history(self, str):
- self.tk.call(self._w, 'appendhistory', str)
-
- def insert(self, index, str):
- self.tk.call(self._w, 'insert', index, str)
-
- def pick(self, index):
- self.tk.call(self._w, 'pick', index)
-
-class Control(TixWidget):
- """Control - An entry field with value change arrows. The user can
- adjust the value by pressing the two arrow buttons or by entering
- the value directly into the entry. The new value will be checked
- against the user-defined upper and lower limits.
-
- Subwidget Class
- --------- -----
- incr Button
- decr Button
- entry Entry
- label Label"""
-
- # FIXME: It should inherit -superclass tixLabelWidget
- def __init__ (self, master=None, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixControl', ['options'], cnf, kw)
- self.subwidget_list['incr'] = _dummyButton(self, 'incr')
- self.subwidget_list['decr'] = _dummyButton(self, 'decr')
- self.subwidget_list['label'] = _dummyLabel(self, 'label')
- self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
-
- def decrement(self):
- self.tk.call(self._w, 'decr')
-
- def increment(self):
- self.tk.call(self._w, 'incr')
-
- def invoke(self):
- self.tk.call(self._w, 'invoke')
-
- def update(self):
- self.tk.call(self._w, 'update')
-
-class DirList(TixWidget):
- """DirList - displays a list view of a directory, its previous
- directories and its sub-directories. The user can choose one of
- the directories displayed in the list or change to another directory.
-
- Subwidget Class
- --------- -----
- hlist HList
- hsb Scrollbar
- vsb Scrollbar"""
-
- # FIXME: It should inherit -superclass tixScrolledHList
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixDirList', ['options'], cnf, kw)
- self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
- def chdir(self, dir):
- self.tk.call(self._w, 'chdir', dir)
-
-class DirTree(TixWidget):
- """DirTree - Directory Listing in a hierarchical view.
- Displays a tree view of a directory, its previous directories and its
- sub-directories. The user can choose one of the directories displayed
- in the list or change to another directory.
-
- Subwidget Class
- --------- -----
- hlist HList
- hsb Scrollbar
- vsb Scrollbar"""
-
- # FIXME: It should inherit -superclass tixScrolledHList
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixDirTree', ['options'], cnf, kw)
- self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
- def chdir(self, dir):
- self.tk.call(self._w, 'chdir', dir)
-
-class DirSelectBox(TixWidget):
- """DirSelectBox - Motif style file select box.
- It is generally used for
- the user to choose a file. FileSelectBox stores the files mostly
- recently selected into a ComboBox widget so that they can be quickly
- selected again.
-
- Subwidget Class
- --------- -----
- selection ComboBox
- filter ComboBox
- dirlist ScrolledListBox
- filelist ScrolledListBox"""
-
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixDirSelectBox', ['options'], cnf, kw)
- self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
- self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
-
-class ExFileSelectBox(TixWidget):
- """ExFileSelectBox - MS Windows style file select box.
- It provides an convenient method for the user to select files.
-
- Subwidget Class
- --------- -----
- cancel Button
- ok Button
- hidden Checkbutton
- types ComboBox
- dir ComboBox
- file ComboBox
- dirlist ScrolledListBox
- filelist ScrolledListBox"""
-
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixExFileSelectBox', ['options'], cnf, kw)
- self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
- self.subwidget_list['ok'] = _dummyButton(self, 'ok')
- self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
- self.subwidget_list['types'] = _dummyComboBox(self, 'types')
- self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
- self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
- self.subwidget_list['file'] = _dummyComboBox(self, 'file')
- self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
-
- def filter(self):
- self.tk.call(self._w, 'filter')
-
- def invoke(self):
- self.tk.call(self._w, 'invoke')
-
-
-# Should inherit from a Dialog class
-class DirSelectDialog(TixWidget):
- """The DirSelectDialog widget presents the directories in the file
- system in a dialog window. The user can use this dialog window to
- navigate through the file system to select the desired directory.
-
- Subwidgets Class
- ---------- -----
- dirbox DirSelectDialog"""
-
- # FIXME: It should inherit -superclass tixDialogShell
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixDirSelectDialog',
- ['options'], cnf, kw)
- self.subwidget_list['dirbox'] = _dummyDirSelectBox(self, 'dirbox')
- # cancel and ok buttons are missing
-
- def popup(self):
- self.tk.call(self._w, 'popup')
-
- def popdown(self):
- self.tk.call(self._w, 'popdown')
-
-
-# Should inherit from a Dialog class
-class ExFileSelectDialog(TixWidget):
- """ExFileSelectDialog - MS Windows style file select dialog.
- It provides an convenient method for the user to select files.
-
- Subwidgets Class
- ---------- -----
- fsbox ExFileSelectBox"""
-
- # FIXME: It should inherit -superclass tixDialogShell
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixExFileSelectDialog',
- ['options'], cnf, kw)
- self.subwidget_list['fsbox'] = _dummyExFileSelectBox(self, 'fsbox')
-
- def popup(self):
- self.tk.call(self._w, 'popup')
-
- def popdown(self):
- self.tk.call(self._w, 'popdown')
-
-class FileSelectBox(TixWidget):
- """ExFileSelectBox - Motif style file select box.
- It is generally used for
- the user to choose a file. FileSelectBox stores the files mostly
- recently selected into a ComboBox widget so that they can be quickly
- selected again.
-
- Subwidget Class
- --------- -----
- selection ComboBox
- filter ComboBox
- dirlist ScrolledListBox
- filelist ScrolledListBox"""
-
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixFileSelectBox', ['options'], cnf, kw)
- self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
- self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
- self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
- self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
-
- def apply_filter(self): # name of subwidget is same as command
- self.tk.call(self._w, 'filter')
-
- def invoke(self):
- self.tk.call(self._w, 'invoke')
-
-# Should inherit from a Dialog class
-class FileSelectDialog(TixWidget):
- """FileSelectDialog - Motif style file select dialog.
-
- Subwidgets Class
- ---------- -----
- btns StdButtonBox
- fsbox FileSelectBox"""
-
- # FIXME: It should inherit -superclass tixStdDialogShell
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixFileSelectDialog',
- ['options'], cnf, kw)
- self.subwidget_list['btns'] = _dummyStdButtonBox(self, 'btns')
- self.subwidget_list['fsbox'] = _dummyFileSelectBox(self, 'fsbox')
-
- def popup(self):
- self.tk.call(self._w, 'popup')
-
- def popdown(self):
- self.tk.call(self._w, 'popdown')
-
-class FileEntry(TixWidget):
- """FileEntry - Entry field with button that invokes a FileSelectDialog.
- The user can type in the filename manually. Alternatively, the user can
- press the button widget that sits next to the entry, which will bring
- up a file selection dialog.
-
- Subwidgets Class
- ---------- -----
- button Button
- entry Entry"""
-
- # FIXME: It should inherit -superclass tixLabelWidget
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixFileEntry',
- ['dialogtype', 'options'], cnf, kw)
- self.subwidget_list['button'] = _dummyButton(self, 'button')
- self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
-
- def invoke(self):
- self.tk.call(self._w, 'invoke')
-
- def file_dialog(self):
- # FIXME: return python object
- pass
-
-class HList(TixWidget):
- """HList - Hierarchy display widget can be used to display any data
- that have a hierarchical structure, for example, file system directory
- trees. The list entries are indented and connected by branch lines
- according to their places in the hierachy.
-
- Subwidgets - None"""
-
- def __init__ (self,master=None,cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixHList',
- ['columns', 'options'], cnf, kw)
-
- def add(self, entry, cnf={}, **kw):
- return self.tk.call(self._w, 'add', entry, *self._options(cnf, kw))
-
- def add_child(self, parent=None, cnf={}, **kw):
- if not parent:
- parent = ''
- return self.tk.call(
- self._w, 'addchild', parent, *self._options(cnf, kw))
-
- def anchor_set(self, entry):
- self.tk.call(self._w, 'anchor', 'set', entry)
-
- def anchor_clear(self):
- self.tk.call(self._w, 'anchor', 'clear')
-
- def column_width(self, col=0, width=None, chars=None):
- if not chars:
- return self.tk.call(self._w, 'column', 'width', col, width)
- else:
- return self.tk.call(self._w, 'column', 'width', col,
- '-char', chars)
-
- def delete_all(self):
- self.tk.call(self._w, 'delete', 'all')
-
- def delete_entry(self, entry):
- self.tk.call(self._w, 'delete', 'entry', entry)
-
- def delete_offsprings(self, entry):
- self.tk.call(self._w, 'delete', 'offsprings', entry)
-
- def delete_siblings(self, entry):
- self.tk.call(self._w, 'delete', 'siblings', entry)
-
- def dragsite_set(self, index):
- self.tk.call(self._w, 'dragsite', 'set', index)
-
- def dragsite_clear(self):
- self.tk.call(self._w, 'dragsite', 'clear')
-
- def dropsite_set(self, index):
- self.tk.call(self._w, 'dropsite', 'set', index)
-
- def dropsite_clear(self):
- self.tk.call(self._w, 'dropsite', 'clear')
-
- def header_create(self, col, cnf={}, **kw):
- self.tk.call(self._w, 'header', 'create', col, *self._options(cnf, kw))
-
- def header_configure(self, col, cnf={}, **kw):
- if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'header', 'configure', col)))
- self.tk.call(self._w, 'header', 'configure', col,
- *self._options(cnf, kw))
-
- def header_cget(self, col, opt):
- return self.tk.call(self._w, 'header', 'cget', col, opt)
-
- def header_exists(self, col):
- return self.tk.call(self._w, 'header', 'exists', col)
-
- def header_delete(self, col):
- self.tk.call(self._w, 'header', 'delete', col)
-
- def header_size(self, col):
- return self.tk.call(self._w, 'header', 'size', col)
-
- def hide_entry(self, entry):
- self.tk.call(self._w, 'hide', 'entry', entry)
-
- def indicator_create(self, entry, cnf={}, **kw):
- self.tk.call(
- self._w, 'indicator', 'create', entry, *self._options(cnf, kw))
-
- def indicator_configure(self, entry, cnf={}, **kw):
- if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'indicator', 'configure', entry)))
- self.tk.call(
- self._w, 'indicator', 'configure', entry, *self._options(cnf, kw))
-
- def indicator_cget(self, entry, opt):
- return self.tk.call(self._w, 'indicator', 'cget', entry, opt)
-
- def indicator_exists(self, entry):
- return self.tk.call (self._w, 'indicator', 'exists', entry)
-
- def indicator_delete(self, entry):
- self.tk.call(self._w, 'indicator', 'delete', entry)
-
- def indicator_size(self, entry):
- return self.tk.call(self._w, 'indicator', 'size', entry)
-
- def info_anchor(self):
- return self.tk.call(self._w, 'info', 'anchor')
-
- def info_children(self, entry=None):
- c = self.tk.call(self._w, 'info', 'children', entry)
- return self.tk.splitlist(c)
-
- def info_data(self, entry):
- return self.tk.call(self._w, 'info', 'data', entry)
-
- def info_exists(self, entry):
- return self.tk.call(self._w, 'info', 'exists', entry)
-
- def info_hidden(self, entry):
- return self.tk.call(self._w, 'info', 'hidden', entry)
-
- def info_next(self, entry):
- return self.tk.call(self._w, 'info', 'next', entry)
-
- def info_parent(self, entry):
- return self.tk.call(self._w, 'info', 'parent', entry)
-
- def info_prev(self, entry):
- return self.tk.call(self._w, 'info', 'prev', entry)
-
- def info_selection(self):
- c = self.tk.call(self._w, 'info', 'selection')
- return self.tk.splitlist(c)
-
- def item_cget(self, entry, col, opt):
- return self.tk.call(self._w, 'item', 'cget', entry, col, opt)
-
- def item_configure(self, entry, col, cnf={}, **kw):
- if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'item', 'configure', entry, col)))
- self.tk.call(self._w, 'item', 'configure', entry, col,
- *self._options(cnf, kw))
-
- def item_create(self, entry, col, cnf={}, **kw):
- self.tk.call(
- self._w, 'item', 'create', entry, col, *self._options(cnf, kw))
-
- def item_exists(self, entry, col):
- return self.tk.call(self._w, 'item', 'exists', entry, col)
-
- def item_delete(self, entry, col):
- self.tk.call(self._w, 'item', 'delete', entry, col)
-
- def entrycget(self, entry, opt):
- return self.tk.call(self._w, 'entrycget', entry, opt)
-
- def entryconfigure(self, entry, cnf={}, **kw):
- if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'entryconfigure', entry)))
- self.tk.call(self._w, 'entryconfigure', entry,
- *self._options(cnf, kw))
-
- def nearest(self, y):
- return self.tk.call(self._w, 'nearest', y)
-
- def see(self, entry):
- self.tk.call(self._w, 'see', entry)
-
- def selection_clear(self, cnf={}, **kw):
- self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw))
-
- def selection_includes(self, entry):
- return self.tk.call(self._w, 'selection', 'includes', entry)
-
- def selection_set(self, first, last=None):
- self.tk.call(self._w, 'selection', 'set', first, last)
-
- def show_entry(self, entry):
- return self.tk.call(self._w, 'show', 'entry', entry)
-
- def xview(self, *args):
- self.tk.call(self._w, 'xview', *args)
-
- def yview(self, *args):
- self.tk.call(self._w, 'yview', *args)
-
-class InputOnly(TixWidget):
- """InputOnly - Invisible widget. Unix only.
-
- Subwidgets - None"""
-
- def __init__ (self,master=None,cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixInputOnly', None, cnf, kw)
-
-class LabelEntry(TixWidget):
- """LabelEntry - Entry field with label. Packages an entry widget
- and a label into one mega widget. It can beused be used to simplify
- the creation of ``entry-form'' type of interface.
-
- Subwidgets Class
- ---------- -----
- label Label
- entry Entry"""
-
- def __init__ (self,master=None,cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixLabelEntry',
- ['labelside','options'], cnf, kw)
- self.subwidget_list['label'] = _dummyLabel(self, 'label')
- self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
-
-class LabelFrame(TixWidget):
- """LabelFrame - Labelled Frame container. Packages a frame widget
- and a label into one mega widget. To create widgets inside a
- LabelFrame widget, one creates the new widgets relative to the
- frame subwidget and manage them inside the frame subwidget.
-
- Subwidgets Class
- ---------- -----
- label Label
- frame Frame"""
-
- def __init__ (self,master=None,cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixLabelFrame',
- ['labelside','options'], cnf, kw)
- self.subwidget_list['label'] = _dummyLabel(self, 'label')
- self.subwidget_list['frame'] = _dummyFrame(self, 'frame')
-
-
-class ListNoteBook(TixWidget):
- """A ListNoteBook widget is very similar to the TixNoteBook widget:
- it can be used to display many windows in a limited space using a
- notebook metaphor. The notebook is divided into a stack of pages
- (windows). At one time only one of these pages can be shown.
- The user can navigate through these pages by
- choosing the name of the desired page in the hlist subwidget."""
-
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixListNoteBook', ['options'], cnf, kw)
- # Is this necessary? It's not an exposed subwidget in Tix.
- self.subwidget_list['pane'] = _dummyPanedWindow(self, 'pane',
- destroy_physically=0)
- self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
- self.subwidget_list['shlist'] = _dummyScrolledHList(self, 'shlist')
-
- def add(self, name, cnf={}, **kw):
- self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
- self.subwidget_list[name] = TixSubWidget(self, name)
- return self.subwidget_list[name]
-
- def page(self, name):
- return self.subwidget(name)
-
- def pages(self):
- # Can't call subwidgets_all directly because we don't want .nbframe
- names = self.tk.split(self.tk.call(self._w, 'pages'))
- ret = []
- for x in names:
- ret.append(self.subwidget(x))
- return ret
-
- def raise_page(self, name): # raise is a python keyword
- self.tk.call(self._w, 'raise', name)
-
-class Meter(TixWidget):
- """The Meter widget can be used to show the progress of a background
- job which may take a long time to execute.
- """
-
- def __init__(self, master=None, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixMeter',
- ['options'], cnf, kw)
-
-class NoteBook(TixWidget):
- """NoteBook - Multi-page container widget (tabbed notebook metaphor).
-
- Subwidgets Class
- ---------- -----
- nbframe NoteBookFrame
- <pages> page widgets added dynamically with the add method"""
-
- def __init__ (self,master=None,cnf={}, **kw):
- TixWidget.__init__(self,master,'tixNoteBook', ['options'], cnf, kw)
- self.subwidget_list['nbframe'] = TixSubWidget(self, 'nbframe',
- destroy_physically=0)
-
- def add(self, name, cnf={}, **kw):
- self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
- self.subwidget_list[name] = TixSubWidget(self, name)
- return self.subwidget_list[name]
-
- def delete(self, name):
- self.tk.call(self._w, 'delete', name)
- self.subwidget_list[name].destroy()
- del self.subwidget_list[name]
-
- def page(self, name):
- return self.subwidget(name)
-
- def pages(self):
- # Can't call subwidgets_all directly because we don't want .nbframe
- names = self.tk.split(self.tk.call(self._w, 'pages'))
- ret = []
- for x in names:
- ret.append(self.subwidget(x))
- return ret
-
- def raise_page(self, name): # raise is a python keyword
- self.tk.call(self._w, 'raise', name)
-
- def raised(self):
- return self.tk.call(self._w, 'raised')
-
-class NoteBookFrame(TixWidget):
- # FIXME: This is dangerous to expose to be called on its own.
- pass
-
-class OptionMenu(TixWidget):
- """OptionMenu - creates a menu button of options.
-
- Subwidget Class
- --------- -----
- menubutton Menubutton
- menu Menu"""
-
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixOptionMenu', ['options'], cnf, kw)
- self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
- self.subwidget_list['menu'] = _dummyMenu(self, 'menu')
-
- def add_command(self, name, cnf={}, **kw):
- self.tk.call(self._w, 'add', 'command', name, *self._options(cnf, kw))
-
- def add_separator(self, name, cnf={}, **kw):
- self.tk.call(self._w, 'add', 'separator', name, *self._options(cnf, kw))
-
- def delete(self, name):
- self.tk.call(self._w, 'delete', name)
-
- def disable(self, name):
- self.tk.call(self._w, 'disable', name)
-
- def enable(self, name):
- self.tk.call(self._w, 'enable', name)
-
-class PanedWindow(TixWidget):
- """PanedWindow - Multi-pane container widget
- allows the user to interactively manipulate the sizes of several
- panes. The panes can be arranged either vertically or horizontally.The
- user changes the sizes of the panes by dragging the resize handle
- between two panes.
-
- Subwidgets Class
- ---------- -----
- <panes> g/p widgets added dynamically with the add method."""
-
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixPanedWindow', ['orientation', 'options'], cnf, kw)
-
- # add delete forget panecget paneconfigure panes setsize
- def add(self, name, cnf={}, **kw):
- self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
- self.subwidget_list[name] = TixSubWidget(self, name,
- check_intermediate=0)
- return self.subwidget_list[name]
-
- def delete(self, name):
- self.tk.call(self._w, 'delete', name)
- self.subwidget_list[name].destroy()
- del self.subwidget_list[name]
-
- def forget(self, name):
- self.tk.call(self._w, 'forget', name)
-
- def panecget(self, entry, opt):
- return self.tk.call(self._w, 'panecget', entry, opt)
-
- def paneconfigure(self, entry, cnf={}, **kw):
- if cnf is None:
- return _lst2dict(
- self.tk.split(
- self.tk.call(self._w, 'paneconfigure', entry)))
- self.tk.call(self._w, 'paneconfigure', entry, *self._options(cnf, kw))
-
- def panes(self):
- names = self.tk.call(self._w, 'panes')
- ret = []
- for x in names:
- ret.append(self.subwidget(x))
- return ret
-
-class PopupMenu(TixWidget):
- """PopupMenu widget can be used as a replacement of the tk_popup command.
- The advantage of the Tix PopupMenu widget is it requires less application
- code to manipulate.
-
-
- Subwidgets Class
- ---------- -----
- menubutton Menubutton
- menu Menu"""
-
- # FIXME: It should inherit -superclass tixShell
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixPopupMenu', ['options'], cnf, kw)
- self.subwidget_list['menubutton'] = _dummyMenubutton(self, 'menubutton')
- self.subwidget_list['menu'] = _dummyMenu(self, 'menu')
-
- def bind_widget(self, widget):
- self.tk.call(self._w, 'bind', widget._w)
-
- def unbind_widget(self, widget):
- self.tk.call(self._w, 'unbind', widget._w)
-
- def post_widget(self, widget, x, y):
- self.tk.call(self._w, 'post', widget._w, x, y)
-
-class ResizeHandle(TixWidget):
- """Internal widget to draw resize handles on Scrolled widgets."""
- def __init__(self, master, cnf={}, **kw):
- # There seems to be a Tix bug rejecting the configure method
- # Let's try making the flags -static
- flags = ['options', 'command', 'cursorfg', 'cursorbg',
- 'handlesize', 'hintcolor', 'hintwidth',
- 'x', 'y']
- # In fact, x y height width are configurable
- TixWidget.__init__(self, master, 'tixResizeHandle',
- flags, cnf, kw)
-
- def attach_widget(self, widget):
- self.tk.call(self._w, 'attachwidget', widget._w)
-
- def detach_widget(self, widget):
- self.tk.call(self._w, 'detachwidget', widget._w)
-
- def hide(self, widget):
- self.tk.call(self._w, 'hide', widget._w)
-
- def show(self, widget):
- self.tk.call(self._w, 'show', widget._w)
-
-class ScrolledHList(TixWidget):
- """ScrolledHList - HList with automatic scrollbars."""
-
- # FIXME: It should inherit -superclass tixScrolledWidget
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixScrolledHList', ['options'],
- cnf, kw)
- self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
-class ScrolledListBox(TixWidget):
- """ScrolledListBox - Listbox with automatic scrollbars."""
-
- # FIXME: It should inherit -superclass tixScrolledWidget
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixScrolledListBox', ['options'], cnf, kw)
- self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
-class ScrolledText(TixWidget):
- """ScrolledText - Text with automatic scrollbars."""
-
- # FIXME: It should inherit -superclass tixScrolledWidget
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixScrolledText', ['options'], cnf, kw)
- self.subwidget_list['text'] = _dummyText(self, 'text')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
-class ScrolledTList(TixWidget):
- """ScrolledTList - TList with automatic scrollbars."""
-
- # FIXME: It should inherit -superclass tixScrolledWidget
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixScrolledTList', ['options'],
- cnf, kw)
- self.subwidget_list['tlist'] = _dummyTList(self, 'tlist')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
-class ScrolledWindow(TixWidget):
- """ScrolledWindow - Window with automatic scrollbars."""
-
- # FIXME: It should inherit -superclass tixScrolledWidget
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixScrolledWindow', ['options'], cnf, kw)
- self.subwidget_list['window'] = _dummyFrame(self, 'window')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
-class Select(TixWidget):
- """Select - Container of button subwidgets. It can be used to provide
- radio-box or check-box style of selection options for the user.
-
- Subwidgets are buttons added dynamically using the add method."""
-
- # FIXME: It should inherit -superclass tixLabelWidget
- def __init__(self, master, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixSelect',
- ['allowzero', 'radio', 'orientation', 'labelside',
- 'options'],
- cnf, kw)
- self.subwidget_list['label'] = _dummyLabel(self, 'label')
-
- def add(self, name, cnf={}, **kw):
- self.tk.call(self._w, 'add', name, *self._options(cnf, kw))
- self.subwidget_list[name] = _dummyButton(self, name)
- return self.subwidget_list[name]
-
- def invoke(self, name):
- self.tk.call(self._w, 'invoke', name)
-
-class Shell(TixWidget):
- """Toplevel window.
-
- Subwidgets - None"""
-
- def __init__ (self,master=None,cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixShell', ['options', 'title'], cnf, kw)
-
-class DialogShell(TixWidget):
- """Toplevel window, with popup popdown and center methods.
- It tells the window manager that it is a dialog window and should be
- treated specially. The exact treatment depends on the treatment of
- the window manager.
-
- Subwidgets - None"""
-
- # FIXME: It should inherit from Shell
- def __init__ (self,master=None,cnf={}, **kw):
- TixWidget.__init__(self, master,
- 'tixDialogShell',
- ['options', 'title', 'mapped',
- 'minheight', 'minwidth',
- 'parent', 'transient'], cnf, kw)
-
- def popdown(self):
- self.tk.call(self._w, 'popdown')
-
- def popup(self):
- self.tk.call(self._w, 'popup')
-
- def center(self):
- self.tk.call(self._w, 'center')
-
-class StdButtonBox(TixWidget):
- """StdButtonBox - Standard Button Box (OK, Apply, Cancel and Help) """
-
- def __init__(self, master=None, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixStdButtonBox',
- ['orientation', 'options'], cnf, kw)
- self.subwidget_list['ok'] = _dummyButton(self, 'ok')
- self.subwidget_list['apply'] = _dummyButton(self, 'apply')
- self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
- self.subwidget_list['help'] = _dummyButton(self, 'help')
-
- def invoke(self, name):
- if self.subwidget_list.has_key(name):
- self.tk.call(self._w, 'invoke', name)
-
-class TList(TixWidget):
- """TList - Hierarchy display widget which can be
- used to display data in a tabular format. The list entries of a TList
- widget are similar to the entries in the Tk listbox widget. The main
- differences are (1) the TList widget can display the list entries in a
- two dimensional format and (2) you can use graphical images as well as
- multiple colors and fonts for the list entries.
-
- Subwidgets - None"""
-
- def __init__ (self,master=None,cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixTList', ['options'], cnf, kw)
-
- def active_set(self, index):
- self.tk.call(self._w, 'active', 'set', index)
-
- def active_clear(self):
- self.tk.call(self._w, 'active', 'clear')
-
- def anchor_set(self, index):
- self.tk.call(self._w, 'anchor', 'set', index)
-
- def anchor_clear(self):
- self.tk.call(self._w, 'anchor', 'clear')
-
- def delete(self, from_, to=None):
- self.tk.call(self._w, 'delete', from_, to)
-
- def dragsite_set(self, index):
- self.tk.call(self._w, 'dragsite', 'set', index)
-
- def dragsite_clear(self):
- self.tk.call(self._w, 'dragsite', 'clear')
-
- def dropsite_set(self, index):
- self.tk.call(self._w, 'dropsite', 'set', index)
-
- def dropsite_clear(self):
- self.tk.call(self._w, 'dropsite', 'clear')
-
- def insert(self, index, cnf={}, **kw):
- self.tk.call(self._w, 'insert', index, *self._options(cnf, kw))
-
- def info_active(self):
- return self.tk.call(self._w, 'info', 'active')
-
- def info_anchor(self):
- return self.tk.call(self._w, 'info', 'anchor')
-
- def info_down(self, index):
- return self.tk.call(self._w, 'info', 'down', index)
-
- def info_left(self, index):
- return self.tk.call(self._w, 'info', 'left', index)
-
- def info_right(self, index):
- return self.tk.call(self._w, 'info', 'right', index)
-
- def info_selection(self):
- c = self.tk.call(self._w, 'info', 'selection')
- return self.tk.splitlist(c)
-
- def info_size(self):
- return self.tk.call(self._w, 'info', 'size')
-
- def info_up(self, index):
- return self.tk.call(self._w, 'info', 'up', index)
-
- def nearest(self, x, y):
- return self.tk.call(self._w, 'nearest', x, y)
-
- def see(self, index):
- self.tk.call(self._w, 'see', index)
-
- def selection_clear(self, cnf={}, **kw):
- self.tk.call(self._w, 'selection', 'clear', *self._options(cnf, kw))
-
- def selection_includes(self, index):
- return self.tk.call(self._w, 'selection', 'includes', index)
-
- def selection_set(self, first, last=None):
- self.tk.call(self._w, 'selection', 'set', first, last)
-
- def xview(self, *args):
- self.tk.call(self._w, 'xview', *args)
-
- def yview(self, *args):
- self.tk.call(self._w, 'yview', *args)
-
-class Tree(TixWidget):
- """Tree - The tixTree widget can be used to display hierachical
- data in a tree form. The user can adjust
- the view of the tree by opening or closing parts of the tree."""
-
- # FIXME: It should inherit -superclass tixScrolledWidget
- def __init__(self, master=None, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixTree',
- ['options'], cnf, kw)
- self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
- def autosetmode(self):
- '''This command calls the setmode method for all the entries in this
- Tree widget: if an entry has no child entries, its mode is set to
- none. Otherwise, if the entry has any hidden child entries, its mode is
- set to open; otherwise its mode is set to close.'''
- self.tk.call(self._w, 'autosetmode')
-
- def close(self, entrypath):
- '''Close the entry given by entryPath if its mode is close.'''
- self.tk.call(self._w, 'close', entrypath)
-
- def getmode(self, entrypath):
- '''Returns the current mode of the entry given by entryPath.'''
- return self.tk.call(self._w, 'getmode', entrypath)
-
- def open(self, entrypath):
- '''Open the entry given by entryPath if its mode is open.'''
- self.tk.call(self._w, 'open', entrypath)
-
- def setmode(self, entrypath, mode='none'):
- '''This command is used to indicate whether the entry given by
- entryPath has children entries and whether the children are visible. mode
- must be one of open, close or none. If mode is set to open, a (+)
- indicator is drawn next the the entry. If mode is set to close, a (-)
- indicator is drawn next the the entry. If mode is set to none, no
- indicators will be drawn for this entry. The default mode is none. The
- open mode indicates the entry has hidden children and this entry can be
- opened by the user. The close mode indicates that all the children of the
- entry are now visible and the entry can be closed by the user.'''
- self.tk.call(self._w, 'setmode', entrypath, mode)
-
-
-# Could try subclassing Tree for CheckList - would need another arg to init
-class CheckList(TixWidget):
- """The CheckList widget
- displays a list of items to be selected by the user. CheckList acts
- similarly to the Tk checkbutton or radiobutton widgets, except it is
- capable of handling many more items than checkbuttons or radiobuttons.
- """
- # FIXME: It should inherit -superclass tixTree
- def __init__(self, master=None, cnf={}, **kw):
- TixWidget.__init__(self, master, 'tixCheckList',
- ['options'], cnf, kw)
- self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
- def autosetmode(self):
- '''This command calls the setmode method for all the entries in this
- Tree widget: if an entry has no child entries, its mode is set to
- none. Otherwise, if the entry has any hidden child entries, its mode is
- set to open; otherwise its mode is set to close.'''
- self.tk.call(self._w, 'autosetmode')
-
- def close(self, entrypath):
- '''Close the entry given by entryPath if its mode is close.'''
- self.tk.call(self._w, 'close', entrypath)
-
- def getmode(self, entrypath):
- '''Returns the current mode of the entry given by entryPath.'''
- return self.tk.call(self._w, 'getmode', entrypath)
-
- def open(self, entrypath):
- '''Open the entry given by entryPath if its mode is open.'''
- self.tk.call(self._w, 'open', entrypath)
-
- def getselection(self, mode='on'):
- '''Returns a list of items whose status matches status. If status is
- not specified, the list of items in the "on" status will be returned.
- Mode can be on, off, default'''
- c = self.tk.split(self.tk.call(self._w, 'getselection', mode))
- return self.tk.splitlist(c)
-
- def getstatus(self, entrypath):
- '''Returns the current status of entryPath.'''
- return self.tk.call(self._w, 'getstatus', entrypath)
-
- def setstatus(self, entrypath, mode='on'):
- '''Sets the status of entryPath to be status. A bitmap will be
- displayed next to the entry its status is on, off or default.'''
- self.tk.call(self._w, 'setstatus', entrypath, mode)
-
-
-###########################################################################
-### The subclassing below is used to instantiate the subwidgets in each ###
-### mega widget. This allows us to access their methods directly. ###
-###########################################################################
-
-class _dummyButton(Button, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyCheckbutton(Checkbutton, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyEntry(Entry, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyFrame(Frame, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyLabel(Label, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyListbox(Listbox, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyMenu(Menu, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyMenubutton(Menubutton, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyScrollbar(Scrollbar, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyText(Text, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyScrolledListBox(ScrolledListBox, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
- self.subwidget_list['listbox'] = _dummyListbox(self, 'listbox')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
-class _dummyHList(HList, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyScrolledHList(ScrolledHList, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
- self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
-class _dummyTList(TList, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyComboBox(ComboBox, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, ['fancy',destroy_physically])
- self.subwidget_list['label'] = _dummyLabel(self, 'label')
- self.subwidget_list['entry'] = _dummyEntry(self, 'entry')
- self.subwidget_list['arrow'] = _dummyButton(self, 'arrow')
-
- self.subwidget_list['slistbox'] = _dummyScrolledListBox(self,
- 'slistbox')
- try:
- self.subwidget_list['tick'] = _dummyButton(self, 'tick')
- #cross Button : present if created with the fancy option
- self.subwidget_list['cross'] = _dummyButton(self, 'cross')
- except TypeError:
- # unavailable when -fancy not specified
- pass
-
-class _dummyDirList(DirList, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
- self.subwidget_list['hlist'] = _dummyHList(self, 'hlist')
- self.subwidget_list['vsb'] = _dummyScrollbar(self, 'vsb')
- self.subwidget_list['hsb'] = _dummyScrollbar(self, 'hsb')
-
-class _dummyDirSelectBox(DirSelectBox, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
- self.subwidget_list['dirlist'] = _dummyDirList(self, 'dirlist')
- self.subwidget_list['dircbx'] = _dummyFileComboBox(self, 'dircbx')
-
-class _dummyExFileSelectBox(ExFileSelectBox, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
- self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
- self.subwidget_list['ok'] = _dummyButton(self, 'ok')
- self.subwidget_list['hidden'] = _dummyCheckbutton(self, 'hidden')
- self.subwidget_list['types'] = _dummyComboBox(self, 'types')
- self.subwidget_list['dir'] = _dummyComboBox(self, 'dir')
- self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
- self.subwidget_list['file'] = _dummyComboBox(self, 'file')
- self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
-
-class _dummyFileSelectBox(FileSelectBox, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
- self.subwidget_list['dirlist'] = _dummyScrolledListBox(self, 'dirlist')
- self.subwidget_list['filelist'] = _dummyScrolledListBox(self, 'filelist')
- self.subwidget_list['filter'] = _dummyComboBox(self, 'filter')
- self.subwidget_list['selection'] = _dummyComboBox(self, 'selection')
-
-class _dummyFileComboBox(ComboBox, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
- self.subwidget_list['dircbx'] = _dummyComboBox(self, 'dircbx')
-
-class _dummyStdButtonBox(StdButtonBox, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
- self.subwidget_list['ok'] = _dummyButton(self, 'ok')
- self.subwidget_list['apply'] = _dummyButton(self, 'apply')
- self.subwidget_list['cancel'] = _dummyButton(self, 'cancel')
- self.subwidget_list['help'] = _dummyButton(self, 'help')
-
-class _dummyNoteBookFrame(NoteBookFrame, TixSubWidget):
- def __init__(self, master, name, destroy_physically=0):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-class _dummyPanedWindow(PanedWindow, TixSubWidget):
- def __init__(self, master, name, destroy_physically=1):
- TixSubWidget.__init__(self, master, name, destroy_physically)
-
-########################
-### Utility Routines ###
-########################
-
-#mike Should tixDestroy be exposed as a wrapper? - but not for widgets.
-
-def OptionName(widget):
- '''Returns the qualified path name for the widget. Normally used to set
- default options for subwidgets. See tixwidgets.py'''
- return widget.tk.call('tixOptionName', widget._w)
-
-# Called with a dictionary argument of the form
-# {'*.c':'C source files', '*.txt':'Text Files', '*':'All files'}
-# returns a string which can be used to configure the fsbox file types
-# in an ExFileSelectBox. i.e.,
-# '{{*} {* - All files}} {{*.c} {*.c - C source files}} {{*.txt} {*.txt - Text Files}}'
-def FileTypeList(dict):
- s = ''
- for type in dict.keys():
- s = s + '{{' + type + '} {' + type + ' - ' + dict[type] + '}} '
- return s
-
-# Still to be done:
-# tixIconView
-class CObjView(TixWidget):
- """This file implements the Canvas Object View widget. This is a base
- class of IconView. It implements automatic placement/adjustment of the
- scrollbars according to the canvas objects inside the canvas subwidget.
- The scrollbars are adjusted so that the canvas is just large enough
- to see all the objects.
- """
- # FIXME: It should inherit -superclass tixScrolledWidget
- pass
-
-
-class Grid(TixWidget):
- '''The Tix Grid command creates a new window and makes it into a
- tixGrid widget. Additional options, may be specified on the command
- line or in the option database to configure aspects such as its cursor
- and relief.
-
- A Grid widget displays its contents in a two dimensional grid of cells.
- Each cell may contain one Tix display item, which may be in text,
- graphics or other formats. See the DisplayStyle class for more information
- about Tix display items. Individual cells, or groups of cells, can be
- formatted with a wide range of attributes, such as its color, relief and
- border.
-
- Subwidgets - None'''
- # valid specific resources as of Tk 8.4
- # editdonecmd, editnotifycmd, floatingcols, floatingrows, formatcmd,
- # highlightbackground, highlightcolor, leftmargin, itemtype, selectmode,
- # selectunit, topmargin,
- def __init__(self, master=None, cnf={}, **kw):
- static= []
- self.cnf= cnf
- TixWidget.__init__(self, master, 'tixGrid', static, cnf, kw)
-
- # valid options as of Tk 8.4
- # anchor, bdtype, cget, configure, delete, dragsite, dropsite, entrycget, edit
- # entryconfigure, format, geometryinfo, info, index, move, nearest, selection
- # set, size, unset, xview, yview
- # def anchor option ?args ...?
- def anchor_get(self):
- "Get the (x,y) coordinate of the current anchor cell"
- return self._getints(self.tk.call(self, 'anchor', 'get'))
-
- # def bdtype
- # def delete dim from ?to?
- def delete_row(self, from_, to=None):
- """Delete rows between from_ and to inclusive.
- If to is not provided, delete only row at from_"""
- if to is None:
- self.tk.call(self, 'delete', 'row', from_)
- else:
- self.tk.call(self, 'delete', 'row', from_, to)
- def delete_column(self, from_, to=None):
- """Delete columns between from_ and to inclusive.
- If to is not provided, delete only column at from_"""
- if to is None:
- self.tk.call(self, 'delete', 'column', from_)
- else:
- self.tk.call(self, 'delete', 'column', from_, to)
- # def edit apply
- # def edit set x y
-
- def entrycget(self, x, y, option):
- "Get the option value for cell at (x,y)"
- return self.tk.call(self, 'entrycget', x, y, option)
-
- def entryconfigure(self, x, y, **kw):
- return self.tk.call(self, 'entryconfigure', x, y, *self._options(None, kw))
- # def format
- # def index
-
- def info_exists(self, x, y):
- "Return True if display item exists at (x,y)"
- return bool(int(self.tk.call(self, 'info', 'exists', x, y)))
-
- def info_bbox(self, x, y):
- # This seems to always return '', at least for 'text' displayitems
- return self.tk.call(self, 'info', 'bbox', x, y)
-
- def nearest(self, x, y):
- "Return coordinate of cell nearest pixel coordinate (x,y)"
- return self._getints(self.tk.call(self, 'nearest', x, y))
-
- # def selection adjust
- # def selection clear
- # def selection includes
- # def selection set
- # def selection toggle
- # def move dim from to offset
-
- def set(self, x, y, itemtype=None, **kw):
- args= self._options(self.cnf, kw)
- if itemtype is not None:
- args= ('-itemtype', itemtype) + args
- self.tk.call(self, 'set', x, y, *args)
-
- # def size dim index ?option value ...?
- # def unset x y
-
- def xview(self):
- return self._getdoubles(self.tk.call(self, 'xview'))
- def xview_moveto(self, fraction):
- self.tk.call(self,'xview', 'moveto', fraction)
- def xview_scroll(self, count, what="units"):
- "Scroll right (count>0) or left <count> of units|pages"
- self.tk.call(self, 'xview', 'scroll', count, what)
-
- def yview(self):
- return self._getdoubles(self.tk.call(self, 'yview'))
- def yview_moveto(self, fraction):
- self.tk.call(self,'ysview', 'moveto', fraction)
- def yview_scroll(self, count, what="units"):
- "Scroll down (count>0) or up <count> of units|pages"
- self.tk.call(self, 'yview', 'scroll', count, what)
-
-class ScrolledGrid(Grid):
- '''Scrolled Grid widgets'''
-
- # FIXME: It should inherit -superclass tixScrolledWidget
- def __init__(self, master=None, cnf={}, **kw):
- static= []
- self.cnf= cnf
- TixWidget.__init__(self, master, 'tixScrolledGrid', static, cnf, kw)
diff --git a/sys/lib/python/lib-tk/Tkconstants.py b/sys/lib/python/lib-tk/Tkconstants.py
deleted file mode 100644
index 63eee33d2..000000000
--- a/sys/lib/python/lib-tk/Tkconstants.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Symbolic constants for Tk
-
-# Booleans
-NO=FALSE=OFF=0
-YES=TRUE=ON=1
-
-# -anchor and -sticky
-N='n'
-S='s'
-W='w'
-E='e'
-NW='nw'
-SW='sw'
-NE='ne'
-SE='se'
-NS='ns'
-EW='ew'
-NSEW='nsew'
-CENTER='center'
-
-# -fill
-NONE='none'
-X='x'
-Y='y'
-BOTH='both'
-
-# -side
-LEFT='left'
-TOP='top'
-RIGHT='right'
-BOTTOM='bottom'
-
-# -relief
-RAISED='raised'
-SUNKEN='sunken'
-FLAT='flat'
-RIDGE='ridge'
-GROOVE='groove'
-SOLID = 'solid'
-
-# -orient
-HORIZONTAL='horizontal'
-VERTICAL='vertical'
-
-# -tabs
-NUMERIC='numeric'
-
-# -wrap
-CHAR='char'
-WORD='word'
-
-# -align
-BASELINE='baseline'
-
-# -bordermode
-INSIDE='inside'
-OUTSIDE='outside'
-
-# Special tags, marks and insert positions
-SEL='sel'
-SEL_FIRST='sel.first'
-SEL_LAST='sel.last'
-END='end'
-INSERT='insert'
-CURRENT='current'
-ANCHOR='anchor'
-ALL='all' # e.g. Canvas.delete(ALL)
-
-# Text widget and button states
-NORMAL='normal'
-DISABLED='disabled'
-ACTIVE='active'
-# Canvas state
-HIDDEN='hidden'
-
-# Menu item types
-CASCADE='cascade'
-CHECKBUTTON='checkbutton'
-COMMAND='command'
-RADIOBUTTON='radiobutton'
-SEPARATOR='separator'
-
-# Selection modes for list boxes
-SINGLE='single'
-BROWSE='browse'
-MULTIPLE='multiple'
-EXTENDED='extended'
-
-# Activestyle for list boxes
-# NONE='none' is also valid
-DOTBOX='dotbox'
-UNDERLINE='underline'
-
-# Various canvas styles
-PIESLICE='pieslice'
-CHORD='chord'
-ARC='arc'
-FIRST='first'
-LAST='last'
-BUTT='butt'
-PROJECTING='projecting'
-ROUND='round'
-BEVEL='bevel'
-MITER='miter'
-
-# Arguments to xview/yview
-MOVETO='moveto'
-SCROLL='scroll'
-UNITS='units'
-PAGES='pages'
diff --git a/sys/lib/python/lib-tk/Tkdnd.py b/sys/lib/python/lib-tk/Tkdnd.py
deleted file mode 100644
index 2e37d9ec3..000000000
--- a/sys/lib/python/lib-tk/Tkdnd.py
+++ /dev/null
@@ -1,321 +0,0 @@
-"""Drag-and-drop support for Tkinter.
-
-This is very preliminary. I currently only support dnd *within* one
-application, between different windows (or within the same window).
-
-I an trying to make this as generic as possible -- not dependent on
-the use of a particular widget or icon type, etc. I also hope that
-this will work with Pmw.
-
-To enable an object to be dragged, you must create an event binding
-for it that starts the drag-and-drop process. Typically, you should
-bind <ButtonPress> to a callback function that you write. The function
-should call Tkdnd.dnd_start(source, event), where 'source' is the
-object to be dragged, and 'event' is the event that invoked the call
-(the argument to your callback function). Even though this is a class
-instantiation, the returned instance should not be stored -- it will
-be kept alive automatically for the duration of the drag-and-drop.
-
-When a drag-and-drop is already in process for the Tk interpreter, the
-call is *ignored*; this normally averts starting multiple simultaneous
-dnd processes, e.g. because different button callbacks all
-dnd_start().
-
-The object is *not* necessarily a widget -- it can be any
-application-specific object that is meaningful to potential
-drag-and-drop targets.
-
-Potential drag-and-drop targets are discovered as follows. Whenever
-the mouse moves, and at the start and end of a drag-and-drop move, the
-Tk widget directly under the mouse is inspected. This is the target
-widget (not to be confused with the target object, yet to be
-determined). If there is no target widget, there is no dnd target
-object. If there is a target widget, and it has an attribute
-dnd_accept, this should be a function (or any callable object). The
-function is called as dnd_accept(source, event), where 'source' is the
-object being dragged (the object passed to dnd_start() above), and
-'event' is the most recent event object (generally a <Motion> event;
-it can also be <ButtonPress> or <ButtonRelease>). If the dnd_accept()
-function returns something other than None, this is the new dnd target
-object. If dnd_accept() returns None, or if the target widget has no
-dnd_accept attribute, the target widget's parent is considered as the
-target widget, and the search for a target object is repeated from
-there. If necessary, the search is repeated all the way up to the
-root widget. If none of the target widgets can produce a target
-object, there is no target object (the target object is None).
-
-The target object thus produced, if any, is called the new target
-object. It is compared with the old target object (or None, if there
-was no old target widget). There are several cases ('source' is the
-source object, and 'event' is the most recent event object):
-
-- Both the old and new target objects are None. Nothing happens.
-
-- The old and new target objects are the same object. Its method
-dnd_motion(source, event) is called.
-
-- The old target object was None, and the new target object is not
-None. The new target object's method dnd_enter(source, event) is
-called.
-
-- The new target object is None, and the old target object is not
-None. The old target object's method dnd_leave(source, event) is
-called.
-
-- The old and new target objects differ and neither is None. The old
-target object's method dnd_leave(source, event), and then the new
-target object's method dnd_enter(source, event) is called.
-
-Once this is done, the new target object replaces the old one, and the
-Tk mainloop proceeds. The return value of the methods mentioned above
-is ignored; if they raise an exception, the normal exception handling
-mechanisms take over.
-
-The drag-and-drop processes can end in two ways: a final target object
-is selected, or no final target object is selected. When a final
-target object is selected, it will always have been notified of the
-potential drop by a call to its dnd_enter() method, as described
-above, and possibly one or more calls to its dnd_motion() method; its
-dnd_leave() method has not been called since the last call to
-dnd_enter(). The target is notified of the drop by a call to its
-method dnd_commit(source, event).
-
-If no final target object is selected, and there was an old target
-object, its dnd_leave(source, event) method is called to complete the
-dnd sequence.
-
-Finally, the source object is notified that the drag-and-drop process
-is over, by a call to source.dnd_end(target, event), specifying either
-the selected target object, or None if no target object was selected.
-The source object can use this to implement the commit action; this is
-sometimes simpler than to do it in the target's dnd_commit(). The
-target's dnd_commit() method could then simply be aliased to
-dnd_leave().
-
-At any time during a dnd sequence, the application can cancel the
-sequence by calling the cancel() method on the object returned by
-dnd_start(). This will call dnd_leave() if a target is currently
-active; it will never call dnd_commit().
-
-"""
-
-
-import Tkinter
-
-
-# The factory function
-
-def dnd_start(source, event):
- h = DndHandler(source, event)
- if h.root:
- return h
- else:
- return None
-
-
-# The class that does the work
-
-class DndHandler:
-
- root = None
-
- def __init__(self, source, event):
- if event.num > 5:
- return
- root = event.widget._root()
- try:
- root.__dnd
- return # Don't start recursive dnd
- except AttributeError:
- root.__dnd = self
- self.root = root
- self.source = source
- self.target = None
- self.initial_button = button = event.num
- self.initial_widget = widget = event.widget
- self.release_pattern = "<B%d-ButtonRelease-%d>" % (button, button)
- self.save_cursor = widget['cursor'] or ""
- widget.bind(self.release_pattern, self.on_release)
- widget.bind("<Motion>", self.on_motion)
- widget['cursor'] = "hand2"
-
- def __del__(self):
- root = self.root
- self.root = None
- if root:
- try:
- del root.__dnd
- except AttributeError:
- pass
-
- def on_motion(self, event):
- x, y = event.x_root, event.y_root
- target_widget = self.initial_widget.winfo_containing(x, y)
- source = self.source
- new_target = None
- while target_widget:
- try:
- attr = target_widget.dnd_accept
- except AttributeError:
- pass
- else:
- new_target = attr(source, event)
- if new_target:
- break
- target_widget = target_widget.master
- old_target = self.target
- if old_target is new_target:
- if old_target:
- old_target.dnd_motion(source, event)
- else:
- if old_target:
- self.target = None
- old_target.dnd_leave(source, event)
- if new_target:
- new_target.dnd_enter(source, event)
- self.target = new_target
-
- def on_release(self, event):
- self.finish(event, 1)
-
- def cancel(self, event=None):
- self.finish(event, 0)
-
- def finish(self, event, commit=0):
- target = self.target
- source = self.source
- widget = self.initial_widget
- root = self.root
- try:
- del root.__dnd
- self.initial_widget.unbind(self.release_pattern)
- self.initial_widget.unbind("<Motion>")
- widget['cursor'] = self.save_cursor
- self.target = self.source = self.initial_widget = self.root = None
- if target:
- if commit:
- target.dnd_commit(source, event)
- else:
- target.dnd_leave(source, event)
- finally:
- source.dnd_end(target, event)
-
-
-
-# ----------------------------------------------------------------------
-# The rest is here for testing and demonstration purposes only!
-
-class Icon:
-
- def __init__(self, name):
- self.name = name
- self.canvas = self.label = self.id = None
-
- def attach(self, canvas, x=10, y=10):
- if canvas is self.canvas:
- self.canvas.coords(self.id, x, y)
- return
- if self.canvas:
- self.detach()
- if not canvas:
- return
- label = Tkinter.Label(canvas, text=self.name,
- borderwidth=2, relief="raised")
- id = canvas.create_window(x, y, window=label, anchor="nw")
- self.canvas = canvas
- self.label = label
- self.id = id
- label.bind("<ButtonPress>", self.press)
-
- def detach(self):
- canvas = self.canvas
- if not canvas:
- return
- id = self.id
- label = self.label
- self.canvas = self.label = self.id = None
- canvas.delete(id)
- label.destroy()
-
- def press(self, event):
- if dnd_start(self, event):
- # where the pointer is relative to the label widget:
- self.x_off = event.x
- self.y_off = event.y
- # where the widget is relative to the canvas:
- self.x_orig, self.y_orig = self.canvas.coords(self.id)
-
- def move(self, event):
- x, y = self.where(self.canvas, event)
- self.canvas.coords(self.id, x, y)
-
- def putback(self):
- self.canvas.coords(self.id, self.x_orig, self.y_orig)
-
- def where(self, canvas, event):
- # where the corner of the canvas is relative to the screen:
- x_org = canvas.winfo_rootx()
- y_org = canvas.winfo_rooty()
- # where the pointer is relative to the canvas widget:
- x = event.x_root - x_org
- y = event.y_root - y_org
- # compensate for initial pointer offset
- return x - self.x_off, y - self.y_off
-
- def dnd_end(self, target, event):
- pass
-
-class Tester:
-
- def __init__(self, root):
- self.top = Tkinter.Toplevel(root)
- self.canvas = Tkinter.Canvas(self.top, width=100, height=100)
- self.canvas.pack(fill="both", expand=1)
- self.canvas.dnd_accept = self.dnd_accept
-
- def dnd_accept(self, source, event):
- return self
-
- def dnd_enter(self, source, event):
- self.canvas.focus_set() # Show highlight border
- x, y = source.where(self.canvas, event)
- x1, y1, x2, y2 = source.canvas.bbox(source.id)
- dx, dy = x2-x1, y2-y1
- self.dndid = self.canvas.create_rectangle(x, y, x+dx, y+dy)
- self.dnd_motion(source, event)
-
- def dnd_motion(self, source, event):
- x, y = source.where(self.canvas, event)
- x1, y1, x2, y2 = self.canvas.bbox(self.dndid)
- self.canvas.move(self.dndid, x-x1, y-y1)
-
- def dnd_leave(self, source, event):
- self.top.focus_set() # Hide highlight border
- self.canvas.delete(self.dndid)
- self.dndid = None
-
- def dnd_commit(self, source, event):
- self.dnd_leave(source, event)
- x, y = source.where(self.canvas, event)
- source.attach(self.canvas, x, y)
-
-def test():
- root = Tkinter.Tk()
- root.geometry("+1+1")
- Tkinter.Button(command=root.quit, text="Quit").pack()
- t1 = Tester(root)
- t1.top.geometry("+1+60")
- t2 = Tester(root)
- t2.top.geometry("+120+60")
- t3 = Tester(root)
- t3.top.geometry("+240+60")
- i1 = Icon("ICON1")
- i2 = Icon("ICON2")
- i3 = Icon("ICON3")
- i1.attach(t1.canvas)
- i2.attach(t2.canvas)
- i3.attach(t3.canvas)
- root.mainloop()
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/lib-tk/Tkinter.py b/sys/lib/python/lib-tk/Tkinter.py
deleted file mode 100644
index daeff4732..000000000
--- a/sys/lib/python/lib-tk/Tkinter.py
+++ /dev/null
@@ -1,3759 +0,0 @@
-"""Wrapper functions for Tcl/Tk.
-
-Tkinter provides classes which allow the display, positioning and
-control of widgets. Toplevel widgets are Tk and Toplevel. Other
-widgets are Frame, Label, Entry, Text, Canvas, Button, Radiobutton,
-Checkbutton, Scale, Listbox, Scrollbar, OptionMenu, Spinbox
-LabelFrame and PanedWindow.
-
-Properties of the widgets are specified with keyword arguments.
-Keyword arguments have the same name as the corresponding resource
-under Tk.
-
-Widgets are positioned with one of the geometry managers Place, Pack
-or Grid. These managers can be called with methods place, pack, grid
-available in every Widget.
-
-Actions are bound to events by resources (e.g. keyword argument
-command) or with the method bind.
-
-Example (Hello, World):
-import Tkinter
-from Tkconstants import *
-tk = Tkinter.Tk()
-frame = Tkinter.Frame(tk, relief=RIDGE, borderwidth=2)
-frame.pack(fill=BOTH,expand=1)
-label = Tkinter.Label(frame, text="Hello, World")
-label.pack(fill=X, expand=1)
-button = Tkinter.Button(frame,text="Exit",command=tk.destroy)
-button.pack(side=BOTTOM)
-tk.mainloop()
-"""
-
-__version__ = "$Revision: 50704 $"
-
-import sys
-if sys.platform == "win32":
- import FixTk # Attempt to configure Tcl/Tk without requiring PATH
-import _tkinter # If this fails your Python may not be configured for Tk
-tkinter = _tkinter # b/w compat for export
-TclError = _tkinter.TclError
-from types import *
-from Tkconstants import *
-try:
- import MacOS; _MacOS = MacOS; del MacOS
-except ImportError:
- _MacOS = None
-
-wantobjects = 1
-
-TkVersion = float(_tkinter.TK_VERSION)
-TclVersion = float(_tkinter.TCL_VERSION)
-
-READABLE = _tkinter.READABLE
-WRITABLE = _tkinter.WRITABLE
-EXCEPTION = _tkinter.EXCEPTION
-
-# These are not always defined, e.g. not on Win32 with Tk 8.0 :-(
-try: _tkinter.createfilehandler
-except AttributeError: _tkinter.createfilehandler = None
-try: _tkinter.deletefilehandler
-except AttributeError: _tkinter.deletefilehandler = None
-
-
-def _flatten(tuple):
- """Internal function."""
- res = ()
- for item in tuple:
- if type(item) in (TupleType, ListType):
- res = res + _flatten(item)
- elif item is not None:
- res = res + (item,)
- return res
-
-try: _flatten = _tkinter._flatten
-except AttributeError: pass
-
-def _cnfmerge(cnfs):
- """Internal function."""
- if type(cnfs) is DictionaryType:
- return cnfs
- elif type(cnfs) in (NoneType, StringType):
- return cnfs
- else:
- cnf = {}
- for c in _flatten(cnfs):
- try:
- cnf.update(c)
- except (AttributeError, TypeError), msg:
- print "_cnfmerge: fallback due to:", msg
- for k, v in c.items():
- cnf[k] = v
- return cnf
-
-try: _cnfmerge = _tkinter._cnfmerge
-except AttributeError: pass
-
-class Event:
- """Container for the properties of an event.
-
- Instances of this type are generated if one of the following events occurs:
-
- KeyPress, KeyRelease - for keyboard events
- ButtonPress, ButtonRelease, Motion, Enter, Leave, MouseWheel - for mouse events
- Visibility, Unmap, Map, Expose, FocusIn, FocusOut, Circulate,
- Colormap, Gravity, Reparent, Property, Destroy, Activate,
- Deactivate - for window events.
-
- If a callback function for one of these events is registered
- using bind, bind_all, bind_class, or tag_bind, the callback is
- called with an Event as first argument. It will have the
- following attributes (in braces are the event types for which
- the attribute is valid):
-
- serial - serial number of event
- num - mouse button pressed (ButtonPress, ButtonRelease)
- focus - whether the window has the focus (Enter, Leave)
- height - height of the exposed window (Configure, Expose)
- width - width of the exposed window (Configure, Expose)
- keycode - keycode of the pressed key (KeyPress, KeyRelease)
- state - state of the event as a number (ButtonPress, ButtonRelease,
- Enter, KeyPress, KeyRelease,
- Leave, Motion)
- state - state as a string (Visibility)
- time - when the event occurred
- x - x-position of the mouse
- y - y-position of the mouse
- x_root - x-position of the mouse on the screen
- (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
- y_root - y-position of the mouse on the screen
- (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
- char - pressed character (KeyPress, KeyRelease)
- send_event - see X/Windows documentation
- keysym - keysym of the event as a string (KeyPress, KeyRelease)
- keysym_num - keysym of the event as a number (KeyPress, KeyRelease)
- type - type of the event as a number
- widget - widget in which the event occurred
- delta - delta of wheel movement (MouseWheel)
- """
- pass
-
-_support_default_root = 1
-_default_root = None
-
-def NoDefaultRoot():
- """Inhibit setting of default root window.
-
- Call this function to inhibit that the first instance of
- Tk is used for windows without an explicit parent window.
- """
- global _support_default_root
- _support_default_root = 0
- global _default_root
- _default_root = None
- del _default_root
-
-def _tkerror(err):
- """Internal function."""
- pass
-
-def _exit(code='0'):
- """Internal function. Calling it will throw the exception SystemExit."""
- raise SystemExit, code
-
-_varnum = 0
-class Variable:
- """Class to define value holders for e.g. buttons.
-
- Subclasses StringVar, IntVar, DoubleVar, BooleanVar are specializations
- that constrain the type of the value returned from get()."""
- _default = ""
- def __init__(self, master=None, value=None, name=None):
- """Construct a variable
-
- MASTER can be given as master widget.
- VALUE is an optional value (defaults to "")
- NAME is an optional Tcl name (defaults to PY_VARnum).
-
- If NAME matches an existing variable and VALUE is omitted
- then the existing value is retained.
- """
- global _varnum
- if not master:
- master = _default_root
- self._master = master
- self._tk = master.tk
- if name:
- self._name = name
- else:
- self._name = 'PY_VAR' + repr(_varnum)
- _varnum += 1
- if value != None:
- self.set(value)
- elif not self._tk.call("info", "exists", self._name):
- self.set(self._default)
- def __del__(self):
- """Unset the variable in Tcl."""
- self._tk.globalunsetvar(self._name)
- def __str__(self):
- """Return the name of the variable in Tcl."""
- return self._name
- def set(self, value):
- """Set the variable to VALUE."""
- return self._tk.globalsetvar(self._name, value)
- def get(self):
- """Return value of variable."""
- return self._tk.globalgetvar(self._name)
- def trace_variable(self, mode, callback):
- """Define a trace callback for the variable.
-
- MODE is one of "r", "w", "u" for read, write, undefine.
- CALLBACK must be a function which is called when
- the variable is read, written or undefined.
-
- Return the name of the callback.
- """
- cbname = self._master._register(callback)
- self._tk.call("trace", "variable", self._name, mode, cbname)
- return cbname
- trace = trace_variable
- def trace_vdelete(self, mode, cbname):
- """Delete the trace callback for a variable.
-
- MODE is one of "r", "w", "u" for read, write, undefine.
- CBNAME is the name of the callback returned from trace_variable or trace.
- """
- self._tk.call("trace", "vdelete", self._name, mode, cbname)
- self._master.deletecommand(cbname)
- def trace_vinfo(self):
- """Return all trace callback information."""
- return map(self._tk.split, self._tk.splitlist(
- self._tk.call("trace", "vinfo", self._name)))
- def __eq__(self, other):
- """Comparison for equality (==).
-
- Note: if the Variable's master matters to behavior
- also compare self._master == other._master
- """
- return self.__class__.__name__ == other.__class__.__name__ \
- and self._name == other._name
-
-class StringVar(Variable):
- """Value holder for strings variables."""
- _default = ""
- def __init__(self, master=None, value=None, name=None):
- """Construct a string variable.
-
- MASTER can be given as master widget.
- VALUE is an optional value (defaults to "")
- NAME is an optional Tcl name (defaults to PY_VARnum).
-
- If NAME matches an existing variable and VALUE is omitted
- then the existing value is retained.
- """
- Variable.__init__(self, master, value, name)
-
- def get(self):
- """Return value of variable as string."""
- value = self._tk.globalgetvar(self._name)
- if isinstance(value, basestring):
- return value
- return str(value)
-
-class IntVar(Variable):
- """Value holder for integer variables."""
- _default = 0
- def __init__(self, master=None, value=None, name=None):
- """Construct an integer variable.
-
- MASTER can be given as master widget.
- VALUE is an optional value (defaults to 0)
- NAME is an optional Tcl name (defaults to PY_VARnum).
-
- If NAME matches an existing variable and VALUE is omitted
- then the existing value is retained.
- """
- Variable.__init__(self, master, value, name)
-
- def set(self, value):
- """Set the variable to value, converting booleans to integers."""
- if isinstance(value, bool):
- value = int(value)
- return Variable.set(self, value)
-
- def get(self):
- """Return the value of the variable as an integer."""
- return getint(self._tk.globalgetvar(self._name))
-
-class DoubleVar(Variable):
- """Value holder for float variables."""
- _default = 0.0
- def __init__(self, master=None, value=None, name=None):
- """Construct a float variable.
-
- MASTER can be given as master widget.
- VALUE is an optional value (defaults to 0.0)
- NAME is an optional Tcl name (defaults to PY_VARnum).
-
- If NAME matches an existing variable and VALUE is omitted
- then the existing value is retained.
- """
- Variable.__init__(self, master, value, name)
-
- def get(self):
- """Return the value of the variable as a float."""
- return getdouble(self._tk.globalgetvar(self._name))
-
-class BooleanVar(Variable):
- """Value holder for boolean variables."""
- _default = False
- def __init__(self, master=None, value=None, name=None):
- """Construct a boolean variable.
-
- MASTER can be given as master widget.
- VALUE is an optional value (defaults to False)
- NAME is an optional Tcl name (defaults to PY_VARnum).
-
- If NAME matches an existing variable and VALUE is omitted
- then the existing value is retained.
- """
- Variable.__init__(self, master, value, name)
-
- def get(self):
- """Return the value of the variable as a bool."""
- return self._tk.getboolean(self._tk.globalgetvar(self._name))
-
-def mainloop(n=0):
- """Run the main loop of Tcl."""
- _default_root.tk.mainloop(n)
-
-getint = int
-
-getdouble = float
-
-def getboolean(s):
- """Convert true and false to integer values 1 and 0."""
- return _default_root.tk.getboolean(s)
-
-# Methods defined on both toplevel and interior widgets
-class Misc:
- """Internal class.
-
- Base class which defines methods common for interior widgets."""
-
- # XXX font command?
- _tclCommands = None
- def destroy(self):
- """Internal function.
-
- Delete all Tcl commands created for
- this widget in the Tcl interpreter."""
- if self._tclCommands is not None:
- for name in self._tclCommands:
- #print '- Tkinter: deleted command', name
- self.tk.deletecommand(name)
- self._tclCommands = None
- def deletecommand(self, name):
- """Internal function.
-
- Delete the Tcl command provided in NAME."""
- #print '- Tkinter: deleted command', name
- self.tk.deletecommand(name)
- try:
- self._tclCommands.remove(name)
- except ValueError:
- pass
- def tk_strictMotif(self, boolean=None):
- """Set Tcl internal variable, whether the look and feel
- should adhere to Motif.
-
- A parameter of 1 means adhere to Motif (e.g. no color
- change if mouse passes over slider).
- Returns the set value."""
- return self.tk.getboolean(self.tk.call(
- 'set', 'tk_strictMotif', boolean))
- def tk_bisque(self):
- """Change the color scheme to light brown as used in Tk 3.6 and before."""
- self.tk.call('tk_bisque')
- def tk_setPalette(self, *args, **kw):
- """Set a new color scheme for all widget elements.
-
- A single color as argument will cause that all colors of Tk
- widget elements are derived from this.
- Alternatively several keyword parameters and its associated
- colors can be given. The following keywords are valid:
- activeBackground, foreground, selectColor,
- activeForeground, highlightBackground, selectBackground,
- background, highlightColor, selectForeground,
- disabledForeground, insertBackground, troughColor."""
- self.tk.call(('tk_setPalette',)
- + _flatten(args) + _flatten(kw.items()))
- def tk_menuBar(self, *args):
- """Do not use. Needed in Tk 3.6 and earlier."""
- pass # obsolete since Tk 4.0
- def wait_variable(self, name='PY_VAR'):
- """Wait until the variable is modified.
-
- A parameter of type IntVar, StringVar, DoubleVar or
- BooleanVar must be given."""
- self.tk.call('tkwait', 'variable', name)
- waitvar = wait_variable # XXX b/w compat
- def wait_window(self, window=None):
- """Wait until a WIDGET is destroyed.
-
- If no parameter is given self is used."""
- if window is None:
- window = self
- self.tk.call('tkwait', 'window', window._w)
- def wait_visibility(self, window=None):
- """Wait until the visibility of a WIDGET changes
- (e.g. it appears).
-
- If no parameter is given self is used."""
- if window is None:
- window = self
- self.tk.call('tkwait', 'visibility', window._w)
- def setvar(self, name='PY_VAR', value='1'):
- """Set Tcl variable NAME to VALUE."""
- self.tk.setvar(name, value)
- def getvar(self, name='PY_VAR'):
- """Return value of Tcl variable NAME."""
- return self.tk.getvar(name)
- getint = int
- getdouble = float
- def getboolean(self, s):
- """Return a boolean value for Tcl boolean values true and false given as parameter."""
- return self.tk.getboolean(s)
- def focus_set(self):
- """Direct input focus to this widget.
-
- If the application currently does not have the focus
- this widget will get the focus if the application gets
- the focus through the window manager."""
- self.tk.call('focus', self._w)
- focus = focus_set # XXX b/w compat?
- def focus_force(self):
- """Direct input focus to this widget even if the
- application does not have the focus. Use with
- caution!"""
- self.tk.call('focus', '-force', self._w)
- def focus_get(self):
- """Return the widget which has currently the focus in the
- application.
-
- Use focus_displayof to allow working with several
- displays. Return None if application does not have
- the focus."""
- name = self.tk.call('focus')
- if name == 'none' or not name: return None
- return self._nametowidget(name)
- def focus_displayof(self):
- """Return the widget which has currently the focus on the
- display where this widget is located.
-
- Return None if the application does not have the focus."""
- name = self.tk.call('focus', '-displayof', self._w)
- if name == 'none' or not name: return None
- return self._nametowidget(name)
- def focus_lastfor(self):
- """Return the widget which would have the focus if top level
- for this widget gets the focus from the window manager."""
- name = self.tk.call('focus', '-lastfor', self._w)
- if name == 'none' or not name: return None
- return self._nametowidget(name)
- def tk_focusFollowsMouse(self):
- """The widget under mouse will get automatically focus. Can not
- be disabled easily."""
- self.tk.call('tk_focusFollowsMouse')
- def tk_focusNext(self):
- """Return the next widget in the focus order which follows
- widget which has currently the focus.
-
- The focus order first goes to the next child, then to
- the children of the child recursively and then to the
- next sibling which is higher in the stacking order. A
- widget is omitted if it has the takefocus resource set
- to 0."""
- name = self.tk.call('tk_focusNext', self._w)
- if not name: return None
- return self._nametowidget(name)
- def tk_focusPrev(self):
- """Return previous widget in the focus order. See tk_focusNext for details."""
- name = self.tk.call('tk_focusPrev', self._w)
- if not name: return None
- return self._nametowidget(name)
- def after(self, ms, func=None, *args):
- """Call function once after given time.
-
- MS specifies the time in milliseconds. FUNC gives the
- function which shall be called. Additional parameters
- are given as parameters to the function call. Return
- identifier to cancel scheduling with after_cancel."""
- if not func:
- # I'd rather use time.sleep(ms*0.001)
- self.tk.call('after', ms)
- else:
- def callit():
- try:
- func(*args)
- finally:
- try:
- self.deletecommand(name)
- except TclError:
- pass
- name = self._register(callit)
- return self.tk.call('after', ms, name)
- def after_idle(self, func, *args):
- """Call FUNC once if the Tcl main loop has no event to
- process.
-
- Return an identifier to cancel the scheduling with
- after_cancel."""
- return self.after('idle', func, *args)
- def after_cancel(self, id):
- """Cancel scheduling of function identified with ID.
-
- Identifier returned by after or after_idle must be
- given as first parameter."""
- try:
- data = self.tk.call('after', 'info', id)
- # In Tk 8.3, splitlist returns: (script, type)
- # In Tk 8.4, splitlist may return (script, type) or (script,)
- script = self.tk.splitlist(data)[0]
- self.deletecommand(script)
- except TclError:
- pass
- self.tk.call('after', 'cancel', id)
- def bell(self, displayof=0):
- """Ring a display's bell."""
- self.tk.call(('bell',) + self._displayof(displayof))
-
- # Clipboard handling:
- def clipboard_get(self, **kw):
- """Retrieve data from the clipboard on window's display.
-
- The window keyword defaults to the root window of the Tkinter
- application.
-
- The type keyword specifies the form in which the data is
- to be returned and should be an atom name such as STRING
- or FILE_NAME. Type defaults to STRING.
-
- This command is equivalent to:
-
- selection_get(CLIPBOARD)
- """
- return self.tk.call(('clipboard', 'get') + self._options(kw))
-
- def clipboard_clear(self, **kw):
- """Clear the data in the Tk clipboard.
-
- A widget specified for the optional displayof keyword
- argument specifies the target display."""
- if not kw.has_key('displayof'): kw['displayof'] = self._w
- self.tk.call(('clipboard', 'clear') + self._options(kw))
- def clipboard_append(self, string, **kw):
- """Append STRING to the Tk clipboard.
-
- A widget specified at the optional displayof keyword
- argument specifies the target display. The clipboard
- can be retrieved with selection_get."""
- if not kw.has_key('displayof'): kw['displayof'] = self._w
- self.tk.call(('clipboard', 'append') + self._options(kw)
- + ('--', string))
- # XXX grab current w/o window argument
- def grab_current(self):
- """Return widget which has currently the grab in this application
- or None."""
- name = self.tk.call('grab', 'current', self._w)
- if not name: return None
- return self._nametowidget(name)
- def grab_release(self):
- """Release grab for this widget if currently set."""
- self.tk.call('grab', 'release', self._w)
- def grab_set(self):
- """Set grab for this widget.
-
- A grab directs all events to this and descendant
- widgets in the application."""
- self.tk.call('grab', 'set', self._w)
- def grab_set_global(self):
- """Set global grab for this widget.
-
- A global grab directs all events to this and
- descendant widgets on the display. Use with caution -
- other applications do not get events anymore."""
- self.tk.call('grab', 'set', '-global', self._w)
- def grab_status(self):
- """Return None, "local" or "global" if this widget has
- no, a local or a global grab."""
- status = self.tk.call('grab', 'status', self._w)
- if status == 'none': status = None
- return status
- def lower(self, belowThis=None):
- """Lower this widget in the stacking order."""
- self.tk.call('lower', self._w, belowThis)
- def option_add(self, pattern, value, priority = None):
- """Set a VALUE (second parameter) for an option
- PATTERN (first parameter).
-
- An optional third parameter gives the numeric priority
- (defaults to 80)."""
- self.tk.call('option', 'add', pattern, value, priority)
- def option_clear(self):
- """Clear the option database.
-
- It will be reloaded if option_add is called."""
- self.tk.call('option', 'clear')
- def option_get(self, name, className):
- """Return the value for an option NAME for this widget
- with CLASSNAME.
-
- Values with higher priority override lower values."""
- return self.tk.call('option', 'get', self._w, name, className)
- def option_readfile(self, fileName, priority = None):
- """Read file FILENAME into the option database.
-
- An optional second parameter gives the numeric
- priority."""
- self.tk.call('option', 'readfile', fileName, priority)
- def selection_clear(self, **kw):
- """Clear the current X selection."""
- if not kw.has_key('displayof'): kw['displayof'] = self._w
- self.tk.call(('selection', 'clear') + self._options(kw))
- def selection_get(self, **kw):
- """Return the contents of the current X selection.
-
- A keyword parameter selection specifies the name of
- the selection and defaults to PRIMARY. A keyword
- parameter displayof specifies a widget on the display
- to use."""
- if not kw.has_key('displayof'): kw['displayof'] = self._w
- return self.tk.call(('selection', 'get') + self._options(kw))
- def selection_handle(self, command, **kw):
- """Specify a function COMMAND to call if the X
- selection owned by this widget is queried by another
- application.
-
- This function must return the contents of the
- selection. The function will be called with the
- arguments OFFSET and LENGTH which allows the chunking
- of very long selections. The following keyword
- parameters can be provided:
- selection - name of the selection (default PRIMARY),
- type - type of the selection (e.g. STRING, FILE_NAME)."""
- name = self._register(command)
- self.tk.call(('selection', 'handle') + self._options(kw)
- + (self._w, name))
- def selection_own(self, **kw):
- """Become owner of X selection.
-
- A keyword parameter selection specifies the name of
- the selection (default PRIMARY)."""
- self.tk.call(('selection', 'own') +
- self._options(kw) + (self._w,))
- def selection_own_get(self, **kw):
- """Return owner of X selection.
-
- The following keyword parameter can
- be provided:
- selection - name of the selection (default PRIMARY),
- type - type of the selection (e.g. STRING, FILE_NAME)."""
- if not kw.has_key('displayof'): kw['displayof'] = self._w
- name = self.tk.call(('selection', 'own') + self._options(kw))
- if not name: return None
- return self._nametowidget(name)
- def send(self, interp, cmd, *args):
- """Send Tcl command CMD to different interpreter INTERP to be executed."""
- return self.tk.call(('send', interp, cmd) + args)
- def lower(self, belowThis=None):
- """Lower this widget in the stacking order."""
- self.tk.call('lower', self._w, belowThis)
- def tkraise(self, aboveThis=None):
- """Raise this widget in the stacking order."""
- self.tk.call('raise', self._w, aboveThis)
- lift = tkraise
- def colormodel(self, value=None):
- """Useless. Not implemented in Tk."""
- return self.tk.call('tk', 'colormodel', self._w, value)
- def winfo_atom(self, name, displayof=0):
- """Return integer which represents atom NAME."""
- args = ('winfo', 'atom') + self._displayof(displayof) + (name,)
- return getint(self.tk.call(args))
- def winfo_atomname(self, id, displayof=0):
- """Return name of atom with identifier ID."""
- args = ('winfo', 'atomname') \
- + self._displayof(displayof) + (id,)
- return self.tk.call(args)
- def winfo_cells(self):
- """Return number of cells in the colormap for this widget."""
- return getint(
- self.tk.call('winfo', 'cells', self._w))
- def winfo_children(self):
- """Return a list of all widgets which are children of this widget."""
- result = []
- for child in self.tk.splitlist(
- self.tk.call('winfo', 'children', self._w)):
- try:
- # Tcl sometimes returns extra windows, e.g. for
- # menus; those need to be skipped
- result.append(self._nametowidget(child))
- except KeyError:
- pass
- return result
-
- def winfo_class(self):
- """Return window class name of this widget."""
- return self.tk.call('winfo', 'class', self._w)
- def winfo_colormapfull(self):
- """Return true if at the last color request the colormap was full."""
- return self.tk.getboolean(
- self.tk.call('winfo', 'colormapfull', self._w))
- def winfo_containing(self, rootX, rootY, displayof=0):
- """Return the widget which is at the root coordinates ROOTX, ROOTY."""
- args = ('winfo', 'containing') \
- + self._displayof(displayof) + (rootX, rootY)
- name = self.tk.call(args)
- if not name: return None
- return self._nametowidget(name)
- def winfo_depth(self):
- """Return the number of bits per pixel."""
- return getint(self.tk.call('winfo', 'depth', self._w))
- def winfo_exists(self):
- """Return true if this widget exists."""
- return getint(
- self.tk.call('winfo', 'exists', self._w))
- def winfo_fpixels(self, number):
- """Return the number of pixels for the given distance NUMBER
- (e.g. "3c") as float."""
- return getdouble(self.tk.call(
- 'winfo', 'fpixels', self._w, number))
- def winfo_geometry(self):
- """Return geometry string for this widget in the form "widthxheight+X+Y"."""
- return self.tk.call('winfo', 'geometry', self._w)
- def winfo_height(self):
- """Return height of this widget."""
- return getint(
- self.tk.call('winfo', 'height', self._w))
- def winfo_id(self):
- """Return identifier ID for this widget."""
- return self.tk.getint(
- self.tk.call('winfo', 'id', self._w))
- def winfo_interps(self, displayof=0):
- """Return the name of all Tcl interpreters for this display."""
- args = ('winfo', 'interps') + self._displayof(displayof)
- return self.tk.splitlist(self.tk.call(args))
- def winfo_ismapped(self):
- """Return true if this widget is mapped."""
- return getint(
- self.tk.call('winfo', 'ismapped', self._w))
- def winfo_manager(self):
- """Return the window mananger name for this widget."""
- return self.tk.call('winfo', 'manager', self._w)
- def winfo_name(self):
- """Return the name of this widget."""
- return self.tk.call('winfo', 'name', self._w)
- def winfo_parent(self):
- """Return the name of the parent of this widget."""
- return self.tk.call('winfo', 'parent', self._w)
- def winfo_pathname(self, id, displayof=0):
- """Return the pathname of the widget given by ID."""
- args = ('winfo', 'pathname') \
- + self._displayof(displayof) + (id,)
- return self.tk.call(args)
- def winfo_pixels(self, number):
- """Rounded integer value of winfo_fpixels."""
- return getint(
- self.tk.call('winfo', 'pixels', self._w, number))
- def winfo_pointerx(self):
- """Return the x coordinate of the pointer on the root window."""
- return getint(
- self.tk.call('winfo', 'pointerx', self._w))
- def winfo_pointerxy(self):
- """Return a tuple of x and y coordinates of the pointer on the root window."""
- return self._getints(
- self.tk.call('winfo', 'pointerxy', self._w))
- def winfo_pointery(self):
- """Return the y coordinate of the pointer on the root window."""
- return getint(
- self.tk.call('winfo', 'pointery', self._w))
- def winfo_reqheight(self):
- """Return requested height of this widget."""
- return getint(
- self.tk.call('winfo', 'reqheight', self._w))
- def winfo_reqwidth(self):
- """Return requested width of this widget."""
- return getint(
- self.tk.call('winfo', 'reqwidth', self._w))
- def winfo_rgb(self, color):
- """Return tuple of decimal values for red, green, blue for
- COLOR in this widget."""
- return self._getints(
- self.tk.call('winfo', 'rgb', self._w, color))
- def winfo_rootx(self):
- """Return x coordinate of upper left corner of this widget on the
- root window."""
- return getint(
- self.tk.call('winfo', 'rootx', self._w))
- def winfo_rooty(self):
- """Return y coordinate of upper left corner of this widget on the
- root window."""
- return getint(
- self.tk.call('winfo', 'rooty', self._w))
- def winfo_screen(self):
- """Return the screen name of this widget."""
- return self.tk.call('winfo', 'screen', self._w)
- def winfo_screencells(self):
- """Return the number of the cells in the colormap of the screen
- of this widget."""
- return getint(
- self.tk.call('winfo', 'screencells', self._w))
- def winfo_screendepth(self):
- """Return the number of bits per pixel of the root window of the
- screen of this widget."""
- return getint(
- self.tk.call('winfo', 'screendepth', self._w))
- def winfo_screenheight(self):
- """Return the number of pixels of the height of the screen of this widget
- in pixel."""
- return getint(
- self.tk.call('winfo', 'screenheight', self._w))
- def winfo_screenmmheight(self):
- """Return the number of pixels of the height of the screen of
- this widget in mm."""
- return getint(
- self.tk.call('winfo', 'screenmmheight', self._w))
- def winfo_screenmmwidth(self):
- """Return the number of pixels of the width of the screen of
- this widget in mm."""
- return getint(
- self.tk.call('winfo', 'screenmmwidth', self._w))
- def winfo_screenvisual(self):
- """Return one of the strings directcolor, grayscale, pseudocolor,
- staticcolor, staticgray, or truecolor for the default
- colormodel of this screen."""
- return self.tk.call('winfo', 'screenvisual', self._w)
- def winfo_screenwidth(self):
- """Return the number of pixels of the width of the screen of
- this widget in pixel."""
- return getint(
- self.tk.call('winfo', 'screenwidth', self._w))
- def winfo_server(self):
- """Return information of the X-Server of the screen of this widget in
- the form "XmajorRminor vendor vendorVersion"."""
- return self.tk.call('winfo', 'server', self._w)
- def winfo_toplevel(self):
- """Return the toplevel widget of this widget."""
- return self._nametowidget(self.tk.call(
- 'winfo', 'toplevel', self._w))
- def winfo_viewable(self):
- """Return true if the widget and all its higher ancestors are mapped."""
- return getint(
- self.tk.call('winfo', 'viewable', self._w))
- def winfo_visual(self):
- """Return one of the strings directcolor, grayscale, pseudocolor,
- staticcolor, staticgray, or truecolor for the
- colormodel of this widget."""
- return self.tk.call('winfo', 'visual', self._w)
- def winfo_visualid(self):
- """Return the X identifier for the visual for this widget."""
- return self.tk.call('winfo', 'visualid', self._w)
- def winfo_visualsavailable(self, includeids=0):
- """Return a list of all visuals available for the screen
- of this widget.
-
- Each item in the list consists of a visual name (see winfo_visual), a
- depth and if INCLUDEIDS=1 is given also the X identifier."""
- data = self.tk.split(
- self.tk.call('winfo', 'visualsavailable', self._w,
- includeids and 'includeids' or None))
- if type(data) is StringType:
- data = [self.tk.split(data)]
- return map(self.__winfo_parseitem, data)
- def __winfo_parseitem(self, t):
- """Internal function."""
- return t[:1] + tuple(map(self.__winfo_getint, t[1:]))
- def __winfo_getint(self, x):
- """Internal function."""
- return int(x, 0)
- def winfo_vrootheight(self):
- """Return the height of the virtual root window associated with this
- widget in pixels. If there is no virtual root window return the
- height of the screen."""
- return getint(
- self.tk.call('winfo', 'vrootheight', self._w))
- def winfo_vrootwidth(self):
- """Return the width of the virtual root window associated with this
- widget in pixel. If there is no virtual root window return the
- width of the screen."""
- return getint(
- self.tk.call('winfo', 'vrootwidth', self._w))
- def winfo_vrootx(self):
- """Return the x offset of the virtual root relative to the root
- window of the screen of this widget."""
- return getint(
- self.tk.call('winfo', 'vrootx', self._w))
- def winfo_vrooty(self):
- """Return the y offset of the virtual root relative to the root
- window of the screen of this widget."""
- return getint(
- self.tk.call('winfo', 'vrooty', self._w))
- def winfo_width(self):
- """Return the width of this widget."""
- return getint(
- self.tk.call('winfo', 'width', self._w))
- def winfo_x(self):
- """Return the x coordinate of the upper left corner of this widget
- in the parent."""
- return getint(
- self.tk.call('winfo', 'x', self._w))
- def winfo_y(self):
- """Return the y coordinate of the upper left corner of this widget
- in the parent."""
- return getint(
- self.tk.call('winfo', 'y', self._w))
- def update(self):
- """Enter event loop until all pending events have been processed by Tcl."""
- self.tk.call('update')
- def update_idletasks(self):
- """Enter event loop until all idle callbacks have been called. This
- will update the display of windows but not process events caused by
- the user."""
- self.tk.call('update', 'idletasks')
- def bindtags(self, tagList=None):
- """Set or get the list of bindtags for this widget.
-
- With no argument return the list of all bindtags associated with
- this widget. With a list of strings as argument the bindtags are
- set to this list. The bindtags determine in which order events are
- processed (see bind)."""
- if tagList is None:
- return self.tk.splitlist(
- self.tk.call('bindtags', self._w))
- else:
- self.tk.call('bindtags', self._w, tagList)
- def _bind(self, what, sequence, func, add, needcleanup=1):
- """Internal function."""
- if type(func) is StringType:
- self.tk.call(what + (sequence, func))
- elif func:
- funcid = self._register(func, self._substitute,
- needcleanup)
- cmd = ('%sif {"[%s %s]" == "break"} break\n'
- %
- (add and '+' or '',
- funcid, self._subst_format_str))
- self.tk.call(what + (sequence, cmd))
- return funcid
- elif sequence:
- return self.tk.call(what + (sequence,))
- else:
- return self.tk.splitlist(self.tk.call(what))
- def bind(self, sequence=None, func=None, add=None):
- """Bind to this widget at event SEQUENCE a call to function FUNC.
-
- SEQUENCE is a string of concatenated event
- patterns. An event pattern is of the form
- <MODIFIER-MODIFIER-TYPE-DETAIL> where MODIFIER is one
- of Control, Mod2, M2, Shift, Mod3, M3, Lock, Mod4, M4,
- Button1, B1, Mod5, M5 Button2, B2, Meta, M, Button3,
- B3, Alt, Button4, B4, Double, Button5, B5 Triple,
- Mod1, M1. TYPE is one of Activate, Enter, Map,
- ButtonPress, Button, Expose, Motion, ButtonRelease
- FocusIn, MouseWheel, Circulate, FocusOut, Property,
- Colormap, Gravity Reparent, Configure, KeyPress, Key,
- Unmap, Deactivate, KeyRelease Visibility, Destroy,
- Leave and DETAIL is the button number for ButtonPress,
- ButtonRelease and DETAIL is the Keysym for KeyPress and
- KeyRelease. Examples are
- <Control-Button-1> for pressing Control and mouse button 1 or
- <Alt-A> for pressing A and the Alt key (KeyPress can be omitted).
- An event pattern can also be a virtual event of the form
- <<AString>> where AString can be arbitrary. This
- event can be generated by event_generate.
- If events are concatenated they must appear shortly
- after each other.
-
- FUNC will be called if the event sequence occurs with an
- instance of Event as argument. If the return value of FUNC is
- "break" no further bound function is invoked.
-
- An additional boolean parameter ADD specifies whether FUNC will
- be called additionally to the other bound function or whether
- it will replace the previous function.
-
- Bind will return an identifier to allow deletion of the bound function with
- unbind without memory leak.
-
- If FUNC or SEQUENCE is omitted the bound function or list
- of bound events are returned."""
-
- return self._bind(('bind', self._w), sequence, func, add)
- def unbind(self, sequence, funcid=None):
- """Unbind for this widget for event SEQUENCE the
- function identified with FUNCID."""
- self.tk.call('bind', self._w, sequence, '')
- if funcid:
- self.deletecommand(funcid)
- def bind_all(self, sequence=None, func=None, add=None):
- """Bind to all widgets at an event SEQUENCE a call to function FUNC.
- An additional boolean parameter ADD specifies whether FUNC will
- be called additionally to the other bound function or whether
- it will replace the previous function. See bind for the return value."""
- return self._bind(('bind', 'all'), sequence, func, add, 0)
- def unbind_all(self, sequence):
- """Unbind for all widgets for event SEQUENCE all functions."""
- self.tk.call('bind', 'all' , sequence, '')
- def bind_class(self, className, sequence=None, func=None, add=None):
-
- """Bind to widgets with bindtag CLASSNAME at event
- SEQUENCE a call of function FUNC. An additional
- boolean parameter ADD specifies whether FUNC will be
- called additionally to the other bound function or
- whether it will replace the previous function. See bind for
- the return value."""
-
- return self._bind(('bind', className), sequence, func, add, 0)
- def unbind_class(self, className, sequence):
- """Unbind for a all widgets with bindtag CLASSNAME for event SEQUENCE
- all functions."""
- self.tk.call('bind', className , sequence, '')
- def mainloop(self, n=0):
- """Call the mainloop of Tk."""
- self.tk.mainloop(n)
- def quit(self):
- """Quit the Tcl interpreter. All widgets will be destroyed."""
- self.tk.quit()
- def _getints(self, string):
- """Internal function."""
- if string:
- return tuple(map(getint, self.tk.splitlist(string)))
- def _getdoubles(self, string):
- """Internal function."""
- if string:
- return tuple(map(getdouble, self.tk.splitlist(string)))
- def _getboolean(self, string):
- """Internal function."""
- if string:
- return self.tk.getboolean(string)
- def _displayof(self, displayof):
- """Internal function."""
- if displayof:
- return ('-displayof', displayof)
- if displayof is None:
- return ('-displayof', self._w)
- return ()
- def _options(self, cnf, kw = None):
- """Internal function."""
- if kw:
- cnf = _cnfmerge((cnf, kw))
- else:
- cnf = _cnfmerge(cnf)
- res = ()
- for k, v in cnf.items():
- if v is not None:
- if k[-1] == '_': k = k[:-1]
- if callable(v):
- v = self._register(v)
- res = res + ('-'+k, v)
- return res
- def nametowidget(self, name):
- """Return the Tkinter instance of a widget identified by
- its Tcl name NAME."""
- w = self
- if name[0] == '.':
- w = w._root()
- name = name[1:]
- while name:
- i = name.find('.')
- if i >= 0:
- name, tail = name[:i], name[i+1:]
- else:
- tail = ''
- w = w.children[name]
- name = tail
- return w
- _nametowidget = nametowidget
- def _register(self, func, subst=None, needcleanup=1):
- """Return a newly created Tcl function. If this
- function is called, the Python function FUNC will
- be executed. An optional function SUBST can
- be given which will be executed before FUNC."""
- f = CallWrapper(func, subst, self).__call__
- name = repr(id(f))
- try:
- func = func.im_func
- except AttributeError:
- pass
- try:
- name = name + func.__name__
- except AttributeError:
- pass
- self.tk.createcommand(name, f)
- if needcleanup:
- if self._tclCommands is None:
- self._tclCommands = []
- self._tclCommands.append(name)
- #print '+ Tkinter created command', name
- return name
- register = _register
- def _root(self):
- """Internal function."""
- w = self
- while w.master: w = w.master
- return w
- _subst_format = ('%#', '%b', '%f', '%h', '%k',
- '%s', '%t', '%w', '%x', '%y',
- '%A', '%E', '%K', '%N', '%W', '%T', '%X', '%Y', '%D')
- _subst_format_str = " ".join(_subst_format)
- def _substitute(self, *args):
- """Internal function."""
- if len(args) != len(self._subst_format): return args
- getboolean = self.tk.getboolean
-
- getint = int
- def getint_event(s):
- """Tk changed behavior in 8.4.2, returning "??" rather more often."""
- try:
- return int(s)
- except ValueError:
- return s
-
- nsign, b, f, h, k, s, t, w, x, y, A, E, K, N, W, T, X, Y, D = args
- # Missing: (a, c, d, m, o, v, B, R)
- e = Event()
- # serial field: valid vor all events
- # number of button: ButtonPress and ButtonRelease events only
- # height field: Configure, ConfigureRequest, Create,
- # ResizeRequest, and Expose events only
- # keycode field: KeyPress and KeyRelease events only
- # time field: "valid for events that contain a time field"
- # width field: Configure, ConfigureRequest, Create, ResizeRequest,
- # and Expose events only
- # x field: "valid for events that contain a x field"
- # y field: "valid for events that contain a y field"
- # keysym as decimal: KeyPress and KeyRelease events only
- # x_root, y_root fields: ButtonPress, ButtonRelease, KeyPress,
- # KeyRelease,and Motion events
- e.serial = getint(nsign)
- e.num = getint_event(b)
- try: e.focus = getboolean(f)
- except TclError: pass
- e.height = getint_event(h)
- e.keycode = getint_event(k)
- e.state = getint_event(s)
- e.time = getint_event(t)
- e.width = getint_event(w)
- e.x = getint_event(x)
- e.y = getint_event(y)
- e.char = A
- try: e.send_event = getboolean(E)
- except TclError: pass
- e.keysym = K
- e.keysym_num = getint_event(N)
- e.type = T
- try:
- e.widget = self._nametowidget(W)
- except KeyError:
- e.widget = W
- e.x_root = getint_event(X)
- e.y_root = getint_event(Y)
- try:
- e.delta = getint(D)
- except ValueError:
- e.delta = 0
- return (e,)
- def _report_exception(self):
- """Internal function."""
- import sys
- exc, val, tb = sys.exc_type, sys.exc_value, sys.exc_traceback
- root = self._root()
- root.report_callback_exception(exc, val, tb)
- def _configure(self, cmd, cnf, kw):
- """Internal function."""
- if kw:
- cnf = _cnfmerge((cnf, kw))
- elif cnf:
- cnf = _cnfmerge(cnf)
- if cnf is None:
- cnf = {}
- for x in self.tk.split(
- self.tk.call(_flatten((self._w, cmd)))):
- cnf[x[0][1:]] = (x[0][1:],) + x[1:]
- return cnf
- if type(cnf) is StringType:
- x = self.tk.split(
- self.tk.call(_flatten((self._w, cmd, '-'+cnf))))
- return (x[0][1:],) + x[1:]
- self.tk.call(_flatten((self._w, cmd)) + self._options(cnf))
- # These used to be defined in Widget:
- def configure(self, cnf=None, **kw):
- """Configure resources of a widget.
-
- The values for resources are specified as keyword
- arguments. To get an overview about
- the allowed keyword arguments call the method keys.
- """
- return self._configure('configure', cnf, kw)
- config = configure
- def cget(self, key):
- """Return the resource value for a KEY given as string."""
- return self.tk.call(self._w, 'cget', '-' + key)
- __getitem__ = cget
- def __setitem__(self, key, value):
- self.configure({key: value})
- def keys(self):
- """Return a list of all resource names of this widget."""
- return map(lambda x: x[0][1:],
- self.tk.split(self.tk.call(self._w, 'configure')))
- def __str__(self):
- """Return the window path name of this widget."""
- return self._w
- # Pack methods that apply to the master
- _noarg_ = ['_noarg_']
- def pack_propagate(self, flag=_noarg_):
- """Set or get the status for propagation of geometry information.
-
- A boolean argument specifies whether the geometry information
- of the slaves will determine the size of this widget. If no argument
- is given the current setting will be returned.
- """
- if flag is Misc._noarg_:
- return self._getboolean(self.tk.call(
- 'pack', 'propagate', self._w))
- else:
- self.tk.call('pack', 'propagate', self._w, flag)
- propagate = pack_propagate
- def pack_slaves(self):
- """Return a list of all slaves of this widget
- in its packing order."""
- return map(self._nametowidget,
- self.tk.splitlist(
- self.tk.call('pack', 'slaves', self._w)))
- slaves = pack_slaves
- # Place method that applies to the master
- def place_slaves(self):
- """Return a list of all slaves of this widget
- in its packing order."""
- return map(self._nametowidget,
- self.tk.splitlist(
- self.tk.call(
- 'place', 'slaves', self._w)))
- # Grid methods that apply to the master
- def grid_bbox(self, column=None, row=None, col2=None, row2=None):
- """Return a tuple of integer coordinates for the bounding
- box of this widget controlled by the geometry manager grid.
-
- If COLUMN, ROW is given the bounding box applies from
- the cell with row and column 0 to the specified
- cell. If COL2 and ROW2 are given the bounding box
- starts at that cell.
-
- The returned integers specify the offset of the upper left
- corner in the master widget and the width and height.
- """
- args = ('grid', 'bbox', self._w)
- if column is not None and row is not None:
- args = args + (column, row)
- if col2 is not None and row2 is not None:
- args = args + (col2, row2)
- return self._getints(self.tk.call(*args)) or None
-
- bbox = grid_bbox
- def _grid_configure(self, command, index, cnf, kw):
- """Internal function."""
- if type(cnf) is StringType and not kw:
- if cnf[-1:] == '_':
- cnf = cnf[:-1]
- if cnf[:1] != '-':
- cnf = '-'+cnf
- options = (cnf,)
- else:
- options = self._options(cnf, kw)
- if not options:
- res = self.tk.call('grid',
- command, self._w, index)
- words = self.tk.splitlist(res)
- dict = {}
- for i in range(0, len(words), 2):
- key = words[i][1:]
- value = words[i+1]
- if not value:
- value = None
- elif '.' in value:
- value = getdouble(value)
- else:
- value = getint(value)
- dict[key] = value
- return dict
- res = self.tk.call(
- ('grid', command, self._w, index)
- + options)
- if len(options) == 1:
- if not res: return None
- # In Tk 7.5, -width can be a float
- if '.' in res: return getdouble(res)
- return getint(res)
- def grid_columnconfigure(self, index, cnf={}, **kw):
- """Configure column INDEX of a grid.
-
- Valid resources are minsize (minimum size of the column),
- weight (how much does additional space propagate to this column)
- and pad (how much space to let additionally)."""
- return self._grid_configure('columnconfigure', index, cnf, kw)
- columnconfigure = grid_columnconfigure
- def grid_location(self, x, y):
- """Return a tuple of column and row which identify the cell
- at which the pixel at position X and Y inside the master
- widget is located."""
- return self._getints(
- self.tk.call(
- 'grid', 'location', self._w, x, y)) or None
- def grid_propagate(self, flag=_noarg_):
- """Set or get the status for propagation of geometry information.
-
- A boolean argument specifies whether the geometry information
- of the slaves will determine the size of this widget. If no argument
- is given, the current setting will be returned.
- """
- if flag is Misc._noarg_:
- return self._getboolean(self.tk.call(
- 'grid', 'propagate', self._w))
- else:
- self.tk.call('grid', 'propagate', self._w, flag)
- def grid_rowconfigure(self, index, cnf={}, **kw):
- """Configure row INDEX of a grid.
-
- Valid resources are minsize (minimum size of the row),
- weight (how much does additional space propagate to this row)
- and pad (how much space to let additionally)."""
- return self._grid_configure('rowconfigure', index, cnf, kw)
- rowconfigure = grid_rowconfigure
- def grid_size(self):
- """Return a tuple of the number of column and rows in the grid."""
- return self._getints(
- self.tk.call('grid', 'size', self._w)) or None
- size = grid_size
- def grid_slaves(self, row=None, column=None):
- """Return a list of all slaves of this widget
- in its packing order."""
- args = ()
- if row is not None:
- args = args + ('-row', row)
- if column is not None:
- args = args + ('-column', column)
- return map(self._nametowidget,
- self.tk.splitlist(self.tk.call(
- ('grid', 'slaves', self._w) + args)))
-
- # Support for the "event" command, new in Tk 4.2.
- # By Case Roole.
-
- def event_add(self, virtual, *sequences):
- """Bind a virtual event VIRTUAL (of the form <<Name>>)
- to an event SEQUENCE such that the virtual event is triggered
- whenever SEQUENCE occurs."""
- args = ('event', 'add', virtual) + sequences
- self.tk.call(args)
-
- def event_delete(self, virtual, *sequences):
- """Unbind a virtual event VIRTUAL from SEQUENCE."""
- args = ('event', 'delete', virtual) + sequences
- self.tk.call(args)
-
- def event_generate(self, sequence, **kw):
- """Generate an event SEQUENCE. Additional
- keyword arguments specify parameter of the event
- (e.g. x, y, rootx, rooty)."""
- args = ('event', 'generate', self._w, sequence)
- for k, v in kw.items():
- args = args + ('-%s' % k, str(v))
- self.tk.call(args)
-
- def event_info(self, virtual=None):
- """Return a list of all virtual events or the information
- about the SEQUENCE bound to the virtual event VIRTUAL."""
- return self.tk.splitlist(
- self.tk.call('event', 'info', virtual))
-
- # Image related commands
-
- def image_names(self):
- """Return a list of all existing image names."""
- return self.tk.call('image', 'names')
-
- def image_types(self):
- """Return a list of all available image types (e.g. phote bitmap)."""
- return self.tk.call('image', 'types')
-
-
-class CallWrapper:
- """Internal class. Stores function to call when some user
- defined Tcl function is called e.g. after an event occurred."""
- def __init__(self, func, subst, widget):
- """Store FUNC, SUBST and WIDGET as members."""
- self.func = func
- self.subst = subst
- self.widget = widget
- def __call__(self, *args):
- """Apply first function SUBST to arguments, than FUNC."""
- try:
- if self.subst:
- args = self.subst(*args)
- return self.func(*args)
- except SystemExit, msg:
- raise SystemExit, msg
- except:
- self.widget._report_exception()
-
-
-class Wm:
- """Provides functions for the communication with the window manager."""
-
- def wm_aspect(self,
- minNumer=None, minDenom=None,
- maxNumer=None, maxDenom=None):
- """Instruct the window manager to set the aspect ratio (width/height)
- of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
- of the actual values if no argument is given."""
- return self._getints(
- self.tk.call('wm', 'aspect', self._w,
- minNumer, minDenom,
- maxNumer, maxDenom))
- aspect = wm_aspect
-
- def wm_attributes(self, *args):
- """This subcommand returns or sets platform specific attributes
-
- The first form returns a list of the platform specific flags and
- their values. The second form returns the value for the specific
- option. The third form sets one or more of the values. The values
- are as follows:
-
- On Windows, -disabled gets or sets whether the window is in a
- disabled state. -toolwindow gets or sets the style of the window
- to toolwindow (as defined in the MSDN). -topmost gets or sets
- whether this is a topmost window (displays above all other
- windows).
-
- On Macintosh, XXXXX
-
- On Unix, there are currently no special attribute values.
- """
- args = ('wm', 'attributes', self._w) + args
- return self.tk.call(args)
- attributes=wm_attributes
-
- def wm_client(self, name=None):
- """Store NAME in WM_CLIENT_MACHINE property of this widget. Return
- current value."""
- return self.tk.call('wm', 'client', self._w, name)
- client = wm_client
- def wm_colormapwindows(self, *wlist):
- """Store list of window names (WLIST) into WM_COLORMAPWINDOWS property
- of this widget. This list contains windows whose colormaps differ from their
- parents. Return current list of widgets if WLIST is empty."""
- if len(wlist) > 1:
- wlist = (wlist,) # Tk needs a list of windows here
- args = ('wm', 'colormapwindows', self._w) + wlist
- return map(self._nametowidget, self.tk.call(args))
- colormapwindows = wm_colormapwindows
- def wm_command(self, value=None):
- """Store VALUE in WM_COMMAND property. It is the command
- which shall be used to invoke the application. Return current
- command if VALUE is None."""
- return self.tk.call('wm', 'command', self._w, value)
- command = wm_command
- def wm_deiconify(self):
- """Deiconify this widget. If it was never mapped it will not be mapped.
- On Windows it will raise this widget and give it the focus."""
- return self.tk.call('wm', 'deiconify', self._w)
- deiconify = wm_deiconify
    def wm_focusmodel(self, model=None):
        """Set focus model to MODEL. "active" means that this widget will claim
        the focus itself, "passive" means that the window manager shall give
        the focus. Return the current focus model if MODEL is None."""
        return self.tk.call('wm', 'focusmodel', self._w, model)
    focusmodel = wm_focusmodel
    def wm_frame(self):
        """Return the identifier for the decorative frame of this widget
        if present."""
        return self.tk.call('wm', 'frame', self._w)
    frame = wm_frame
    def wm_geometry(self, newGeometry=None):
        """Set geometry to NEWGEOMETRY of the form =widthxheight+x+y. Return
        the current value if None is given."""
        return self.tk.call('wm', 'geometry', self._w, newGeometry)
    geometry = wm_geometry
    def wm_grid(self,
                baseWidth=None, baseHeight=None,
                widthInc=None, heightInc=None):
        """Instruct the window manager that this widget shall only be
        resized on grid boundaries. WIDTHINC and HEIGHTINC are the width and
        height of a grid unit in pixels. BASEWIDTH and BASEHEIGHT are the
        number of grid units requested in Tk_GeometryRequest."""
        # The Tcl result is converted to a tuple of ints (or None).
        return self._getints(self.tk.call(
            'wm', 'grid', self._w,
            baseWidth, baseHeight, widthInc, heightInc))
    grid = wm_grid
    def wm_group(self, pathName=None):
        """Set the group leader widgets for related widgets to PATHNAME. Return
        the group leader of this widget if None is given."""
        return self.tk.call('wm', 'group', self._w, pathName)
    group = wm_group
    def wm_iconbitmap(self, bitmap=None, default=None):
        """Set bitmap for the iconified widget to BITMAP. Return
        the bitmap if None is given.

        Under Windows, the DEFAULT parameter can be used to set the icon
        for the widget and any descendents that don't have an icon set
        explicitly. DEFAULT can be the relative path to a .ico file
        (example: root.iconbitmap(default='myicon.ico') ). See Tk
        documentation for more information."""
        # When DEFAULT is given, BITMAP is ignored entirely: the call is
        # rewritten as "wm iconbitmap <w> -default <default>".
        if default:
            return self.tk.call('wm', 'iconbitmap', self._w, '-default', default)
        else:
            return self.tk.call('wm', 'iconbitmap', self._w, bitmap)
    iconbitmap = wm_iconbitmap
    def wm_iconify(self):
        """Display this widget as an icon."""
        return self.tk.call('wm', 'iconify', self._w)
    iconify = wm_iconify
    def wm_iconmask(self, bitmap=None):
        """Set mask for the icon bitmap of this widget. Return the
        mask if None is given."""
        return self.tk.call('wm', 'iconmask', self._w, bitmap)
    iconmask = wm_iconmask
    def wm_iconname(self, newName=None):
        """Set the name of the icon for this widget. Return the name if
        None is given."""
        return self.tk.call('wm', 'iconname', self._w, newName)
    iconname = wm_iconname
    def wm_iconposition(self, x=None, y=None):
        """Set the position of the icon of this widget to X and Y. Return
        a tuple of the current values of X and Y if None is given."""
        return self._getints(self.tk.call(
            'wm', 'iconposition', self._w, x, y))
    iconposition = wm_iconposition
    def wm_iconwindow(self, pathName=None):
        """Set widget PATHNAME to be displayed instead of icon. Return the current
        value if None is given."""
        return self.tk.call('wm', 'iconwindow', self._w, pathName)
    iconwindow = wm_iconwindow
    def wm_maxsize(self, width=None, height=None):
        """Set max WIDTH and HEIGHT for this widget. If the window is gridded
        the values are given in grid units. Return the current values if None
        is given."""
        return self._getints(self.tk.call(
            'wm', 'maxsize', self._w, width, height))
    maxsize = wm_maxsize
    def wm_minsize(self, width=None, height=None):
        """Set min WIDTH and HEIGHT for this widget. If the window is gridded
        the values are given in grid units. Return the current values if None
        is given."""
        return self._getints(self.tk.call(
            'wm', 'minsize', self._w, width, height))
    minsize = wm_minsize
    def wm_overrideredirect(self, boolean=None):
        """Instruct the window manager to ignore this widget
        if BOOLEAN is given with 1. Return the current value if None
        is given."""
        # The Tcl result is coerced to a Python boolean value.
        return self._getboolean(self.tk.call(
            'wm', 'overrideredirect', self._w, boolean))
    overrideredirect = wm_overrideredirect
    def wm_positionfrom(self, who=None):
        """Instruct the window manager that the position of this widget shall
        be defined by the user if WHO is "user", and by its own policy if WHO is
        "program"."""
        return self.tk.call('wm', 'positionfrom', self._w, who)
    positionfrom = wm_positionfrom
    def wm_protocol(self, name=None, func=None):
        """Bind function FUNC to command NAME for this widget.
        Return the function bound to NAME if None is given. NAME could be
        e.g. "WM_SAVE_YOURSELF" or "WM_DELETE_WINDOW"."""
        # A callable is first registered as a Tcl command name; anything
        # else (e.g. None, or an already-registered name) is passed through.
        if callable(func):
            command = self._register(func)
        else:
            command = func
        return self.tk.call(
            'wm', 'protocol', self._w, name, command)
    protocol = wm_protocol
    def wm_resizable(self, width=None, height=None):
        """Instruct the window manager whether this widget can be resized
        in WIDTH or HEIGHT. Both values are boolean values."""
        return self.tk.call('wm', 'resizable', self._w, width, height)
    resizable = wm_resizable
    def wm_sizefrom(self, who=None):
        """Instruct the window manager that the size of this widget shall
        be defined by the user if WHO is "user", and by its own policy if WHO is
        "program"."""
        return self.tk.call('wm', 'sizefrom', self._w, who)
    sizefrom = wm_sizefrom
    def wm_state(self, newstate=None):
        """Query or set the state of this widget as one of normal, icon,
        iconic (see wm_iconwindow), withdrawn, or zoomed (Windows only)."""
        return self.tk.call('wm', 'state', self._w, newstate)
    state = wm_state
    def wm_title(self, string=None):
        """Set the title of this widget.

        NOTE(review): passing None appears to query the current title,
        consistent with the other wm_* getter/setter pairs -- confirm
        against the Tcl "wm title" semantics."""
        return self.tk.call('wm', 'title', self._w, string)
    title = wm_title
    def wm_transient(self, master=None):
        """Instruct the window manager that this widget is transient
        with regard to widget MASTER."""
        return self.tk.call('wm', 'transient', self._w, master)
    transient = wm_transient
    def wm_withdraw(self):
        """Withdraw this widget from the screen such that it is unmapped
        and forgotten by the window manager. Re-draw it with wm_deiconify."""
        return self.tk.call('wm', 'withdraw', self._w)
    withdraw = wm_withdraw
-
-
class Tk(Misc, Wm):
    """Toplevel widget of Tk which represents mostly the main window
    of an application. It has an associated Tcl interpreter."""
    # Window path name of the root window in Tcl.
    _w = '.'
    def __init__(self, screenName=None, baseName=None, className='Tk',
                 useTk=1, sync=0, use=None):
        """Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will
        be created. BASENAME will be used for the identification of the profile file (see
        readprofile).
        It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME
        is the name of the widget class."""
        self.master = None
        self.children = {}
        self._tkloaded = 0
        # to avoid recursions in the getattr code in case of failure, we
        # ensure that self.tk is always _something_.
        self.tk = None
        if baseName is None:
            import sys, os
            baseName = os.path.basename(sys.argv[0])
            baseName, ext = os.path.splitext(baseName)
            # Keep unusual extensions as part of the base name.
            if ext not in ('.py', '.pyc', '.pyo'):
                baseName = baseName + ext
        interactive = 0
        self.tk = _tkinter.create(screenName, baseName, className, interactive, wantobjects, useTk, sync, use)
        if useTk:
            self._loadtk()
        self.readprofile(baseName, className)
    def loadtk(self):
        """Load Tk into the Tcl interpreter if it has not been loaded yet."""
        if not self._tkloaded:
            self.tk.loadtk()
            self._loadtk()
    def _loadtk(self):
        """Internal function. Finish Tk initialization: sanity-check the
        Tcl/Tk versions, register the tkerror/exit commands, and install
        the default WM_DELETE_WINDOW handler."""
        self._tkloaded = 1
        global _default_root
        if _MacOS and hasattr(_MacOS, 'SchedParams'):
            # Disable event scanning except for Command-Period
            _MacOS.SchedParams(1, 0)
            # Work around nasty MacTk bug
            # XXX Is this one still needed?
            self.update()
        # Version sanity checks
        tk_version = self.tk.getvar('tk_version')
        if tk_version != _tkinter.TK_VERSION:
            raise RuntimeError, \
            "tk.h version (%s) doesn't match libtk.a version (%s)" \
            % (_tkinter.TK_VERSION, tk_version)
        # Under unknown circumstances, tcl_version gets coerced to float
        tcl_version = str(self.tk.getvar('tcl_version'))
        if tcl_version != _tkinter.TCL_VERSION:
            raise RuntimeError, \
            "tcl.h version (%s) doesn't match libtcl.a version (%s)" \
            % (_tkinter.TCL_VERSION, tcl_version)
        if TkVersion < 4.0:
            raise RuntimeError, \
            "Tk 4.0 or higher is required; found Tk %s" \
            % str(TkVersion)
        # Create and register the tkerror and exit commands
        # We need to inline parts of _register here, _ register
        # would register differently-named commands.
        if self._tclCommands is None:
            self._tclCommands = []
        self.tk.createcommand('tkerror', _tkerror)
        self.tk.createcommand('exit', _exit)
        self._tclCommands.append('tkerror')
        self._tclCommands.append('exit')
        if _support_default_root and not _default_root:
            _default_root = self
        self.protocol("WM_DELETE_WINDOW", self.destroy)
    def destroy(self):
        """Destroy this and all descendants widgets. This will
        end the application of this Tcl interpreter."""
        for c in self.children.values(): c.destroy()
        self.tk.call('destroy', self._w)
        Misc.destroy(self)
        global _default_root
        if _support_default_root and _default_root is self:
            _default_root = None
    def readprofile(self, baseName, className):
        """Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
        the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if
        such a file exists in the home directory."""
        import os
        if os.environ.has_key('HOME'): home = os.environ['HOME']
        else: home = os.curdir
        class_tcl = os.path.join(home, '.%s.tcl' % className)
        class_py = os.path.join(home, '.%s.py' % className)
        base_tcl = os.path.join(home, '.%s.tcl' % baseName)
        base_py = os.path.join(home, '.%s.py' % baseName)
        # The profile scripts run in a namespace pre-seeded with 'self'
        # and everything Tkinter exports.
        dir = {'self': self}
        exec 'from Tkinter import *' in dir
        if os.path.isfile(class_tcl):
            self.tk.call('source', class_tcl)
        if os.path.isfile(class_py):
            execfile(class_py, dir)
        if os.path.isfile(base_tcl):
            self.tk.call('source', base_tcl)
        if os.path.isfile(base_py):
            execfile(base_py, dir)
    def report_callback_exception(self, exc, val, tb):
        """Internal function. It reports exception on sys.stderr."""
        import traceback, sys
        sys.stderr.write("Exception in Tkinter callback\n")
        # Mirror the interpreter's convention so post-mortem debugging
        # (e.g. pdb.pm()) can find the last error.
        sys.last_type = exc
        sys.last_value = val
        sys.last_traceback = tb
        traceback.print_exception(exc, val, tb)
    def __getattr__(self, attr):
        "Delegate attribute access to the interpreter object"
        return getattr(self.tk, attr)
-
-# Ideally, the classes Pack, Place and Grid disappear, the
-# pack/place/grid methods are defined on the Widget class, and
-# everybody uses w.pack_whatever(...) instead of Pack.whatever(w,
-# ...), with pack(), place() and grid() being short for
-# pack_configure(), place_configure() and grid_configure(), and
-# forget() being short for pack_forget(). As a practical matter, I'm
-# afraid that there is too much code out there that may be using the
-# Pack, Place or Grid class, so I leave them intact -- but only as
-# backwards compatibility features. Also note that those methods that
-# take a master as argument (e.g. pack_propagate) have been moved to
-# the Misc class (which now incorporates all methods common between
-# toplevel and interior widgets). Again, for compatibility, these are
-# copied into the Pack, Place or Grid class.
-
-
def Tcl(screenName=None, baseName=None, className='Tk', useTk=0):
    """Return a Tk instance whose Tcl interpreter does not load Tk
    by default (useTk=0): a plain Tcl interpreter wrapper."""
    return Tk(screenName, baseName, className, useTk)
-
class Pack:
    """Geometry manager Pack.

    Base class to use the methods pack_* in every widget."""
    def pack_configure(self, cnf={}, **kw):
        """Pack a widget in the parent widget. Use as options:
        after=widget - pack it after you have packed widget
        anchor=NSEW (or subset) - position widget according to
                                  given direction
        before=widget - pack it before you will pack widget
        expand=bool - expand widget if parent size grows
        fill=NONE or X or Y or BOTH - fill widget if widget grows
        in=master - use master to contain this widget
        ipadx=amount - add internal padding in x direction
        ipady=amount - add internal padding in y direction
        padx=amount - add padding in x direction
        pady=amount - add padding in y direction
        side=TOP or BOTTOM or LEFT or RIGHT - where to add this widget.
        """
        # NOTE(review): mutable default cnf={} -- appears to be only read
        # (via _options); confirm _options never mutates its argument.
        self.tk.call(
            ('pack', 'configure', self._w)
            + self._options(cnf, kw))
    pack = configure = config = pack_configure
    def pack_forget(self):
        """Unmap this widget and do not use it for the packing order."""
        self.tk.call('pack', 'forget', self._w)
    forget = pack_forget
    def pack_info(self):
        """Return information about the packing options
        for this widget."""
        words = self.tk.splitlist(
            self.tk.call('pack', 'info', self._w))
        # Tcl returns "-key value -key value ..."; strip the dash and
        # convert window path names back to widget objects.
        dict = {}  # NOTE: shadows the builtin 'dict'
        for i in range(0, len(words), 2):
            key = words[i][1:]
            value = words[i+1]
            if value[:1] == '.':
                value = self._nametowidget(value)
            dict[key] = value
        return dict
    info = pack_info
    propagate = pack_propagate = Misc.pack_propagate
    slaves = pack_slaves = Misc.pack_slaves
-
class Place:
    """Geometry manager Place.

    Base class to use the methods place_* in every widget."""
    def place_configure(self, cnf={}, **kw):
        """Place a widget in the parent widget. Use as options:
        in=master - master relative to which the widget is placed.
        x=amount - locate anchor of this widget at position x of master
        y=amount - locate anchor of this widget at position y of master
        relx=amount - locate anchor of this widget between 0.0 and 1.0
                      relative to width of master (1.0 is right edge)
        rely=amount - locate anchor of this widget between 0.0 and 1.0
                      relative to height of master (1.0 is bottom edge)
        anchor=NSEW (or subset) - position anchor according to given direction
        width=amount - width of this widget in pixel
        height=amount - height of this widget in pixel
        relwidth=amount - width of this widget between 0.0 and 1.0
                          relative to width of master (1.0 is the same width
                          as the master)
        relheight=amount - height of this widget between 0.0 and 1.0
                           relative to height of master (1.0 is the same
                           height as the master)
        bordermode="inside" or "outside" - whether to take border width of master widget
                                           into account
        """
        # 'in' is a Python keyword, so callers pass 'in_'; rewrite it
        # to the option name Tk expects.
        for k in ['in_']:
            if kw.has_key(k):
                kw[k[:-1]] = kw[k]
                del kw[k]
        self.tk.call(
            ('place', 'configure', self._w)
            + self._options(cnf, kw))
    place = configure = config = place_configure
    def place_forget(self):
        """Unmap this widget."""
        self.tk.call('place', 'forget', self._w)
    forget = place_forget
    def place_info(self):
        """Return information about the placing options
        for this widget."""
        words = self.tk.splitlist(
            self.tk.call('place', 'info', self._w))
        # Tcl returns "-key value ..." pairs; strip dashes and resolve
        # window path names to widget objects.
        dict = {}  # NOTE: shadows the builtin 'dict'
        for i in range(0, len(words), 2):
            key = words[i][1:]
            value = words[i+1]
            if value[:1] == '.':
                value = self._nametowidget(value)
            dict[key] = value
        return dict
    info = place_info
    slaves = place_slaves = Misc.place_slaves
-
class Grid:
    """Geometry manager Grid.

    Base class to use the methods grid_* in every widget."""
    # Thanks to Masazumi Yoshikawa (yosikawa@isi.edu)
    def grid_configure(self, cnf={}, **kw):
        """Position a widget in the parent widget in a grid. Use as options:
        column=number - use cell identified with given column (starting with 0)
        columnspan=number - this widget will span several columns
        in=master - use master to contain this widget
        ipadx=amount - add internal padding in x direction
        ipady=amount - add internal padding in y direction
        padx=amount - add padding in x direction
        pady=amount - add padding in y direction
        row=number - use cell identified with given row (starting with 0)
        rowspan=number - this widget will span several rows
        sticky=NSEW - if cell is larger on which sides will this
                      widget stick to the cell boundary
        """
        self.tk.call(
            ('grid', 'configure', self._w)
            + self._options(cnf, kw))
    grid = configure = config = grid_configure
    bbox = grid_bbox = Misc.grid_bbox
    columnconfigure = grid_columnconfigure = Misc.grid_columnconfigure
    def grid_forget(self):
        """Unmap this widget."""
        self.tk.call('grid', 'forget', self._w)
    forget = grid_forget
    def grid_remove(self):
        """Unmap this widget but remember the grid options."""
        self.tk.call('grid', 'remove', self._w)
    def grid_info(self):
        """Return information about the options
        for positioning this widget in a grid."""
        words = self.tk.splitlist(
            self.tk.call('grid', 'info', self._w))
        # Tcl returns "-key value ..." pairs; strip dashes and resolve
        # window path names to widget objects.
        dict = {}  # NOTE: shadows the builtin 'dict'
        for i in range(0, len(words), 2):
            key = words[i][1:]
            value = words[i+1]
            if value[:1] == '.':
                value = self._nametowidget(value)
            dict[key] = value
        return dict
    info = grid_info
    location = grid_location = Misc.grid_location
    propagate = grid_propagate = Misc.grid_propagate
    rowconfigure = grid_rowconfigure = Misc.grid_rowconfigure
    size = grid_size = Misc.grid_size
    slaves = grid_slaves = Misc.grid_slaves
-
class BaseWidget(Misc):
    """Internal class. Common constructor/destructor machinery for all
    concrete widgets: resolves the master, computes the Tk window path
    name, and maintains the parent's children registry."""
    def _setup(self, master, cnf):
        """Internal function. Sets up information about children."""
        if _support_default_root:
            global _default_root
            # A missing master falls back to (and lazily creates) the
            # process-wide default root window.
            if not master:
                if not _default_root:
                    _default_root = Tk()
                master = _default_root
        self.master = master
        self.tk = master.tk
        name = None
        if cnf.has_key('name'):
            name = cnf['name']
            del cnf['name']
        if not name:
            # Default widget name: the object's id().
            name = repr(id(self))
        self._name = name
        if master._w=='.':
            self._w = '.' + name
        else:
            self._w = master._w + '.' + name
        self.children = {}
        # A same-named sibling is destroyed before registration.
        if self.master.children.has_key(self._name):
            self.master.children[self._name].destroy()
        self.master.children[self._name] = self
    def __init__(self, master, widgetName, cnf={}, kw={}, extra=()):
        """Construct a widget with the parent widget MASTER, a name WIDGETNAME
        and appropriate options."""
        if kw:
            cnf = _cnfmerge((cnf, kw))
        self.widgetName = widgetName
        BaseWidget._setup(self, master, cnf)
        # Options keyed by a class (e.g. Pack) are applied after creation
        # via that class's configure().
        classes = []
        for k in cnf.keys():
            if type(k) is ClassType:
                classes.append((k, cnf[k]))
                del cnf[k]
        self.tk.call(
            (widgetName, self._w) + extra + self._options(cnf))
        for k, v in classes:
            k.configure(self, v)
    def destroy(self):
        """Destroy this and all descendants widgets."""
        for c in self.children.values(): c.destroy()
        self.tk.call('destroy', self._w)
        if self.master.children.has_key(self._name):
            del self.master.children[self._name]
        Misc.destroy(self)
    def _do(self, name, args=()):
        # XXX Obsolete -- better use self.tk.call directly!
        return self.tk.call((self._w, name) + args)
-
class Widget(BaseWidget, Pack, Place, Grid):
    """Internal class.

    Base class for a widget which can be positioned with the geometry managers
    Pack, Place or Grid. Purely a mixin combination; adds no behavior of
    its own."""
    pass
-
class Toplevel(BaseWidget, Wm):
    """Toplevel widget, e.g. for dialogs."""
    def __init__(self, master=None, cnf={}, **kw):
        """Construct a toplevel widget with the parent MASTER.

        Valid resource names: background, bd, bg, borderwidth, class,
        colormap, container, cursor, height, highlightbackground,
        highlightcolor, highlightthickness, menu, relief, screen, takefocus,
        use, visual, width."""
        if kw:
            cnf = _cnfmerge((cnf, kw))
        extra = ()
        # Some options are only legal at creation time and must be passed
        # on the "toplevel" command line rather than via configure.
        for wmkey in ['screen', 'class_', 'class', 'visual',
                      'colormap']:
            if cnf.has_key(wmkey):
                val = cnf[wmkey]
                # TBD: a hack needed because some keys
                # are not valid as keyword arguments
                if wmkey[-1] == '_': opt = '-'+wmkey[:-1]
                else: opt = '-'+wmkey
                extra = extra + (opt, val)
                del cnf[wmkey]
        BaseWidget.__init__(self, master, 'toplevel', cnf, {}, extra)
        root = self._root()
        # Inherit icon name and title from the root window, and destroy
        # (not just withdraw) on window-manager close.
        self.iconname(root.iconname())
        self.title(root.title())
        self.protocol("WM_DELETE_WINDOW", self.destroy)
-
class Button(Widget):
    """Button widget."""
    def __init__(self, master=None, cnf={}, **kw):
        """Construct a button widget with the parent MASTER.

        STANDARD OPTIONS

            activebackground, activeforeground, anchor,
            background, bitmap, borderwidth, cursor,
            disabledforeground, font, foreground
            highlightbackground, highlightcolor,
            highlightthickness, image, justify,
            padx, pady, relief, repeatdelay,
            repeatinterval, takefocus, text,
            textvariable, underline, wraplength

        WIDGET-SPECIFIC OPTIONS

            command, compound, default, height,
            overrelief, state, width
        """
        Widget.__init__(self, master, 'button', cnf, kw)

    def tkButtonEnter(self, *dummy):
        """Invoke the script-level Tk procedure tkButtonEnter on this widget."""
        self.tk.call('tkButtonEnter', self._w)

    def tkButtonLeave(self, *dummy):
        """Invoke the script-level Tk procedure tkButtonLeave on this widget."""
        self.tk.call('tkButtonLeave', self._w)

    def tkButtonDown(self, *dummy):
        """Invoke the script-level Tk procedure tkButtonDown on this widget."""
        self.tk.call('tkButtonDown', self._w)

    def tkButtonUp(self, *dummy):
        """Invoke the script-level Tk procedure tkButtonUp on this widget."""
        self.tk.call('tkButtonUp', self._w)

    def tkButtonInvoke(self, *dummy):
        """Invoke the script-level Tk procedure tkButtonInvoke on this widget."""
        self.tk.call('tkButtonInvoke', self._w)

    def flash(self):
        """Flash the button.

        This is accomplished by redisplaying
        the button several times, alternating between active and
        normal colors. At the end of the flash the button is left
        in the same normal/active state as when the command was
        invoked. This command is ignored if the button's state is
        disabled.
        """
        self.tk.call(self._w, 'flash')

    def invoke(self):
        """Invoke the command associated with the button.

        The return value is the return value from the command,
        or an empty string if there is no command associated with
        the button. This command is ignored if the button's state
        is disabled.
        """
        return self.tk.call(self._w, 'invoke')
-
-# Indices:
-# XXX I don't like these -- take them away
def AtEnd():
    """Return the Tk index string for the end position ("end")."""
    return 'end'
def AtInsert(*args):
    """Return the Tk "insert" index, with every truthy modifier in ARGS
    appended, separated by single spaces."""
    parts = ['insert']
    parts.extend([arg for arg in args if arg])
    return ' '.join(parts)
def AtSelFirst():
    """Return the Tk index string for the start of the selection ("sel.first")."""
    return 'sel.first'
def AtSelLast():
    """Return the Tk index string for the end of the selection ("sel.last")."""
    return 'sel.last'
def At(x, y=None):
    """Return a Tk coordinate index string: "@x" or, when Y is given,
    "@x,y". Coordinates are rendered with repr()."""
    if y is None:
        return '@%r' % (x,)
    return '@%r,%r' % (x, y)
-
class Canvas(Widget):
    """Canvas widget to display graphical elements like lines or text."""
    def __init__(self, master=None, cnf={}, **kw):
        """Construct a canvas widget with the parent MASTER.

        Valid resource names: background, bd, bg, borderwidth, closeenough,
        confine, cursor, height, highlightbackground, highlightcolor,
        highlightthickness, insertbackground, insertborderwidth,
        insertofftime, insertontime, insertwidth, offset, relief,
        scrollregion, selectbackground, selectborderwidth, selectforeground,
        state, takefocus, width, xscrollcommand, xscrollincrement,
        yscrollcommand, yscrollincrement."""
        Widget.__init__(self, master, 'canvas', cnf, kw)
    def addtag(self, *args):
        """Internal function. Dispatch to the canvas "addtag" subcommand."""
        self.tk.call((self._w, 'addtag') + args)
    def addtag_above(self, newtag, tagOrId):
        """Add tag NEWTAG to all items above TAGORID."""
        self.addtag(newtag, 'above', tagOrId)
    def addtag_all(self, newtag):
        """Add tag NEWTAG to all items."""
        self.addtag(newtag, 'all')
    def addtag_below(self, newtag, tagOrId):
        """Add tag NEWTAG to all items below TAGORID."""
        self.addtag(newtag, 'below', tagOrId)
    def addtag_closest(self, newtag, x, y, halo=None, start=None):
        """Add tag NEWTAG to item which is closest to pixel at X, Y.
        If several match take the top-most.
        All items closer than HALO are considered overlapping (all are
        closest). If START is specified the next below this tag is taken."""
        self.addtag(newtag, 'closest', x, y, halo, start)
    def addtag_enclosed(self, newtag, x1, y1, x2, y2):
        """Add tag NEWTAG to all items in the rectangle defined
        by X1,Y1,X2,Y2."""
        self.addtag(newtag, 'enclosed', x1, y1, x2, y2)
    def addtag_overlapping(self, newtag, x1, y1, x2, y2):
        """Add tag NEWTAG to all items which overlap the rectangle
        defined by X1,Y1,X2,Y2."""
        self.addtag(newtag, 'overlapping', x1, y1, x2, y2)
    def addtag_withtag(self, newtag, tagOrId):
        """Add tag NEWTAG to all items with TAGORID."""
        self.addtag(newtag, 'withtag', tagOrId)
    def bbox(self, *args):
        """Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
        which encloses all items with tags specified as arguments.
        Returns None when Tk reports no bounding box."""
        return self._getints(
            self.tk.call((self._w, 'bbox') + args)) or None
    def tag_unbind(self, tagOrId, sequence, funcid=None):
        """Unbind for all items with TAGORID for event SEQUENCE the
        function identified with FUNCID."""
        self.tk.call(self._w, 'bind', tagOrId, sequence, '')
        if funcid:
            self.deletecommand(funcid)
    def tag_bind(self, tagOrId, sequence=None, func=None, add=None):
        """Bind to all items with TAGORID at event SEQUENCE a call to function FUNC.

        An additional boolean parameter ADD specifies whether FUNC will be
        called additionally to the other bound function or whether it will
        replace the previous function. See bind for the return value."""
        return self._bind((self._w, 'bind', tagOrId),
                          sequence, func, add)
    def canvasx(self, screenx, gridspacing=None):
        """Return the canvas x coordinate of pixel position SCREENX rounded
        to nearest multiple of GRIDSPACING units."""
        return getdouble(self.tk.call(
            self._w, 'canvasx', screenx, gridspacing))
    def canvasy(self, screeny, gridspacing=None):
        """Return the canvas y coordinate of pixel position SCREENY rounded
        to nearest multiple of GRIDSPACING units."""
        return getdouble(self.tk.call(
            self._w, 'canvasy', screeny, gridspacing))
    def coords(self, *args):
        """Return a list of coordinates for the item given in ARGS."""
        # XXX Should use _flatten on args
        return map(getdouble,
                   self.tk.splitlist(
                       self.tk.call((self._w, 'coords') + args)))
    def _create(self, itemType, args, kw): # Args: (val, val, ..., cnf={})
        """Internal function. Create a canvas item of ITEMTYPE; the last
        positional arg may be an options dict/tuple. Returns the item id."""
        args = _flatten(args)
        cnf = args[-1]
        if type(cnf) in (DictionaryType, TupleType):
            args = args[:-1]
        else:
            cnf = {}
        return getint(self.tk.call(
            self._w, 'create', itemType,
            *(args + self._options(cnf, kw))))
    def create_arc(self, *args, **kw):
        """Create arc shaped region with coordinates x1,y1,x2,y2."""
        return self._create('arc', args, kw)
    def create_bitmap(self, *args, **kw):
        """Create bitmap with coordinates x1,y1."""
        return self._create('bitmap', args, kw)
    def create_image(self, *args, **kw):
        """Create image item with coordinates x1,y1."""
        return self._create('image', args, kw)
    def create_line(self, *args, **kw):
        """Create line with coordinates x1,y1,...,xn,yn."""
        return self._create('line', args, kw)
    def create_oval(self, *args, **kw):
        """Create oval with coordinates x1,y1,x2,y2."""
        return self._create('oval', args, kw)
    def create_polygon(self, *args, **kw):
        """Create polygon with coordinates x1,y1,...,xn,yn."""
        return self._create('polygon', args, kw)
    def create_rectangle(self, *args, **kw):
        """Create rectangle with coordinates x1,y1,x2,y2."""
        return self._create('rectangle', args, kw)
    def create_text(self, *args, **kw):
        """Create text with coordinates x1,y1."""
        return self._create('text', args, kw)
    def create_window(self, *args, **kw):
        """Create window with coordinates x1,y1,x2,y2."""
        return self._create('window', args, kw)
    def dchars(self, *args):
        """Delete characters of text items identified by tag or id in ARGS (possibly
        several times) from FIRST to LAST character (including)."""
        self.tk.call((self._w, 'dchars') + args)
    def delete(self, *args):
        """Delete items identified by all tag or ids contained in ARGS."""
        self.tk.call((self._w, 'delete') + args)
    def dtag(self, *args):
        """Delete tag or id given as last arguments in ARGS from items
        identified by first argument in ARGS."""
        self.tk.call((self._w, 'dtag') + args)
    def find(self, *args):
        """Internal function. Dispatch to "find"; returns a tuple of item ids."""
        return self._getints(
            self.tk.call((self._w, 'find') + args)) or ()
    def find_above(self, tagOrId):
        """Return items above TAGORID."""
        return self.find('above', tagOrId)
    def find_all(self):
        """Return all items."""
        return self.find('all')
    def find_below(self, tagOrId):
        """Return all items below TAGORID."""
        return self.find('below', tagOrId)
    def find_closest(self, x, y, halo=None, start=None):
        """Return item which is closest to pixel at X, Y.
        If several match take the top-most.
        All items closer than HALO are considered overlapping (all are
        closest). If START is specified the next below this tag is taken."""
        return self.find('closest', x, y, halo, start)
    def find_enclosed(self, x1, y1, x2, y2):
        """Return all items in rectangle defined
        by X1,Y1,X2,Y2."""
        return self.find('enclosed', x1, y1, x2, y2)
    def find_overlapping(self, x1, y1, x2, y2):
        """Return all items which overlap the rectangle
        defined by X1,Y1,X2,Y2."""
        return self.find('overlapping', x1, y1, x2, y2)
    def find_withtag(self, tagOrId):
        """Return all items with TAGORID."""
        return self.find('withtag', tagOrId)
    def focus(self, *args):
        """Set focus to the first item specified in ARGS."""
        return self.tk.call((self._w, 'focus') + args)
    def gettags(self, *args):
        """Return tags associated with the first item specified in ARGS."""
        return self.tk.splitlist(
            self.tk.call((self._w, 'gettags') + args))
    def icursor(self, *args):
        """Set cursor at position POS in the item identified by TAGORID.
        In ARGS TAGORID must be first."""
        self.tk.call((self._w, 'icursor') + args)
    def index(self, *args):
        """Return position of cursor as integer in item specified in ARGS."""
        return getint(self.tk.call((self._w, 'index') + args))
    def insert(self, *args):
        """Insert TEXT in item TAGORID at position POS. ARGS must
        be TAGORID POS TEXT."""
        self.tk.call((self._w, 'insert') + args)
    def itemcget(self, tagOrId, option):
        """Return the resource value for an OPTION for item TAGORID."""
        return self.tk.call(
            (self._w, 'itemcget') + (tagOrId, '-'+option))
    def itemconfigure(self, tagOrId, cnf=None, **kw):
        """Configure resources of an item TAGORID.

        The values for resources are specified as keyword
        arguments. To get an overview about
        the allowed keyword arguments call the method without arguments.
        """
        return self._configure(('itemconfigure', tagOrId), cnf, kw)
    itemconfig = itemconfigure
    # lower, tkraise/lift hide Misc.lower, Misc.tkraise/lift,
    # so the preferred name for them is tag_lower, tag_raise
    # (similar to tag_bind, and similar to the Text widget);
    # unfortunately can't delete the old ones yet (maybe in 1.6)
    def tag_lower(self, *args):
        """Lower an item TAGORID given in ARGS
        (optional below another item)."""
        self.tk.call((self._w, 'lower') + args)
    lower = tag_lower
    def move(self, *args):
        """Move an item TAGORID given in ARGS."""
        self.tk.call((self._w, 'move') + args)
    def postscript(self, cnf={}, **kw):
        """Print the contents of the canvas to a postscript
        file. Valid options: colormap, colormode, file, fontmap,
        height, pageanchor, pageheight, pagewidth, pagex, pagey,
        rotate, width, x, y."""
        return self.tk.call((self._w, 'postscript') +
                            self._options(cnf, kw))
    def tag_raise(self, *args):
        """Raise an item TAGORID given in ARGS
        (optional above another item)."""
        self.tk.call((self._w, 'raise') + args)
    lift = tkraise = tag_raise
    def scale(self, *args):
        """Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE."""
        self.tk.call((self._w, 'scale') + args)
    def scan_mark(self, x, y):
        """Remember the current X, Y coordinates."""
        self.tk.call(self._w, 'scan', 'mark', x, y)
    def scan_dragto(self, x, y, gain=10):
        """Adjust the view of the canvas to GAIN times the
        difference between X and Y and the coordinates given in
        scan_mark."""
        self.tk.call(self._w, 'scan', 'dragto', x, y, gain)
    def select_adjust(self, tagOrId, index):
        """Adjust the end of the selection near the cursor of an item TAGORID to index."""
        self.tk.call(self._w, 'select', 'adjust', tagOrId, index)
    def select_clear(self):
        """Clear the selection if it is in this widget."""
        self.tk.call(self._w, 'select', 'clear')
    def select_from(self, tagOrId, index):
        """Set the fixed end of a selection in item TAGORID to INDEX."""
        self.tk.call(self._w, 'select', 'from', tagOrId, index)
    def select_item(self):
        """Return the item which has the selection."""
        return self.tk.call(self._w, 'select', 'item') or None
    def select_to(self, tagOrId, index):
        """Set the variable end of a selection in item TAGORID to INDEX."""
        self.tk.call(self._w, 'select', 'to', tagOrId, index)
    def type(self, tagOrId):
        """Return the type of the item TAGORID."""
        return self.tk.call(self._w, 'type', tagOrId) or None
    def xview(self, *args):
        """Query and change horizontal position of the view."""
        if not args:
            return self._getdoubles(self.tk.call(self._w, 'xview'))
        self.tk.call((self._w, 'xview') + args)
    def xview_moveto(self, fraction):
        """Adjusts the view in the window so that FRACTION of the
        total width of the canvas is off-screen to the left."""
        self.tk.call(self._w, 'xview', 'moveto', fraction)
    def xview_scroll(self, number, what):
        """Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
        self.tk.call(self._w, 'xview', 'scroll', number, what)
    def yview(self, *args):
        """Query and change vertical position of the view."""
        if not args:
            return self._getdoubles(self.tk.call(self._w, 'yview'))
        self.tk.call((self._w, 'yview') + args)
    def yview_moveto(self, fraction):
        """Adjusts the view in the window so that FRACTION of the
        total height of the canvas is off-screen to the top."""
        self.tk.call(self._w, 'yview', 'moveto', fraction)
    def yview_scroll(self, number, what):
        """Shift the y-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
        self.tk.call(self._w, 'yview', 'scroll', number, what)
-
-class Checkbutton(Widget):
- """Checkbutton widget which is either in on- or off-state."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a checkbutton widget with the parent MASTER.
-
- Valid resource names: activebackground, activeforeground, anchor,
- background, bd, bg, bitmap, borderwidth, command, cursor,
- disabledforeground, fg, font, foreground, height,
- highlightbackground, highlightcolor, highlightthickness, image,
- indicatoron, justify, offvalue, onvalue, padx, pady, relief,
- selectcolor, selectimage, state, takefocus, text, textvariable,
- underline, variable, width, wraplength."""
- Widget.__init__(self, master, 'checkbutton', cnf, kw)
- def deselect(self):
- """Put the button in off-state."""
- self.tk.call(self._w, 'deselect')
- def flash(self):
- """Flash the button."""
- self.tk.call(self._w, 'flash')
- def invoke(self):
- """Toggle the button and invoke a command if given as resource."""
- return self.tk.call(self._w, 'invoke')
- def select(self):
- """Put the button in on-state."""
- self.tk.call(self._w, 'select')
- def toggle(self):
- """Toggle the button."""
- self.tk.call(self._w, 'toggle')
-
-class Entry(Widget):
- """Entry widget which allows to display simple text."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct an entry widget with the parent MASTER.
-
- Valid resource names: background, bd, bg, borderwidth, cursor,
- exportselection, fg, font, foreground, highlightbackground,
- highlightcolor, highlightthickness, insertbackground,
- insertborderwidth, insertofftime, insertontime, insertwidth,
- invalidcommand, invcmd, justify, relief, selectbackground,
- selectborderwidth, selectforeground, show, state, takefocus,
- textvariable, validate, validatecommand, vcmd, width,
- xscrollcommand."""
- Widget.__init__(self, master, 'entry', cnf, kw)
- def delete(self, first, last=None):
- """Delete text from FIRST to LAST (not included)."""
- self.tk.call(self._w, 'delete', first, last)
- def get(self):
- """Return the text."""
- return self.tk.call(self._w, 'get')
- def icursor(self, index):
- """Insert cursor at INDEX."""
- self.tk.call(self._w, 'icursor', index)
- def index(self, index):
- """Return position of cursor."""
- return getint(self.tk.call(
- self._w, 'index', index))
- def insert(self, index, string):
- """Insert STRING at INDEX."""
- self.tk.call(self._w, 'insert', index, string)
- def scan_mark(self, x):
- """Remember the current X, Y coordinates."""
- self.tk.call(self._w, 'scan', 'mark', x)
- def scan_dragto(self, x):
- """Adjust the view of the canvas to 10 times the
- difference between X and Y and the coordinates given in
- scan_mark."""
- self.tk.call(self._w, 'scan', 'dragto', x)
- def selection_adjust(self, index):
- """Adjust the end of the selection near the cursor to INDEX."""
- self.tk.call(self._w, 'selection', 'adjust', index)
- select_adjust = selection_adjust
- def selection_clear(self):
- """Clear the selection if it is in this widget."""
- self.tk.call(self._w, 'selection', 'clear')
- select_clear = selection_clear
- def selection_from(self, index):
- """Set the fixed end of a selection to INDEX."""
- self.tk.call(self._w, 'selection', 'from', index)
- select_from = selection_from
- def selection_present(self):
- """Return whether the widget has the selection."""
- return self.tk.getboolean(
- self.tk.call(self._w, 'selection', 'present'))
- select_present = selection_present
- def selection_range(self, start, end):
- """Set the selection from START to END (not included)."""
- self.tk.call(self._w, 'selection', 'range', start, end)
- select_range = selection_range
- def selection_to(self, index):
- """Set the variable end of a selection to INDEX."""
- self.tk.call(self._w, 'selection', 'to', index)
- select_to = selection_to
- def xview(self, index):
- """Query and change horizontal position of the view."""
- self.tk.call(self._w, 'xview', index)
- def xview_moveto(self, fraction):
- """Adjust the view in the window so that FRACTION of the
- total width of the entry is off-screen to the left."""
- self.tk.call(self._w, 'xview', 'moveto', fraction)
- def xview_scroll(self, number, what):
- """Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
- self.tk.call(self._w, 'xview', 'scroll', number, what)
-
-class Frame(Widget):
- """Frame widget which may contain other widgets and can have a 3D border."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a frame widget with the parent MASTER.
-
- Valid resource names: background, bd, bg, borderwidth, class,
- colormap, container, cursor, height, highlightbackground,
- highlightcolor, highlightthickness, relief, takefocus, visual, width."""
- cnf = _cnfmerge((cnf, kw))
- extra = ()
- if cnf.has_key('class_'):
- extra = ('-class', cnf['class_'])
- del cnf['class_']
- elif cnf.has_key('class'):
- extra = ('-class', cnf['class'])
- del cnf['class']
- Widget.__init__(self, master, 'frame', cnf, {}, extra)
-
-class Label(Widget):
- """Label widget which can display text and bitmaps."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a label widget with the parent MASTER.
-
- STANDARD OPTIONS
-
- activebackground, activeforeground, anchor,
- background, bitmap, borderwidth, cursor,
- disabledforeground, font, foreground,
- highlightbackground, highlightcolor,
- highlightthickness, image, justify,
- padx, pady, relief, takefocus, text,
- textvariable, underline, wraplength
-
- WIDGET-SPECIFIC OPTIONS
-
- height, state, width
-
- """
- Widget.__init__(self, master, 'label', cnf, kw)
-
-class Listbox(Widget):
- """Listbox widget which can display a list of strings."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a listbox widget with the parent MASTER.
-
- Valid resource names: background, bd, bg, borderwidth, cursor,
- exportselection, fg, font, foreground, height, highlightbackground,
- highlightcolor, highlightthickness, relief, selectbackground,
- selectborderwidth, selectforeground, selectmode, setgrid, takefocus,
- width, xscrollcommand, yscrollcommand, listvariable."""
- Widget.__init__(self, master, 'listbox', cnf, kw)
- def activate(self, index):
- """Activate item identified by INDEX."""
- self.tk.call(self._w, 'activate', index)
- def bbox(self, *args):
- """Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
- which encloses the item identified by index in ARGS."""
- return self._getints(
- self.tk.call((self._w, 'bbox') + args)) or None
- def curselection(self):
- """Return list of indices of currently selected item."""
- # XXX Ought to apply self._getints()...
- return self.tk.splitlist(self.tk.call(
- self._w, 'curselection'))
- def delete(self, first, last=None):
- """Delete items from FIRST to LAST (not included)."""
- self.tk.call(self._w, 'delete', first, last)
- def get(self, first, last=None):
- """Get list of items from FIRST to LAST (not included)."""
- if last:
- return self.tk.splitlist(self.tk.call(
- self._w, 'get', first, last))
- else:
- return self.tk.call(self._w, 'get', first)
- def index(self, index):
- """Return index of item identified with INDEX."""
- i = self.tk.call(self._w, 'index', index)
- if i == 'none': return None
- return getint(i)
- def insert(self, index, *elements):
- """Insert ELEMENTS at INDEX."""
- self.tk.call((self._w, 'insert', index) + elements)
- def nearest(self, y):
- """Get index of item which is nearest to y coordinate Y."""
- return getint(self.tk.call(
- self._w, 'nearest', y))
- def scan_mark(self, x, y):
- """Remember the current X, Y coordinates."""
- self.tk.call(self._w, 'scan', 'mark', x, y)
- def scan_dragto(self, x, y):
- """Adjust the view of the listbox to 10 times the
- difference between X and Y and the coordinates given in
- scan_mark."""
- self.tk.call(self._w, 'scan', 'dragto', x, y)
- def see(self, index):
- """Scroll such that INDEX is visible."""
- self.tk.call(self._w, 'see', index)
- def selection_anchor(self, index):
- """Set the fixed end oft the selection to INDEX."""
- self.tk.call(self._w, 'selection', 'anchor', index)
- select_anchor = selection_anchor
- def selection_clear(self, first, last=None):
- """Clear the selection from FIRST to LAST (not included)."""
- self.tk.call(self._w,
- 'selection', 'clear', first, last)
- select_clear = selection_clear
- def selection_includes(self, index):
- """Return 1 if INDEX is part of the selection."""
- return self.tk.getboolean(self.tk.call(
- self._w, 'selection', 'includes', index))
- select_includes = selection_includes
- def selection_set(self, first, last=None):
- """Set the selection from FIRST to LAST (not included) without
- changing the currently selected elements."""
- self.tk.call(self._w, 'selection', 'set', first, last)
- select_set = selection_set
- def size(self):
- """Return the number of elements in the listbox."""
- return getint(self.tk.call(self._w, 'size'))
- def xview(self, *what):
- """Query and change horizontal position of the view."""
- if not what:
- return self._getdoubles(self.tk.call(self._w, 'xview'))
- self.tk.call((self._w, 'xview') + what)
- def xview_moveto(self, fraction):
- """Adjust the view in the window so that FRACTION of the
- total width of the entry is off-screen to the left."""
- self.tk.call(self._w, 'xview', 'moveto', fraction)
- def xview_scroll(self, number, what):
- """Shift the x-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
- self.tk.call(self._w, 'xview', 'scroll', number, what)
- def yview(self, *what):
- """Query and change vertical position of the view."""
- if not what:
- return self._getdoubles(self.tk.call(self._w, 'yview'))
- self.tk.call((self._w, 'yview') + what)
- def yview_moveto(self, fraction):
- """Adjust the view in the window so that FRACTION of the
- total width of the entry is off-screen to the top."""
- self.tk.call(self._w, 'yview', 'moveto', fraction)
- def yview_scroll(self, number, what):
- """Shift the y-view according to NUMBER which is measured in "units" or "pages" (WHAT)."""
- self.tk.call(self._w, 'yview', 'scroll', number, what)
- def itemcget(self, index, option):
- """Return the resource value for an ITEM and an OPTION."""
- return self.tk.call(
- (self._w, 'itemcget') + (index, '-'+option))
- def itemconfigure(self, index, cnf=None, **kw):
- """Configure resources of an ITEM.
-
- The values for resources are specified as keyword arguments.
- To get an overview about the allowed keyword arguments
- call the method without arguments.
- Valid resource names: background, bg, foreground, fg,
- selectbackground, selectforeground."""
- return self._configure(('itemconfigure', index), cnf, kw)
- itemconfig = itemconfigure
-
-class Menu(Widget):
- """Menu widget which allows to display menu bars, pull-down menus and pop-up menus."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct menu widget with the parent MASTER.
-
- Valid resource names: activebackground, activeborderwidth,
- activeforeground, background, bd, bg, borderwidth, cursor,
- disabledforeground, fg, font, foreground, postcommand, relief,
- selectcolor, takefocus, tearoff, tearoffcommand, title, type."""
- Widget.__init__(self, master, 'menu', cnf, kw)
- def tk_bindForTraversal(self):
- pass # obsolete since Tk 4.0
- def tk_mbPost(self):
- self.tk.call('tk_mbPost', self._w)
- def tk_mbUnpost(self):
- self.tk.call('tk_mbUnpost')
- def tk_traverseToMenu(self, char):
- self.tk.call('tk_traverseToMenu', self._w, char)
- def tk_traverseWithinMenu(self, char):
- self.tk.call('tk_traverseWithinMenu', self._w, char)
- def tk_getMenuButtons(self):
- return self.tk.call('tk_getMenuButtons', self._w)
- def tk_nextMenu(self, count):
- self.tk.call('tk_nextMenu', count)
- def tk_nextMenuEntry(self, count):
- self.tk.call('tk_nextMenuEntry', count)
- def tk_invokeMenu(self):
- self.tk.call('tk_invokeMenu', self._w)
- def tk_firstMenu(self):
- self.tk.call('tk_firstMenu', self._w)
- def tk_mbButtonDown(self):
- self.tk.call('tk_mbButtonDown', self._w)
- def tk_popup(self, x, y, entry=""):
- """Post the menu at position X,Y with entry ENTRY."""
- self.tk.call('tk_popup', self._w, x, y, entry)
- def activate(self, index):
- """Activate entry at INDEX."""
- self.tk.call(self._w, 'activate', index)
- def add(self, itemType, cnf={}, **kw):
- """Internal function."""
- self.tk.call((self._w, 'add', itemType) +
- self._options(cnf, kw))
- def add_cascade(self, cnf={}, **kw):
- """Add hierarchical menu item."""
- self.add('cascade', cnf or kw)
- def add_checkbutton(self, cnf={}, **kw):
- """Add checkbutton menu item."""
- self.add('checkbutton', cnf or kw)
- def add_command(self, cnf={}, **kw):
- """Add command menu item."""
- self.add('command', cnf or kw)
- def add_radiobutton(self, cnf={}, **kw):
- """Addd radio menu item."""
- self.add('radiobutton', cnf or kw)
- def add_separator(self, cnf={}, **kw):
- """Add separator."""
- self.add('separator', cnf or kw)
- def insert(self, index, itemType, cnf={}, **kw):
- """Internal function."""
- self.tk.call((self._w, 'insert', index, itemType) +
- self._options(cnf, kw))
- def insert_cascade(self, index, cnf={}, **kw):
- """Add hierarchical menu item at INDEX."""
- self.insert(index, 'cascade', cnf or kw)
- def insert_checkbutton(self, index, cnf={}, **kw):
- """Add checkbutton menu item at INDEX."""
- self.insert(index, 'checkbutton', cnf or kw)
- def insert_command(self, index, cnf={}, **kw):
- """Add command menu item at INDEX."""
- self.insert(index, 'command', cnf or kw)
- def insert_radiobutton(self, index, cnf={}, **kw):
- """Addd radio menu item at INDEX."""
- self.insert(index, 'radiobutton', cnf or kw)
- def insert_separator(self, index, cnf={}, **kw):
- """Add separator at INDEX."""
- self.insert(index, 'separator', cnf or kw)
- def delete(self, index1, index2=None):
- """Delete menu items between INDEX1 and INDEX2 (not included)."""
- self.tk.call(self._w, 'delete', index1, index2)
- def entrycget(self, index, option):
- """Return the resource value of an menu item for OPTION at INDEX."""
- return self.tk.call(self._w, 'entrycget', index, '-' + option)
- def entryconfigure(self, index, cnf=None, **kw):
- """Configure a menu item at INDEX."""
- return self._configure(('entryconfigure', index), cnf, kw)
- entryconfig = entryconfigure
- def index(self, index):
- """Return the index of a menu item identified by INDEX."""
- i = self.tk.call(self._w, 'index', index)
- if i == 'none': return None
- return getint(i)
- def invoke(self, index):
- """Invoke a menu item identified by INDEX and execute
- the associated command."""
- return self.tk.call(self._w, 'invoke', index)
- def post(self, x, y):
- """Display a menu at position X,Y."""
- self.tk.call(self._w, 'post', x, y)
- def type(self, index):
- """Return the type of the menu item at INDEX."""
- return self.tk.call(self._w, 'type', index)
- def unpost(self):
- """Unmap a menu."""
- self.tk.call(self._w, 'unpost')
- def yposition(self, index):
- """Return the y-position of the topmost pixel of the menu item at INDEX."""
- return getint(self.tk.call(
- self._w, 'yposition', index))
-
-class Menubutton(Widget):
- """Menubutton widget, obsolete since Tk8.0."""
- def __init__(self, master=None, cnf={}, **kw):
- Widget.__init__(self, master, 'menubutton', cnf, kw)
-
-class Message(Widget):
- """Message widget to display multiline text. Obsolete since Label does it too."""
- def __init__(self, master=None, cnf={}, **kw):
- Widget.__init__(self, master, 'message', cnf, kw)
-
-class Radiobutton(Widget):
- """Radiobutton widget which shows only one of several buttons in on-state."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a radiobutton widget with the parent MASTER.
-
- Valid resource names: activebackground, activeforeground, anchor,
- background, bd, bg, bitmap, borderwidth, command, cursor,
- disabledforeground, fg, font, foreground, height,
- highlightbackground, highlightcolor, highlightthickness, image,
- indicatoron, justify, padx, pady, relief, selectcolor, selectimage,
- state, takefocus, text, textvariable, underline, value, variable,
- width, wraplength."""
- Widget.__init__(self, master, 'radiobutton', cnf, kw)
- def deselect(self):
- """Put the button in off-state."""
-
- self.tk.call(self._w, 'deselect')
- def flash(self):
- """Flash the button."""
- self.tk.call(self._w, 'flash')
- def invoke(self):
- """Toggle the button and invoke a command if given as resource."""
- return self.tk.call(self._w, 'invoke')
- def select(self):
- """Put the button in on-state."""
- self.tk.call(self._w, 'select')
-
-class Scale(Widget):
- """Scale widget which can display a numerical scale."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a scale widget with the parent MASTER.
-
- Valid resource names: activebackground, background, bigincrement, bd,
- bg, borderwidth, command, cursor, digits, fg, font, foreground, from,
- highlightbackground, highlightcolor, highlightthickness, label,
- length, orient, relief, repeatdelay, repeatinterval, resolution,
- showvalue, sliderlength, sliderrelief, state, takefocus,
- tickinterval, to, troughcolor, variable, width."""
- Widget.__init__(self, master, 'scale', cnf, kw)
- def get(self):
- """Get the current value as integer or float."""
- value = self.tk.call(self._w, 'get')
- try:
- return getint(value)
- except ValueError:
- return getdouble(value)
- def set(self, value):
- """Set the value to VALUE."""
- self.tk.call(self._w, 'set', value)
- def coords(self, value=None):
- """Return a tuple (X,Y) of the point along the centerline of the
- trough that corresponds to VALUE or the current value if None is
- given."""
-
- return self._getints(self.tk.call(self._w, 'coords', value))
- def identify(self, x, y):
- """Return where the point X,Y lies. Valid return values are "slider",
- "though1" and "though2"."""
- return self.tk.call(self._w, 'identify', x, y)
-
-class Scrollbar(Widget):
- """Scrollbar widget which displays a slider at a certain position."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a scrollbar widget with the parent MASTER.
-
- Valid resource names: activebackground, activerelief,
- background, bd, bg, borderwidth, command, cursor,
- elementborderwidth, highlightbackground,
- highlightcolor, highlightthickness, jump, orient,
- relief, repeatdelay, repeatinterval, takefocus,
- troughcolor, width."""
- Widget.__init__(self, master, 'scrollbar', cnf, kw)
- def activate(self, index):
- """Display the element at INDEX with activebackground and activerelief.
- INDEX can be "arrow1","slider" or "arrow2"."""
- self.tk.call(self._w, 'activate', index)
- def delta(self, deltax, deltay):
- """Return the fractional change of the scrollbar setting if it
- would be moved by DELTAX or DELTAY pixels."""
- return getdouble(
- self.tk.call(self._w, 'delta', deltax, deltay))
- def fraction(self, x, y):
- """Return the fractional value which corresponds to a slider
- position of X,Y."""
- return getdouble(self.tk.call(self._w, 'fraction', x, y))
- def identify(self, x, y):
- """Return the element under position X,Y as one of
- "arrow1","slider","arrow2" or ""."""
- return self.tk.call(self._w, 'identify', x, y)
- def get(self):
- """Return the current fractional values (upper and lower end)
- of the slider position."""
- return self._getdoubles(self.tk.call(self._w, 'get'))
- def set(self, *args):
- """Set the fractional values of the slider position (upper and
- lower ends as value between 0 and 1)."""
- self.tk.call((self._w, 'set') + args)
-
-
-
-class Text(Widget):
- """Text widget which can display text in various forms."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a text widget with the parent MASTER.
-
- STANDARD OPTIONS
-
- background, borderwidth, cursor,
- exportselection, font, foreground,
- highlightbackground, highlightcolor,
- highlightthickness, insertbackground,
- insertborderwidth, insertofftime,
- insertontime, insertwidth, padx, pady,
- relief, selectbackground,
- selectborderwidth, selectforeground,
- setgrid, takefocus,
- xscrollcommand, yscrollcommand,
-
- WIDGET-SPECIFIC OPTIONS
-
- autoseparators, height, maxundo,
- spacing1, spacing2, spacing3,
- state, tabs, undo, width, wrap,
-
- """
- Widget.__init__(self, master, 'text', cnf, kw)
- def bbox(self, *args):
- """Return a tuple of (x,y,width,height) which gives the bounding
- box of the visible part of the character at the index in ARGS."""
- return self._getints(
- self.tk.call((self._w, 'bbox') + args)) or None
- def tk_textSelectTo(self, index):
- self.tk.call('tk_textSelectTo', self._w, index)
- def tk_textBackspace(self):
- self.tk.call('tk_textBackspace', self._w)
- def tk_textIndexCloser(self, a, b, c):
- self.tk.call('tk_textIndexCloser', self._w, a, b, c)
- def tk_textResetAnchor(self, index):
- self.tk.call('tk_textResetAnchor', self._w, index)
- def compare(self, index1, op, index2):
- """Return whether between index INDEX1 and index INDEX2 the
- relation OP is satisfied. OP is one of <, <=, ==, >=, >, or !=."""
- return self.tk.getboolean(self.tk.call(
- self._w, 'compare', index1, op, index2))
- def debug(self, boolean=None):
- """Turn on the internal consistency checks of the B-Tree inside the text
- widget according to BOOLEAN."""
- return self.tk.getboolean(self.tk.call(
- self._w, 'debug', boolean))
- def delete(self, index1, index2=None):
- """Delete the characters between INDEX1 and INDEX2 (not included)."""
- self.tk.call(self._w, 'delete', index1, index2)
- def dlineinfo(self, index):
- """Return tuple (x,y,width,height,baseline) giving the bounding box
- and baseline position of the visible part of the line containing
- the character at INDEX."""
- return self._getints(self.tk.call(self._w, 'dlineinfo', index))
- def dump(self, index1, index2=None, command=None, **kw):
- """Return the contents of the widget between index1 and index2.
-
- The type of contents returned in filtered based on the keyword
- parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are
- given and true, then the corresponding items are returned. The result
- is a list of triples of the form (key, value, index). If none of the
- keywords are true then 'all' is used by default.
-
- If the 'command' argument is given, it is called once for each element
- of the list of triples, with the values of each triple serving as the
- arguments to the function. In this case the list is not returned."""
- args = []
- func_name = None
- result = None
- if not command:
- # Never call the dump command without the -command flag, since the
- # output could involve Tcl quoting and would be a pain to parse
- # right. Instead just set the command to build a list of triples
- # as if we had done the parsing.
- result = []
- def append_triple(key, value, index, result=result):
- result.append((key, value, index))
- command = append_triple
- try:
- if not isinstance(command, str):
- func_name = command = self._register(command)
- args += ["-command", command]
- for key in kw:
- if kw[key]: args.append("-" + key)
- args.append(index1)
- if index2:
- args.append(index2)
- self.tk.call(self._w, "dump", *args)
- return result
- finally:
- if func_name:
- self.deletecommand(func_name)
-
- ## new in tk8.4
- def edit(self, *args):
- """Internal method
-
- This method controls the undo mechanism and
- the modified flag. The exact behavior of the
- command depends on the option argument that
- follows the edit argument. The following forms
- of the command are currently supported:
-
- edit_modified, edit_redo, edit_reset, edit_separator
- and edit_undo
-
- """
- return self._getints(
- self.tk.call((self._w, 'edit') + args)) or ()
-
- def edit_modified(self, arg=None):
- """Get or Set the modified flag
-
- If arg is not specified, returns the modified
- flag of the widget. The insert, delete, edit undo and
- edit redo commands or the user can set or clear the
- modified flag. If boolean is specified, sets the
- modified flag of the widget to arg.
- """
- return self.edit("modified", arg)
-
- def edit_redo(self):
- """Redo the last undone edit
-
- When the undo option is true, reapplies the last
- undone edits provided no other edits were done since
- then. Generates an error when the redo stack is empty.
- Does nothing when the undo option is false.
- """
- return self.edit("redo")
-
- def edit_reset(self):
- """Clears the undo and redo stacks
- """
- return self.edit("reset")
-
- def edit_separator(self):
- """Inserts a separator (boundary) on the undo stack.
-
- Does nothing when the undo option is false
- """
- return self.edit("separator")
-
- def edit_undo(self):
- """Undoes the last edit action
-
- If the undo option is true. An edit action is defined
- as all the insert and delete commands that are recorded
- on the undo stack in between two separators. Generates
- an error when the undo stack is empty. Does nothing
- when the undo option is false
- """
- return self.edit("undo")
-
- def get(self, index1, index2=None):
- """Return the text from INDEX1 to INDEX2 (not included)."""
- return self.tk.call(self._w, 'get', index1, index2)
- # (Image commands are new in 8.0)
- def image_cget(self, index, option):
- """Return the value of OPTION of an embedded image at INDEX."""
- if option[:1] != "-":
- option = "-" + option
- if option[-1:] == "_":
- option = option[:-1]
- return self.tk.call(self._w, "image", "cget", index, option)
- def image_configure(self, index, cnf=None, **kw):
- """Configure an embedded image at INDEX."""
- return self._configure(('image', 'configure', index), cnf, kw)
- def image_create(self, index, cnf={}, **kw):
- """Create an embedded image at INDEX."""
- return self.tk.call(
- self._w, "image", "create", index,
- *self._options(cnf, kw))
- def image_names(self):
- """Return all names of embedded images in this widget."""
- return self.tk.call(self._w, "image", "names")
- def index(self, index):
- """Return the index in the form line.char for INDEX."""
- return self.tk.call(self._w, 'index', index)
- def insert(self, index, chars, *args):
- """Insert CHARS before the characters at INDEX. An additional
- tag can be given in ARGS. Additional CHARS and tags can follow in ARGS."""
- self.tk.call((self._w, 'insert', index, chars) + args)
- def mark_gravity(self, markName, direction=None):
- """Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT).
- Return the current value if None is given for DIRECTION."""
- return self.tk.call(
- (self._w, 'mark', 'gravity', markName, direction))
- def mark_names(self):
- """Return all mark names."""
- return self.tk.splitlist(self.tk.call(
- self._w, 'mark', 'names'))
- def mark_set(self, markName, index):
- """Set mark MARKNAME before the character at INDEX."""
- self.tk.call(self._w, 'mark', 'set', markName, index)
- def mark_unset(self, *markNames):
- """Delete all marks in MARKNAMES."""
- self.tk.call((self._w, 'mark', 'unset') + markNames)
- def mark_next(self, index):
- """Return the name of the next mark after INDEX."""
- return self.tk.call(self._w, 'mark', 'next', index) or None
- def mark_previous(self, index):
- """Return the name of the previous mark before INDEX."""
- return self.tk.call(self._w, 'mark', 'previous', index) or None
- def scan_mark(self, x, y):
- """Remember the current X, Y coordinates."""
- self.tk.call(self._w, 'scan', 'mark', x, y)
- def scan_dragto(self, x, y):
- """Adjust the view of the text to 10 times the
- difference between X and Y and the coordinates given in
- scan_mark."""
- self.tk.call(self._w, 'scan', 'dragto', x, y)
- def search(self, pattern, index, stopindex=None,
- forwards=None, backwards=None, exact=None,
- regexp=None, nocase=None, count=None):
- """Search PATTERN beginning from INDEX until STOPINDEX.
- Return the index of the first character of a match or an empty string."""
- args = [self._w, 'search']
- if forwards: args.append('-forwards')
- if backwards: args.append('-backwards')
- if exact: args.append('-exact')
- if regexp: args.append('-regexp')
- if nocase: args.append('-nocase')
- if count: args.append('-count'); args.append(count)
- if pattern[0] == '-': args.append('--')
- args.append(pattern)
- args.append(index)
- if stopindex: args.append(stopindex)
- return self.tk.call(tuple(args))
- def see(self, index):
- """Scroll such that the character at INDEX is visible."""
- self.tk.call(self._w, 'see', index)
- def tag_add(self, tagName, index1, *args):
- """Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS.
- Additional pairs of indices may follow in ARGS."""
- self.tk.call(
- (self._w, 'tag', 'add', tagName, index1) + args)
- def tag_unbind(self, tagName, sequence, funcid=None):
- """Unbind for all characters with TAGNAME for event SEQUENCE the
- function identified with FUNCID."""
- self.tk.call(self._w, 'tag', 'bind', tagName, sequence, '')
- if funcid:
- self.deletecommand(funcid)
- def tag_bind(self, tagName, sequence, func, add=None):
- """Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC.
-
- An additional boolean parameter ADD specifies whether FUNC will be
- called additionally to the other bound function or whether it will
- replace the previous function. See bind for the return value."""
- return self._bind((self._w, 'tag', 'bind', tagName),
- sequence, func, add)
- def tag_cget(self, tagName, option):
- """Return the value of OPTION for tag TAGNAME."""
- if option[:1] != '-':
- option = '-' + option
- if option[-1:] == '_':
- option = option[:-1]
- return self.tk.call(self._w, 'tag', 'cget', tagName, option)
- def tag_configure(self, tagName, cnf=None, **kw):
- """Configure a tag TAGNAME."""
- return self._configure(('tag', 'configure', tagName), cnf, kw)
- tag_config = tag_configure
- def tag_delete(self, *tagNames):
- """Delete all tags in TAGNAMES."""
- self.tk.call((self._w, 'tag', 'delete') + tagNames)
- def tag_lower(self, tagName, belowThis=None):
- """Change the priority of tag TAGNAME such that it is lower
- than the priority of BELOWTHIS."""
- self.tk.call(self._w, 'tag', 'lower', tagName, belowThis)
- def tag_names(self, index=None):
- """Return a list of all tag names."""
- return self.tk.splitlist(
- self.tk.call(self._w, 'tag', 'names', index))
- def tag_nextrange(self, tagName, index1, index2=None):
- """Return a list of start and end index for the first sequence of
- characters between INDEX1 and INDEX2 which all have tag TAGNAME.
- The text is searched forward from INDEX1."""
- return self.tk.splitlist(self.tk.call(
- self._w, 'tag', 'nextrange', tagName, index1, index2))
- def tag_prevrange(self, tagName, index1, index2=None):
- """Return a list of start and end index for the first sequence of
- characters between INDEX1 and INDEX2 which all have tag TAGNAME.
- The text is searched backwards from INDEX1."""
- return self.tk.splitlist(self.tk.call(
- self._w, 'tag', 'prevrange', tagName, index1, index2))
- def tag_raise(self, tagName, aboveThis=None):
- """Change the priority of tag TAGNAME such that it is higher
- than the priority of ABOVETHIS."""
- self.tk.call(
- self._w, 'tag', 'raise', tagName, aboveThis)
- def tag_ranges(self, tagName):
- """Return a list of ranges of text which have tag TAGNAME."""
- return self.tk.splitlist(self.tk.call(
- self._w, 'tag', 'ranges', tagName))
- def tag_remove(self, tagName, index1, index2=None):
- """Remove tag TAGNAME from all characters between INDEX1 and INDEX2."""
- self.tk.call(
- self._w, 'tag', 'remove', tagName, index1, index2)
- def window_cget(self, index, option):
- """Return the value of OPTION of an embedded window at INDEX."""
- if option[:1] != '-':
- option = '-' + option
- if option[-1:] == '_':
- option = option[:-1]
- return self.tk.call(self._w, 'window', 'cget', index, option)
- def window_configure(self, index, cnf=None, **kw):
- """Configure an embedded window at INDEX."""
- return self._configure(('window', 'configure', index), cnf, kw)
- window_config = window_configure
- def window_create(self, index, cnf={}, **kw):
- """Create a window at INDEX."""
- self.tk.call(
- (self._w, 'window', 'create', index)
- + self._options(cnf, kw))
- def window_names(self):
- """Return all names of embedded windows in this widget."""
- return self.tk.splitlist(
- self.tk.call(self._w, 'window', 'names'))
- def xview(self, *what):
- """Query and change horizontal position of the view."""
- if not what:
- return self._getdoubles(self.tk.call(self._w, 'xview'))
- self.tk.call((self._w, 'xview') + what)
- def xview_moveto(self, fraction):
- """Adjusts the view in the window so that FRACTION of the
- total width of the canvas is off-screen to the left."""
- self.tk.call(self._w, 'xview', 'moveto', fraction)
- def xview_scroll(self, number, what):
- """Shift the x-view according to NUMBER which is measured
- in "units" or "pages" (WHAT)."""
- self.tk.call(self._w, 'xview', 'scroll', number, what)
- def yview(self, *what):
- """Query and change vertical position of the view."""
- if not what:
- return self._getdoubles(self.tk.call(self._w, 'yview'))
- self.tk.call((self._w, 'yview') + what)
- def yview_moveto(self, fraction):
- """Adjusts the view in the window so that FRACTION of the
- total height of the canvas is off-screen to the top."""
- self.tk.call(self._w, 'yview', 'moveto', fraction)
- def yview_scroll(self, number, what):
- """Shift the y-view according to NUMBER which is measured
- in "units" or "pages" (WHAT)."""
- self.tk.call(self._w, 'yview', 'scroll', number, what)
- def yview_pickplace(self, *what):
- """Obsolete function, use see."""
- self.tk.call((self._w, 'yview', '-pickplace') + what)
-
-
-class _setit:
- """Internal class. It wraps the command in the widget OptionMenu."""
- def __init__(self, var, value, callback=None):
- self.__value = value
- self.__var = var
- self.__callback = callback
- def __call__(self, *args):
- self.__var.set(self.__value)
- if self.__callback:
- self.__callback(self.__value, *args)
-
-class OptionMenu(Menubutton):
- """OptionMenu which allows the user to select a value from a menu."""
- def __init__(self, master, variable, value, *values, **kwargs):
- """Construct an optionmenu widget with the parent MASTER, with
- the resource textvariable set to VARIABLE, the initially selected
- value VALUE, the other menu values VALUES and an additional
- keyword argument command."""
- kw = {"borderwidth": 2, "textvariable": variable,
- "indicatoron": 1, "relief": RAISED, "anchor": "c",
- "highlightthickness": 2}
- Widget.__init__(self, master, "menubutton", kw)
- self.widgetName = 'tk_optionMenu'
- menu = self.__menu = Menu(self, name="menu", tearoff=0)
- self.menuname = menu._w
- # 'command' is the only supported keyword
- callback = kwargs.get('command')
- if kwargs.has_key('command'):
- del kwargs['command']
- if kwargs:
- raise TclError, 'unknown option -'+kwargs.keys()[0]
- menu.add_command(label=value,
- command=_setit(variable, value, callback))
- for v in values:
- menu.add_command(label=v,
- command=_setit(variable, v, callback))
- self["menu"] = menu
-
- def __getitem__(self, name):
- if name == 'menu':
- return self.__menu
- return Widget.__getitem__(self, name)
-
- def destroy(self):
- """Destroy this widget and the associated menu."""
- Menubutton.destroy(self)
- self.__menu = None
-
-class Image:
- """Base class for images."""
- _last_id = 0
- def __init__(self, imgtype, name=None, cnf={}, master=None, **kw):
- self.name = None
- if not master:
- master = _default_root
- if not master:
- raise RuntimeError, 'Too early to create image'
- self.tk = master.tk
- if not name:
- Image._last_id += 1
- name = "pyimage%r" % (Image._last_id,) # tk itself would use image<x>
- # The following is needed for systems where id(x)
- # can return a negative number, such as Linux/m68k:
- if name[0] == '-': name = '_' + name[1:]
- if kw and cnf: cnf = _cnfmerge((cnf, kw))
- elif kw: cnf = kw
- options = ()
- for k, v in cnf.items():
- if callable(v):
- v = self._register(v)
- options = options + ('-'+k, v)
- self.tk.call(('image', 'create', imgtype, name,) + options)
- self.name = name
- def __str__(self): return self.name
- def __del__(self):
- if self.name:
- try:
- self.tk.call('image', 'delete', self.name)
- except TclError:
- # May happen if the root was destroyed
- pass
- def __setitem__(self, key, value):
- self.tk.call(self.name, 'configure', '-'+key, value)
- def __getitem__(self, key):
- return self.tk.call(self.name, 'configure', '-'+key)
- def configure(self, **kw):
- """Configure the image."""
- res = ()
- for k, v in _cnfmerge(kw).items():
- if v is not None:
- if k[-1] == '_': k = k[:-1]
- if callable(v):
- v = self._register(v)
- res = res + ('-'+k, v)
- self.tk.call((self.name, 'config') + res)
- config = configure
- def height(self):
- """Return the height of the image."""
- return getint(
- self.tk.call('image', 'height', self.name))
- def type(self):
- """Return the type of the imgage, e.g. "photo" or "bitmap"."""
- return self.tk.call('image', 'type', self.name)
- def width(self):
- """Return the width of the image."""
- return getint(
- self.tk.call('image', 'width', self.name))
-
-class PhotoImage(Image):
- """Widget which can display colored images in GIF, PPM/PGM format."""
- def __init__(self, name=None, cnf={}, master=None, **kw):
- """Create an image with NAME.
-
- Valid resource names: data, format, file, gamma, height, palette,
- width."""
- Image.__init__(self, 'photo', name, cnf, master, **kw)
- def blank(self):
- """Display a transparent image."""
- self.tk.call(self.name, 'blank')
- def cget(self, option):
- """Return the value of OPTION."""
- return self.tk.call(self.name, 'cget', '-' + option)
- # XXX config
- def __getitem__(self, key):
- return self.tk.call(self.name, 'cget', '-' + key)
- # XXX copy -from, -to, ...?
- def copy(self):
- """Return a new PhotoImage with the same image as this widget."""
- destImage = PhotoImage()
- self.tk.call(destImage, 'copy', self.name)
- return destImage
- def zoom(self,x,y=''):
- """Return a new PhotoImage with the same image as this widget
- but zoom it with X and Y."""
- destImage = PhotoImage()
- if y=='': y=x
- self.tk.call(destImage, 'copy', self.name, '-zoom',x,y)
- return destImage
- def subsample(self,x,y=''):
- """Return a new PhotoImage based on the same image as this widget
- but use only every Xth or Yth pixel."""
- destImage = PhotoImage()
- if y=='': y=x
- self.tk.call(destImage, 'copy', self.name, '-subsample',x,y)
- return destImage
- def get(self, x, y):
- """Return the color (red, green, blue) of the pixel at X,Y."""
- return self.tk.call(self.name, 'get', x, y)
- def put(self, data, to=None):
- """Put row formated colors to image starting from
- position TO, e.g. image.put("{red green} {blue yellow}", to=(4,6))"""
- args = (self.name, 'put', data)
- if to:
- if to[0] == '-to':
- to = to[1:]
- args = args + ('-to',) + tuple(to)
- self.tk.call(args)
- # XXX read
- def write(self, filename, format=None, from_coords=None):
- """Write image to file FILENAME in FORMAT starting from
- position FROM_COORDS."""
- args = (self.name, 'write', filename)
- if format:
- args = args + ('-format', format)
- if from_coords:
- args = args + ('-from',) + tuple(from_coords)
- self.tk.call(args)
-
-class BitmapImage(Image):
- """Widget which can display a bitmap."""
- def __init__(self, name=None, cnf={}, master=None, **kw):
- """Create a bitmap with NAME.
-
- Valid resource names: background, data, file, foreground, maskdata, maskfile."""
- Image.__init__(self, 'bitmap', name, cnf, master, **kw)
-
-def image_names(): return _default_root.tk.call('image', 'names')
-def image_types(): return _default_root.tk.call('image', 'types')
-
-
-class Spinbox(Widget):
- """spinbox widget."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a spinbox widget with the parent MASTER.
-
- STANDARD OPTIONS
-
- activebackground, background, borderwidth,
- cursor, exportselection, font, foreground,
- highlightbackground, highlightcolor,
- highlightthickness, insertbackground,
- insertborderwidth, insertofftime,
- insertontime, insertwidth, justify, relief,
- repeatdelay, repeatinterval,
- selectbackground, selectborderwidth
- selectforeground, takefocus, textvariable
- xscrollcommand.
-
- WIDGET-SPECIFIC OPTIONS
-
- buttonbackground, buttoncursor,
- buttondownrelief, buttonuprelief,
- command, disabledbackground,
- disabledforeground, format, from,
- invalidcommand, increment,
- readonlybackground, state, to,
- validate, validatecommand values,
- width, wrap,
- """
- Widget.__init__(self, master, 'spinbox', cnf, kw)
-
- def bbox(self, index):
- """Return a tuple of X1,Y1,X2,Y2 coordinates for a
- rectangle which encloses the character given by index.
-
- The first two elements of the list give the x and y
- coordinates of the upper-left corner of the screen
- area covered by the character (in pixels relative
- to the widget) and the last two elements give the
- width and height of the character, in pixels. The
- bounding box may refer to a region outside the
- visible area of the window.
- """
- return self.tk.call(self._w, 'bbox', index)
-
- def delete(self, first, last=None):
- """Delete one or more elements of the spinbox.
-
- First is the index of the first character to delete,
- and last is the index of the character just after
- the last one to delete. If last isn't specified it
- defaults to first+1, i.e. a single character is
- deleted. This command returns an empty string.
- """
- return self.tk.call(self._w, 'delete', first, last)
-
- def get(self):
- """Returns the spinbox's string"""
- return self.tk.call(self._w, 'get')
-
- def icursor(self, index):
- """Alter the position of the insertion cursor.
-
- The insertion cursor will be displayed just before
- the character given by index. Returns an empty string
- """
- return self.tk.call(self._w, 'icursor', index)
-
- def identify(self, x, y):
- """Returns the name of the widget at position x, y
-
- Return value is one of: none, buttondown, buttonup, entry
- """
- return self.tk.call(self._w, 'identify', x, y)
-
- def index(self, index):
- """Returns the numerical index corresponding to index
- """
- return self.tk.call(self._w, 'index', index)
-
- def insert(self, index, s):
- """Insert string s at index
-
- Returns an empty string.
- """
- return self.tk.call(self._w, 'insert', index, s)
-
- def invoke(self, element):
- """Causes the specified element to be invoked
-
- The element could be buttondown or buttonup
- triggering the action associated with it.
- """
- return self.tk.call(self._w, 'invoke', element)
-
- def scan(self, *args):
- """Internal function."""
- return self._getints(
- self.tk.call((self._w, 'scan') + args)) or ()
-
- def scan_mark(self, x):
- """Records x and the current view in the spinbox window;
-
- used in conjunction with later scan dragto commands.
- Typically this command is associated with a mouse button
- press in the widget. It returns an empty string.
- """
- return self.scan("mark", x)
-
- def scan_dragto(self, x):
- """Compute the difference between the given x argument
- and the x argument to the last scan mark command
-
- It then adjusts the view left or right by 10 times the
- difference in x-coordinates. This command is typically
- associated with mouse motion events in the widget, to
- produce the effect of dragging the spinbox at high speed
- through the window. The return value is an empty string.
- """
- return self.scan("dragto", x)
-
- def selection(self, *args):
- """Internal function."""
- return self._getints(
- self.tk.call((self._w, 'selection') + args)) or ()
-
- def selection_adjust(self, index):
- """Locate the end of the selection nearest to the character
- given by index,
-
- Then adjust that end of the selection to be at index
- (i.e including but not going beyond index). The other
- end of the selection is made the anchor point for future
- select to commands. If the selection isn't currently in
- the spinbox, then a new selection is created to include
- the characters between index and the most recent selection
- anchor point, inclusive. Returns an empty string.
- """
- return self.selection("adjust", index)
-
- def selection_clear(self):
- """Clear the selection
-
- If the selection isn't in this widget then the
- command has no effect. Returns an empty string.
- """
- return self.selection("clear")
-
- def selection_element(self, element=None):
- """Sets or gets the currently selected element.
-
- If a spinbutton element is specified, it will be
- displayed depressed
- """
- return self.selection("element", element)
-
-###########################################################################
-
-class LabelFrame(Widget):
- """labelframe widget."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a labelframe widget with the parent MASTER.
-
- STANDARD OPTIONS
-
- borderwidth, cursor, font, foreground,
- highlightbackground, highlightcolor,
- highlightthickness, padx, pady, relief,
- takefocus, text
-
- WIDGET-SPECIFIC OPTIONS
-
- background, class, colormap, container,
- height, labelanchor, labelwidget,
- visual, width
- """
- Widget.__init__(self, master, 'labelframe', cnf, kw)
-
-########################################################################
-
-class PanedWindow(Widget):
- """panedwindow widget."""
- def __init__(self, master=None, cnf={}, **kw):
- """Construct a panedwindow widget with the parent MASTER.
-
- STANDARD OPTIONS
-
- background, borderwidth, cursor, height,
- orient, relief, width
-
- WIDGET-SPECIFIC OPTIONS
-
- handlepad, handlesize, opaqueresize,
- sashcursor, sashpad, sashrelief,
- sashwidth, showhandle,
- """
- Widget.__init__(self, master, 'panedwindow', cnf, kw)
-
- def add(self, child, **kw):
- """Add a child widget to the panedwindow in a new pane.
-
- The child argument is the name of the child widget
- followed by pairs of arguments that specify how to
- manage the windows. Options may have any of the values
- accepted by the configure subcommand.
- """
- self.tk.call((self._w, 'add', child) + self._options(kw))
-
- def remove(self, child):
- """Remove the pane containing child from the panedwindow
-
- All geometry management options for child will be forgotten.
- """
- self.tk.call(self._w, 'forget', child)
- forget=remove
-
- def identify(self, x, y):
- """Identify the panedwindow component at point x, y
-
- If the point is over a sash or a sash handle, the result
- is a two element list containing the index of the sash or
- handle, and a word indicating whether it is over a sash
- or a handle, such as {0 sash} or {2 handle}. If the point
- is over any other part of the panedwindow, the result is
- an empty list.
- """
- return self.tk.call(self._w, 'identify', x, y)
-
- def proxy(self, *args):
- """Internal function."""
- return self._getints(
- self.tk.call((self._w, 'proxy') + args)) or ()
-
- def proxy_coord(self):
- """Return the x and y pair of the most recent proxy location
- """
- return self.proxy("coord")
-
- def proxy_forget(self):
- """Remove the proxy from the display.
- """
- return self.proxy("forget")
-
- def proxy_place(self, x, y):
- """Place the proxy at the given x and y coordinates.
- """
- return self.proxy("place", x, y)
-
- def sash(self, *args):
- """Internal function."""
- return self._getints(
- self.tk.call((self._w, 'sash') + args)) or ()
-
- def sash_coord(self, index):
- """Return the current x and y pair for the sash given by index.
-
- Index must be an integer between 0 and 1 less than the
- number of panes in the panedwindow. The coordinates given are
- those of the top left corner of the region containing the sash.
- pathName sash dragto index x y This command computes the
- difference between the given coordinates and the coordinates
- given to the last sash coord command for the given sash. It then
- moves that sash the computed difference. The return value is the
- empty string.
- """
- return self.sash("coord", index)
-
- def sash_mark(self, index):
- """Records x and y for the sash given by index;
-
- Used in conjunction with later dragto commands to move the sash.
- """
- return self.sash("mark", index)
-
- def sash_place(self, index, x, y):
- """Place the sash given by index at the given coordinates
- """
- return self.sash("place", index, x, y)
-
- def panecget(self, child, option):
- """Query a management option for window.
-
- Option may be any value allowed by the paneconfigure subcommand
- """
- return self.tk.call(
- (self._w, 'panecget') + (child, '-'+option))
-
- def paneconfigure(self, tagOrId, cnf=None, **kw):
- """Query or modify the management options for window.
-
- If no option is specified, returns a list describing all
- of the available options for pathName. If option is
- specified with no value, then the command returns a list
- describing the one named option (this list will be identical
- to the corresponding sublist of the value returned if no
- option is specified). If one or more option-value pairs are
- specified, then the command modifies the given widget
- option(s) to have the given value(s); in this case the
- command returns an empty string. The following options
- are supported:
-
- after window
- Insert the window after the window specified. window
- should be the name of a window already managed by pathName.
- before window
- Insert the window before the window specified. window
- should be the name of a window already managed by pathName.
- height size
- Specify a height for the window. The height will be the
- outer dimension of the window including its border, if
- any. If size is an empty string, or if -height is not
- specified, then the height requested internally by the
- window will be used initially; the height may later be
- adjusted by the movement of sashes in the panedwindow.
- Size may be any value accepted by Tk_GetPixels.
- minsize n
- Specifies that the size of the window cannot be made
- less than n. This constraint only affects the size of
- the widget in the paned dimension -- the x dimension
- for horizontal panedwindows, the y dimension for
- vertical panedwindows. May be any value accepted by
- Tk_GetPixels.
- padx n
- Specifies a non-negative value indicating how much
- extra space to leave on each side of the window in
- the X-direction. The value may have any of the forms
- accepted by Tk_GetPixels.
- pady n
- Specifies a non-negative value indicating how much
- extra space to leave on each side of the window in
- the Y-direction. The value may have any of the forms
- accepted by Tk_GetPixels.
- sticky style
- If a window's pane is larger than the requested
- dimensions of the window, this option may be used
- to position (or stretch) the window within its pane.
- Style is a string that contains zero or more of the
- characters n, s, e or w. The string can optionally
- contains spaces or commas, but they are ignored. Each
- letter refers to a side (north, south, east, or west)
- that the window will "stick" to. If both n and s
- (or e and w) are specified, the window will be
- stretched to fill the entire height (or width) of
- its cavity.
- width size
- Specify a width for the window. The width will be
- the outer dimension of the window including its
- border, if any. If size is an empty string, or
- if -width is not specified, then the width requested
- internally by the window will be used initially; the
- width may later be adjusted by the movement of sashes
- in the panedwindow. Size may be any value accepted by
- Tk_GetPixels.
-
- """
- if cnf is None and not kw:
- cnf = {}
- for x in self.tk.split(
- self.tk.call(self._w,
- 'paneconfigure', tagOrId)):
- cnf[x[0][1:]] = (x[0][1:],) + x[1:]
- return cnf
- if type(cnf) == StringType and not kw:
- x = self.tk.split(self.tk.call(
- self._w, 'paneconfigure', tagOrId, '-'+cnf))
- return (x[0][1:],) + x[1:]
- self.tk.call((self._w, 'paneconfigure', tagOrId) +
- self._options(cnf, kw))
- paneconfig = paneconfigure
-
- def panes(self):
- """Returns an ordered list of the child panes."""
- return self.tk.call(self._w, 'panes')
-
-######################################################################
-# Extensions:
-
-class Studbutton(Button):
- def __init__(self, master=None, cnf={}, **kw):
- Widget.__init__(self, master, 'studbutton', cnf, kw)
- self.bind('<Any-Enter>', self.tkButtonEnter)
- self.bind('<Any-Leave>', self.tkButtonLeave)
- self.bind('<1>', self.tkButtonDown)
- self.bind('<ButtonRelease-1>', self.tkButtonUp)
-
-class Tributton(Button):
- def __init__(self, master=None, cnf={}, **kw):
- Widget.__init__(self, master, 'tributton', cnf, kw)
- self.bind('<Any-Enter>', self.tkButtonEnter)
- self.bind('<Any-Leave>', self.tkButtonLeave)
- self.bind('<1>', self.tkButtonDown)
- self.bind('<ButtonRelease-1>', self.tkButtonUp)
- self['fg'] = self['bg']
- self['activebackground'] = self['bg']
-
-######################################################################
-# Test:
-
-def _test():
- root = Tk()
- text = "This is Tcl/Tk version %s" % TclVersion
- if TclVersion >= 8.1:
- try:
- text = text + unicode("\nThis should be a cedilla: \347",
- "iso-8859-1")
- except NameError:
- pass # no unicode support
- label = Label(root, text=text)
- label.pack()
- test = Button(root, text="Click me!",
- command=lambda root=root: root.test.configure(
- text="[%s]" % root.test['text']))
- test.pack()
- root.test = test
- quit = Button(root, text="QUIT", command=root.destroy)
- quit.pack()
- # The following three commands are needed so the window pops
- # up on top on Windows...
- root.iconify()
- root.update()
- root.deiconify()
- root.mainloop()
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/lib-tk/tkColorChooser.py b/sys/lib/python/lib-tk/tkColorChooser.py
deleted file mode 100644
index a55a797dd..000000000
--- a/sys/lib/python/lib-tk/tkColorChooser.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# tk common colour chooser dialogue
-#
-# this module provides an interface to the native color dialogue
-# available in Tk 4.2 and newer.
-#
-# written by Fredrik Lundh, May 1997
-#
-# fixed initialcolor handling in August 1998
-#
-
-#
-# options (all have default values):
-#
-# - initialcolor: colour to mark as selected when dialog is displayed
-# (given as an RGB triplet or a Tk color string)
-#
-# - parent: which window to place the dialog on top of
-#
-# - title: dialog title
-#
-
-from tkCommonDialog import Dialog
-
-
-#
-# color chooser class
-
-class Chooser(Dialog):
- "Ask for a color"
-
- command = "tk_chooseColor"
-
- def _fixoptions(self):
- try:
- # make sure initialcolor is a tk color string
- color = self.options["initialcolor"]
- if type(color) == type(()):
- # assume an RGB triplet
- self.options["initialcolor"] = "#%02x%02x%02x" % color
- except KeyError:
- pass
-
- def _fixresult(self, widget, result):
- # to simplify application code, the color chooser returns
- # an RGB tuple together with the Tk color string
- if not result:
- return None, None # canceled
- r, g, b = widget.winfo_rgb(result)
- return (r/256, g/256, b/256), result
-
-
-#
-# convenience stuff
-
-def askcolor(color = None, **options):
- "Ask for a color"
-
- if color:
- options = options.copy()
- options["initialcolor"] = color
-
- return Chooser(**options).show()
-
-
-# --------------------------------------------------------------------
-# test stuff
-
-if __name__ == "__main__":
-
- print "color", askcolor()
diff --git a/sys/lib/python/lib-tk/tkCommonDialog.py b/sys/lib/python/lib-tk/tkCommonDialog.py
deleted file mode 100644
index 2cd9be4ea..000000000
--- a/sys/lib/python/lib-tk/tkCommonDialog.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# base class for tk common dialogues
-#
-# this module provides a base class for accessing the common
-# dialogues available in Tk 4.2 and newer. use tkFileDialog,
-# tkColorChooser, and tkMessageBox to access the individual
-# dialogs.
-#
-# written by Fredrik Lundh, May 1997
-#
-
-from Tkinter import *
-
-class Dialog:
-
- command = None
-
- def __init__(self, master=None, **options):
-
- # FIXME: should this be placed on the module level instead?
- if TkVersion < 4.2:
- raise TclError, "this module requires Tk 4.2 or newer"
-
- self.master = master
- self.options = options
- if not master and options.get('parent'):
- self.master = options['parent']
-
- def _fixoptions(self):
- pass # hook
-
- def _fixresult(self, widget, result):
- return result # hook
-
- def show(self, **options):
-
- # update instance options
- for k, v in options.items():
- self.options[k] = v
-
- self._fixoptions()
-
- # we need a dummy widget to properly process the options
- # (at least as long as we use Tkinter 1.63)
- w = Frame(self.master)
-
- try:
-
- s = w.tk.call(self.command, *w._options(self.options))
-
- s = self._fixresult(w, s)
-
- finally:
-
- try:
- # get rid of the widget
- w.destroy()
- except:
- pass
-
- return s
diff --git a/sys/lib/python/lib-tk/tkFileDialog.py b/sys/lib/python/lib-tk/tkFileDialog.py
deleted file mode 100644
index 15c7d5f60..000000000
--- a/sys/lib/python/lib-tk/tkFileDialog.py
+++ /dev/null
@@ -1,215 +0,0 @@
-#
-# Instant Python
-# $Id: tkFileDialog.py 36560 2004-07-18 06:16:08Z tim_one $
-#
-# tk common file dialogues
-#
-# this module provides interfaces to the native file dialogues
-# available in Tk 4.2 and newer, and the directory dialogue available
-# in Tk 8.3 and newer.
-#
-# written by Fredrik Lundh, May 1997.
-#
-
-#
-# options (all have default values):
-#
-# - defaultextension: added to filename if not explicitly given
-#
-# - filetypes: sequence of (label, pattern) tuples. the same pattern
-# may occur with several patterns. use "*" as pattern to indicate
-# all files.
-#
-# - initialdir: initial directory. preserved by dialog instance.
-#
-# - initialfile: initial file (ignored by the open dialog). preserved
-# by dialog instance.
-#
-# - parent: which window to place the dialog on top of
-#
-# - title: dialog title
-#
-# - multiple: if true user may select more than one file
-#
-# options for the directory chooser:
-#
-# - initialdir, parent, title: see above
-#
-# - mustexist: if true, user must pick an existing directory
-#
-#
-
-
-from tkCommonDialog import Dialog
-
-class _Dialog(Dialog):
-
- def _fixoptions(self):
- try:
- # make sure "filetypes" is a tuple
- self.options["filetypes"] = tuple(self.options["filetypes"])
- except KeyError:
- pass
-
- def _fixresult(self, widget, result):
- if result:
- # keep directory and filename until next time
- import os
- # convert Tcl path objects to strings
- try:
- result = result.string
- except AttributeError:
- # it already is a string
- pass
- path, file = os.path.split(result)
- self.options["initialdir"] = path
- self.options["initialfile"] = file
- self.filename = result # compatibility
- return result
-
-
-#
-# file dialogs
-
-class Open(_Dialog):
- "Ask for a filename to open"
-
- command = "tk_getOpenFile"
-
- def _fixresult(self, widget, result):
- if isinstance(result, tuple):
- # multiple results:
- result = tuple([getattr(r, "string", r) for r in result])
- if result:
- import os
- path, file = os.path.split(result[0])
- self.options["initialdir"] = path
- # don't set initialfile or filename, as we have multiple of these
- return result
- if not widget.tk.wantobjects() and "multiple" in self.options:
- # Need to split result explicitly
- return self._fixresult(widget, widget.tk.splitlist(result))
- return _Dialog._fixresult(self, widget, result)
-
-class SaveAs(_Dialog):
- "Ask for a filename to save as"
-
- command = "tk_getSaveFile"
-
-
-# the directory dialog has its own _fix routines.
-class Directory(Dialog):
- "Ask for a directory"
-
- command = "tk_chooseDirectory"
-
- def _fixresult(self, widget, result):
- if result:
- # convert Tcl path objects to strings
- try:
- result = result.string
- except AttributeError:
- # it already is a string
- pass
- # keep directory until next time
- self.options["initialdir"] = result
- self.directory = result # compatibility
- return result
-
-#
-# convenience stuff
-
-def askopenfilename(**options):
- "Ask for a filename to open"
-
- return Open(**options).show()
-
-def asksaveasfilename(**options):
- "Ask for a filename to save as"
-
- return SaveAs(**options).show()
-
-def askopenfilenames(**options):
- """Ask for multiple filenames to open
-
- Returns a list of filenames or empty list if
- cancel button selected
- """
- options["multiple"]=1
- return Open(**options).show()
-
-# FIXME: are the following perhaps a bit too convenient?
-
-def askopenfile(mode = "r", **options):
- "Ask for a filename to open, and returned the opened file"
-
- filename = Open(**options).show()
- if filename:
- return open(filename, mode)
- return None
-
-def askopenfiles(mode = "r", **options):
- """Ask for multiple filenames and return the open file
- objects
-
- returns a list of open file objects or an empty list if
- cancel selected
- """
-
- files = askopenfilenames(**options)
- if files:
- ofiles=[]
- for filename in files:
- ofiles.append(open(filename, mode))
- files=ofiles
- return files
-
-
-def asksaveasfile(mode = "w", **options):
- "Ask for a filename to save as, and returned the opened file"
-
- filename = SaveAs(**options).show()
- if filename:
- return open(filename, mode)
- return None
-
-def askdirectory (**options):
- "Ask for a directory, and return the file name"
- return Directory(**options).show()
-
-# --------------------------------------------------------------------
-# test stuff
-
-if __name__ == "__main__":
- # Since the file name may contain non-ASCII characters, we need
- # to find an encoding that likely supports the file name, and
- # displays correctly on the terminal.
-
- # Start off with UTF-8
- enc = "utf-8"
- import sys
-
- # See whether CODESET is defined
- try:
- import locale
- locale.setlocale(locale.LC_ALL,'')
- enc = locale.nl_langinfo(locale.CODESET)
- except (ImportError, AttributeError):
- pass
-
- # dialog for openening files
-
- openfilename=askopenfilename(filetypes=[("all files", "*")])
- try:
- fp=open(openfilename,"r")
- fp.close()
- except:
- print "Could not open File: "
- print sys.exc_info()[1]
-
- print "open", openfilename.encode(enc)
-
- # dialog for saving files
-
- saveasfilename=asksaveasfilename()
- print "saveas", saveasfilename.encode(enc)
diff --git a/sys/lib/python/lib-tk/tkFont.py b/sys/lib/python/lib-tk/tkFont.py
deleted file mode 100644
index 15dea2eba..000000000
--- a/sys/lib/python/lib-tk/tkFont.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# Tkinter font wrapper
-#
-# written by Fredrik Lundh, February 1998
-#
-# FIXME: should add 'displayof' option where relevant (actual, families,
-# measure, and metrics)
-#
-
-__version__ = "0.9"
-
-import Tkinter
-
-# weight/slant
-NORMAL = "normal"
-ROMAN = "roman"
-BOLD = "bold"
-ITALIC = "italic"
-
-def nametofont(name):
- """Given the name of a tk named font, returns a Font representation.
- """
- return Font(name=name, exists=True)
-
-class Font:
-
- """Represents a named font.
-
- Constructor options are:
-
- font -- font specifier (name, system font, or (family, size, style)-tuple)
- name -- name to use for this font configuration (defaults to a unique name)
- exists -- does a named font by this name already exist?
- Creates a new named font if False, points to the existing font if True.
- Raises _tkinter.TclError if the assertion is false.
-
- the following are ignored if font is specified:
-
- family -- font 'family', e.g. Courier, Times, Helvetica
- size -- font size in points
- weight -- font thickness: NORMAL, BOLD
- slant -- font slant: ROMAN, ITALIC
- underline -- font underlining: false (0), true (1)
- overstrike -- font strikeout: false (0), true (1)
-
- """
-
- def _set(self, kw):
- options = []
- for k, v in kw.items():
- options.append("-"+k)
- options.append(str(v))
- return tuple(options)
-
- def _get(self, args):
- options = []
- for k in args:
- options.append("-"+k)
- return tuple(options)
-
- def _mkdict(self, args):
- options = {}
- for i in range(0, len(args), 2):
- options[args[i][1:]] = args[i+1]
- return options
-
- def __init__(self, root=None, font=None, name=None, exists=False, **options):
- if not root:
- root = Tkinter._default_root
- if font:
- # get actual settings corresponding to the given font
- font = root.tk.splitlist(root.tk.call("font", "actual", font))
- else:
- font = self._set(options)
- if not name:
- name = "font" + str(id(self))
- self.name = name
-
- if exists:
- self.delete_font = False
- # confirm font exists
- if self.name not in root.tk.call("font", "names"):
- raise Tkinter._tkinter.TclError, "named font %s does not already exist" % (self.name,)
- # if font config info supplied, apply it
- if font:
- root.tk.call("font", "configure", self.name, *font)
- else:
- # create new font (raises TclError if the font exists)
- root.tk.call("font", "create", self.name, *font)
- self.delete_font = True
- # backlinks!
- self._root = root
- self._split = root.tk.splitlist
- self._call = root.tk.call
-
- def __str__(self):
- return self.name
-
- def __eq__(self, other):
- return self.name == other.name and isinstance(other, Font)
-
- def __getitem__(self, key):
- return self.cget(key)
-
- def __setitem__(self, key, value):
- self.configure(**{key: value})
-
- def __del__(self):
- try:
- if self.delete_font:
- self._call("font", "delete", self.name)
- except (KeyboardInterrupt, SystemExit):
- raise
- except Exception:
- pass
-
- def copy(self):
- "Return a distinct copy of the current font"
- return Font(self._root, **self.actual())
-
- def actual(self, option=None):
- "Return actual font attributes"
- if option:
- return self._call("font", "actual", self.name, "-"+option)
- else:
- return self._mkdict(
- self._split(self._call("font", "actual", self.name))
- )
-
- def cget(self, option):
- "Get font attribute"
- return self._call("font", "config", self.name, "-"+option)
-
- def config(self, **options):
- "Modify font attributes"
- if options:
- self._call("font", "config", self.name,
- *self._set(options))
- else:
- return self._mkdict(
- self._split(self._call("font", "config", self.name))
- )
-
- configure = config
-
- def measure(self, text):
- "Return text width"
- return int(self._call("font", "measure", self.name, text))
-
- def metrics(self, *options):
- """Return font metrics.
-
- For best performance, create a dummy widget
- using this font before calling this method."""
-
- if options:
- return int(
- self._call("font", "metrics", self.name, self._get(options))
- )
- else:
- res = self._split(self._call("font", "metrics", self.name))
- options = {}
- for i in range(0, len(res), 2):
- options[res[i][1:]] = int(res[i+1])
- return options
-
-def families(root=None):
- "Get font families (as a tuple)"
- if not root:
- root = Tkinter._default_root
- return root.tk.splitlist(root.tk.call("font", "families"))
-
-def names(root=None):
- "Get names of defined fonts (as a tuple)"
- if not root:
- root = Tkinter._default_root
- return root.tk.splitlist(root.tk.call("font", "names"))
-
-# --------------------------------------------------------------------
-# test stuff
-
-if __name__ == "__main__":
-
- root = Tkinter.Tk()
-
- # create a font
- f = Font(family="times", size=30, weight=NORMAL)
-
- print f.actual()
- print f.actual("family")
- print f.actual("weight")
-
- print f.config()
- print f.cget("family")
- print f.cget("weight")
-
- print names()
-
- print f.measure("hello"), f.metrics("linespace")
-
- print f.metrics()
-
- f = Font(font=("Courier", 20, "bold"))
- print f.measure("hello"), f.metrics("linespace")
-
- w = Tkinter.Label(root, text="Hello, world", font=f)
- w.pack()
-
- w = Tkinter.Button(root, text="Quit!", command=root.destroy)
- w.pack()
-
- fb = Font(font=w["font"]).copy()
- fb.config(weight=BOLD)
-
- w.config(font=fb)
-
- Tkinter.mainloop()
diff --git a/sys/lib/python/lib-tk/tkMessageBox.py b/sys/lib/python/lib-tk/tkMessageBox.py
deleted file mode 100644
index aff069bfa..000000000
--- a/sys/lib/python/lib-tk/tkMessageBox.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# tk common message boxes
-#
-# this module provides an interface to the native message boxes
-# available in Tk 4.2 and newer.
-#
-# written by Fredrik Lundh, May 1997
-#
-
-#
-# options (all have default values):
-#
-# - default: which button to make default (one of the reply codes)
-#
-# - icon: which icon to display (see below)
-#
-# - message: the message to display
-#
-# - parent: which window to place the dialog on top of
-#
-# - title: dialog title
-#
-# - type: dialog type; that is, which buttons to display (see below)
-#
-
-from tkCommonDialog import Dialog
-
-#
-# constants
-
-# icons
-ERROR = "error"
-INFO = "info"
-QUESTION = "question"
-WARNING = "warning"
-
-# types
-ABORTRETRYIGNORE = "abortretryignore"
-OK = "ok"
-OKCANCEL = "okcancel"
-RETRYCANCEL = "retrycancel"
-YESNO = "yesno"
-YESNOCANCEL = "yesnocancel"
-
-# replies
-ABORT = "abort"
-RETRY = "retry"
-IGNORE = "ignore"
-OK = "ok"
-CANCEL = "cancel"
-YES = "yes"
-NO = "no"
-
-
-#
-# message dialog class
-
-class Message(Dialog):
- "A message box"
-
- command = "tk_messageBox"
-
-
-#
-# convenience stuff
-
-# Rename _icon and _type options to allow overriding them in options
-def _show(title=None, message=None, _icon=None, _type=None, **options):
- if _icon and "icon" not in options: options["icon"] = _icon
- if _type and "type" not in options: options["type"] = _type
- if title: options["title"] = title
- if message: options["message"] = message
- res = Message(**options).show()
- # In some Tcl installations, Tcl converts yes/no into a boolean
- if isinstance(res, bool):
- if res: return YES
- return NO
- return res
-
-def showinfo(title=None, message=None, **options):
- "Show an info message"
- return _show(title, message, INFO, OK, **options)
-
-def showwarning(title=None, message=None, **options):
- "Show a warning message"
- return _show(title, message, WARNING, OK, **options)
-
-def showerror(title=None, message=None, **options):
- "Show an error message"
- return _show(title, message, ERROR, OK, **options)
-
-def askquestion(title=None, message=None, **options):
- "Ask a question"
- return _show(title, message, QUESTION, YESNO, **options)
-
-def askokcancel(title=None, message=None, **options):
- "Ask if operation should proceed; return true if the answer is ok"
- s = _show(title, message, QUESTION, OKCANCEL, **options)
- return s == OK
-
-def askyesno(title=None, message=None, **options):
- "Ask a question; return true if the answer is yes"
- s = _show(title, message, QUESTION, YESNO, **options)
- return s == YES
-
-def askretrycancel(title=None, message=None, **options):
- "Ask if operation should be retried; return true if the answer is yes"
- s = _show(title, message, WARNING, RETRYCANCEL, **options)
- return s == RETRY
-
-
-# --------------------------------------------------------------------
-# test stuff
-
-if __name__ == "__main__":
-
- print "info", showinfo("Spam", "Egg Information")
- print "warning", showwarning("Spam", "Egg Warning")
- print "error", showerror("Spam", "Egg Alert")
- print "question", askquestion("Spam", "Question?")
- print "proceed", askokcancel("Spam", "Proceed?")
- print "yes/no", askyesno("Spam", "Got it?")
- print "try again", askretrycancel("Spam", "Try again?")
diff --git a/sys/lib/python/lib-tk/tkSimpleDialog.py b/sys/lib/python/lib-tk/tkSimpleDialog.py
deleted file mode 100644
index 445048440..000000000
--- a/sys/lib/python/lib-tk/tkSimpleDialog.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#
-# An Introduction to Tkinter
-# tkSimpleDialog.py
-#
-# Copyright (c) 1997 by Fredrik Lundh
-#
-# fredrik@pythonware.com
-# http://www.pythonware.com
-#
-
-# --------------------------------------------------------------------
-# dialog base class
-
-'''Dialog boxes
-
-This module handles dialog boxes. It contains the following
-public symbols:
-
-Dialog -- a base class for dialogs
-
-askinteger -- get an integer from the user
-
-askfloat -- get a float from the user
-
-askstring -- get a string from the user
-'''
-
-from Tkinter import *
-import os
-
-class Dialog(Toplevel):
-
- '''Class to open dialogs.
-
- This class is intended as a base class for custom dialogs
- '''
-
- def __init__(self, parent, title = None):
-
- '''Initialize a dialog.
-
- Arguments:
-
- parent -- a parent window (the application window)
-
- title -- the dialog title
- '''
- Toplevel.__init__(self, parent)
-
- # If the master is not viewable, don't
- # make the child transient, or else it
- # would be opened withdrawn
- if parent.winfo_viewable():
- self.transient(parent)
-
- if title:
- self.title(title)
-
- self.parent = parent
-
- self.result = None
-
- body = Frame(self)
- self.initial_focus = self.body(body)
- body.pack(padx=5, pady=5)
-
- self.buttonbox()
-
- self.wait_visibility() # window needs to be visible for the grab
- self.grab_set()
-
- if not self.initial_focus:
- self.initial_focus = self
-
- self.protocol("WM_DELETE_WINDOW", self.cancel)
-
- if self.parent is not None:
- self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
- parent.winfo_rooty()+50))
-
- self.initial_focus.focus_set()
-
- self.wait_window(self)
-
- def destroy(self):
- '''Destroy the window'''
- self.initial_focus = None
- Toplevel.destroy(self)
-
- #
- # construction hooks
-
- def body(self, master):
- '''create dialog body.
-
- return widget that should have initial focus.
- This method should be overridden, and is called
- by the __init__ method.
- '''
- pass
-
- def buttonbox(self):
- '''add standard button box.
-
- override if you do not want the standard buttons
- '''
-
- box = Frame(self)
-
- w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
- w.pack(side=LEFT, padx=5, pady=5)
- w = Button(box, text="Cancel", width=10, command=self.cancel)
- w.pack(side=LEFT, padx=5, pady=5)
-
- self.bind("<Return>", self.ok)
- self.bind("<Escape>", self.cancel)
-
- box.pack()
-
- #
- # standard button semantics
-
- def ok(self, event=None):
-
- if not self.validate():
- self.initial_focus.focus_set() # put focus back
- return
-
- self.withdraw()
- self.update_idletasks()
-
- try:
- self.apply()
- finally:
- self.cancel()
-
- def cancel(self, event=None):
-
- # put focus back to the parent window
- if self.parent is not None:
- self.parent.focus_set()
- self.destroy()
-
- #
- # command hooks
-
- def validate(self):
- '''validate the data
-
- This method is called automatically to validate the data before the
- dialog is destroyed. By default, it always validates OK.
- '''
-
- return 1 # override
-
- def apply(self):
- '''process the data
-
- This method is called automatically to process the data, *after*
- the dialog is destroyed. By default, it does nothing.
- '''
-
- pass # override
-
-
-# --------------------------------------------------------------------
-# convenience dialogues
-
-class _QueryDialog(Dialog):
-
- def __init__(self, title, prompt,
- initialvalue=None,
- minvalue = None, maxvalue = None,
- parent = None):
-
- if not parent:
- import Tkinter
- parent = Tkinter._default_root
-
- self.prompt = prompt
- self.minvalue = minvalue
- self.maxvalue = maxvalue
-
- self.initialvalue = initialvalue
-
- Dialog.__init__(self, parent, title)
-
- def destroy(self):
- self.entry = None
- Dialog.destroy(self)
-
- def body(self, master):
-
- w = Label(master, text=self.prompt, justify=LEFT)
- w.grid(row=0, padx=5, sticky=W)
-
- self.entry = Entry(master, name="entry")
- self.entry.grid(row=1, padx=5, sticky=W+E)
-
- if self.initialvalue:
- self.entry.insert(0, self.initialvalue)
- self.entry.select_range(0, END)
-
- return self.entry
-
- def validate(self):
-
- import tkMessageBox
-
- try:
- result = self.getresult()
- except ValueError:
- tkMessageBox.showwarning(
- "Illegal value",
- self.errormessage + "\nPlease try again",
- parent = self
- )
- return 0
-
- if self.minvalue is not None and result < self.minvalue:
- tkMessageBox.showwarning(
- "Too small",
- "The allowed minimum value is %s. "
- "Please try again." % self.minvalue,
- parent = self
- )
- return 0
-
- if self.maxvalue is not None and result > self.maxvalue:
- tkMessageBox.showwarning(
- "Too large",
- "The allowed maximum value is %s. "
- "Please try again." % self.maxvalue,
- parent = self
- )
- return 0
-
- self.result = result
-
- return 1
-
-
-class _QueryInteger(_QueryDialog):
- errormessage = "Not an integer."
- def getresult(self):
- return int(self.entry.get())
-
-def askinteger(title, prompt, **kw):
- '''get an integer from the user
-
- Arguments:
-
- title -- the dialog title
- prompt -- the label text
- **kw -- see SimpleDialog class
-
- Return value is an integer
- '''
- d = _QueryInteger(title, prompt, **kw)
- return d.result
-
-class _QueryFloat(_QueryDialog):
- errormessage = "Not a floating point value."
- def getresult(self):
- return float(self.entry.get())
-
-def askfloat(title, prompt, **kw):
- '''get a float from the user
-
- Arguments:
-
- title -- the dialog title
- prompt -- the label text
- **kw -- see SimpleDialog class
-
- Return value is a float
- '''
- d = _QueryFloat(title, prompt, **kw)
- return d.result
-
-class _QueryString(_QueryDialog):
- def __init__(self, *args, **kw):
- if kw.has_key("show"):
- self.__show = kw["show"]
- del kw["show"]
- else:
- self.__show = None
- _QueryDialog.__init__(self, *args, **kw)
-
- def body(self, master):
- entry = _QueryDialog.body(self, master)
- if self.__show is not None:
- entry.configure(show=self.__show)
- return entry
-
- def getresult(self):
- return self.entry.get()
-
-def askstring(title, prompt, **kw):
- '''get a string from the user
-
- Arguments:
-
- title -- the dialog title
- prompt -- the label text
- **kw -- see SimpleDialog class
-
- Return value is a string
- '''
- d = _QueryString(title, prompt, **kw)
- return d.result
-
-if __name__ == "__main__":
-
- root = Tk()
- root.update()
-
- print askinteger("Spam", "Egg count", initialvalue=12*12)
- print askfloat("Spam", "Egg weight\n(in tons)", minvalue=1, maxvalue=100)
- print askstring("Spam", "Egg label")
diff --git a/sys/lib/python/lib-tk/turtle.py b/sys/lib/python/lib-tk/turtle.py
deleted file mode 100644
index 01a55b152..000000000
--- a/sys/lib/python/lib-tk/turtle.py
+++ /dev/null
@@ -1,956 +0,0 @@
-# LogoMation-like turtle graphics
-
-"""
-Turtle graphics is a popular way for introducing programming to
-kids. It was part of the original Logo programming language developed
-by Wally Feurzeig and Seymour Papert in 1966.
-
-Imagine a robotic turtle starting at (0, 0) in the x-y plane. Give it
-the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
-the direction it is facing, drawing a line as it moves. Give it the
-command turtle.left(25), and it rotates in-place 25 degrees clockwise.
-
-By combining together these and similar commands, intricate shapes and
-pictures can easily be drawn.
-"""
-
-from math import * # Also for export
-import Tkinter
-
-speeds = ['fastest', 'fast', 'normal', 'slow', 'slowest']
-
-class Error(Exception):
- pass
-
-class RawPen:
-
- def __init__(self, canvas):
- self._canvas = canvas
- self._items = []
- self._tracing = 1
- self._arrow = 0
- self._delay = 10 # default delay for drawing
- self._angle = 0.0
- self.degrees()
- self.reset()
-
- def degrees(self, fullcircle=360.0):
- """ Set angle measurement units to degrees.
-
- Example:
- >>> turtle.degrees()
- """
- # Don't try to change _angle if it is 0, because
- # _fullcircle might not be set, yet
- if self._angle:
- self._angle = (self._angle / self._fullcircle) * fullcircle
- self._fullcircle = fullcircle
- self._invradian = pi / (fullcircle * 0.5)
-
- def radians(self):
- """ Set the angle measurement units to radians.
-
- Example:
- >>> turtle.radians()
- """
- self.degrees(2.0*pi)
-
- def reset(self):
- """ Clear the screen, re-center the pen, and set variables to
- the default values.
-
- Example:
- >>> turtle.position()
- [0.0, -22.0]
- >>> turtle.heading()
- 100.0
- >>> turtle.reset()
- >>> turtle.position()
- [0.0, 0.0]
- >>> turtle.heading()
- 0.0
- """
- canvas = self._canvas
- self._canvas.update()
- width = canvas.winfo_width()
- height = canvas.winfo_height()
- if width <= 1:
- width = canvas['width']
- if height <= 1:
- height = canvas['height']
- self._origin = float(width)/2.0, float(height)/2.0
- self._position = self._origin
- self._angle = 0.0
- self._drawing = 1
- self._width = 1
- self._color = "black"
- self._filling = 0
- self._path = []
- self.clear()
- canvas._root().tkraise()
-
- def clear(self):
- """ Clear the screen. The turtle does not move.
-
- Example:
- >>> turtle.clear()
- """
- self.fill(0)
- canvas = self._canvas
- items = self._items
- self._items = []
- for item in items:
- canvas.delete(item)
- self._delete_turtle()
- self._draw_turtle()
-
- def tracer(self, flag):
- """ Set tracing on if flag is True, and off if it is False.
- Tracing means line are drawn more slowly, with an
- animation of an arrow along the line.
-
- Example:
- >>> turtle.tracer(False) # turns off Tracer
- """
- self._tracing = flag
- if not self._tracing:
- self._delete_turtle()
- self._draw_turtle()
-
- def forward(self, distance):
- """ Go forward distance steps.
-
- Example:
- >>> turtle.position()
- [0.0, 0.0]
- >>> turtle.forward(25)
- >>> turtle.position()
- [25.0, 0.0]
- >>> turtle.forward(-75)
- >>> turtle.position()
- [-50.0, 0.0]
- """
- x0, y0 = start = self._position
- x1 = x0 + distance * cos(self._angle*self._invradian)
- y1 = y0 - distance * sin(self._angle*self._invradian)
- self._goto(x1, y1)
-
- def backward(self, distance):
- """ Go backwards distance steps.
-
- The turtle's heading does not change.
-
- Example:
- >>> turtle.position()
- [0.0, 0.0]
- >>> turtle.backward(30)
- >>> turtle.position()
- [-30.0, 0.0]
- """
- self.forward(-distance)
-
- def left(self, angle):
- """ Turn left angle units (units are by default degrees,
- but can be set via the degrees() and radians() functions.)
-
- When viewed from above, the turning happens in-place around
- its front tip.
-
- Example:
- >>> turtle.heading()
- 22
- >>> turtle.left(45)
- >>> turtle.heading()
- 67.0
- """
- self._angle = (self._angle + angle) % self._fullcircle
- self._draw_turtle()
-
- def right(self, angle):
- """ Turn right angle units (units are by default degrees,
- but can be set via the degrees() and radians() functions.)
-
- When viewed from above, the turning happens in-place around
- its front tip.
-
- Example:
- >>> turtle.heading()
- 22
- >>> turtle.right(45)
- >>> turtle.heading()
- 337.0
- """
- self.left(-angle)
-
- def up(self):
- """ Pull the pen up -- no drawing when moving.
-
- Example:
- >>> turtle.up()
- """
- self._drawing = 0
-
- def down(self):
- """ Put the pen down -- draw when moving.
-
- Example:
- >>> turtle.down()
- """
- self._drawing = 1
-
- def width(self, width):
- """ Set the line to thickness to width.
-
- Example:
- >>> turtle.width(10)
- """
- self._width = float(width)
-
- def color(self, *args):
- """ Set the pen color.
-
- Three input formats are allowed:
-
- color(s)
- s is a Tk specification string, such as "red" or "yellow"
-
- color((r, g, b))
- *a tuple* of r, g, and b, which represent, an RGB color,
- and each of r, g, and b are in the range [0..1]
-
- color(r, g, b)
- r, g, and b represent an RGB color, and each of r, g, and b
- are in the range [0..1]
-
- Example:
-
- >>> turtle.color('brown')
- >>> tup = (0.2, 0.8, 0.55)
- >>> turtle.color(tup)
- >>> turtle.color(0, .5, 0)
- """
- if not args:
- raise Error, "no color arguments"
- if len(args) == 1:
- color = args[0]
- if type(color) == type(""):
- # Test the color first
- try:
- id = self._canvas.create_line(0, 0, 0, 0, fill=color)
- except Tkinter.TclError:
- raise Error, "bad color string: %r" % (color,)
- self._set_color(color)
- return
- try:
- r, g, b = color
- except:
- raise Error, "bad color sequence: %r" % (color,)
- else:
- try:
- r, g, b = args
- except:
- raise Error, "bad color arguments: %r" % (args,)
- assert 0 <= r <= 1
- assert 0 <= g <= 1
- assert 0 <= b <= 1
- x = 255.0
- y = 0.5
- self._set_color("#%02x%02x%02x" % (int(r*x+y), int(g*x+y), int(b*x+y)))
-
- def _set_color(self,color):
- self._color = color
- self._draw_turtle()
-
- def write(self, text, move=False):
- """ Write text at the current pen position.
-
- If move is true, the pen is moved to the bottom-right corner
- of the text. By default, move is False.
-
- Example:
- >>> turtle.write('The race is on!')
- >>> turtle.write('Home = (0, 0)', True)
- """
- x, y = self._position
- x = x-1 # correction -- calibrated for Windows
- item = self._canvas.create_text(x, y,
- text=str(text), anchor="sw",
- fill=self._color)
- self._items.append(item)
- if move:
- x0, y0, x1, y1 = self._canvas.bbox(item)
- self._goto(x1, y1)
- self._draw_turtle()
-
- def fill(self, flag):
- """ Call fill(1) before drawing the shape you
- want to fill, and fill(0) when done.
-
- Example:
- >>> turtle.fill(1)
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.fill(0)
- """
- if self._filling:
- path = tuple(self._path)
- smooth = self._filling < 0
- if len(path) > 2:
- item = self._canvas._create('polygon', path,
- {'fill': self._color,
- 'smooth': smooth})
- self._items.append(item)
- self._path = []
- self._filling = flag
- if flag:
- self._path.append(self._position)
-
- def begin_fill(self):
- """ Called just before drawing a shape to be filled.
- Must eventually be followed by a corresponding end_fill() call.
- Otherwise it will be ignored.
-
- Example:
- >>> turtle.begin_fill()
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.end_fill()
- """
- self._path = [self._position]
- self._filling = 1
-
- def end_fill(self):
- """ Called after drawing a shape to be filled.
-
- Example:
- >>> turtle.begin_fill()
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.left(90)
- >>> turtle.forward(100)
- >>> turtle.end_fill()
- """
- self.fill(0)
-
- def circle(self, radius, extent = None):
- """ Draw a circle with given radius.
- The center is radius units left of the turtle; extent
- determines which part of the circle is drawn. If not given,
- the entire circle is drawn.
-
- If extent is not a full circle, one endpoint of the arc is the
- current pen position. The arc is drawn in a counter clockwise
- direction if radius is positive, otherwise in a clockwise
- direction. In the process, the direction of the turtle is
- changed by the amount of the extent.
-
- >>> turtle.circle(50)
- >>> turtle.circle(120, 180) # half a circle
- """
- if extent is None:
- extent = self._fullcircle
- frac = abs(extent)/self._fullcircle
- steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac)
- w = 1.0 * extent / steps
- w2 = 0.5 * w
- l = 2.0 * radius * sin(w2*self._invradian)
- if radius < 0:
- l, w, w2 = -l, -w, -w2
- self.left(w2)
- for i in range(steps):
- self.forward(l)
- self.left(w)
- self.right(w2)
-
- def heading(self):
- """ Return the turtle's current heading.
-
- Example:
- >>> turtle.heading()
- 67.0
- """
- return self._angle
-
- def setheading(self, angle):
- """ Set the turtle facing the given angle.
-
- Here are some common directions in degrees:
-
- 0 - east
- 90 - north
- 180 - west
- 270 - south
-
- Example:
- >>> turtle.setheading(90)
- >>> turtle.heading()
- 90
- >>> turtle.setheading(128)
- >>> turtle.heading()
- 128
- """
- self._angle = angle
- self._draw_turtle()
-
- def window_width(self):
- """ Returns the width of the turtle window.
-
- Example:
- >>> turtle.window_width()
- 640
- """
- width = self._canvas.winfo_width()
- if width <= 1: # the window isn't managed by a geometry manager
- width = self._canvas['width']
- return width
-
- def window_height(self):
- """ Return the height of the turtle window.
-
- Example:
- >>> turtle.window_height()
- 768
- """
- height = self._canvas.winfo_height()
- if height <= 1: # the window isn't managed by a geometry manager
- height = self._canvas['height']
- return height
-
- def position(self):
- """ Return the current (x, y) location of the turtle.
-
- Example:
- >>> turtle.position()
- [0.0, 240.0]
- """
- x0, y0 = self._origin
- x1, y1 = self._position
- return [x1-x0, -y1+y0]
-
- def setx(self, xpos):
- """ Set the turtle's x coordinate to be xpos.
-
- Example:
- >>> turtle.position()
- [10.0, 240.0]
- >>> turtle.setx(10)
- >>> turtle.position()
- [10.0, 240.0]
- """
- x0, y0 = self._origin
- x1, y1 = self._position
- self._goto(x0+xpos, y1)
-
- def sety(self, ypos):
- """ Set the turtle's y coordinate to be ypos.
-
- Example:
- >>> turtle.position()
- [0.0, 0.0]
- >>> turtle.sety(-22)
- >>> turtle.position()
- [0.0, -22.0]
- """
- x0, y0 = self._origin
- x1, y1 = self._position
- self._goto(x1, y0-ypos)
-
- def towards(self, *args):
- """Returs the angle, which corresponds to the line
- from turtle-position to point (x,y).
-
- Argument can be two coordinates or one pair of coordinates
- or a RawPen/Pen instance.
-
- Example:
- >>> turtle.position()
- [10.0, 10.0]
- >>> turtle.towards(0,0)
- 225.0
- """
- if len(args) == 2:
- x, y = args
- else:
- arg = args[0]
- if isinstance(arg, RawPen):
- x, y = arg.position()
- else:
- x, y = arg
- x0, y0 = self.position()
- dx = x - x0
- dy = y - y0
- return (atan2(dy,dx) / self._invradian) % self._fullcircle
-
- def goto(self, *args):
- """ Go to the given point.
-
- If the pen is down, then a line will be drawn. The turtle's
- orientation does not change.
-
- Two input formats are accepted:
-
- goto(x, y)
- go to point (x, y)
-
- goto((x, y))
- go to point (x, y)
-
- Example:
- >>> turtle.position()
- [0.0, 0.0]
- >>> turtle.goto(50, -45)
- >>> turtle.position()
- [50.0, -45.0]
- """
- if len(args) == 1:
- try:
- x, y = args[0]
- except:
- raise Error, "bad point argument: %r" % (args[0],)
- else:
- try:
- x, y = args
- except:
- raise Error, "bad coordinates: %r" % (args[0],)
- x0, y0 = self._origin
- self._goto(x0+x, y0-y)
-
- def _goto(self, x1, y1):
- x0, y0 = self._position
- self._position = map(float, (x1, y1))
- if self._filling:
- self._path.append(self._position)
- if self._drawing:
- if self._tracing:
- dx = float(x1 - x0)
- dy = float(y1 - y0)
- distance = hypot(dx, dy)
- nhops = int(distance)
- item = self._canvas.create_line(x0, y0, x0, y0,
- width=self._width,
- capstyle="round",
- fill=self._color)
- try:
- for i in range(1, 1+nhops):
- x, y = x0 + dx*i/nhops, y0 + dy*i/nhops
- self._canvas.coords(item, x0, y0, x, y)
- self._draw_turtle((x,y))
- self._canvas.update()
- self._canvas.after(self._delay)
- # in case nhops==0
- self._canvas.coords(item, x0, y0, x1, y1)
- self._canvas.itemconfigure(item, arrow="none")
- except Tkinter.TclError:
- # Probably the window was closed!
- return
- else:
- item = self._canvas.create_line(x0, y0, x1, y1,
- width=self._width,
- capstyle="round",
- fill=self._color)
- self._items.append(item)
- self._draw_turtle()
-
- def speed(self, speed):
- """ Set the turtle's speed.
-
- speed must one of these five strings:
-
- 'fastest' is a 0 ms delay
- 'fast' is a 5 ms delay
- 'normal' is a 10 ms delay
- 'slow' is a 15 ms delay
- 'slowest' is a 20 ms delay
-
- Example:
- >>> turtle.speed('slow')
- """
- try:
- speed = speed.strip().lower()
- self._delay = speeds.index(speed) * 5
- except:
- raise ValueError("%r is not a valid speed. speed must be "
- "one of %s" % (speed, speeds))
-
-
- def delay(self, delay):
- """ Set the drawing delay in milliseconds.
-
- This is intended to allow finer control of the drawing speed
- than the speed() method
-
- Example:
- >>> turtle.delay(15)
- """
- if int(delay) < 0:
- raise ValueError("delay must be greater than or equal to 0")
- self._delay = int(delay)
-
- def _draw_turtle(self, position=[]):
- if not self._tracing:
- self._canvas.update()
- return
- if position == []:
- position = self._position
- x,y = position
- distance = 8
- dx = distance * cos(self._angle*self._invradian)
- dy = distance * sin(self._angle*self._invradian)
- self._delete_turtle()
- self._arrow = self._canvas.create_line(x-dx,y+dy,x,y,
- width=self._width,
- arrow="last",
- capstyle="round",
- fill=self._color)
- self._canvas.update()
-
- def _delete_turtle(self):
- if self._arrow != 0:
- self._canvas.delete(self._arrow)
- self._arrow = 0
-
-
-_root = None
-_canvas = None
-_pen = None
-_width = 0.50 # 50% of window width
-_height = 0.75 # 75% of window height
-_startx = None
-_starty = None
-_title = "Turtle Graphics" # default title
-
-class Pen(RawPen):
-
- def __init__(self):
- global _root, _canvas
- if _root is None:
- _root = Tkinter.Tk()
- _root.wm_protocol("WM_DELETE_WINDOW", self._destroy)
- _root.title(_title)
-
- if _canvas is None:
- # XXX Should have scroll bars
- _canvas = Tkinter.Canvas(_root, background="white")
- _canvas.pack(expand=1, fill="both")
-
- setup(width=_width, height= _height, startx=_startx, starty=_starty)
-
- RawPen.__init__(self, _canvas)
-
- def _destroy(self):
- global _root, _canvas, _pen
- root = self._canvas._root()
- if root is _root:
- _pen = None
- _root = None
- _canvas = None
- root.destroy()
-
-def _getpen():
- global _pen
- if not _pen:
- _pen = Pen()
- return _pen
-
-class Turtle(Pen):
- pass
-
-"""For documentation of the following functions see
- the RawPen methods with the same names
-"""
-
-def degrees(): _getpen().degrees()
-def radians(): _getpen().radians()
-def reset(): _getpen().reset()
-def clear(): _getpen().clear()
-def tracer(flag): _getpen().tracer(flag)
-def forward(distance): _getpen().forward(distance)
-def backward(distance): _getpen().backward(distance)
-def left(angle): _getpen().left(angle)
-def right(angle): _getpen().right(angle)
-def up(): _getpen().up()
-def down(): _getpen().down()
-def width(width): _getpen().width(width)
-def color(*args): _getpen().color(*args)
-def write(arg, move=0): _getpen().write(arg, move)
-def fill(flag): _getpen().fill(flag)
-def begin_fill(): _getpen().begin_fill()
-def end_fill(): _getpen().end_fill()
-def circle(radius, extent=None): _getpen().circle(radius, extent)
-def goto(*args): _getpen().goto(*args)
-def heading(): return _getpen().heading()
-def setheading(angle): _getpen().setheading(angle)
-def position(): return _getpen().position()
-def window_width(): return _getpen().window_width()
-def window_height(): return _getpen().window_height()
-def setx(xpos): _getpen().setx(xpos)
-def sety(ypos): _getpen().sety(ypos)
-def towards(*args): return _getpen().towards(*args)
-
-def done(): _root.mainloop()
-def delay(delay): return _getpen().delay(delay)
-def speed(speed): return _getpen().speed(speed)
-
-for methodname in dir(RawPen):
- """ copies RawPen docstrings to module functions of same name """
- if not methodname.startswith("_"):
- eval(methodname).__doc__ = RawPen.__dict__[methodname].__doc__
-
-
-def setup(**geometry):
- """ Sets the size and position of the main window.
-
- Keywords are width, height, startx and starty:
-
- width: either a size in pixels or a fraction of the screen.
- Default is 50% of screen.
- height: either the height in pixels or a fraction of the screen.
- Default is 75% of screen.
-
- Setting either width or height to None before drawing will force
- use of default geometry as in older versions of turtle.py
-
- startx: starting position in pixels from the left edge of the screen.
- Default is to center window. Setting startx to None is the default
- and centers window horizontally on screen.
-
- starty: starting position in pixels from the top edge of the screen.
- Default is to center window. Setting starty to None is the default
- and centers window vertically on screen.
-
- Examples:
- >>> setup (width=200, height=200, startx=0, starty=0)
-
- sets window to 200x200 pixels, in upper left of screen
-
- >>> setup(width=.75, height=0.5, startx=None, starty=None)
-
- sets window to 75% of screen by 50% of screen and centers
-
- >>> setup(width=None)
-
- forces use of default geometry as in older versions of turtle.py
- """
-
- global _width, _height, _startx, _starty
-
- width = geometry.get('width',_width)
- if width >= 0 or width == None:
- _width = width
- else:
- raise ValueError, "width can not be less than 0"
-
- height = geometry.get('height',_height)
- if height >= 0 or height == None:
- _height = height
- else:
- raise ValueError, "height can not be less than 0"
-
- startx = geometry.get('startx', _startx)
- if startx >= 0 or startx == None:
- _startx = _startx
- else:
- raise ValueError, "startx can not be less than 0"
-
- starty = geometry.get('starty', _starty)
- if starty >= 0 or starty == None:
- _starty = starty
- else:
- raise ValueError, "startx can not be less than 0"
-
-
- if _root and _width and _height:
- if 0 < _width <= 1:
- _width = _root.winfo_screenwidth() * +width
- if 0 < _height <= 1:
- _height = _root.winfo_screenheight() * _height
-
- # center window on screen
- if _startx is None:
- _startx = (_root.winfo_screenwidth() - _width) / 2
-
- if _starty is None:
- _starty = (_root.winfo_screenheight() - _height) / 2
-
- _root.geometry("%dx%d+%d+%d" % (_width, _height, _startx, _starty))
-
-def title(title):
- """Set the window title.
-
- By default this is set to 'Turtle Graphics'
-
- Example:
- >>> title("My Window")
- """
-
- global _title
- _title = title
-
-def demo():
- reset()
- tracer(1)
- up()
- backward(100)
- down()
- # draw 3 squares; the last filled
- width(3)
- for i in range(3):
- if i == 2:
- fill(1)
- for j in range(4):
- forward(20)
- left(90)
- if i == 2:
- color("maroon")
- fill(0)
- up()
- forward(30)
- down()
- width(1)
- color("black")
- # move out of the way
- tracer(0)
- up()
- right(90)
- forward(100)
- right(90)
- forward(100)
- right(180)
- down()
- # some text
- write("startstart", 1)
- write("start", 1)
- color("red")
- # staircase
- for i in range(5):
- forward(20)
- left(90)
- forward(20)
- right(90)
- # filled staircase
- fill(1)
- for i in range(5):
- forward(20)
- left(90)
- forward(20)
- right(90)
- fill(0)
- tracer(1)
- # more text
- write("end")
-
-def demo2():
- # exercises some new and improved features
- speed('fast')
- width(3)
-
- # draw a segmented half-circle
- setheading(towards(0,0))
- x,y = position()
- r = (x**2+y**2)**.5/2.0
- right(90)
- pendown = True
- for i in range(18):
- if pendown:
- up()
- pendown = False
- else:
- down()
- pendown = True
- circle(r,10)
- sleep(2)
-
- reset()
- left(90)
-
- # draw a series of triangles
- l = 10
- color("green")
- width(3)
- left(180)
- sp = 5
- for i in range(-2,16):
- if i > 0:
- color(1.0-0.05*i,0,0.05*i)
- fill(1)
- color("green")
- for j in range(3):
- forward(l)
- left(120)
- l += 10
- left(15)
- if sp > 0:
- sp = sp-1
- speed(speeds[sp])
- color(0.25,0,0.75)
- fill(0)
-
- # draw and fill a concave shape
- left(120)
- up()
- forward(70)
- right(30)
- down()
- color("red")
- speed("fastest")
- fill(1)
- for i in range(4):
- circle(50,90)
- right(90)
- forward(30)
- right(90)
- color("yellow")
- fill(0)
- left(90)
- up()
- forward(30)
- down();
-
- color("red")
-
- # create a second turtle and make the original pursue and catch it
- turtle=Turtle()
- turtle.reset()
- turtle.left(90)
- turtle.speed('normal')
- turtle.up()
- turtle.goto(280,40)
- turtle.left(24)
- turtle.down()
- turtle.speed('fast')
- turtle.color("blue")
- turtle.width(2)
- speed('fastest')
-
- # turn default turtle towards new turtle object
- setheading(towards(turtle))
- while ( abs(position()[0]-turtle.position()[0])>4 or
- abs(position()[1]-turtle.position()[1])>4):
- turtle.forward(3.5)
- turtle.left(0.6)
- # turn default turtle towards new turtle object
- setheading(towards(turtle))
- forward(4)
- write("CAUGHT! ", move=True)
-
-
-
-if __name__ == '__main__':
- from time import sleep
- demo()
- sleep(3)
- demo2()
- done()
diff --git a/sys/lib/python/linecache.py b/sys/lib/python/linecache.py
deleted file mode 100644
index 4838625f0..000000000
--- a/sys/lib/python/linecache.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""Cache lines from files.
-
-This is intended to read lines from modules imported -- hence if a filename
-is not found, it will look down the module search path for a file by
-that name.
-"""
-
-import sys
-import os
-
-__all__ = ["getline", "clearcache", "checkcache"]
-
-def getline(filename, lineno, module_globals=None):
- lines = getlines(filename, module_globals)
- if 1 <= lineno <= len(lines):
- return lines[lineno-1]
- else:
- return ''
-
-
-# The cache
-
-cache = {} # The cache
-
-
-def clearcache():
- """Clear the cache entirely."""
-
- global cache
- cache = {}
-
-
-def getlines(filename, module_globals=None):
- """Get the lines for a file from the cache.
- Update the cache if it doesn't contain an entry for this file already."""
-
- if filename in cache:
- return cache[filename][2]
- else:
- return updatecache(filename, module_globals)
-
-
-def checkcache(filename=None):
- """Discard cache entries that are out of date.
- (This is not checked upon each call!)"""
-
- if filename is None:
- filenames = cache.keys()
- else:
- if filename in cache:
- filenames = [filename]
- else:
- return
-
- for filename in filenames:
- size, mtime, lines, fullname = cache[filename]
- if mtime is None:
- continue # no-op for files loaded via a __loader__
- try:
- stat = os.stat(fullname)
- except os.error:
- del cache[filename]
- continue
- if size != stat.st_size or mtime != stat.st_mtime:
- del cache[filename]
-
-
-def updatecache(filename, module_globals=None):
- """Update a cache entry and return its list of lines.
- If something's wrong, print a message, discard the cache entry,
- and return an empty list."""
-
- if filename in cache:
- del cache[filename]
- if not filename or filename[0] + filename[-1] == '<>':
- return []
-
- fullname = filename
- try:
- stat = os.stat(fullname)
- except os.error, msg:
- basename = os.path.split(filename)[1]
-
- # Try for a __loader__, if available
- if module_globals and '__loader__' in module_globals:
- name = module_globals.get('__name__')
- loader = module_globals['__loader__']
- get_source = getattr(loader, 'get_source', None)
-
- if name and get_source:
- if basename.startswith(name.split('.')[-1]+'.'):
- try:
- data = get_source(name)
- except (ImportError, IOError):
- pass
- else:
- if data is None:
- # No luck, the PEP302 loader cannot find the source
- # for this module.
- return []
- cache[filename] = (
- len(data), None,
- [line+'\n' for line in data.splitlines()], fullname
- )
- return cache[filename][2]
-
- # Try looking through the module search path.
-
- for dirname in sys.path:
- # When using imputil, sys.path may contain things other than
- # strings; ignore them when it happens.
- try:
- fullname = os.path.join(dirname, basename)
- except (TypeError, AttributeError):
- # Not sufficiently string-like to do anything useful with.
- pass
- else:
- try:
- stat = os.stat(fullname)
- break
- except os.error:
- pass
- else:
- # No luck
-## print '*** Cannot stat', filename, ':', msg
- return []
- try:
- fp = open(fullname, 'rU')
- lines = fp.readlines()
- fp.close()
- except IOError, msg:
-## print '*** Cannot open', fullname, ':', msg
- return []
- size, mtime = stat.st_size, stat.st_mtime
- cache[filename] = size, mtime, lines, fullname
- return lines
diff --git a/sys/lib/python/locale.py b/sys/lib/python/locale.py
deleted file mode 100644
index fd549bbde..000000000
--- a/sys/lib/python/locale.py
+++ /dev/null
@@ -1,1562 +0,0 @@
-""" Locale support.
-
- The module provides low-level access to the C lib's locale APIs
- and adds high level number formatting APIs as well as a locale
- aliasing engine to complement these.
-
- The aliasing engine includes support for many commonly used locale
- names and maps them to values suitable for passing to the C lib's
- setlocale() function. It also includes default encodings for all
- supported locale names.
-
-"""
-
-import sys, encodings, encodings.aliases
-
-# Try importing the _locale module.
-#
-# If this fails, fall back on a basic 'C' locale emulation.
-
-# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
-# trying the import. So __all__ is also fiddled at the end of the file.
-__all__ = ["setlocale","Error","localeconv","strcoll","strxfrm",
- "format","str","atof","atoi","LC_CTYPE","LC_COLLATE",
- "LC_TIME","LC_MONETARY","LC_NUMERIC", "LC_ALL","CHAR_MAX"]
-
-try:
-
- from _locale import *
-
-except ImportError:
-
- # Locale emulation
-
- CHAR_MAX = 127
- LC_ALL = 6
- LC_COLLATE = 3
- LC_CTYPE = 0
- LC_MESSAGES = 5
- LC_MONETARY = 4
- LC_NUMERIC = 1
- LC_TIME = 2
- Error = ValueError
-
- def localeconv():
- """ localeconv() -> dict.
- Returns numeric and monetary locale-specific parameters.
- """
- # 'C' locale default values
- return {'grouping': [127],
- 'currency_symbol': '',
- 'n_sign_posn': 127,
- 'p_cs_precedes': 127,
- 'n_cs_precedes': 127,
- 'mon_grouping': [],
- 'n_sep_by_space': 127,
- 'decimal_point': '.',
- 'negative_sign': '',
- 'positive_sign': '',
- 'p_sep_by_space': 127,
- 'int_curr_symbol': '',
- 'p_sign_posn': 127,
- 'thousands_sep': '',
- 'mon_thousands_sep': '',
- 'frac_digits': 127,
- 'mon_decimal_point': '',
- 'int_frac_digits': 127}
-
- def setlocale(category, value=None):
- """ setlocale(integer,string=None) -> string.
- Activates/queries locale processing.
- """
- if value not in (None, '', 'C'):
- raise Error, '_locale emulation only supports "C" locale'
- return 'C'
-
- def strcoll(a,b):
- """ strcoll(string,string) -> int.
- Compares two strings according to the locale.
- """
- return cmp(a,b)
-
- def strxfrm(s):
- """ strxfrm(string) -> string.
- Returns a string that behaves for cmp locale-aware.
- """
- return s
-
-### Number formatting APIs
-
-# Author: Martin von Loewis
-# improved by Georg Brandl
-
-#perform the grouping from right to left
-def _group(s, monetary=False):
- conv = localeconv()
- thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
- grouping = conv[monetary and 'mon_grouping' or 'grouping']
- if not grouping:
- return (s, 0)
- result = ""
- seps = 0
- spaces = ""
- if s[-1] == ' ':
- sp = s.find(' ')
- spaces = s[sp:]
- s = s[:sp]
- while s and grouping:
- # if grouping is -1, we are done
- if grouping[0] == CHAR_MAX:
- break
- # 0: re-use last group ad infinitum
- elif grouping[0] != 0:
- #process last group
- group = grouping[0]
- grouping = grouping[1:]
- if result:
- result = s[-group:] + thousands_sep + result
- seps += 1
- else:
- result = s[-group:]
- s = s[:-group]
- if s and s[-1] not in "0123456789":
- # the leading string is only spaces and signs
- return s + result + spaces, seps
- if not result:
- return s + spaces, seps
- if s:
- result = s + thousands_sep + result
- seps += 1
- return result + spaces, seps
-
-def format(percent, value, grouping=False, monetary=False, *additional):
- """Returns the locale-aware substitution of a %? specifier
- (percent).
-
- additional is for format strings which contain one or more
- '*' modifiers."""
- # this is only for one-percent-specifier strings and this should be checked
- if percent[0] != '%':
- raise ValueError("format() must be given exactly one %char "
- "format specifier")
- if additional:
- formatted = percent % ((value,) + additional)
- else:
- formatted = percent % value
- # floats and decimal ints need special action!
- if percent[-1] in 'eEfFgG':
- seps = 0
- parts = formatted.split('.')
- if grouping:
- parts[0], seps = _group(parts[0], monetary=monetary)
- decimal_point = localeconv()[monetary and 'mon_decimal_point'
- or 'decimal_point']
- formatted = decimal_point.join(parts)
- while seps:
- sp = formatted.find(' ')
- if sp == -1: break
- formatted = formatted[:sp] + formatted[sp+1:]
- seps -= 1
- elif percent[-1] in 'diu':
- if grouping:
- formatted = _group(formatted, monetary=monetary)[0]
- return formatted
-
-import re, operator
-_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
- r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
-
-def format_string(f, val, grouping=False):
- """Formats a string in the same way that the % formatting would use,
- but takes the current locale into account.
- Grouping is applied if the third parameter is true."""
- percents = list(_percent_re.finditer(f))
- new_f = _percent_re.sub('%s', f)
-
- if isinstance(val, tuple):
- new_val = list(val)
- i = 0
- for perc in percents:
- starcount = perc.group('modifiers').count('*')
- new_val[i] = format(perc.group(), new_val[i], grouping, False, *new_val[i+1:i+1+starcount])
- del new_val[i+1:i+1+starcount]
- i += (1 + starcount)
- val = tuple(new_val)
- elif operator.isMappingType(val):
- for perc in percents:
- key = perc.group("key")
- val[key] = format(perc.group(), val[key], grouping)
- else:
- # val is a single value
- val = format(percents[0].group(), val, grouping)
-
- return new_f % val
-
-def currency(val, symbol=True, grouping=False, international=False):
- """Formats val according to the currency settings
- in the current locale."""
- conv = localeconv()
-
- # check for illegal values
- digits = conv[international and 'int_frac_digits' or 'frac_digits']
- if digits == 127:
- raise ValueError("Currency formatting is not possible using "
- "the 'C' locale.")
-
- s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
- # '<' and '>' are markers if the sign must be inserted between symbol and value
- s = '<' + s + '>'
-
- if symbol:
- smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
- precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
- separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
-
- if precedes:
- s = smb + (separated and ' ' or '') + s
- else:
- s = s + (separated and ' ' or '') + smb
-
- sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
- sign = conv[val<0 and 'negative_sign' or 'positive_sign']
-
- if sign_pos == 0:
- s = '(' + s + ')'
- elif sign_pos == 1:
- s = sign + s
- elif sign_pos == 2:
- s = s + sign
- elif sign_pos == 3:
- s = s.replace('<', sign)
- elif sign_pos == 4:
- s = s.replace('>', sign)
- else:
- # the default if nothing specified;
- # this should be the most fitting sign position
- s = sign + s
-
- return s.replace('<', '').replace('>', '')
-
-def str(val):
- """Convert float to integer, taking the locale into account."""
- return format("%.12g", val)
-
-def atof(string, func=float):
- "Parses a string as a float according to the locale settings."
- #First, get rid of the grouping
- ts = localeconv()['thousands_sep']
- if ts:
- string = string.replace(ts, '')
- #next, replace the decimal point with a dot
- dd = localeconv()['decimal_point']
- if dd:
- string = string.replace(dd, '.')
- #finally, parse the string
- return func(string)
-
-def atoi(str):
- "Converts a string to an integer according to the locale settings."
- return atof(str, int)
-
-def _test():
- setlocale(LC_ALL, "")
- #do grouping
- s1 = format("%d", 123456789,1)
- print s1, "is", atoi(s1)
- #standard formatting
- s1 = str(3.14)
- print s1, "is", atof(s1)
-
-### Locale name aliasing engine
-
-# Author: Marc-Andre Lemburg, mal@lemburg.com
-# Various tweaks by Fredrik Lundh <fredrik@pythonware.com>
-
-# store away the low-level version of setlocale (it's
-# overridden below)
-_setlocale = setlocale
-
-def normalize(localename):
-
- """ Returns a normalized locale code for the given locale
- name.
-
- The returned locale code is formatted for use with
- setlocale().
-
- If normalization fails, the original name is returned
- unchanged.
-
- If the given encoding is not known, the function defaults to
- the default encoding for the locale code just like setlocale()
- does.
-
- """
- # Normalize the locale name and extract the encoding
- fullname = localename.lower()
- if ':' in fullname:
- # ':' is sometimes used as encoding delimiter.
- fullname = fullname.replace(':', '.')
- if '.' in fullname:
- langname, encoding = fullname.split('.')[:2]
- fullname = langname + '.' + encoding
- else:
- langname = fullname
- encoding = ''
-
- # First lookup: fullname (possibly with encoding)
- norm_encoding = encoding.replace('-', '')
- norm_encoding = norm_encoding.replace('_', '')
- lookup_name = langname + '.' + encoding
- code = locale_alias.get(lookup_name, None)
- if code is not None:
- return code
- #print 'first lookup failed'
-
- # Second try: langname (without encoding)
- code = locale_alias.get(langname, None)
- if code is not None:
- #print 'langname lookup succeeded'
- if '.' in code:
- langname, defenc = code.split('.')
- else:
- langname = code
- defenc = ''
- if encoding:
- # Convert the encoding to a C lib compatible encoding string
- norm_encoding = encodings.normalize_encoding(encoding)
- #print 'norm encoding: %r' % norm_encoding
- norm_encoding = encodings.aliases.aliases.get(norm_encoding,
- norm_encoding)
- #print 'aliased encoding: %r' % norm_encoding
- encoding = locale_encoding_alias.get(norm_encoding,
- norm_encoding)
- else:
- encoding = defenc
- #print 'found encoding %r' % encoding
- if encoding:
- return langname + '.' + encoding
- else:
- return langname
-
- else:
- return localename
-
-def _parse_localename(localename):
-
- """ Parses the locale code for localename and returns the
- result as tuple (language code, encoding).
-
- The localename is normalized and passed through the locale
- alias engine. A ValueError is raised in case the locale name
- cannot be parsed.
-
- The language code corresponds to RFC 1766. code and encoding
- can be None in case the values cannot be determined or are
- unknown to this implementation.
-
- """
- code = normalize(localename)
- if '@' in code:
- # Deal with locale modifiers
- code, modifier = code.split('@')
- if modifier == 'euro' and '.' not in code:
- # Assume Latin-9 for @euro locales. This is bogus,
- # since some systems may use other encodings for these
- # locales. Also, we ignore other modifiers.
- return code, 'iso-8859-15'
-
- if '.' in code:
- return tuple(code.split('.')[:2])
- elif code == 'C':
- return None, None
- raise ValueError, 'unknown locale: %s' % localename
-
-def _build_localename(localetuple):
-
- """ Builds a locale code from the given tuple (language code,
- encoding).
-
- No aliasing or normalizing takes place.
-
- """
- language, encoding = localetuple
- if language is None:
- language = 'C'
- if encoding is None:
- return language
- else:
- return language + '.' + encoding
-
-def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
-
- """ Tries to determine the default locale settings and returns
- them as tuple (language code, encoding).
-
- According to POSIX, a program which has not called
- setlocale(LC_ALL, "") runs using the portable 'C' locale.
- Calling setlocale(LC_ALL, "") lets it use the default locale as
- defined by the LANG variable. Since we don't want to interfere
- with the current locale setting we thus emulate the behavior
- in the way described above.
-
- To maintain compatibility with other platforms, not only the
- LANG variable is tested, but a list of variables given as
- envvars parameter. The first found to be defined will be
- used. envvars defaults to the search path used in GNU gettext;
- it must always contain the variable name 'LANG'.
-
- Except for the code 'C', the language code corresponds to RFC
- 1766. code and encoding can be None in case the values cannot
- be determined.
-
- """
-
- try:
- # check if it's supported by the _locale module
- import _locale
- code, encoding = _locale._getdefaultlocale()
- except (ImportError, AttributeError):
- pass
- else:
- # make sure the code/encoding values are valid
- if sys.platform == "win32" and code and code[:2] == "0x":
- # map windows language identifier to language name
- code = windows_locale.get(int(code, 0))
- # ...add other platform-specific processing here, if
- # necessary...
- return code, encoding
-
- # fall back on POSIX behaviour
- import os
- lookup = os.environ.get
- for variable in envvars:
- localename = lookup(variable,None)
- if localename:
- if variable == 'LANGUAGE':
- localename = localename.split(':')[0]
- break
- else:
- localename = 'C'
- return _parse_localename(localename)
-
-
-def getlocale(category=LC_CTYPE):
-
- """ Returns the current setting for the given locale category as
- tuple (language code, encoding).
-
- category may be one of the LC_* value except LC_ALL. It
- defaults to LC_CTYPE.
-
- Except for the code 'C', the language code corresponds to RFC
- 1766. code and encoding can be None in case the values cannot
- be determined.
-
- """
- localename = _setlocale(category)
- if category == LC_ALL and ';' in localename:
- raise TypeError, 'category LC_ALL is not supported'
- return _parse_localename(localename)
-
-def setlocale(category, locale=None):
-
- """ Set the locale for the given category. The locale can be
- a string, a locale tuple (language code, encoding), or None.
-
- Locale tuples are converted to strings the locale aliasing
- engine. Locale strings are passed directly to the C lib.
-
- category may be given as one of the LC_* values.
-
- """
- if locale and type(locale) is not type(""):
- # convert to string
- locale = normalize(_build_localename(locale))
- return _setlocale(category, locale)
-
-def resetlocale(category=LC_ALL):
-
- """ Sets the locale for category to the default setting.
-
- The default setting is determined by calling
- getdefaultlocale(). category defaults to LC_ALL.
-
- """
- _setlocale(category, _build_localename(getdefaultlocale()))
-
-if sys.platform in ('win32', 'darwin', 'mac'):
- # On Win32, this will return the ANSI code page
- # On the Mac, it should return the system encoding;
- # it might return "ascii" instead
- def getpreferredencoding(do_setlocale = True):
- """Return the charset that the user is likely using."""
- import _locale
- return _locale._getdefaultlocale()[1]
-else:
- # On Unix, if CODESET is available, use that.
- try:
- CODESET
- except NameError:
- # Fall back to parsing environment variables :-(
- def getpreferredencoding(do_setlocale = True):
- """Return the charset that the user is likely using,
- by looking at environment variables."""
- return getdefaultlocale()[1]
- else:
- def getpreferredencoding(do_setlocale = True):
- """Return the charset that the user is likely using,
- according to the system configuration."""
- if do_setlocale:
- oldloc = setlocale(LC_CTYPE)
- setlocale(LC_CTYPE, "")
- result = nl_langinfo(CODESET)
- setlocale(LC_CTYPE, oldloc)
- return result
- else:
- return nl_langinfo(CODESET)
-
-
-### Database
-#
-# The following data was extracted from the locale.alias file which
-# comes with X11 and then hand edited removing the explicit encoding
-# definitions and adding some more aliases. The file is usually
-# available as /usr/lib/X11/locale/locale.alias.
-#
-
-#
-# The local_encoding_alias table maps lowercase encoding alias names
-# to C locale encoding names (case-sensitive). Note that normalize()
-# first looks up the encoding in the encodings.aliases dictionary and
-# then applies this mapping to find the correct C lib name for the
-# encoding.
-#
-locale_encoding_alias = {
-
- # Mappings for non-standard encoding names used in locale names
- '437': 'C',
- 'c': 'C',
- 'en': 'ISO8859-1',
- 'jis': 'JIS7',
- 'jis7': 'JIS7',
- 'ajec': 'eucJP',
-
- # Mappings from Python codec names to C lib encoding names
- 'ascii': 'ISO8859-1',
- 'latin_1': 'ISO8859-1',
- 'iso8859_1': 'ISO8859-1',
- 'iso8859_10': 'ISO8859-10',
- 'iso8859_11': 'ISO8859-11',
- 'iso8859_13': 'ISO8859-13',
- 'iso8859_14': 'ISO8859-14',
- 'iso8859_15': 'ISO8859-15',
- 'iso8859_2': 'ISO8859-2',
- 'iso8859_3': 'ISO8859-3',
- 'iso8859_4': 'ISO8859-4',
- 'iso8859_5': 'ISO8859-5',
- 'iso8859_6': 'ISO8859-6',
- 'iso8859_7': 'ISO8859-7',
- 'iso8859_8': 'ISO8859-8',
- 'iso8859_9': 'ISO8859-9',
- 'iso2022_jp': 'JIS7',
- 'shift_jis': 'SJIS',
- 'tactis': 'TACTIS',
- 'euc_jp': 'eucJP',
- 'euc_kr': 'eucKR',
- 'utf_8': 'UTF8',
- 'koi8_r': 'KOI8-R',
- 'koi8_u': 'KOI8-U',
- # XXX This list is still incomplete. If you know more
- # mappings, please file a bug report. Thanks.
-}
-
-#
-# The locale_alias table maps lowercase alias names to C locale names
-# (case-sensitive). Encodings are always separated from the locale
-# name using a dot ('.'); they should only be given in case the
-# language name is needed to interpret the given encoding alias
-# correctly (CJK codes often have this need).
-#
-# Note that the normalize() function which uses this tables
-# removes '_' and '-' characters from the encoding part of the
-# locale name before doing the lookup. This saves a lot of
-# space in the table.
-#
-# MAL 2004-12-10:
-# Updated alias mapping to most recent locale.alias file
-# from X.org distribution using makelocalealias.py.
-#
-# These are the differences compared to the old mapping (Python 2.4
-# and older):
-#
-# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
-# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
-# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
-# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
-# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
-# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
-# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
-# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
-# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
-# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
-# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
-# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
-# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
-# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
-# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
-# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
-# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
-# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
-# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
-# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
-# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
-# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
-#
-locale_alias = {
- 'a3': 'a3_AZ.KOI8-C',
- 'a3_az': 'a3_AZ.KOI8-C',
- 'a3_az.koi8c': 'a3_AZ.KOI8-C',
- 'af': 'af_ZA.ISO8859-1',
- 'af_za': 'af_ZA.ISO8859-1',
- 'af_za.iso88591': 'af_ZA.ISO8859-1',
- 'am': 'am_ET.UTF-8',
- 'american': 'en_US.ISO8859-1',
- 'american.iso88591': 'en_US.ISO8859-1',
- 'ar': 'ar_AA.ISO8859-6',
- 'ar_aa': 'ar_AA.ISO8859-6',
- 'ar_aa.iso88596': 'ar_AA.ISO8859-6',
- 'ar_ae': 'ar_AE.ISO8859-6',
- 'ar_bh': 'ar_BH.ISO8859-6',
- 'ar_dz': 'ar_DZ.ISO8859-6',
- 'ar_eg': 'ar_EG.ISO8859-6',
- 'ar_eg.iso88596': 'ar_EG.ISO8859-6',
- 'ar_iq': 'ar_IQ.ISO8859-6',
- 'ar_jo': 'ar_JO.ISO8859-6',
- 'ar_kw': 'ar_KW.ISO8859-6',
- 'ar_lb': 'ar_LB.ISO8859-6',
- 'ar_ly': 'ar_LY.ISO8859-6',
- 'ar_ma': 'ar_MA.ISO8859-6',
- 'ar_om': 'ar_OM.ISO8859-6',
- 'ar_qa': 'ar_QA.ISO8859-6',
- 'ar_sa': 'ar_SA.ISO8859-6',
- 'ar_sa.iso88596': 'ar_SA.ISO8859-6',
- 'ar_sd': 'ar_SD.ISO8859-6',
- 'ar_sy': 'ar_SY.ISO8859-6',
- 'ar_tn': 'ar_TN.ISO8859-6',
- 'ar_ye': 'ar_YE.ISO8859-6',
- 'arabic': 'ar_AA.ISO8859-6',
- 'arabic.iso88596': 'ar_AA.ISO8859-6',
- 'az': 'az_AZ.ISO8859-9E',
- 'az_az': 'az_AZ.ISO8859-9E',
- 'az_az.iso88599e': 'az_AZ.ISO8859-9E',
- 'be': 'be_BY.CP1251',
- 'be_by': 'be_BY.CP1251',
- 'be_by.cp1251': 'be_BY.CP1251',
- 'be_by.microsoftcp1251': 'be_BY.CP1251',
- 'bg': 'bg_BG.CP1251',
- 'bg_bg': 'bg_BG.CP1251',
- 'bg_bg.cp1251': 'bg_BG.CP1251',
- 'bg_bg.iso88595': 'bg_BG.ISO8859-5',
- 'bg_bg.koi8r': 'bg_BG.KOI8-R',
- 'bg_bg.microsoftcp1251': 'bg_BG.CP1251',
- 'bokmal': 'nb_NO.ISO8859-1',
- 'bokm\xe5l': 'nb_NO.ISO8859-1',
- 'br': 'br_FR.ISO8859-1',
- 'br_fr': 'br_FR.ISO8859-1',
- 'br_fr.iso88591': 'br_FR.ISO8859-1',
- 'br_fr.iso885914': 'br_FR.ISO8859-14',
- 'br_fr.iso885915': 'br_FR.ISO8859-15',
- 'br_fr@euro': 'br_FR.ISO8859-15',
- 'bulgarian': 'bg_BG.CP1251',
- 'c': 'C',
- 'c-french': 'fr_CA.ISO8859-1',
- 'c-french.iso88591': 'fr_CA.ISO8859-1',
- 'c.en': 'C',
- 'c.iso88591': 'en_US.ISO8859-1',
- 'c_c': 'C',
- 'c_c.c': 'C',
- 'ca': 'ca_ES.ISO8859-1',
- 'ca_es': 'ca_ES.ISO8859-1',
- 'ca_es.iso88591': 'ca_ES.ISO8859-1',
- 'ca_es.iso885915': 'ca_ES.ISO8859-15',
- 'ca_es@euro': 'ca_ES.ISO8859-15',
- 'catalan': 'ca_ES.ISO8859-1',
- 'cextend': 'en_US.ISO8859-1',
- 'cextend.en': 'en_US.ISO8859-1',
- 'chinese-s': 'zh_CN.eucCN',
- 'chinese-t': 'zh_TW.eucTW',
- 'croatian': 'hr_HR.ISO8859-2',
- 'cs': 'cs_CZ.ISO8859-2',
- 'cs_cs': 'cs_CZ.ISO8859-2',
- 'cs_cs.iso88592': 'cs_CZ.ISO8859-2',
- 'cs_cz': 'cs_CZ.ISO8859-2',
- 'cs_cz.iso88592': 'cs_CZ.ISO8859-2',
- 'cy': 'cy_GB.ISO8859-1',
- 'cy_gb': 'cy_GB.ISO8859-1',
- 'cy_gb.iso88591': 'cy_GB.ISO8859-1',
- 'cy_gb.iso885914': 'cy_GB.ISO8859-14',
- 'cy_gb.iso885915': 'cy_GB.ISO8859-15',
- 'cy_gb@euro': 'cy_GB.ISO8859-15',
- 'cz': 'cs_CZ.ISO8859-2',
- 'cz_cz': 'cs_CZ.ISO8859-2',
- 'czech': 'cs_CZ.ISO8859-2',
- 'da': 'da_DK.ISO8859-1',
- 'da_dk': 'da_DK.ISO8859-1',
- 'da_dk.88591': 'da_DK.ISO8859-1',
- 'da_dk.885915': 'da_DK.ISO8859-15',
- 'da_dk.iso88591': 'da_DK.ISO8859-1',
- 'da_dk.iso885915': 'da_DK.ISO8859-15',
- 'da_dk@euro': 'da_DK.ISO8859-15',
- 'danish': 'da_DK.ISO8859-1',
- 'danish.iso88591': 'da_DK.ISO8859-1',
- 'dansk': 'da_DK.ISO8859-1',
- 'de': 'de_DE.ISO8859-1',
- 'de_at': 'de_AT.ISO8859-1',
- 'de_at.iso88591': 'de_AT.ISO8859-1',
- 'de_at.iso885915': 'de_AT.ISO8859-15',
- 'de_at@euro': 'de_AT.ISO8859-15',
- 'de_be': 'de_BE.ISO8859-1',
- 'de_be.iso88591': 'de_BE.ISO8859-1',
- 'de_be.iso885915': 'de_BE.ISO8859-15',
- 'de_be@euro': 'de_BE.ISO8859-15',
- 'de_ch': 'de_CH.ISO8859-1',
- 'de_ch.iso88591': 'de_CH.ISO8859-1',
- 'de_ch.iso885915': 'de_CH.ISO8859-15',
- 'de_ch@euro': 'de_CH.ISO8859-15',
- 'de_de': 'de_DE.ISO8859-1',
- 'de_de.88591': 'de_DE.ISO8859-1',
- 'de_de.885915': 'de_DE.ISO8859-15',
- 'de_de.885915@euro': 'de_DE.ISO8859-15',
- 'de_de.iso88591': 'de_DE.ISO8859-1',
- 'de_de.iso885915': 'de_DE.ISO8859-15',
- 'de_de@euro': 'de_DE.ISO8859-15',
- 'de_lu': 'de_LU.ISO8859-1',
- 'de_lu.iso88591': 'de_LU.ISO8859-1',
- 'de_lu.iso885915': 'de_LU.ISO8859-15',
- 'de_lu@euro': 'de_LU.ISO8859-15',
- 'deutsch': 'de_DE.ISO8859-1',
- 'dutch': 'nl_NL.ISO8859-1',
- 'dutch.iso88591': 'nl_BE.ISO8859-1',
- 'ee': 'ee_EE.ISO8859-4',
- 'ee_ee': 'ee_EE.ISO8859-4',
- 'ee_ee.iso88594': 'ee_EE.ISO8859-4',
- 'eesti': 'et_EE.ISO8859-1',
- 'el': 'el_GR.ISO8859-7',
- 'el_gr': 'el_GR.ISO8859-7',
- 'el_gr.iso88597': 'el_GR.ISO8859-7',
- 'el_gr@euro': 'el_GR.ISO8859-15',
- 'en': 'en_US.ISO8859-1',
- 'en.iso88591': 'en_US.ISO8859-1',
- 'en_au': 'en_AU.ISO8859-1',
- 'en_au.iso88591': 'en_AU.ISO8859-1',
- 'en_be': 'en_BE.ISO8859-1',
- 'en_be@euro': 'en_BE.ISO8859-15',
- 'en_bw': 'en_BW.ISO8859-1',
- 'en_ca': 'en_CA.ISO8859-1',
- 'en_ca.iso88591': 'en_CA.ISO8859-1',
- 'en_gb': 'en_GB.ISO8859-1',
- 'en_gb.88591': 'en_GB.ISO8859-1',
- 'en_gb.iso88591': 'en_GB.ISO8859-1',
- 'en_gb.iso885915': 'en_GB.ISO8859-15',
- 'en_gb@euro': 'en_GB.ISO8859-15',
- 'en_hk': 'en_HK.ISO8859-1',
- 'en_ie': 'en_IE.ISO8859-1',
- 'en_ie.iso88591': 'en_IE.ISO8859-1',
- 'en_ie.iso885915': 'en_IE.ISO8859-15',
- 'en_ie@euro': 'en_IE.ISO8859-15',
- 'en_in': 'en_IN.ISO8859-1',
- 'en_nz': 'en_NZ.ISO8859-1',
- 'en_nz.iso88591': 'en_NZ.ISO8859-1',
- 'en_ph': 'en_PH.ISO8859-1',
- 'en_sg': 'en_SG.ISO8859-1',
- 'en_uk': 'en_GB.ISO8859-1',
- 'en_us': 'en_US.ISO8859-1',
- 'en_us.88591': 'en_US.ISO8859-1',
- 'en_us.885915': 'en_US.ISO8859-15',
- 'en_us.iso88591': 'en_US.ISO8859-1',
- 'en_us.iso885915': 'en_US.ISO8859-15',
- 'en_us.iso885915@euro': 'en_US.ISO8859-15',
- 'en_us@euro': 'en_US.ISO8859-15',
- 'en_us@euro@euro': 'en_US.ISO8859-15',
- 'en_za': 'en_ZA.ISO8859-1',
- 'en_za.88591': 'en_ZA.ISO8859-1',
- 'en_za.iso88591': 'en_ZA.ISO8859-1',
- 'en_za.iso885915': 'en_ZA.ISO8859-15',
- 'en_za@euro': 'en_ZA.ISO8859-15',
- 'en_zw': 'en_ZW.ISO8859-1',
- 'eng_gb': 'en_GB.ISO8859-1',
- 'eng_gb.8859': 'en_GB.ISO8859-1',
- 'english': 'en_EN.ISO8859-1',
- 'english.iso88591': 'en_EN.ISO8859-1',
- 'english_uk': 'en_GB.ISO8859-1',
- 'english_uk.8859': 'en_GB.ISO8859-1',
- 'english_united-states': 'en_US.ISO8859-1',
- 'english_united-states.437': 'C',
- 'english_us': 'en_US.ISO8859-1',
- 'english_us.8859': 'en_US.ISO8859-1',
- 'english_us.ascii': 'en_US.ISO8859-1',
- 'eo': 'eo_XX.ISO8859-3',
- 'eo_eo': 'eo_EO.ISO8859-3',
- 'eo_eo.iso88593': 'eo_EO.ISO8859-3',
- 'eo_xx': 'eo_XX.ISO8859-3',
- 'eo_xx.iso88593': 'eo_XX.ISO8859-3',
- 'es': 'es_ES.ISO8859-1',
- 'es_ar': 'es_AR.ISO8859-1',
- 'es_ar.iso88591': 'es_AR.ISO8859-1',
- 'es_bo': 'es_BO.ISO8859-1',
- 'es_bo.iso88591': 'es_BO.ISO8859-1',
- 'es_cl': 'es_CL.ISO8859-1',
- 'es_cl.iso88591': 'es_CL.ISO8859-1',
- 'es_co': 'es_CO.ISO8859-1',
- 'es_co.iso88591': 'es_CO.ISO8859-1',
- 'es_cr': 'es_CR.ISO8859-1',
- 'es_cr.iso88591': 'es_CR.ISO8859-1',
- 'es_do': 'es_DO.ISO8859-1',
- 'es_do.iso88591': 'es_DO.ISO8859-1',
- 'es_ec': 'es_EC.ISO8859-1',
- 'es_ec.iso88591': 'es_EC.ISO8859-1',
- 'es_es': 'es_ES.ISO8859-1',
- 'es_es.88591': 'es_ES.ISO8859-1',
- 'es_es.iso88591': 'es_ES.ISO8859-1',
- 'es_es.iso885915': 'es_ES.ISO8859-15',
- 'es_es@euro': 'es_ES.ISO8859-15',
- 'es_gt': 'es_GT.ISO8859-1',
- 'es_gt.iso88591': 'es_GT.ISO8859-1',
- 'es_hn': 'es_HN.ISO8859-1',
- 'es_hn.iso88591': 'es_HN.ISO8859-1',
- 'es_mx': 'es_MX.ISO8859-1',
- 'es_mx.iso88591': 'es_MX.ISO8859-1',
- 'es_ni': 'es_NI.ISO8859-1',
- 'es_ni.iso88591': 'es_NI.ISO8859-1',
- 'es_pa': 'es_PA.ISO8859-1',
- 'es_pa.iso88591': 'es_PA.ISO8859-1',
- 'es_pa.iso885915': 'es_PA.ISO8859-15',
- 'es_pa@euro': 'es_PA.ISO8859-15',
- 'es_pe': 'es_PE.ISO8859-1',
- 'es_pe.iso88591': 'es_PE.ISO8859-1',
- 'es_pe.iso885915': 'es_PE.ISO8859-15',
- 'es_pe@euro': 'es_PE.ISO8859-15',
- 'es_pr': 'es_PR.ISO8859-1',
- 'es_pr.iso88591': 'es_PR.ISO8859-1',
- 'es_py': 'es_PY.ISO8859-1',
- 'es_py.iso88591': 'es_PY.ISO8859-1',
- 'es_py.iso885915': 'es_PY.ISO8859-15',
- 'es_py@euro': 'es_PY.ISO8859-15',
- 'es_sv': 'es_SV.ISO8859-1',
- 'es_sv.iso88591': 'es_SV.ISO8859-1',
- 'es_sv.iso885915': 'es_SV.ISO8859-15',
- 'es_sv@euro': 'es_SV.ISO8859-15',
- 'es_us': 'es_US.ISO8859-1',
- 'es_uy': 'es_UY.ISO8859-1',
- 'es_uy.iso88591': 'es_UY.ISO8859-1',
- 'es_uy.iso885915': 'es_UY.ISO8859-15',
- 'es_uy@euro': 'es_UY.ISO8859-15',
- 'es_ve': 'es_VE.ISO8859-1',
- 'es_ve.iso88591': 'es_VE.ISO8859-1',
- 'es_ve.iso885915': 'es_VE.ISO8859-15',
- 'es_ve@euro': 'es_VE.ISO8859-15',
- 'estonian': 'et_EE.ISO8859-1',
- 'et': 'et_EE.ISO8859-15',
- 'et_ee': 'et_EE.ISO8859-15',
- 'et_ee.iso88591': 'et_EE.ISO8859-1',
- 'et_ee.iso885913': 'et_EE.ISO8859-13',
- 'et_ee.iso885915': 'et_EE.ISO8859-15',
- 'et_ee.iso88594': 'et_EE.ISO8859-4',
- 'et_ee@euro': 'et_EE.ISO8859-15',
- 'eu': 'eu_ES.ISO8859-1',
- 'eu_es': 'eu_ES.ISO8859-1',
- 'eu_es.iso88591': 'eu_ES.ISO8859-1',
- 'eu_es.iso885915': 'eu_ES.ISO8859-15',
- 'eu_es@euro': 'eu_ES.ISO8859-15',
- 'fa': 'fa_IR.UTF-8',
- 'fa_ir': 'fa_IR.UTF-8',
- 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
- 'fi': 'fi_FI.ISO8859-15',
- 'fi_fi': 'fi_FI.ISO8859-15',
- 'fi_fi.88591': 'fi_FI.ISO8859-1',
- 'fi_fi.iso88591': 'fi_FI.ISO8859-1',
- 'fi_fi.iso885915': 'fi_FI.ISO8859-15',
- 'fi_fi.utf8@euro': 'fi_FI.UTF-8',
- 'fi_fi@euro': 'fi_FI.ISO8859-15',
- 'finnish': 'fi_FI.ISO8859-1',
- 'finnish.iso88591': 'fi_FI.ISO8859-1',
- 'fo': 'fo_FO.ISO8859-1',
- 'fo_fo': 'fo_FO.ISO8859-1',
- 'fo_fo.iso88591': 'fo_FO.ISO8859-1',
- 'fo_fo.iso885915': 'fo_FO.ISO8859-15',
- 'fo_fo@euro': 'fo_FO.ISO8859-15',
- 'fr': 'fr_FR.ISO8859-1',
- 'fr_be': 'fr_BE.ISO8859-1',
- 'fr_be.88591': 'fr_BE.ISO8859-1',
- 'fr_be.iso88591': 'fr_BE.ISO8859-1',
- 'fr_be.iso885915': 'fr_BE.ISO8859-15',
- 'fr_be@euro': 'fr_BE.ISO8859-15',
- 'fr_ca': 'fr_CA.ISO8859-1',
- 'fr_ca.88591': 'fr_CA.ISO8859-1',
- 'fr_ca.iso88591': 'fr_CA.ISO8859-1',
- 'fr_ca.iso885915': 'fr_CA.ISO8859-15',
- 'fr_ca@euro': 'fr_CA.ISO8859-15',
- 'fr_ch': 'fr_CH.ISO8859-1',
- 'fr_ch.88591': 'fr_CH.ISO8859-1',
- 'fr_ch.iso88591': 'fr_CH.ISO8859-1',
- 'fr_ch.iso885915': 'fr_CH.ISO8859-15',
- 'fr_ch@euro': 'fr_CH.ISO8859-15',
- 'fr_fr': 'fr_FR.ISO8859-1',
- 'fr_fr.88591': 'fr_FR.ISO8859-1',
- 'fr_fr.iso88591': 'fr_FR.ISO8859-1',
- 'fr_fr.iso885915': 'fr_FR.ISO8859-15',
- 'fr_fr@euro': 'fr_FR.ISO8859-15',
- 'fr_lu': 'fr_LU.ISO8859-1',
- 'fr_lu.88591': 'fr_LU.ISO8859-1',
- 'fr_lu.iso88591': 'fr_LU.ISO8859-1',
- 'fr_lu.iso885915': 'fr_LU.ISO8859-15',
- 'fr_lu@euro': 'fr_LU.ISO8859-15',
- 'fran\xe7ais': 'fr_FR.ISO8859-1',
- 'fre_fr': 'fr_FR.ISO8859-1',
- 'fre_fr.8859': 'fr_FR.ISO8859-1',
- 'french': 'fr_FR.ISO8859-1',
- 'french.iso88591': 'fr_CH.ISO8859-1',
- 'french_france': 'fr_FR.ISO8859-1',
- 'french_france.8859': 'fr_FR.ISO8859-1',
- 'ga': 'ga_IE.ISO8859-1',
- 'ga_ie': 'ga_IE.ISO8859-1',
- 'ga_ie.iso88591': 'ga_IE.ISO8859-1',
- 'ga_ie.iso885914': 'ga_IE.ISO8859-14',
- 'ga_ie.iso885915': 'ga_IE.ISO8859-15',
- 'ga_ie@euro': 'ga_IE.ISO8859-15',
- 'galego': 'gl_ES.ISO8859-1',
- 'galician': 'gl_ES.ISO8859-1',
- 'gd': 'gd_GB.ISO8859-1',
- 'gd_gb': 'gd_GB.ISO8859-1',
- 'gd_gb.iso88591': 'gd_GB.ISO8859-1',
- 'gd_gb.iso885914': 'gd_GB.ISO8859-14',
- 'gd_gb.iso885915': 'gd_GB.ISO8859-15',
- 'gd_gb@euro': 'gd_GB.ISO8859-15',
- 'ger_de': 'de_DE.ISO8859-1',
- 'ger_de.8859': 'de_DE.ISO8859-1',
- 'german': 'de_DE.ISO8859-1',
- 'german.iso88591': 'de_CH.ISO8859-1',
- 'german_germany': 'de_DE.ISO8859-1',
- 'german_germany.8859': 'de_DE.ISO8859-1',
- 'gl': 'gl_ES.ISO8859-1',
- 'gl_es': 'gl_ES.ISO8859-1',
- 'gl_es.iso88591': 'gl_ES.ISO8859-1',
- 'gl_es.iso885915': 'gl_ES.ISO8859-15',
- 'gl_es@euro': 'gl_ES.ISO8859-15',
- 'greek': 'el_GR.ISO8859-7',
- 'greek.iso88597': 'el_GR.ISO8859-7',
- 'gv': 'gv_GB.ISO8859-1',
- 'gv_gb': 'gv_GB.ISO8859-1',
- 'gv_gb.iso88591': 'gv_GB.ISO8859-1',
- 'gv_gb.iso885914': 'gv_GB.ISO8859-14',
- 'gv_gb.iso885915': 'gv_GB.ISO8859-15',
- 'gv_gb@euro': 'gv_GB.ISO8859-15',
- 'he': 'he_IL.ISO8859-8',
- 'he_il': 'he_IL.ISO8859-8',
- 'he_il.cp1255': 'he_IL.CP1255',
- 'he_il.iso88598': 'he_IL.ISO8859-8',
- 'he_il.microsoftcp1255': 'he_IL.CP1255',
- 'hebrew': 'iw_IL.ISO8859-8',
- 'hebrew.iso88598': 'iw_IL.ISO8859-8',
- 'hi': 'hi_IN.ISCII-DEV',
- 'hi_in': 'hi_IN.ISCII-DEV',
- 'hi_in.isciidev': 'hi_IN.ISCII-DEV',
- 'hr': 'hr_HR.ISO8859-2',
- 'hr_hr': 'hr_HR.ISO8859-2',
- 'hr_hr.iso88592': 'hr_HR.ISO8859-2',
- 'hrvatski': 'hr_HR.ISO8859-2',
- 'hu': 'hu_HU.ISO8859-2',
- 'hu_hu': 'hu_HU.ISO8859-2',
- 'hu_hu.iso88592': 'hu_HU.ISO8859-2',
- 'hungarian': 'hu_HU.ISO8859-2',
- 'icelandic': 'is_IS.ISO8859-1',
- 'icelandic.iso88591': 'is_IS.ISO8859-1',
- 'id': 'id_ID.ISO8859-1',
- 'id_id': 'id_ID.ISO8859-1',
- 'in': 'id_ID.ISO8859-1',
- 'in_id': 'id_ID.ISO8859-1',
- 'is': 'is_IS.ISO8859-1',
- 'is_is': 'is_IS.ISO8859-1',
- 'is_is.iso88591': 'is_IS.ISO8859-1',
- 'is_is.iso885915': 'is_IS.ISO8859-15',
- 'is_is@euro': 'is_IS.ISO8859-15',
- 'iso-8859-1': 'en_US.ISO8859-1',
- 'iso-8859-15': 'en_US.ISO8859-15',
- 'iso8859-1': 'en_US.ISO8859-1',
- 'iso8859-15': 'en_US.ISO8859-15',
- 'iso_8859_1': 'en_US.ISO8859-1',
- 'iso_8859_15': 'en_US.ISO8859-15',
- 'it': 'it_IT.ISO8859-1',
- 'it_ch': 'it_CH.ISO8859-1',
- 'it_ch.iso88591': 'it_CH.ISO8859-1',
- 'it_ch.iso885915': 'it_CH.ISO8859-15',
- 'it_ch@euro': 'it_CH.ISO8859-15',
- 'it_it': 'it_IT.ISO8859-1',
- 'it_it.88591': 'it_IT.ISO8859-1',
- 'it_it.iso88591': 'it_IT.ISO8859-1',
- 'it_it.iso885915': 'it_IT.ISO8859-15',
- 'it_it@euro': 'it_IT.ISO8859-15',
- 'italian': 'it_IT.ISO8859-1',
- 'italian.iso88591': 'it_IT.ISO8859-1',
- 'iu': 'iu_CA.NUNACOM-8',
- 'iu_ca': 'iu_CA.NUNACOM-8',
- 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
- 'iw': 'he_IL.ISO8859-8',
- 'iw_il': 'he_IL.ISO8859-8',
- 'iw_il.iso88598': 'he_IL.ISO8859-8',
- 'ja': 'ja_JP.eucJP',
- 'ja.jis': 'ja_JP.JIS7',
- 'ja.sjis': 'ja_JP.SJIS',
- 'ja_jp': 'ja_JP.eucJP',
- 'ja_jp.ajec': 'ja_JP.eucJP',
- 'ja_jp.euc': 'ja_JP.eucJP',
- 'ja_jp.eucjp': 'ja_JP.eucJP',
- 'ja_jp.iso-2022-jp': 'ja_JP.JIS7',
- 'ja_jp.iso2022jp': 'ja_JP.JIS7',
- 'ja_jp.jis': 'ja_JP.JIS7',
- 'ja_jp.jis7': 'ja_JP.JIS7',
- 'ja_jp.mscode': 'ja_JP.SJIS',
- 'ja_jp.sjis': 'ja_JP.SJIS',
- 'ja_jp.ujis': 'ja_JP.eucJP',
- 'japan': 'ja_JP.eucJP',
- 'japanese': 'ja_JP.eucJP',
- 'japanese-euc': 'ja_JP.eucJP',
- 'japanese.euc': 'ja_JP.eucJP',
- 'japanese.sjis': 'ja_JP.SJIS',
- 'jp_jp': 'ja_JP.eucJP',
- 'ka': 'ka_GE.GEORGIAN-ACADEMY',
- 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
- 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
- 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
- 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
- 'kl': 'kl_GL.ISO8859-1',
- 'kl_gl': 'kl_GL.ISO8859-1',
- 'kl_gl.iso88591': 'kl_GL.ISO8859-1',
- 'kl_gl.iso885915': 'kl_GL.ISO8859-15',
- 'kl_gl@euro': 'kl_GL.ISO8859-15',
- 'ko': 'ko_KR.eucKR',
- 'ko_kr': 'ko_KR.eucKR',
- 'ko_kr.euc': 'ko_KR.eucKR',
- 'ko_kr.euckr': 'ko_KR.eucKR',
- 'korean': 'ko_KR.eucKR',
- 'korean.euc': 'ko_KR.eucKR',
- 'kw': 'kw_GB.ISO8859-1',
- 'kw_gb': 'kw_GB.ISO8859-1',
- 'kw_gb.iso88591': 'kw_GB.ISO8859-1',
- 'kw_gb.iso885914': 'kw_GB.ISO8859-14',
- 'kw_gb.iso885915': 'kw_GB.ISO8859-15',
- 'kw_gb@euro': 'kw_GB.ISO8859-15',
- 'lithuanian': 'lt_LT.ISO8859-13',
- 'lo': 'lo_LA.MULELAO-1',
- 'lo_la': 'lo_LA.MULELAO-1',
- 'lo_la.cp1133': 'lo_LA.IBM-CP1133',
- 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
- 'lo_la.mulelao1': 'lo_LA.MULELAO-1',
- 'lt': 'lt_LT.ISO8859-13',
- 'lt_lt': 'lt_LT.ISO8859-13',
- 'lt_lt.iso885913': 'lt_LT.ISO8859-13',
- 'lt_lt.iso88594': 'lt_LT.ISO8859-4',
- 'lv': 'lv_LV.ISO8859-13',
- 'lv_lv': 'lv_LV.ISO8859-13',
- 'lv_lv.iso885913': 'lv_LV.ISO8859-13',
- 'lv_lv.iso88594': 'lv_LV.ISO8859-4',
- 'mi': 'mi_NZ.ISO8859-1',
- 'mi_nz': 'mi_NZ.ISO8859-1',
- 'mi_nz.iso88591': 'mi_NZ.ISO8859-1',
- 'mk': 'mk_MK.ISO8859-5',
- 'mk_mk': 'mk_MK.ISO8859-5',
- 'mk_mk.cp1251': 'mk_MK.CP1251',
- 'mk_mk.iso88595': 'mk_MK.ISO8859-5',
- 'mk_mk.microsoftcp1251': 'mk_MK.CP1251',
- 'ms': 'ms_MY.ISO8859-1',
- 'ms_my': 'ms_MY.ISO8859-1',
- 'ms_my.iso88591': 'ms_MY.ISO8859-1',
- 'mt': 'mt_MT.ISO8859-3',
- 'mt_mt': 'mt_MT.ISO8859-3',
- 'mt_mt.iso88593': 'mt_MT.ISO8859-3',
- 'nb': 'nb_NO.ISO8859-1',
- 'nb_no': 'nb_NO.ISO8859-1',
- 'nb_no.88591': 'nb_NO.ISO8859-1',
- 'nb_no.iso88591': 'nb_NO.ISO8859-1',
- 'nb_no.iso885915': 'nb_NO.ISO8859-15',
- 'nb_no@euro': 'nb_NO.ISO8859-15',
- 'nl': 'nl_NL.ISO8859-1',
- 'nl_be': 'nl_BE.ISO8859-1',
- 'nl_be.88591': 'nl_BE.ISO8859-1',
- 'nl_be.iso88591': 'nl_BE.ISO8859-1',
- 'nl_be.iso885915': 'nl_BE.ISO8859-15',
- 'nl_be@euro': 'nl_BE.ISO8859-15',
- 'nl_nl': 'nl_NL.ISO8859-1',
- 'nl_nl.88591': 'nl_NL.ISO8859-1',
- 'nl_nl.iso88591': 'nl_NL.ISO8859-1',
- 'nl_nl.iso885915': 'nl_NL.ISO8859-15',
- 'nl_nl@euro': 'nl_NL.ISO8859-15',
- 'nn': 'nn_NO.ISO8859-1',
- 'nn_no': 'nn_NO.ISO8859-1',
- 'nn_no.88591': 'nn_NO.ISO8859-1',
- 'nn_no.iso88591': 'nn_NO.ISO8859-1',
- 'nn_no.iso885915': 'nn_NO.ISO8859-15',
- 'nn_no@euro': 'nn_NO.ISO8859-15',
- 'no': 'no_NO.ISO8859-1',
- 'no@nynorsk': 'ny_NO.ISO8859-1',
- 'no_no': 'no_NO.ISO8859-1',
- 'no_no.88591': 'no_NO.ISO8859-1',
- 'no_no.iso88591': 'no_NO.ISO8859-1',
- 'no_no.iso885915': 'no_NO.ISO8859-15',
- 'no_no@euro': 'no_NO.ISO8859-15',
- 'norwegian': 'no_NO.ISO8859-1',
- 'norwegian.iso88591': 'no_NO.ISO8859-1',
- 'ny': 'ny_NO.ISO8859-1',
- 'ny_no': 'ny_NO.ISO8859-1',
- 'ny_no.88591': 'ny_NO.ISO8859-1',
- 'ny_no.iso88591': 'ny_NO.ISO8859-1',
- 'ny_no.iso885915': 'ny_NO.ISO8859-15',
- 'ny_no@euro': 'ny_NO.ISO8859-15',
- 'nynorsk': 'nn_NO.ISO8859-1',
- 'oc': 'oc_FR.ISO8859-1',
- 'oc_fr': 'oc_FR.ISO8859-1',
- 'oc_fr.iso88591': 'oc_FR.ISO8859-1',
- 'oc_fr.iso885915': 'oc_FR.ISO8859-15',
- 'oc_fr@euro': 'oc_FR.ISO8859-15',
- 'pd': 'pd_US.ISO8859-1',
- 'pd_de': 'pd_DE.ISO8859-1',
- 'pd_de.iso88591': 'pd_DE.ISO8859-1',
- 'pd_de.iso885915': 'pd_DE.ISO8859-15',
- 'pd_de@euro': 'pd_DE.ISO8859-15',
- 'pd_us': 'pd_US.ISO8859-1',
- 'pd_us.iso88591': 'pd_US.ISO8859-1',
- 'pd_us.iso885915': 'pd_US.ISO8859-15',
- 'pd_us@euro': 'pd_US.ISO8859-15',
- 'ph': 'ph_PH.ISO8859-1',
- 'ph_ph': 'ph_PH.ISO8859-1',
- 'ph_ph.iso88591': 'ph_PH.ISO8859-1',
- 'pl': 'pl_PL.ISO8859-2',
- 'pl_pl': 'pl_PL.ISO8859-2',
- 'pl_pl.iso88592': 'pl_PL.ISO8859-2',
- 'polish': 'pl_PL.ISO8859-2',
- 'portuguese': 'pt_PT.ISO8859-1',
- 'portuguese.iso88591': 'pt_PT.ISO8859-1',
- 'portuguese_brazil': 'pt_BR.ISO8859-1',
- 'portuguese_brazil.8859': 'pt_BR.ISO8859-1',
- 'posix': 'C',
- 'posix-utf2': 'C',
- 'pp': 'pp_AN.ISO8859-1',
- 'pp_an': 'pp_AN.ISO8859-1',
- 'pp_an.iso88591': 'pp_AN.ISO8859-1',
- 'pt': 'pt_PT.ISO8859-1',
- 'pt_br': 'pt_BR.ISO8859-1',
- 'pt_br.88591': 'pt_BR.ISO8859-1',
- 'pt_br.iso88591': 'pt_BR.ISO8859-1',
- 'pt_br.iso885915': 'pt_BR.ISO8859-15',
- 'pt_br@euro': 'pt_BR.ISO8859-15',
- 'pt_pt': 'pt_PT.ISO8859-1',
- 'pt_pt.88591': 'pt_PT.ISO8859-1',
- 'pt_pt.iso88591': 'pt_PT.ISO8859-1',
- 'pt_pt.iso885915': 'pt_PT.ISO8859-15',
- 'pt_pt.utf8@euro': 'pt_PT.UTF-8',
- 'pt_pt@euro': 'pt_PT.ISO8859-15',
- 'ro': 'ro_RO.ISO8859-2',
- 'ro_ro': 'ro_RO.ISO8859-2',
- 'ro_ro.iso88592': 'ro_RO.ISO8859-2',
- 'romanian': 'ro_RO.ISO8859-2',
- 'ru': 'ru_RU.ISO8859-5',
- 'ru_ru': 'ru_RU.ISO8859-5',
- 'ru_ru.cp1251': 'ru_RU.CP1251',
- 'ru_ru.iso88595': 'ru_RU.ISO8859-5',
- 'ru_ru.koi8r': 'ru_RU.KOI8-R',
- 'ru_ru.microsoftcp1251': 'ru_RU.CP1251',
- 'ru_ua': 'ru_UA.KOI8-U',
- 'ru_ua.cp1251': 'ru_UA.CP1251',
- 'ru_ua.koi8u': 'ru_UA.KOI8-U',
- 'ru_ua.microsoftcp1251': 'ru_UA.CP1251',
- 'rumanian': 'ro_RO.ISO8859-2',
- 'russian': 'ru_RU.ISO8859-5',
- 'se_no': 'se_NO.UTF-8',
- 'serbocroatian': 'sh_YU.ISO8859-2',
- 'sh': 'sh_YU.ISO8859-2',
- 'sh_hr': 'sh_HR.ISO8859-2',
- 'sh_hr.iso88592': 'sh_HR.ISO8859-2',
- 'sh_sp': 'sh_YU.ISO8859-2',
- 'sh_yu': 'sh_YU.ISO8859-2',
- 'sk': 'sk_SK.ISO8859-2',
- 'sk_sk': 'sk_SK.ISO8859-2',
- 'sk_sk.iso88592': 'sk_SK.ISO8859-2',
- 'sl': 'sl_SI.ISO8859-2',
- 'sl_cs': 'sl_CS.ISO8859-2',
- 'sl_si': 'sl_SI.ISO8859-2',
- 'sl_si.iso88592': 'sl_SI.ISO8859-2',
- 'slovak': 'sk_SK.ISO8859-2',
- 'slovene': 'sl_SI.ISO8859-2',
- 'slovenian': 'sl_SI.ISO8859-2',
- 'sp': 'sp_YU.ISO8859-5',
- 'sp_yu': 'sp_YU.ISO8859-5',
- 'spanish': 'es_ES.ISO8859-1',
- 'spanish.iso88591': 'es_ES.ISO8859-1',
- 'spanish_spain': 'es_ES.ISO8859-1',
- 'spanish_spain.8859': 'es_ES.ISO8859-1',
- 'sq': 'sq_AL.ISO8859-2',
- 'sq_al': 'sq_AL.ISO8859-2',
- 'sq_al.iso88592': 'sq_AL.ISO8859-2',
- 'sr': 'sr_YU.ISO8859-5',
- 'sr@cyrillic': 'sr_YU.ISO8859-5',
- 'sr_sp': 'sr_SP.ISO8859-2',
- 'sr_yu': 'sr_YU.ISO8859-5',
- 'sr_yu.cp1251@cyrillic': 'sr_YU.CP1251',
- 'sr_yu.iso88592': 'sr_YU.ISO8859-2',
- 'sr_yu.iso88595': 'sr_YU.ISO8859-5',
- 'sr_yu.iso88595@cyrillic': 'sr_YU.ISO8859-5',
- 'sr_yu.microsoftcp1251@cyrillic': 'sr_YU.CP1251',
- 'sr_yu.utf8@cyrillic': 'sr_YU.UTF-8',
- 'sr_yu@cyrillic': 'sr_YU.ISO8859-5',
- 'sv': 'sv_SE.ISO8859-1',
- 'sv_fi': 'sv_FI.ISO8859-1',
- 'sv_fi.iso88591': 'sv_FI.ISO8859-1',
- 'sv_fi.iso885915': 'sv_FI.ISO8859-15',
- 'sv_fi@euro': 'sv_FI.ISO8859-15',
- 'sv_se': 'sv_SE.ISO8859-1',
- 'sv_se.88591': 'sv_SE.ISO8859-1',
- 'sv_se.iso88591': 'sv_SE.ISO8859-1',
- 'sv_se.iso885915': 'sv_SE.ISO8859-15',
- 'sv_se@euro': 'sv_SE.ISO8859-15',
- 'swedish': 'sv_SE.ISO8859-1',
- 'swedish.iso88591': 'sv_SE.ISO8859-1',
- 'ta': 'ta_IN.TSCII-0',
- 'ta_in': 'ta_IN.TSCII-0',
- 'ta_in.tscii': 'ta_IN.TSCII-0',
- 'ta_in.tscii0': 'ta_IN.TSCII-0',
- 'tg': 'tg_TJ.KOI8-C',
- 'tg_tj': 'tg_TJ.KOI8-C',
- 'tg_tj.koi8c': 'tg_TJ.KOI8-C',
- 'th': 'th_TH.ISO8859-11',
- 'th_th': 'th_TH.ISO8859-11',
- 'th_th.iso885911': 'th_TH.ISO8859-11',
- 'th_th.tactis': 'th_TH.TIS620',
- 'th_th.tis620': 'th_TH.TIS620',
- 'thai': 'th_TH.ISO8859-11',
- 'tl': 'tl_PH.ISO8859-1',
- 'tl_ph': 'tl_PH.ISO8859-1',
- 'tl_ph.iso88591': 'tl_PH.ISO8859-1',
- 'tr': 'tr_TR.ISO8859-9',
- 'tr_tr': 'tr_TR.ISO8859-9',
- 'tr_tr.iso88599': 'tr_TR.ISO8859-9',
- 'tt': 'tt_RU.TATAR-CYR',
- 'tt_ru': 'tt_RU.TATAR-CYR',
- 'tt_ru.koi8c': 'tt_RU.KOI8-C',
- 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
- 'turkish': 'tr_TR.ISO8859-9',
- 'turkish.iso88599': 'tr_TR.ISO8859-9',
- 'uk': 'uk_UA.KOI8-U',
- 'uk_ua': 'uk_UA.KOI8-U',
- 'uk_ua.cp1251': 'uk_UA.CP1251',
- 'uk_ua.iso88595': 'uk_UA.ISO8859-5',
- 'uk_ua.koi8u': 'uk_UA.KOI8-U',
- 'uk_ua.microsoftcp1251': 'uk_UA.CP1251',
- 'univ': 'en_US.utf',
- 'universal': 'en_US.utf',
- 'universal.utf8@ucs4': 'en_US.UTF-8',
- 'ur': 'ur_PK.CP1256',
- 'ur_pk': 'ur_PK.CP1256',
- 'ur_pk.cp1256': 'ur_PK.CP1256',
- 'ur_pk.microsoftcp1256': 'ur_PK.CP1256',
- 'uz': 'uz_UZ.UTF-8',
- 'uz_uz': 'uz_UZ.UTF-8',
- 'vi': 'vi_VN.TCVN',
- 'vi_vn': 'vi_VN.TCVN',
- 'vi_vn.tcvn': 'vi_VN.TCVN',
- 'vi_vn.tcvn5712': 'vi_VN.TCVN',
- 'vi_vn.viscii': 'vi_VN.VISCII',
- 'vi_vn.viscii111': 'vi_VN.VISCII',
- 'wa': 'wa_BE.ISO8859-1',
- 'wa_be': 'wa_BE.ISO8859-1',
- 'wa_be.iso88591': 'wa_BE.ISO8859-1',
- 'wa_be.iso885915': 'wa_BE.ISO8859-15',
- 'wa_be@euro': 'wa_BE.ISO8859-15',
- 'yi': 'yi_US.CP1255',
- 'yi_us': 'yi_US.CP1255',
- 'yi_us.cp1255': 'yi_US.CP1255',
- 'yi_us.microsoftcp1255': 'yi_US.CP1255',
- 'zh': 'zh_CN.eucCN',
- 'zh_cn': 'zh_CN.gb2312',
- 'zh_cn.big5': 'zh_TW.big5',
- 'zh_cn.euc': 'zh_CN.eucCN',
- 'zh_cn.gb18030': 'zh_CN.gb18030',
- 'zh_cn.gb2312': 'zh_CN.gb2312',
- 'zh_cn.gbk': 'zh_CN.gbk',
- 'zh_hk': 'zh_HK.big5hkscs',
- 'zh_hk.big5': 'zh_HK.big5',
- 'zh_hk.big5hkscs': 'zh_HK.big5hkscs',
- 'zh_tw': 'zh_TW.big5',
- 'zh_tw.big5': 'zh_TW.big5',
- 'zh_tw.euc': 'zh_TW.eucTW',
-}
-
-#
-# This maps Windows language identifiers to locale strings.
-#
-# This list has been updated from
-# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
-# to include every locale up to Windows XP.
-#
-# NOTE: this mapping is incomplete. If your language is missing, please
-# submit a bug report to Python bug manager, which you can find via:
-# http://www.python.org/dev/
-# Make sure you include the missing language identifier and the suggested
-# locale code.
-#
-
-windows_locale = {
- 0x0436: "af_ZA", # Afrikaans
- 0x041c: "sq_AL", # Albanian
- 0x0401: "ar_SA", # Arabic - Saudi Arabia
- 0x0801: "ar_IQ", # Arabic - Iraq
- 0x0c01: "ar_EG", # Arabic - Egypt
- 0x1001: "ar_LY", # Arabic - Libya
- 0x1401: "ar_DZ", # Arabic - Algeria
- 0x1801: "ar_MA", # Arabic - Morocco
- 0x1c01: "ar_TN", # Arabic - Tunisia
- 0x2001: "ar_OM", # Arabic - Oman
- 0x2401: "ar_YE", # Arabic - Yemen
- 0x2801: "ar_SY", # Arabic - Syria
- 0x2c01: "ar_JO", # Arabic - Jordan
- 0x3001: "ar_LB", # Arabic - Lebanon
- 0x3401: "ar_KW", # Arabic - Kuwait
- 0x3801: "ar_AE", # Arabic - United Arab Emirates
- 0x3c01: "ar_BH", # Arabic - Bahrain
- 0x4001: "ar_QA", # Arabic - Qatar
- 0x042b: "hy_AM", # Armenian
- 0x042c: "az_AZ", # Azeri Latin
- 0x082c: "az_AZ", # Azeri - Cyrillic
- 0x042d: "eu_ES", # Basque
- 0x0423: "be_BY", # Belarusian
- 0x0445: "bn_IN", # Begali
- 0x201a: "bs_BA", # Bosnian
- 0x141a: "bs_BA", # Bosnian - Cyrillic
- 0x047e: "br_FR", # Breton - France
- 0x0402: "bg_BG", # Bulgarian
- 0x0403: "ca_ES", # Catalan
- 0x0004: "zh_CHS",# Chinese - Simplified
- 0x0404: "zh_TW", # Chinese - Taiwan
- 0x0804: "zh_CN", # Chinese - PRC
- 0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
- 0x1004: "zh_SG", # Chinese - Singapore
- 0x1404: "zh_MO", # Chinese - Macao S.A.R.
- 0x7c04: "zh_CHT",# Chinese - Traditional
- 0x041a: "hr_HR", # Croatian
- 0x101a: "hr_BA", # Croatian - Bosnia
- 0x0405: "cs_CZ", # Czech
- 0x0406: "da_DK", # Danish
- 0x048c: "gbz_AF",# Dari - Afghanistan
- 0x0465: "div_MV",# Divehi - Maldives
- 0x0413: "nl_NL", # Dutch - The Netherlands
- 0x0813: "nl_BE", # Dutch - Belgium
- 0x0409: "en_US", # English - United States
- 0x0809: "en_GB", # English - United Kingdom
- 0x0c09: "en_AU", # English - Australia
- 0x1009: "en_CA", # English - Canada
- 0x1409: "en_NZ", # English - New Zealand
- 0x1809: "en_IE", # English - Ireland
- 0x1c09: "en_ZA", # English - South Africa
- 0x2009: "en_JA", # English - Jamaica
- 0x2409: "en_CB", # English - Carribbean
- 0x2809: "en_BZ", # English - Belize
- 0x2c09: "en_TT", # English - Trinidad
- 0x3009: "en_ZW", # English - Zimbabwe
- 0x3409: "en_PH", # English - Phillippines
- 0x0425: "et_EE", # Estonian
- 0x0438: "fo_FO", # Faroese
- 0x0464: "fil_PH",# Filipino
- 0x040b: "fi_FI", # Finnish
- 0x040c: "fr_FR", # French - France
- 0x080c: "fr_BE", # French - Belgium
- 0x0c0c: "fr_CA", # French - Canada
- 0x100c: "fr_CH", # French - Switzerland
- 0x140c: "fr_LU", # French - Luxembourg
- 0x180c: "fr_MC", # French - Monaco
- 0x0462: "fy_NL", # Frisian - Netherlands
- 0x0456: "gl_ES", # Galician
- 0x0437: "ka_GE", # Georgian
- 0x0407: "de_DE", # German - Germany
- 0x0807: "de_CH", # German - Switzerland
- 0x0c07: "de_AT", # German - Austria
- 0x1007: "de_LU", # German - Luxembourg
- 0x1407: "de_LI", # German - Liechtenstein
- 0x0408: "el_GR", # Greek
- 0x0447: "gu_IN", # Gujarati
- 0x040d: "he_IL", # Hebrew
- 0x0439: "hi_IN", # Hindi
- 0x040e: "hu_HU", # Hungarian
- 0x040f: "is_IS", # Icelandic
- 0x0421: "id_ID", # Indonesian
- 0x045d: "iu_CA", # Inuktitut
- 0x085d: "iu_CA", # Inuktitut - Latin
- 0x083c: "ga_IE", # Irish - Ireland
- 0x0434: "xh_ZA", # Xhosa - South Africa
- 0x0435: "zu_ZA", # Zulu
- 0x0410: "it_IT", # Italian - Italy
- 0x0810: "it_CH", # Italian - Switzerland
- 0x0411: "ja_JP", # Japanese
- 0x044b: "kn_IN", # Kannada - India
- 0x043f: "kk_KZ", # Kazakh
- 0x0457: "kok_IN",# Konkani
- 0x0412: "ko_KR", # Korean
- 0x0440: "ky_KG", # Kyrgyz
- 0x0426: "lv_LV", # Latvian
- 0x0427: "lt_LT", # Lithuanian
- 0x046e: "lb_LU", # Luxembourgish
- 0x042f: "mk_MK", # FYRO Macedonian
- 0x043e: "ms_MY", # Malay - Malaysia
- 0x083e: "ms_BN", # Malay - Brunei
- 0x044c: "ml_IN", # Malayalam - India
- 0x043a: "mt_MT", # Maltese
- 0x0481: "mi_NZ", # Maori
- 0x047a: "arn_CL",# Mapudungun
- 0x044e: "mr_IN", # Marathi
- 0x047c: "moh_CA",# Mohawk - Canada
- 0x0450: "mn_MN", # Mongolian
- 0x0461: "ne_NP", # Nepali
- 0x0414: "nb_NO", # Norwegian - Bokmal
- 0x0814: "nn_NO", # Norwegian - Nynorsk
- 0x0482: "oc_FR", # Occitan - France
- 0x0448: "or_IN", # Oriya - India
- 0x0463: "ps_AF", # Pashto - Afghanistan
- 0x0429: "fa_IR", # Persian
- 0x0415: "pl_PL", # Polish
- 0x0416: "pt_BR", # Portuguese - Brazil
- 0x0816: "pt_PT", # Portuguese - Portugal
- 0x0446: "pa_IN", # Punjabi
- 0x046b: "quz_BO",# Quechua (Bolivia)
- 0x086b: "quz_EC",# Quechua (Ecuador)
- 0x0c6b: "quz_PE",# Quechua (Peru)
- 0x0418: "ro_RO", # Romanian - Romania
- 0x0417: "rm_CH", # Raeto-Romanese
- 0x0419: "ru_RU", # Russian
- 0x243b: "smn_FI",# Sami Finland
- 0x103b: "smj_NO",# Sami Norway
- 0x143b: "smj_SE",# Sami Sweden
- 0x043b: "se_NO", # Sami Northern Norway
- 0x083b: "se_SE", # Sami Northern Sweden
- 0x0c3b: "se_FI", # Sami Northern Finland
- 0x203b: "sms_FI",# Sami Skolt
- 0x183b: "sma_NO",# Sami Southern Norway
- 0x1c3b: "sma_SE",# Sami Southern Sweden
- 0x044f: "sa_IN", # Sanskrit
- 0x0c1a: "sr_SP", # Serbian - Cyrillic
- 0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
- 0x081a: "sr_SP", # Serbian - Latin
- 0x181a: "sr_BA", # Serbian - Bosnia Latin
- 0x046c: "ns_ZA", # Northern Sotho
- 0x0432: "tn_ZA", # Setswana - Southern Africa
- 0x041b: "sk_SK", # Slovak
- 0x0424: "sl_SI", # Slovenian
- 0x040a: "es_ES", # Spanish - Spain
- 0x080a: "es_MX", # Spanish - Mexico
- 0x0c0a: "es_ES", # Spanish - Spain (Modern)
- 0x100a: "es_GT", # Spanish - Guatemala
- 0x140a: "es_CR", # Spanish - Costa Rica
- 0x180a: "es_PA", # Spanish - Panama
- 0x1c0a: "es_DO", # Spanish - Dominican Republic
- 0x200a: "es_VE", # Spanish - Venezuela
- 0x240a: "es_CO", # Spanish - Colombia
- 0x280a: "es_PE", # Spanish - Peru
- 0x2c0a: "es_AR", # Spanish - Argentina
- 0x300a: "es_EC", # Spanish - Ecuador
- 0x340a: "es_CL", # Spanish - Chile
- 0x380a: "es_UR", # Spanish - Uruguay
- 0x3c0a: "es_PY", # Spanish - Paraguay
- 0x400a: "es_BO", # Spanish - Bolivia
- 0x440a: "es_SV", # Spanish - El Salvador
- 0x480a: "es_HN", # Spanish - Honduras
- 0x4c0a: "es_NI", # Spanish - Nicaragua
- 0x500a: "es_PR", # Spanish - Puerto Rico
- 0x0441: "sw_KE", # Swahili
- 0x041d: "sv_SE", # Swedish - Sweden
- 0x081d: "sv_FI", # Swedish - Finland
- 0x045a: "syr_SY",# Syriac
- 0x0449: "ta_IN", # Tamil
- 0x0444: "tt_RU", # Tatar
- 0x044a: "te_IN", # Telugu
- 0x041e: "th_TH", # Thai
- 0x041f: "tr_TR", # Turkish
- 0x0422: "uk_UA", # Ukrainian
- 0x0420: "ur_PK", # Urdu
- 0x0820: "ur_IN", # Urdu - India
- 0x0443: "uz_UZ", # Uzbek - Latin
- 0x0843: "uz_UZ", # Uzbek - Cyrillic
- 0x042a: "vi_VN", # Vietnamese
- 0x0452: "cy_GB", # Welsh
-}
-
-def _print_locale():
-
- """ Test function.
- """
- categories = {}
- def _init_categories(categories=categories):
- for k,v in globals().items():
- if k[:3] == 'LC_':
- categories[k] = v
- _init_categories()
- del categories['LC_ALL']
-
- print 'Locale defaults as determined by getdefaultlocale():'
- print '-'*72
- lang, enc = getdefaultlocale()
- print 'Language: ', lang or '(undefined)'
- print 'Encoding: ', enc or '(undefined)'
- print
-
- print 'Locale settings on startup:'
- print '-'*72
- for name,category in categories.items():
- print name, '...'
- lang, enc = getlocale(category)
- print ' Language: ', lang or '(undefined)'
- print ' Encoding: ', enc or '(undefined)'
- print
-
- print
- print 'Locale settings after calling resetlocale():'
- print '-'*72
- resetlocale()
- for name,category in categories.items():
- print name, '...'
- lang, enc = getlocale(category)
- print ' Language: ', lang or '(undefined)'
- print ' Encoding: ', enc or '(undefined)'
- print
-
- try:
- setlocale(LC_ALL, "")
- except:
- print 'NOTE:'
- print 'setlocale(LC_ALL, "") does not support the default locale'
- print 'given in the OS environment variables.'
- else:
- print
- print 'Locale settings after calling setlocale(LC_ALL, ""):'
- print '-'*72
- for name,category in categories.items():
- print name, '...'
- lang, enc = getlocale(category)
- print ' Language: ', lang or '(undefined)'
- print ' Encoding: ', enc or '(undefined)'
- print
-
-###
-
-try:
- LC_MESSAGES
-except NameError:
- pass
-else:
- __all__.append("LC_MESSAGES")
-
-if __name__=='__main__':
- print 'Locale aliasing:'
- print
- _print_locale()
- print
- print 'Number formatting:'
- print
- _test()
diff --git a/sys/lib/python/logging/__init__.py b/sys/lib/python/logging/__init__.py
deleted file mode 100644
index b1887dfe0..000000000
--- a/sys/lib/python/logging/__init__.py
+++ /dev/null
@@ -1,1372 +0,0 @@
-# Copyright 2001-2007 by Vinay Sajip. All Rights Reserved.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Vinay Sajip
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
-# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
-# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Logging package for Python. Based on PEP 282 and comments thereto in
-comp.lang.python, and influenced by Apache's log4j system.
-
-Should work under Python versions >= 1.5.2, except that source line
-information is not available unless 'sys._getframe()' is.
-
-Copyright (C) 2001-2007 Vinay Sajip. All Rights Reserved.
-
-To use, simply 'import logging' and log away!
-"""
-
-import sys, os, types, time, string, cStringIO, traceback
-
-try:
- import codecs
-except ImportError:
- codecs = None
-
-try:
- import thread
- import threading
-except ImportError:
- thread = None
-
-__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
-__status__ = "production"
-__version__ = "0.5.0.2"
-__date__ = "16 February 2007"
-
-#---------------------------------------------------------------------------
-# Miscellaneous module data
-#---------------------------------------------------------------------------
-
-#
-# _srcfile is used when walking the stack to check when we've got the first
-# caller stack frame.
-#
-if hasattr(sys, 'frozen'): #support for py2exe
- _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
-elif string.lower(__file__[-4:]) in ['.pyc', '.pyo']:
- _srcfile = __file__[:-4] + '.py'
-else:
- _srcfile = __file__
-_srcfile = os.path.normcase(_srcfile)
-
-# next bit filched from 1.5.2's inspect.py
-def currentframe():
- """Return the frame object for the caller's stack frame."""
- try:
- raise Exception
- except:
- return sys.exc_traceback.tb_frame.f_back
-
-if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
-# done filching
-
-# _srcfile is only used in conjunction with sys._getframe().
-# To provide compatibility with older versions of Python, set _srcfile
-# to None if _getframe() is not available; this value will prevent
-# findCaller() from being called.
-#if not hasattr(sys, "_getframe"):
-# _srcfile = None
-
-#
-#_startTime is used as the base when calculating the relative time of events
-#
-_startTime = time.time()
-
-#
-#raiseExceptions is used to see if exceptions during handling should be
-#propagated
-#
-raiseExceptions = 1
-
-#
-# If you don't want threading information in the log, set this to zero
-#
-logThreads = 1
-
-#
-# If you don't want process information in the log, set this to zero
-#
-logProcesses = 1
-
-#---------------------------------------------------------------------------
-# Level related stuff
-#---------------------------------------------------------------------------
-#
-# Default levels and level names, these can be replaced with any positive set
-# of values having corresponding names. There is a pseudo-level, NOTSET, which
-# is only really there as a lower limit for user-defined levels. Handlers and
-# loggers are initialized with NOTSET so that they will log all messages, even
-# at user-defined levels.
-#
-
-CRITICAL = 50
-FATAL = CRITICAL
-ERROR = 40
-WARNING = 30
-WARN = WARNING
-INFO = 20
-DEBUG = 10
-NOTSET = 0
-
-_levelNames = {
- CRITICAL : 'CRITICAL',
- ERROR : 'ERROR',
- WARNING : 'WARNING',
- INFO : 'INFO',
- DEBUG : 'DEBUG',
- NOTSET : 'NOTSET',
- 'CRITICAL' : CRITICAL,
- 'ERROR' : ERROR,
- 'WARN' : WARNING,
- 'WARNING' : WARNING,
- 'INFO' : INFO,
- 'DEBUG' : DEBUG,
- 'NOTSET' : NOTSET,
-}
-
-def getLevelName(level):
- """
- Return the textual representation of logging level 'level'.
-
- If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
- INFO, DEBUG) then you get the corresponding string. If you have
- associated levels with names using addLevelName then the name you have
- associated with 'level' is returned.
-
- If a numeric value corresponding to one of the defined levels is passed
- in, the corresponding string representation is returned.
-
- Otherwise, the string "Level %s" % level is returned.
- """
- return _levelNames.get(level, ("Level %s" % level))
-
-def addLevelName(level, levelName):
- """
- Associate 'levelName' with 'level'.
-
- This is used when converting levels to text during message formatting.
- """
- _acquireLock()
- try: #unlikely to cause an exception, but you never know...
- _levelNames[level] = levelName
- _levelNames[levelName] = level
- finally:
- _releaseLock()
-
-#---------------------------------------------------------------------------
-# Thread-related stuff
-#---------------------------------------------------------------------------
-
-#
-#_lock is used to serialize access to shared data structures in this module.
-#This needs to be an RLock because fileConfig() creates Handlers and so
-#might arbitrary user threads. Since Handler.__init__() updates the shared
-#dictionary _handlers, it needs to acquire the lock. But if configuring,
-#the lock would already have been acquired - so we need an RLock.
-#The same argument applies to Loggers and Manager.loggerDict.
-#
-_lock = None
-
-def _acquireLock():
- """
- Acquire the module-level lock for serializing access to shared data.
-
- This should be released with _releaseLock().
- """
- global _lock
- if (not _lock) and thread:
- _lock = threading.RLock()
- if _lock:
- _lock.acquire()
-
-def _releaseLock():
- """
- Release the module-level lock acquired by calling _acquireLock().
- """
- if _lock:
- _lock.release()
-
-#---------------------------------------------------------------------------
-# The logging record
-#---------------------------------------------------------------------------
-
-class LogRecord:
- """
- A LogRecord instance represents an event being logged.
-
- LogRecord instances are created every time something is logged. They
- contain all the information pertinent to the event being logged. The
- main information passed in is in msg and args, which are combined
- using str(msg) % args to create the message field of the record. The
- record also includes information such as when the record was created,
- the source line where the logging call was made, and any exception
- information to be logged.
- """
- def __init__(self, name, level, pathname, lineno,
- msg, args, exc_info, func=None):
- """
- Initialize a logging record with interesting information.
- """
- ct = time.time()
- self.name = name
- self.msg = msg
- #
- # The following statement allows passing of a dictionary as a sole
- # argument, so that you can do something like
- # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
- # Suggested by Stefan Behnel.
- # Note that without the test for args[0], we get a problem because
- # during formatting, we test to see if the arg is present using
- # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
- # and if the passed arg fails 'if self.args:' then no formatting
- # is done. For example, logger.warn('Value is %d', 0) would log
- # 'Value is %d' instead of 'Value is 0'.
- # For the use case of passing a dictionary, this should not be a
- # problem.
- if args and (len(args) == 1) and args[0] and (type(args[0]) == types.DictType):
- args = args[0]
- self.args = args
- self.levelname = getLevelName(level)
- self.levelno = level
- self.pathname = pathname
- try:
- self.filename = os.path.basename(pathname)
- self.module = os.path.splitext(self.filename)[0]
- except:
- self.filename = pathname
- self.module = "Unknown module"
- self.exc_info = exc_info
- self.exc_text = None # used to cache the traceback text
- self.lineno = lineno
- self.funcName = func
- self.created = ct
- self.msecs = (ct - long(ct)) * 1000
- self.relativeCreated = (self.created - _startTime) * 1000
- if logThreads and thread:
- self.thread = thread.get_ident()
- self.threadName = threading.currentThread().getName()
- else:
- self.thread = None
- self.threadName = None
- if logProcesses and hasattr(os, 'getpid'):
- self.process = os.getpid()
- else:
- self.process = None
-
- def __str__(self):
- return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
- self.pathname, self.lineno, self.msg)
-
- def getMessage(self):
- """
- Return the message for this LogRecord.
-
- Return the message for this LogRecord after merging any user-supplied
- arguments with the message.
- """
- if not hasattr(types, "UnicodeType"): #if no unicode support...
- msg = str(self.msg)
- else:
- msg = self.msg
- if type(msg) not in (types.UnicodeType, types.StringType):
- try:
- msg = str(self.msg)
- except UnicodeError:
- msg = self.msg #Defer encoding till later
- if self.args:
- msg = msg % self.args
- return msg
-
-def makeLogRecord(dict):
- """
- Make a LogRecord whose attributes are defined by the specified dictionary,
- This function is useful for converting a logging event received over
- a socket connection (which is sent as a dictionary) into a LogRecord
- instance.
- """
- rv = LogRecord(None, None, "", 0, "", (), None, None)
- rv.__dict__.update(dict)
- return rv
-
-#---------------------------------------------------------------------------
-# Formatter classes and functions
-#---------------------------------------------------------------------------
-
-class Formatter:
- """
- Formatter instances are used to convert a LogRecord to text.
-
- Formatters need to know how a LogRecord is constructed. They are
- responsible for converting a LogRecord to (usually) a string which can
- be interpreted by either a human or an external system. The base Formatter
- allows a formatting string to be specified. If none is supplied, the
- default value of "%s(message)\\n" is used.
-
- The Formatter can be initialized with a format string which makes use of
- knowledge of the LogRecord attributes - e.g. the default value mentioned
- above makes use of the fact that the user's message and arguments are pre-
- formatted into a LogRecord's message attribute. Currently, the useful
- attributes in a LogRecord are described by:
-
- %(name)s Name of the logger (logging channel)
- %(levelno)s Numeric logging level for the message (DEBUG, INFO,
- WARNING, ERROR, CRITICAL)
- %(levelname)s Text logging level for the message ("DEBUG", "INFO",
- "WARNING", "ERROR", "CRITICAL")
- %(pathname)s Full pathname of the source file where the logging
- call was issued (if available)
- %(filename)s Filename portion of pathname
- %(module)s Module (name portion of filename)
- %(lineno)d Source line number where the logging call was issued
- (if available)
- %(funcName)s Function name
- %(created)f Time when the LogRecord was created (time.time()
- return value)
- %(asctime)s Textual time when the LogRecord was created
- %(msecs)d Millisecond portion of the creation time
- %(relativeCreated)d Time in milliseconds when the LogRecord was created,
- relative to the time the logging module was loaded
- (typically at application startup time)
- %(thread)d Thread ID (if available)
- %(threadName)s Thread name (if available)
- %(process)d Process ID (if available)
- %(message)s The result of record.getMessage(), computed just as
- the record is emitted
- """
-
- converter = time.localtime
-
- def __init__(self, fmt=None, datefmt=None):
- """
- Initialize the formatter with specified format strings.
-
- Initialize the formatter either with the specified format string, or a
- default as described above. Allow for specialized date formatting with
- the optional datefmt argument (if omitted, you get the ISO8601 format).
- """
- if fmt:
- self._fmt = fmt
- else:
- self._fmt = "%(message)s"
- self.datefmt = datefmt
-
- def formatTime(self, record, datefmt=None):
- """
- Return the creation time of the specified LogRecord as formatted text.
-
- This method should be called from format() by a formatter which
- wants to make use of a formatted time. This method can be overridden
- in formatters to provide for any specific requirement, but the
- basic behaviour is as follows: if datefmt (a string) is specified,
- it is used with time.strftime() to format the creation time of the
- record. Otherwise, the ISO8601 format is used. The resulting
- string is returned. This function uses a user-configurable function
- to convert the creation time to a tuple. By default, time.localtime()
- is used; to change this for a particular formatter instance, set the
- 'converter' attribute to a function with the same signature as
- time.localtime() or time.gmtime(). To change it for all formatters,
- for example if you want all logging times to be shown in GMT,
- set the 'converter' attribute in the Formatter class.
- """
- ct = self.converter(record.created)
- if datefmt:
- s = time.strftime(datefmt, ct)
- else:
- t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
- s = "%s,%03d" % (t, record.msecs)
- return s
-
- def formatException(self, ei):
- """
- Format and return the specified exception information as a string.
-
- This default implementation just uses
- traceback.print_exception()
- """
- sio = cStringIO.StringIO()
- traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
- s = sio.getvalue()
- sio.close()
- if s[-1] == "\n":
- s = s[:-1]
- return s
-
- def format(self, record):
- """
- Format the specified record as text.
-
- The record's attribute dictionary is used as the operand to a
- string formatting operation which yields the returned string.
- Before formatting the dictionary, a couple of preparatory steps
- are carried out. The message attribute of the record is computed
- using LogRecord.getMessage(). If the formatting string contains
- "%(asctime)", formatTime() is called to format the event time.
- If there is exception information, it is formatted using
- formatException() and appended to the message.
- """
- record.message = record.getMessage()
- if string.find(self._fmt,"%(asctime)") >= 0:
- record.asctime = self.formatTime(record, self.datefmt)
- s = self._fmt % record.__dict__
- if record.exc_info:
- # Cache the traceback text to avoid converting it multiple times
- # (it's constant anyway)
- if not record.exc_text:
- record.exc_text = self.formatException(record.exc_info)
- if record.exc_text:
- if s[-1] != "\n":
- s = s + "\n"
- s = s + record.exc_text
- return s
-
-#
-# The default formatter to use when no other is specified
-#
-_defaultFormatter = Formatter()
-
-class BufferingFormatter:
- """
- A formatter suitable for formatting a number of records.
- """
- def __init__(self, linefmt=None):
- """
- Optionally specify a formatter which will be used to format each
- individual record.
- """
- if linefmt:
- self.linefmt = linefmt
- else:
- self.linefmt = _defaultFormatter
-
- def formatHeader(self, records):
- """
- Return the header string for the specified records.
- """
- return ""
-
- def formatFooter(self, records):
- """
- Return the footer string for the specified records.
- """
- return ""
-
- def format(self, records):
- """
- Format the specified records and return the result as a string.
- """
- rv = ""
- if len(records) > 0:
- rv = rv + self.formatHeader(records)
- for record in records:
- rv = rv + self.linefmt.format(record)
- rv = rv + self.formatFooter(records)
- return rv
-
-#---------------------------------------------------------------------------
-# Filter classes and functions
-#---------------------------------------------------------------------------
-
-class Filter:
- """
- Filter instances are used to perform arbitrary filtering of LogRecords.
-
- Loggers and Handlers can optionally use Filter instances to filter
- records as desired. The base filter class only allows events which are
- below a certain point in the logger hierarchy. For example, a filter
- initialized with "A.B" will allow events logged by loggers "A.B",
- "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
- initialized with the empty string, all events are passed.
- """
- def __init__(self, name=''):
- """
- Initialize a filter.
-
- Initialize with the name of the logger which, together with its
- children, will have its events allowed through the filter. If no
- name is specified, allow every event.
- """
- self.name = name
- self.nlen = len(name)
-
- def filter(self, record):
- """
- Determine if the specified record is to be logged.
-
- Is the specified record to be logged? Returns 0 for no, nonzero for
- yes. If deemed appropriate, the record may be modified in-place.
- """
- if self.nlen == 0:
- return 1
- elif self.name == record.name:
- return 1
- elif string.find(record.name, self.name, 0, self.nlen) != 0:
- return 0
- return (record.name[self.nlen] == ".")
-
-class Filterer:
- """
- A base class for loggers and handlers which allows them to share
- common code.
- """
- def __init__(self):
- """
- Initialize the list of filters to be an empty list.
- """
- self.filters = []
-
- def addFilter(self, filter):
- """
- Add the specified filter to this handler.
- """
- if not (filter in self.filters):
- self.filters.append(filter)
-
- def removeFilter(self, filter):
- """
- Remove the specified filter from this handler.
- """
- if filter in self.filters:
- self.filters.remove(filter)
-
- def filter(self, record):
- """
- Determine if a record is loggable by consulting all the filters.
-
- The default is to allow the record to be logged; any filter can veto
- this and the record is then dropped. Returns a zero value if a record
- is to be dropped, else non-zero.
- """
- rv = 1
- for f in self.filters:
- if not f.filter(record):
- rv = 0
- break
- return rv
-
-#---------------------------------------------------------------------------
-# Handler classes and functions
-#---------------------------------------------------------------------------
-
-_handlers = {} #repository of handlers (for flushing when shutdown called)
-_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
-
-class Handler(Filterer):
- """
- Handler instances dispatch logging events to specific destinations.
-
- The base handler class. Acts as a placeholder which defines the Handler
- interface. Handlers can optionally use Formatter instances to format
- records as desired. By default, no formatter is specified; in this case,
- the 'raw' message as determined by record.message is logged.
- """
- def __init__(self, level=NOTSET):
- """
- Initializes the instance - basically setting the formatter to None
- and the filter list to empty.
- """
- Filterer.__init__(self)
- self.level = level
- self.formatter = None
- #get the module data lock, as we're updating a shared structure.
- _acquireLock()
- try: #unlikely to raise an exception, but you never know...
- _handlers[self] = 1
- _handlerList.insert(0, self)
- finally:
- _releaseLock()
- self.createLock()
-
- def createLock(self):
- """
- Acquire a thread lock for serializing access to the underlying I/O.
- """
- if thread:
- self.lock = threading.RLock()
- else:
- self.lock = None
-
- def acquire(self):
- """
- Acquire the I/O thread lock.
- """
- if self.lock:
- self.lock.acquire()
-
- def release(self):
- """
- Release the I/O thread lock.
- """
- if self.lock:
- self.lock.release()
-
- def setLevel(self, level):
- """
- Set the logging level of this handler.
- """
- self.level = level
-
- def format(self, record):
- """
- Format the specified record.
-
- If a formatter is set, use it. Otherwise, use the default formatter
- for the module.
- """
- if self.formatter:
- fmt = self.formatter
- else:
- fmt = _defaultFormatter
- return fmt.format(record)
-
- def emit(self, record):
- """
- Do whatever it takes to actually log the specified logging record.
-
- This version is intended to be implemented by subclasses and so
- raises a NotImplementedError.
- """
- raise NotImplementedError, 'emit must be implemented '\
- 'by Handler subclasses'
-
- def handle(self, record):
- """
- Conditionally emit the specified logging record.
-
- Emission depends on filters which may have been added to the handler.
- Wrap the actual emission of the record with acquisition/release of
- the I/O thread lock. Returns whether the filter passed the record for
- emission.
- """
- rv = self.filter(record)
- if rv:
- self.acquire()
- try:
- self.emit(record)
- finally:
- self.release()
- return rv
-
- def setFormatter(self, fmt):
- """
- Set the formatter for this handler.
- """
- self.formatter = fmt
-
- def flush(self):
- """
- Ensure all logging output has been flushed.
-
- This version does nothing and is intended to be implemented by
- subclasses.
- """
- pass
-
- def close(self):
- """
- Tidy up any resources used by the handler.
-
- This version does removes the handler from an internal list
- of handlers which is closed when shutdown() is called. Subclasses
- should ensure that this gets called from overridden close()
- methods.
- """
- #get the module data lock, as we're updating a shared structure.
- _acquireLock()
- try: #unlikely to raise an exception, but you never know...
- del _handlers[self]
- _handlerList.remove(self)
- finally:
- _releaseLock()
-
- def handleError(self, record):
- """
- Handle errors which occur during an emit() call.
-
- This method should be called from handlers when an exception is
- encountered during an emit() call. If raiseExceptions is false,
- exceptions get silently ignored. This is what is mostly wanted
- for a logging system - most users will not care about errors in
- the logging system, they are more interested in application errors.
- You could, however, replace this with a custom handler if you wish.
- The record which was being processed is passed in to this method.
- """
- if raiseExceptions:
- ei = sys.exc_info()
- traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
- del ei
-
-class StreamHandler(Handler):
- """
- A handler class which writes logging records, appropriately formatted,
- to a stream. Note that this class does not close the stream, as
- sys.stdout or sys.stderr may be used.
- """
- def __init__(self, strm=None):
- """
- Initialize the handler.
-
- If strm is not specified, sys.stderr is used.
- """
- Handler.__init__(self)
- if strm is None:
- strm = sys.stderr
- self.stream = strm
- self.formatter = None
-
- def flush(self):
- """
- Flushes the stream.
- """
- self.stream.flush()
-
- def emit(self, record):
- """
- Emit a record.
-
- If a formatter is specified, it is used to format the record.
- The record is then written to the stream with a trailing newline
- [N.B. this may be removed depending on feedback]. If exception
- information is present, it is formatted using
- traceback.print_exception and appended to the stream.
- """
- try:
- msg = self.format(record)
- fs = "%s\n"
- if not hasattr(types, "UnicodeType"): #if no unicode support...
- self.stream.write(fs % msg)
- else:
- try:
- self.stream.write(fs % msg)
- except UnicodeError:
- self.stream.write(fs % msg.encode("UTF-8"))
- self.flush()
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class FileHandler(StreamHandler):
- """
- A handler class which writes formatted logging records to disk files.
- """
- def __init__(self, filename, mode='a', encoding=None):
- """
- Open the specified file and use it as the stream for logging.
- """
- if codecs is None:
- encoding = None
- if encoding is None:
- stream = open(filename, mode)
- else:
- stream = codecs.open(filename, mode, encoding)
- StreamHandler.__init__(self, stream)
- #keep the absolute path, otherwise derived classes which use this
- #may come a cropper when the current directory changes
- self.baseFilename = os.path.abspath(filename)
- self.mode = mode
-
- def close(self):
- """
- Closes the stream.
- """
- self.flush()
- self.stream.close()
- StreamHandler.close(self)
-
-#---------------------------------------------------------------------------
-# Manager classes and functions
-#---------------------------------------------------------------------------
-
-class PlaceHolder:
- """
- PlaceHolder instances are used in the Manager logger hierarchy to take
- the place of nodes for which no loggers have been defined. This class is
- intended for internal use only and not as part of the public API.
- """
- def __init__(self, alogger):
- """
- Initialize with the specified logger being a child of this placeholder.
- """
- #self.loggers = [alogger]
- self.loggerMap = { alogger : None }
-
- def append(self, alogger):
- """
- Add the specified logger as a child of this placeholder.
- """
- #if alogger not in self.loggers:
- if not self.loggerMap.has_key(alogger):
- #self.loggers.append(alogger)
- self.loggerMap[alogger] = None
-
-#
-# Determine which class to use when instantiating loggers.
-#
-_loggerClass = None
-
-def setLoggerClass(klass):
- """
- Set the class to be used when instantiating a logger. The class should
- define __init__() such that only a name argument is required, and the
- __init__() should call Logger.__init__()
- """
- if klass != Logger:
- if not issubclass(klass, Logger):
- raise TypeError, "logger not derived from logging.Logger: " + \
- klass.__name__
- global _loggerClass
- _loggerClass = klass
-
-def getLoggerClass():
- """
- Return the class to be used when instantiating a logger.
- """
-
- return _loggerClass
-
-class Manager:
- """
- There is [under normal circumstances] just one Manager instance, which
- holds the hierarchy of loggers.
- """
- def __init__(self, rootnode):
- """
- Initialize the manager with the root node of the logger hierarchy.
- """
- self.root = rootnode
- self.disable = 0
- self.emittedNoHandlerWarning = 0
- self.loggerDict = {}
-
- def getLogger(self, name):
- """
- Get a logger with the specified name (channel name), creating it
- if it doesn't yet exist. This name is a dot-separated hierarchical
- name, such as "a", "a.b", "a.b.c" or similar.
-
- If a PlaceHolder existed for the specified name [i.e. the logger
- didn't exist but a child of it did], replace it with the created
- logger and fix up the parent/child references which pointed to the
- placeholder to now point to the logger.
- """
- rv = None
- _acquireLock()
- try:
- if self.loggerDict.has_key(name):
- rv = self.loggerDict[name]
- if isinstance(rv, PlaceHolder):
- ph = rv
- rv = _loggerClass(name)
- rv.manager = self
- self.loggerDict[name] = rv
- self._fixupChildren(ph, rv)
- self._fixupParents(rv)
- else:
- rv = _loggerClass(name)
- rv.manager = self
- self.loggerDict[name] = rv
- self._fixupParents(rv)
- finally:
- _releaseLock()
- return rv
-
- def _fixupParents(self, alogger):
- """
- Ensure that there are either loggers or placeholders all the way
- from the specified logger to the root of the logger hierarchy.
- """
- name = alogger.name
- i = string.rfind(name, ".")
- rv = None
- while (i > 0) and not rv:
- substr = name[:i]
- if not self.loggerDict.has_key(substr):
- self.loggerDict[substr] = PlaceHolder(alogger)
- else:
- obj = self.loggerDict[substr]
- if isinstance(obj, Logger):
- rv = obj
- else:
- assert isinstance(obj, PlaceHolder)
- obj.append(alogger)
- i = string.rfind(name, ".", 0, i - 1)
- if not rv:
- rv = self.root
- alogger.parent = rv
-
- def _fixupChildren(self, ph, alogger):
- """
- Ensure that children of the placeholder ph are connected to the
- specified logger.
- """
- name = alogger.name
- namelen = len(name)
- for c in ph.loggerMap.keys():
- #The if means ... if not c.parent.name.startswith(nm)
- #if string.find(c.parent.name, nm) <> 0:
- if c.parent.name[:namelen] != name:
- alogger.parent = c.parent
- c.parent = alogger
-
-#---------------------------------------------------------------------------
-# Logger classes and functions
-#---------------------------------------------------------------------------
-
-class Logger(Filterer):
- """
- Instances of the Logger class represent a single logging channel. A
- "logging channel" indicates an area of an application. Exactly how an
- "area" is defined is up to the application developer. Since an
- application can have any number of areas, logging channels are identified
- by a unique string. Application areas can be nested (e.g. an area
- of "input processing" might include sub-areas "read CSV files", "read
- XLS files" and "read Gnumeric files"). To cater for this natural nesting,
- channel names are organized into a namespace hierarchy where levels are
- separated by periods, much like the Java or Python package namespace. So
- in the instance given above, channel names might be "input" for the upper
- level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
- There is no arbitrary limit to the depth of nesting.
- """
- def __init__(self, name, level=NOTSET):
- """
- Initialize the logger with a name and an optional level.
- """
- Filterer.__init__(self)
- self.name = name
- self.level = level
- self.parent = None
- self.propagate = 1
- self.handlers = []
- self.disabled = 0
-
- def setLevel(self, level):
- """
- Set the logging level of this logger.
- """
- self.level = level
-
- def debug(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'DEBUG'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
- """
- if self.manager.disable >= DEBUG:
- return
- if DEBUG >= self.getEffectiveLevel():
- apply(self._log, (DEBUG, msg, args), kwargs)
-
- def info(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'INFO'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
- """
- if self.manager.disable >= INFO:
- return
- if INFO >= self.getEffectiveLevel():
- apply(self._log, (INFO, msg, args), kwargs)
-
- def warning(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'WARNING'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
- """
- if self.manager.disable >= WARNING:
- return
- if self.isEnabledFor(WARNING):
- apply(self._log, (WARNING, msg, args), kwargs)
-
- warn = warning
-
- def error(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'ERROR'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.error("Houston, we have a %s", "major problem", exc_info=1)
- """
- if self.manager.disable >= ERROR:
- return
- if self.isEnabledFor(ERROR):
- apply(self._log, (ERROR, msg, args), kwargs)
-
- def exception(self, msg, *args):
- """
- Convenience method for logging an ERROR with exception information.
- """
- apply(self.error, (msg,) + args, {'exc_info': 1})
-
- def critical(self, msg, *args, **kwargs):
- """
- Log 'msg % args' with severity 'CRITICAL'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
- """
- if self.manager.disable >= CRITICAL:
- return
- if CRITICAL >= self.getEffectiveLevel():
- apply(self._log, (CRITICAL, msg, args), kwargs)
-
- fatal = critical
-
- def log(self, level, msg, *args, **kwargs):
- """
- Log 'msg % args' with the integer severity 'level'.
-
- To pass exception information, use the keyword argument exc_info with
- a true value, e.g.
-
- logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
- """
- if type(level) != types.IntType:
- if raiseExceptions:
- raise TypeError, "level must be an integer"
- else:
- return
- if self.manager.disable >= level:
- return
- if self.isEnabledFor(level):
- apply(self._log, (level, msg, args), kwargs)
-
- def findCaller(self):
- """
- Find the stack frame of the caller so that we can note the source
- file name, line number and function name.
- """
- f = currentframe().f_back
- rv = "(unknown file)", 0, "(unknown function)"
- while hasattr(f, "f_code"):
- co = f.f_code
- filename = os.path.normcase(co.co_filename)
- if filename == _srcfile:
- f = f.f_back
- continue
- rv = (filename, f.f_lineno, co.co_name)
- break
- return rv
-
- def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
- """
- A factory method which can be overridden in subclasses to create
- specialized LogRecords.
- """
- rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
- if extra:
- for key in extra:
- if (key in ["message", "asctime"]) or (key in rv.__dict__):
- raise KeyError("Attempt to overwrite %r in LogRecord" % key)
- rv.__dict__[key] = extra[key]
- return rv
-
- def _log(self, level, msg, args, exc_info=None, extra=None):
- """
- Low-level logging routine which creates a LogRecord and then calls
- all the handlers of this logger to handle the record.
- """
- if _srcfile:
- fn, lno, func = self.findCaller()
- else:
- fn, lno, func = "(unknown file)", 0, "(unknown function)"
- if exc_info:
- if type(exc_info) != types.TupleType:
- exc_info = sys.exc_info()
- record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
- self.handle(record)
-
- def handle(self, record):
- """
- Call the handlers for the specified record.
-
- This method is used for unpickled records received from a socket, as
- well as those created locally. Logger-level filtering is applied.
- """
- if (not self.disabled) and self.filter(record):
- self.callHandlers(record)
-
- def addHandler(self, hdlr):
- """
- Add the specified handler to this logger.
- """
- if not (hdlr in self.handlers):
- self.handlers.append(hdlr)
-
- def removeHandler(self, hdlr):
- """
- Remove the specified handler from this logger.
- """
- if hdlr in self.handlers:
- #hdlr.close()
- hdlr.acquire()
- try:
- self.handlers.remove(hdlr)
- finally:
- hdlr.release()
-
- def callHandlers(self, record):
- """
- Pass a record to all relevant handlers.
-
- Loop through all handlers for this logger and its parents in the
- logger hierarchy. If no handler was found, output a one-off error
- message to sys.stderr. Stop searching up the hierarchy whenever a
- logger with the "propagate" attribute set to zero is found - that
- will be the last logger whose handlers are called.
- """
- c = self
- found = 0
- while c:
- for hdlr in c.handlers:
- found = found + 1
- if record.levelno >= hdlr.level:
- hdlr.handle(record)
- if not c.propagate:
- c = None #break out
- else:
- c = c.parent
- if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
- sys.stderr.write("No handlers could be found for logger"
- " \"%s\"\n" % self.name)
- self.manager.emittedNoHandlerWarning = 1
-
- def getEffectiveLevel(self):
- """
- Get the effective level for this logger.
-
- Loop through this logger and its parents in the logger hierarchy,
- looking for a non-zero logging level. Return the first one found.
- """
- logger = self
- while logger:
- if logger.level:
- return logger.level
- logger = logger.parent
- return NOTSET
-
- def isEnabledFor(self, level):
- """
- Is this logger enabled for level 'level'?
- """
- if self.manager.disable >= level:
- return 0
- return level >= self.getEffectiveLevel()
-
-class RootLogger(Logger):
- """
- A root logger is not that different to any other logger, except that
- it must have a logging level and there is only one instance of it in
- the hierarchy.
- """
- def __init__(self, level):
- """
- Initialize the logger with the name "root".
- """
- Logger.__init__(self, "root", level)
-
-_loggerClass = Logger
-
-root = RootLogger(WARNING)
-Logger.root = root
-Logger.manager = Manager(Logger.root)
-
-#---------------------------------------------------------------------------
-# Configuration classes and functions
-#---------------------------------------------------------------------------
-
-BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
-
-def basicConfig(**kwargs):
- """
- Do basic configuration for the logging system.
-
- This function does nothing if the root logger already has handlers
- configured. It is a convenience method intended for use by simple scripts
- to do one-shot configuration of the logging package.
-
- The default behaviour is to create a StreamHandler which writes to
- sys.stderr, set a formatter using the BASIC_FORMAT format string, and
- add the handler to the root logger.
-
- A number of optional keyword arguments may be specified, which can alter
- the default behaviour.
-
- filename Specifies that a FileHandler be created, using the specified
- filename, rather than a StreamHandler.
- filemode Specifies the mode to open the file, if filename is specified
- (if filemode is unspecified, it defaults to 'a').
- format Use the specified format string for the handler.
- datefmt Use the specified date/time format.
- level Set the root logger level to the specified level.
- stream Use the specified stream to initialize the StreamHandler. Note
- that this argument is incompatible with 'filename' - if both
- are present, 'stream' is ignored.
-
- Note that you could specify a stream created using open(filename, mode)
- rather than passing the filename and mode in. However, it should be
- remembered that StreamHandler does not close its stream (since it may be
- using sys.stdout or sys.stderr), whereas FileHandler closes its stream
- when the handler is closed.
- """
- if len(root.handlers) == 0:
- filename = kwargs.get("filename")
- if filename:
- mode = kwargs.get("filemode", 'a')
- hdlr = FileHandler(filename, mode)
- else:
- stream = kwargs.get("stream")
- hdlr = StreamHandler(stream)
- fs = kwargs.get("format", BASIC_FORMAT)
- dfs = kwargs.get("datefmt", None)
- fmt = Formatter(fs, dfs)
- hdlr.setFormatter(fmt)
- root.addHandler(hdlr)
- level = kwargs.get("level")
- if level:
- root.setLevel(level)
-
-#---------------------------------------------------------------------------
-# Utility functions at module level.
-# Basically delegate everything to the root logger.
-#---------------------------------------------------------------------------
-
-def getLogger(name=None):
- """
- Return a logger with the specified name, creating it if necessary.
-
- If no name is specified, return the root logger.
- """
- if name:
- return Logger.manager.getLogger(name)
- else:
- return root
-
-#def getRootLogger():
-# """
-# Return the root logger.
-#
-# Note that getLogger('') now does the same thing, so this function is
-# deprecated and may disappear in the future.
-# """
-# return root
-
-def critical(msg, *args, **kwargs):
- """
- Log a message with severity 'CRITICAL' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- apply(root.critical, (msg,)+args, kwargs)
-
-fatal = critical
-
-def error(msg, *args, **kwargs):
- """
- Log a message with severity 'ERROR' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- apply(root.error, (msg,)+args, kwargs)
-
-def exception(msg, *args):
- """
- Log a message with severity 'ERROR' on the root logger,
- with exception information.
- """
- apply(error, (msg,)+args, {'exc_info': 1})
-
-def warning(msg, *args, **kwargs):
- """
- Log a message with severity 'WARNING' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- apply(root.warning, (msg,)+args, kwargs)
-
-warn = warning
-
-def info(msg, *args, **kwargs):
- """
- Log a message with severity 'INFO' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- apply(root.info, (msg,)+args, kwargs)
-
-def debug(msg, *args, **kwargs):
- """
- Log a message with severity 'DEBUG' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- apply(root.debug, (msg,)+args, kwargs)
-
-def log(level, msg, *args, **kwargs):
- """
- Log 'msg % args' with the integer severity 'level' on the root logger.
- """
- if len(root.handlers) == 0:
- basicConfig()
- apply(root.log, (level, msg)+args, kwargs)
-
-def disable(level):
- """
- Disable all logging calls less severe than 'level'.
- """
- root.manager.disable = level
-
-def shutdown(handlerList=_handlerList):
- """
- Perform any cleanup actions in the logging system (e.g. flushing
- buffers).
-
- Should be called at application exit.
- """
- for h in handlerList[:]:
- #errors might occur, for example, if files are locked
- #we just ignore them if raiseExceptions is not set
- try:
- h.flush()
- h.close()
- except:
- if raiseExceptions:
- raise
- #else, swallow
-
-#Let's try and shutdown automatically on application exit...
-try:
- import atexit
- atexit.register(shutdown)
-except ImportError: # for Python versions < 2.0
- def exithook(status, old_exit=sys.exit):
- try:
- shutdown()
- finally:
- old_exit(status)
-
- sys.exit = exithook
diff --git a/sys/lib/python/logging/config.py b/sys/lib/python/logging/config.py
deleted file mode 100644
index 11d2b7a79..000000000
--- a/sys/lib/python/logging/config.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Copyright 2001-2005 by Vinay Sajip. All Rights Reserved.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Vinay Sajip
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
-# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
-# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Configuration functions for the logging package for Python. The core package
-is based on PEP 282 and comments thereto in comp.lang.python, and influenced
-by Apache's log4j system.
-
-Should work under Python versions >= 1.5.2, except that source line
-information is not available unless 'sys._getframe()' is.
-
-Copyright (C) 2001-2004 Vinay Sajip. All Rights Reserved.
-
-To use, simply 'import logging' and log away!
-"""
-
-import sys, logging, logging.handlers, string, socket, struct, os, traceback, types
-
-try:
- import thread
- import threading
-except ImportError:
- thread = None
-
-from SocketServer import ThreadingTCPServer, StreamRequestHandler
-
-
-DEFAULT_LOGGING_CONFIG_PORT = 9030
-
-if sys.platform == "win32":
- RESET_ERROR = 10054 #WSAECONNRESET
-else:
- RESET_ERROR = 104 #ECONNRESET
-
-#
-# The following code implements a socket listener for on-the-fly
-# reconfiguration of logging.
-#
-# _listener holds the server object doing the listening
-_listener = None
-
-def fileConfig(fname, defaults=None):
- """
- Read the logging configuration from a ConfigParser-format file.
-
- This can be called several times from an application, allowing an end user
- the ability to select from various pre-canned configurations (if the
- developer provides a mechanism to present the choices and load the chosen
- configuration).
- In versions of ConfigParser which have the readfp method [typically
- shipped in 2.x versions of Python], you can pass in a file-like object
- rather than a filename, in which case the file-like object will be read
- using readfp.
- """
- import ConfigParser
-
- cp = ConfigParser.ConfigParser(defaults)
- if hasattr(cp, 'readfp') and hasattr(fname, 'readline'):
- cp.readfp(fname)
- else:
- cp.read(fname)
-
- formatters = _create_formatters(cp)
-
- # critical section
- logging._acquireLock()
- try:
- logging._handlers.clear()
- del logging._handlerList[:]
- # Handlers add themselves to logging._handlers
- handlers = _install_handlers(cp, formatters)
- _install_loggers(cp, handlers)
- finally:
- logging._releaseLock()
-
-
-def _resolve(name):
- """Resolve a dotted name to a global object."""
- name = string.split(name, '.')
- used = name.pop(0)
- found = __import__(used)
- for n in name:
- used = used + '.' + n
- try:
- found = getattr(found, n)
- except AttributeError:
- __import__(used)
- found = getattr(found, n)
- return found
-
-
-def _create_formatters(cp):
- """Create and return formatters"""
- flist = cp.get("formatters", "keys")
- if not len(flist):
- return {}
- flist = string.split(flist, ",")
- formatters = {}
- for form in flist:
- sectname = "formatter_%s" % string.strip(form)
- opts = cp.options(sectname)
- if "format" in opts:
- fs = cp.get(sectname, "format", 1)
- else:
- fs = None
- if "datefmt" in opts:
- dfs = cp.get(sectname, "datefmt", 1)
- else:
- dfs = None
- c = logging.Formatter
- if "class" in opts:
- class_name = cp.get(sectname, "class")
- if class_name:
- c = _resolve(class_name)
- f = c(fs, dfs)
- formatters[form] = f
- return formatters
-
-
-def _install_handlers(cp, formatters):
- """Install and return handlers"""
- hlist = cp.get("handlers", "keys")
- if not len(hlist):
- return {}
- hlist = string.split(hlist, ",")
- handlers = {}
- fixups = [] #for inter-handler references
- for hand in hlist:
- sectname = "handler_%s" % string.strip(hand)
- klass = cp.get(sectname, "class")
- opts = cp.options(sectname)
- if "formatter" in opts:
- fmt = cp.get(sectname, "formatter")
- else:
- fmt = ""
- klass = eval(klass, vars(logging))
- args = cp.get(sectname, "args")
- args = eval(args, vars(logging))
- h = apply(klass, args)
- if "level" in opts:
- level = cp.get(sectname, "level")
- h.setLevel(logging._levelNames[level])
- if len(fmt):
- h.setFormatter(formatters[fmt])
- #temporary hack for FileHandler and MemoryHandler.
- if klass == logging.handlers.MemoryHandler:
- if "target" in opts:
- target = cp.get(sectname,"target")
- else:
- target = ""
- if len(target): #the target handler may not be loaded yet, so keep for later...
- fixups.append((h, target))
- handlers[hand] = h
- #now all handlers are loaded, fixup inter-handler references...
- for h, t in fixups:
- h.setTarget(handlers[t])
- return handlers
-
-
-def _install_loggers(cp, handlers):
- """Create and install loggers"""
-
- # configure the root first
- llist = cp.get("loggers", "keys")
- llist = string.split(llist, ",")
- llist = map(lambda x: string.strip(x), llist)
- llist.remove("root")
- sectname = "logger_root"
- root = logging.root
- log = root
- opts = cp.options(sectname)
- if "level" in opts:
- level = cp.get(sectname, "level")
- log.setLevel(logging._levelNames[level])
- for h in root.handlers[:]:
- root.removeHandler(h)
- hlist = cp.get(sectname, "handlers")
- if len(hlist):
- hlist = string.split(hlist, ",")
- for hand in hlist:
- log.addHandler(handlers[string.strip(hand)])
-
- #and now the others...
- #we don't want to lose the existing loggers,
- #since other threads may have pointers to them.
- #existing is set to contain all existing loggers,
- #and as we go through the new configuration we
- #remove any which are configured. At the end,
- #what's left in existing is the set of loggers
- #which were in the previous configuration but
- #which are not in the new configuration.
- existing = root.manager.loggerDict.keys()
- #now set up the new ones...
- for log in llist:
- sectname = "logger_%s" % log
- qn = cp.get(sectname, "qualname")
- opts = cp.options(sectname)
- if "propagate" in opts:
- propagate = cp.getint(sectname, "propagate")
- else:
- propagate = 1
- logger = logging.getLogger(qn)
- if qn in existing:
- existing.remove(qn)
- if "level" in opts:
- level = cp.get(sectname, "level")
- logger.setLevel(logging._levelNames[level])
- for h in logger.handlers[:]:
- logger.removeHandler(h)
- logger.propagate = propagate
- logger.disabled = 0
- hlist = cp.get(sectname, "handlers")
- if len(hlist):
- hlist = string.split(hlist, ",")
- for hand in hlist:
- logger.addHandler(handlers[string.strip(hand)])
-
- #Disable any old loggers. There's no point deleting
- #them as other threads may continue to hold references
- #and by disabling them, you stop them doing any logging.
- for log in existing:
- root.manager.loggerDict[log].disabled = 1
-
-
-def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
- """
- Start up a socket server on the specified port, and listen for new
- configurations.
-
- These will be sent as a file suitable for processing by fileConfig().
- Returns a Thread object on which you can call start() to start the server,
- and which you can join() when appropriate. To stop the server, call
- stopListening().
- """
- if not thread:
- raise NotImplementedError, "listen() needs threading to work"
-
- class ConfigStreamHandler(StreamRequestHandler):
- """
- Handler for a logging configuration request.
-
- It expects a completely new logging configuration and uses fileConfig
- to install it.
- """
- def handle(self):
- """
- Handle a request.
-
- Each request is expected to be a 4-byte length, packed using
- struct.pack(">L", n), followed by the config file.
- Uses fileConfig() to do the grunt work.
- """
- import tempfile
- try:
- conn = self.connection
- chunk = conn.recv(4)
- if len(chunk) == 4:
- slen = struct.unpack(">L", chunk)[0]
- chunk = self.connection.recv(slen)
- while len(chunk) < slen:
- chunk = chunk + conn.recv(slen - len(chunk))
- #Apply new configuration. We'd like to be able to
- #create a StringIO and pass that in, but unfortunately
- #1.5.2 ConfigParser does not support reading file
- #objects, only actual files. So we create a temporary
- #file and remove it later.
- file = tempfile.mktemp(".ini")
- f = open(file, "w")
- f.write(chunk)
- f.close()
- try:
- fileConfig(file)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- traceback.print_exc()
- os.remove(file)
- except socket.error, e:
- if type(e.args) != types.TupleType:
- raise
- else:
- errcode = e.args[0]
- if errcode != RESET_ERROR:
- raise
-
- class ConfigSocketReceiver(ThreadingTCPServer):
- """
- A simple TCP socket-based logging config receiver.
- """
-
- allow_reuse_address = 1
-
- def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
- handler=None):
- ThreadingTCPServer.__init__(self, (host, port), handler)
- logging._acquireLock()
- self.abort = 0
- logging._releaseLock()
- self.timeout = 1
-
- def serve_until_stopped(self):
- import select
- abort = 0
- while not abort:
- rd, wr, ex = select.select([self.socket.fileno()],
- [], [],
- self.timeout)
- if rd:
- self.handle_request()
- logging._acquireLock()
- abort = self.abort
- logging._releaseLock()
-
- def serve(rcvr, hdlr, port):
- server = rcvr(port=port, handler=hdlr)
- global _listener
- logging._acquireLock()
- _listener = server
- logging._releaseLock()
- server.serve_until_stopped()
-
- return threading.Thread(target=serve,
- args=(ConfigSocketReceiver,
- ConfigStreamHandler, port))
-
-def stopListening():
- """
- Stop the listening server which was created with a call to listen().
- """
- global _listener
- if _listener:
- logging._acquireLock()
- _listener.abort = 1
- _listener = None
- logging._releaseLock()
diff --git a/sys/lib/python/logging/handlers.py b/sys/lib/python/logging/handlers.py
deleted file mode 100644
index 4ef896ed5..000000000
--- a/sys/lib/python/logging/handlers.py
+++ /dev/null
@@ -1,1019 +0,0 @@
-# Copyright 2001-2005 by Vinay Sajip. All Rights Reserved.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Vinay Sajip
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
-# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
-# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
-# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Additional handlers for the logging package for Python. The core package is
-based on PEP 282 and comments thereto in comp.lang.python, and influenced by
-Apache's log4j system.
-
-Should work under Python versions >= 1.5.2, except that source line
-information is not available unless 'sys._getframe()' is.
-
-Copyright (C) 2001-2004 Vinay Sajip. All Rights Reserved.
-
-To use, simply 'import logging' and log away!
-"""
-
-import sys, logging, socket, types, os, string, cPickle, struct, time, glob
-
-try:
- import codecs
-except ImportError:
- codecs = None
-
-#
-# Some constants...
-#
-
-DEFAULT_TCP_LOGGING_PORT = 9020
-DEFAULT_UDP_LOGGING_PORT = 9021
-DEFAULT_HTTP_LOGGING_PORT = 9022
-DEFAULT_SOAP_LOGGING_PORT = 9023
-SYSLOG_UDP_PORT = 514
-
-_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
-
-class BaseRotatingHandler(logging.FileHandler):
- """
- Base class for handlers that rotate log files at a certain point.
- Not meant to be instantiated directly. Instead, use RotatingFileHandler
- or TimedRotatingFileHandler.
- """
- def __init__(self, filename, mode, encoding=None):
- """
- Use the specified filename for streamed logging
- """
- if codecs is None:
- encoding = None
- logging.FileHandler.__init__(self, filename, mode, encoding)
- self.mode = mode
- self.encoding = encoding
-
- def emit(self, record):
- """
- Emit a record.
-
- Output the record to the file, catering for rollover as described
- in doRollover().
- """
- try:
- if self.shouldRollover(record):
- self.doRollover()
- logging.FileHandler.emit(self, record)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class RotatingFileHandler(BaseRotatingHandler):
- """
- Handler for logging to a set of files, which switches from one file
- to the next when the current file reaches a certain size.
- """
- def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None):
- """
- Open the specified file and use it as the stream for logging.
-
- By default, the file grows indefinitely. You can specify particular
- values of maxBytes and backupCount to allow the file to rollover at
- a predetermined size.
-
- Rollover occurs whenever the current log file is nearly maxBytes in
- length. If backupCount is >= 1, the system will successively create
- new files with the same pathname as the base file, but with extensions
- ".1", ".2" etc. appended to it. For example, with a backupCount of 5
- and a base file name of "app.log", you would get "app.log",
- "app.log.1", "app.log.2", ... through to "app.log.5". The file being
- written to is always "app.log" - when it gets filled up, it is closed
- and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
- exist, then they are renamed to "app.log.2", "app.log.3" etc.
- respectively.
-
- If maxBytes is zero, rollover never occurs.
- """
- if maxBytes > 0:
- mode = 'a' # doesn't make sense otherwise!
- BaseRotatingHandler.__init__(self, filename, mode, encoding)
- self.maxBytes = maxBytes
- self.backupCount = backupCount
-
- def doRollover(self):
- """
- Do a rollover, as described in __init__().
- """
-
- self.stream.close()
- if self.backupCount > 0:
- for i in range(self.backupCount - 1, 0, -1):
- sfn = "%s.%d" % (self.baseFilename, i)
- dfn = "%s.%d" % (self.baseFilename, i + 1)
- if os.path.exists(sfn):
- #print "%s -> %s" % (sfn, dfn)
- if os.path.exists(dfn):
- os.remove(dfn)
- os.rename(sfn, dfn)
- dfn = self.baseFilename + ".1"
- if os.path.exists(dfn):
- os.remove(dfn)
- os.rename(self.baseFilename, dfn)
- #print "%s -> %s" % (self.baseFilename, dfn)
- if self.encoding:
- self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
- else:
- self.stream = open(self.baseFilename, 'w')
-
- def shouldRollover(self, record):
- """
- Determine if rollover should occur.
-
- Basically, see if the supplied record would cause the file to exceed
- the size limit we have.
- """
- if self.maxBytes > 0: # are we rolling over?
- msg = "%s\n" % self.format(record)
- self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
- if self.stream.tell() + len(msg) >= self.maxBytes:
- return 1
- return 0
-
-class TimedRotatingFileHandler(BaseRotatingHandler):
- """
- Handler for logging to a file, rotating the log file at certain timed
- intervals.
-
- If backupCount is > 0, when rollover is done, no more than backupCount
- files are kept - the oldest ones are deleted.
- """
- def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None):
- BaseRotatingHandler.__init__(self, filename, 'a', encoding)
- self.when = string.upper(when)
- self.backupCount = backupCount
- # Calculate the real rollover interval, which is just the number of
- # seconds between rollovers. Also set the filename suffix used when
- # a rollover occurs. Current 'when' events supported:
- # S - Seconds
- # M - Minutes
- # H - Hours
- # D - Days
- # midnight - roll over at midnight
- # W{0-6} - roll over on a certain day; 0 - Monday
- #
- # Case of the 'when' specifier is not important; lower or upper case
- # will work.
- currentTime = int(time.time())
- if self.when == 'S':
- self.interval = 1 # one second
- self.suffix = "%Y-%m-%d_%H-%M-%S"
- elif self.when == 'M':
- self.interval = 60 # one minute
- self.suffix = "%Y-%m-%d_%H-%M"
- elif self.when == 'H':
- self.interval = 60 * 60 # one hour
- self.suffix = "%Y-%m-%d_%H"
- elif self.when == 'D' or self.when == 'MIDNIGHT':
- self.interval = 60 * 60 * 24 # one day
- self.suffix = "%Y-%m-%d"
- elif self.when.startswith('W'):
- self.interval = 60 * 60 * 24 * 7 # one week
- if len(self.when) != 2:
- raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
- if self.when[1] < '0' or self.when[1] > '6':
- raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
- self.dayOfWeek = int(self.when[1])
- self.suffix = "%Y-%m-%d"
- else:
- raise ValueError("Invalid rollover interval specified: %s" % self.when)
-
- self.interval = self.interval * interval # multiply by units requested
- self.rolloverAt = currentTime + self.interval
-
- # If we are rolling over at midnight or weekly, then the interval is already known.
- # What we need to figure out is WHEN the next interval is. In other words,
- # if you are rolling over at midnight, then your base interval is 1 day,
- # but you want to start that one day clock at midnight, not now. So, we
- # have to fudge the rolloverAt value in order to trigger the first rollover
- # at the right time. After that, the regular interval will take care of
- # the rest. Note that this code doesn't care about leap seconds. :)
- if self.when == 'MIDNIGHT' or self.when.startswith('W'):
- # This could be done with less code, but I wanted it to be clear
- t = time.localtime(currentTime)
- currentHour = t[3]
- currentMinute = t[4]
- currentSecond = t[5]
- # r is the number of seconds left between now and midnight
- r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
- currentSecond)
- self.rolloverAt = currentTime + r
- # If we are rolling over on a certain day, add in the number of days until
- # the next rollover, but offset by 1 since we just calculated the time
- # until the next day starts. There are three cases:
- # Case 1) The day to rollover is today; in this case, do nothing
- # Case 2) The day to rollover is further in the interval (i.e., today is
- # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
- # next rollover is simply 6 - 2 - 1, or 3.
- # Case 3) The day to rollover is behind us in the interval (i.e., today
- # is day 5 (Saturday) and rollover is on day 3 (Thursday).
- # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
- # number of days left in the current week (1) plus the number
- # of days in the next week until the rollover day (3).
- if when.startswith('W'):
- day = t[6] # 0 is Monday
- if day > self.dayOfWeek:
- daysToWait = (day - self.dayOfWeek) - 1
- self.rolloverAt = self.rolloverAt + (daysToWait * (60 * 60 * 24))
- if day < self.dayOfWeek:
- daysToWait = (6 - self.dayOfWeek) + day
- self.rolloverAt = self.rolloverAt + (daysToWait * (60 * 60 * 24))
-
- #print "Will rollover at %d, %d seconds from now" % (self.rolloverAt, self.rolloverAt - currentTime)
-
- def shouldRollover(self, record):
- """
- Determine if rollover should occur
-
- record is not used, as we are just comparing times, but it is needed so
- the method siguratures are the same
- """
- t = int(time.time())
- if t >= self.rolloverAt:
- return 1
- #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
- return 0
-
- def doRollover(self):
- """
- do a rollover; in this case, a date/time stamp is appended to the filename
- when the rollover happens. However, you want the file to be named for the
- start of the interval, not the current time. If there is a backup count,
- then we have to get a list of matching filenames, sort them and remove
- the one with the oldest suffix.
- """
- self.stream.close()
- # get the time that this sequence started at and make it a TimeTuple
- t = self.rolloverAt - self.interval
- timeTuple = time.localtime(t)
- dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
- if os.path.exists(dfn):
- os.remove(dfn)
- os.rename(self.baseFilename, dfn)
- if self.backupCount > 0:
- # find the oldest log file and delete it
- s = glob.glob(self.baseFilename + ".20*")
- if len(s) > self.backupCount:
- s.sort()
- os.remove(s[0])
- #print "%s -> %s" % (self.baseFilename, dfn)
- if self.encoding:
- self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
- else:
- self.stream = open(self.baseFilename, 'w')
- self.rolloverAt = self.rolloverAt + self.interval
-
-class SocketHandler(logging.Handler):
- """
- A handler class which writes logging records, in pickle format, to
- a streaming socket. The socket is kept open across logging calls.
- If the peer resets it, an attempt is made to reconnect on the next call.
- The pickle which is sent is that of the LogRecord's attribute dictionary
- (__dict__), so that the receiver does not need to have the logging module
- installed in order to process the logging event.
-
- To unpickle the record at the receiving end into a LogRecord, use the
- makeLogRecord function.
- """
-
- def __init__(self, host, port):
- """
- Initializes the handler with a specific host address and port.
-
- The attribute 'closeOnError' is set to 1 - which means that if
- a socket error occurs, the socket is silently closed and then
- reopened on the next logging call.
- """
- logging.Handler.__init__(self)
- self.host = host
- self.port = port
- self.sock = None
- self.closeOnError = 0
- self.retryTime = None
- #
- # Exponential backoff parameters.
- #
- self.retryStart = 1.0
- self.retryMax = 30.0
- self.retryFactor = 2.0
-
- def makeSocket(self):
- """
- A factory method which allows subclasses to define the precise
- type of socket they want.
- """
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.connect((self.host, self.port))
- return s
-
- def createSocket(self):
- """
- Try to create a socket, using an exponential backoff with
- a max retry time. Thanks to Robert Olson for the original patch
- (SF #815911) which has been slightly refactored.
- """
- now = time.time()
- # Either retryTime is None, in which case this
- # is the first time back after a disconnect, or
- # we've waited long enough.
- if self.retryTime is None:
- attempt = 1
- else:
- attempt = (now >= self.retryTime)
- if attempt:
- try:
- self.sock = self.makeSocket()
- self.retryTime = None # next time, no delay before trying
- except:
- #Creation failed, so set the retry time and return.
- if self.retryTime is None:
- self.retryPeriod = self.retryStart
- else:
- self.retryPeriod = self.retryPeriod * self.retryFactor
- if self.retryPeriod > self.retryMax:
- self.retryPeriod = self.retryMax
- self.retryTime = now + self.retryPeriod
-
- def send(self, s):
- """
- Send a pickled string to the socket.
-
- This function allows for partial sends which can happen when the
- network is busy.
- """
- if self.sock is None:
- self.createSocket()
- #self.sock can be None either because we haven't reached the retry
- #time yet, or because we have reached the retry time and retried,
- #but are still unable to connect.
- if self.sock:
- try:
- if hasattr(self.sock, "sendall"):
- self.sock.sendall(s)
- else:
- sentsofar = 0
- left = len(s)
- while left > 0:
- sent = self.sock.send(s[sentsofar:])
- sentsofar = sentsofar + sent
- left = left - sent
- except socket.error:
- self.sock.close()
- self.sock = None # so we can call createSocket next time
-
- def makePickle(self, record):
- """
- Pickles the record in binary format with a length prefix, and
- returns it ready for transmission across the socket.
- """
- ei = record.exc_info
- if ei:
- dummy = self.format(record) # just to get traceback text into record.exc_text
- record.exc_info = None # to avoid Unpickleable error
- s = cPickle.dumps(record.__dict__, 1)
- if ei:
- record.exc_info = ei # for next handler
- slen = struct.pack(">L", len(s))
- return slen + s
-
- def handleError(self, record):
- """
- Handle an error during logging.
-
- An error has occurred during logging. Most likely cause -
- connection lost. Close the socket so that we can retry on the
- next event.
- """
- if self.closeOnError and self.sock:
- self.sock.close()
- self.sock = None #try to reconnect next time
- else:
- logging.Handler.handleError(self, record)
-
- def emit(self, record):
- """
- Emit a record.
-
- Pickles the record and writes it to the socket in binary format.
- If there is an error with the socket, silently drop the packet.
- If there was a problem with the socket, re-establishes the
- socket.
- """
- try:
- s = self.makePickle(record)
- self.send(s)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
- def close(self):
- """
- Closes the socket.
- """
- if self.sock:
- self.sock.close()
- self.sock = None
- logging.Handler.close(self)
-
-class DatagramHandler(SocketHandler):
- """
- A handler class which writes logging records, in pickle format, to
- a datagram socket. The pickle which is sent is that of the LogRecord's
- attribute dictionary (__dict__), so that the receiver does not need to
- have the logging module installed in order to process the logging event.
-
- To unpickle the record at the receiving end into a LogRecord, use the
- makeLogRecord function.
-
- """
- def __init__(self, host, port):
- """
- Initializes the handler with a specific host address and port.
- """
- SocketHandler.__init__(self, host, port)
- self.closeOnError = 0
-
- def makeSocket(self):
- """
- The factory method of SocketHandler is here overridden to create
- a UDP socket (SOCK_DGRAM).
- """
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- return s
-
- def send(self, s):
- """
- Send a pickled string to a socket.
-
- This function no longer allows for partial sends which can happen
- when the network is busy - UDP does not guarantee delivery and
- can deliver packets out of sequence.
- """
- if self.sock is None:
- self.createSocket()
- self.sock.sendto(s, (self.host, self.port))
-
-class SysLogHandler(logging.Handler):
- """
- A handler class which sends formatted logging records to a syslog
- server. Based on Sam Rushing's syslog module:
- http://www.nightmare.com/squirl/python-ext/misc/syslog.py
- Contributed by Nicolas Untz (after which minor refactoring changes
- have been made).
- """
-
- # from <linux/sys/syslog.h>:
- # ======================================================================
- # priorities/facilities are encoded into a single 32-bit quantity, where
- # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
- # facility (0-big number). Both the priorities and the facilities map
- # roughly one-to-one to strings in the syslogd(8) source code. This
- # mapping is included in this file.
- #
- # priorities (these are ordered)
-
- LOG_EMERG = 0 # system is unusable
- LOG_ALERT = 1 # action must be taken immediately
- LOG_CRIT = 2 # critical conditions
- LOG_ERR = 3 # error conditions
- LOG_WARNING = 4 # warning conditions
- LOG_NOTICE = 5 # normal but significant condition
- LOG_INFO = 6 # informational
- LOG_DEBUG = 7 # debug-level messages
-
- # facility codes
- LOG_KERN = 0 # kernel messages
- LOG_USER = 1 # random user-level messages
- LOG_MAIL = 2 # mail system
- LOG_DAEMON = 3 # system daemons
- LOG_AUTH = 4 # security/authorization messages
- LOG_SYSLOG = 5 # messages generated internally by syslogd
- LOG_LPR = 6 # line printer subsystem
- LOG_NEWS = 7 # network news subsystem
- LOG_UUCP = 8 # UUCP subsystem
- LOG_CRON = 9 # clock daemon
- LOG_AUTHPRIV = 10 # security/authorization messages (private)
-
- # other codes through 15 reserved for system use
- LOG_LOCAL0 = 16 # reserved for local use
- LOG_LOCAL1 = 17 # reserved for local use
- LOG_LOCAL2 = 18 # reserved for local use
- LOG_LOCAL3 = 19 # reserved for local use
- LOG_LOCAL4 = 20 # reserved for local use
- LOG_LOCAL5 = 21 # reserved for local use
- LOG_LOCAL6 = 22 # reserved for local use
- LOG_LOCAL7 = 23 # reserved for local use
-
- priority_names = {
- "alert": LOG_ALERT,
- "crit": LOG_CRIT,
- "critical": LOG_CRIT,
- "debug": LOG_DEBUG,
- "emerg": LOG_EMERG,
- "err": LOG_ERR,
- "error": LOG_ERR, # DEPRECATED
- "info": LOG_INFO,
- "notice": LOG_NOTICE,
- "panic": LOG_EMERG, # DEPRECATED
- "warn": LOG_WARNING, # DEPRECATED
- "warning": LOG_WARNING,
- }
-
- facility_names = {
- "auth": LOG_AUTH,
- "authpriv": LOG_AUTHPRIV,
- "cron": LOG_CRON,
- "daemon": LOG_DAEMON,
- "kern": LOG_KERN,
- "lpr": LOG_LPR,
- "mail": LOG_MAIL,
- "news": LOG_NEWS,
- "security": LOG_AUTH, # DEPRECATED
- "syslog": LOG_SYSLOG,
- "user": LOG_USER,
- "uucp": LOG_UUCP,
- "local0": LOG_LOCAL0,
- "local1": LOG_LOCAL1,
- "local2": LOG_LOCAL2,
- "local3": LOG_LOCAL3,
- "local4": LOG_LOCAL4,
- "local5": LOG_LOCAL5,
- "local6": LOG_LOCAL6,
- "local7": LOG_LOCAL7,
- }
-
- #The map below appears to be trivially lowercasing the key. However,
- #there's more to it than meets the eye - in some locales, lowercasing
- #gives unexpected results. See SF #1524081: in the Turkish locale,
- #"INFO".lower() != "info"
- priority_map = {
- "DEBUG" : "debug",
- "INFO" : "info",
- "WARNING" : "warning",
- "ERROR" : "error",
- "CRITICAL" : "critical"
- }
-
- def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER):
- """
- Initialize a handler.
-
- If address is specified as a string, UNIX socket is used.
- If facility is not specified, LOG_USER is used.
- """
- logging.Handler.__init__(self)
-
- self.address = address
- self.facility = facility
- if type(address) == types.StringType:
- self.unixsocket = 1
- self._connect_unixsocket(address)
- else:
- self.unixsocket = 0
- self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-
- self.formatter = None
-
- def _connect_unixsocket(self, address):
- self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- # syslog may require either DGRAM or STREAM sockets
- try:
- self.socket.connect(address)
- except socket.error:
- self.socket.close()
- self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- self.socket.connect(address)
-
- # curious: when talking to the unix-domain '/dev/log' socket, a
- # zero-terminator seems to be required. this string is placed
- # into a class variable so that it can be overridden if
- # necessary.
- log_format_string = '<%d>%s\000'
-
- def encodePriority(self, facility, priority):
- """
- Encode the facility and priority. You can pass in strings or
- integers - if strings are passed, the facility_names and
- priority_names mapping dictionaries are used to convert them to
- integers.
- """
- if type(facility) == types.StringType:
- facility = self.facility_names[facility]
- if type(priority) == types.StringType:
- priority = self.priority_names[priority]
- return (facility << 3) | priority
-
- def close (self):
- """
- Closes the socket.
- """
- if self.unixsocket:
- self.socket.close()
- logging.Handler.close(self)
-
- def mapPriority(self, levelName):
- """
- Map a logging level name to a key in the priority_names map.
- This is useful in two scenarios: when custom levels are being
- used, and in the case where you can't do a straightforward
- mapping by lowercasing the logging level name because of locale-
- specific issues (see SF #1524081).
- """
- return self.priority_map.get(levelName, "warning")
-
- def emit(self, record):
- """
- Emit a record.
-
- The record is formatted, and then sent to the syslog server. If
- exception information is present, it is NOT sent to the server.
- """
- msg = self.format(record)
- """
- We need to convert record level to lowercase, maybe this will
- change in the future.
- """
- msg = self.log_format_string % (
- self.encodePriority(self.facility,
- self.mapPriority(record.levelname)),
- msg)
- try:
- if self.unixsocket:
- try:
- self.socket.send(msg)
- except socket.error:
- self._connect_unixsocket(self.address)
- self.socket.send(msg)
- else:
- self.socket.sendto(msg, self.address)
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class SMTPHandler(logging.Handler):
- """
- A handler class which sends an SMTP email for each logging event.
- """
- def __init__(self, mailhost, fromaddr, toaddrs, subject):
- """
- Initialize the handler.
-
- Initialize the instance with the from and to addresses and subject
- line of the email. To specify a non-standard SMTP port, use the
- (host, port) tuple format for the mailhost argument.
- """
- logging.Handler.__init__(self)
- if type(mailhost) == types.TupleType:
- host, port = mailhost
- self.mailhost = host
- self.mailport = port
- else:
- self.mailhost = mailhost
- self.mailport = None
- self.fromaddr = fromaddr
- if type(toaddrs) == types.StringType:
- toaddrs = [toaddrs]
- self.toaddrs = toaddrs
- self.subject = subject
-
- def getSubject(self, record):
- """
- Determine the subject for the email.
-
- If you want to specify a subject line which is record-dependent,
- override this method.
- """
- return self.subject
-
- weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
-
- monthname = [None,
- 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
- 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-
- def date_time(self):
- """
- Return the current date and time formatted for a MIME header.
- Needed for Python 1.5.2 (no email package available)
- """
- year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())
- s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
- self.weekdayname[wd],
- day, self.monthname[month], year,
- hh, mm, ss)
- return s
-
- def emit(self, record):
- """
- Emit a record.
-
- Format the record and send it to the specified addressees.
- """
- try:
- import smtplib
- try:
- from email.Utils import formatdate
- except:
- formatdate = self.date_time
- port = self.mailport
- if not port:
- port = smtplib.SMTP_PORT
- smtp = smtplib.SMTP(self.mailhost, port)
- msg = self.format(record)
- msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
- self.fromaddr,
- string.join(self.toaddrs, ","),
- self.getSubject(record),
- formatdate(), msg)
- smtp.sendmail(self.fromaddr, self.toaddrs, msg)
- smtp.quit()
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class NTEventLogHandler(logging.Handler):
- """
- A handler class which sends events to the NT Event Log. Adds a
- registry entry for the specified application name. If no dllname is
- provided, win32service.pyd (which contains some basic message
- placeholders) is used. Note that use of these placeholders will make
- your event logs big, as the entire message source is held in the log.
- If you want slimmer logs, you have to pass in the name of your own DLL
- which contains the message definitions you want to use in the event log.
- """
- def __init__(self, appname, dllname=None, logtype="Application"):
- logging.Handler.__init__(self)
- try:
- import win32evtlogutil, win32evtlog
- self.appname = appname
- self._welu = win32evtlogutil
- if not dllname:
- dllname = os.path.split(self._welu.__file__)
- dllname = os.path.split(dllname[0])
- dllname = os.path.join(dllname[0], r'win32service.pyd')
- self.dllname = dllname
- self.logtype = logtype
- self._welu.AddSourceToRegistry(appname, dllname, logtype)
- self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
- self.typemap = {
- logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
- logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
- logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
- logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
- logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
- }
- except ImportError:
- print "The Python Win32 extensions for NT (service, event "\
- "logging) appear not to be available."
- self._welu = None
-
- def getMessageID(self, record):
- """
- Return the message ID for the event record. If you are using your
- own messages, you could do this by having the msg passed to the
- logger being an ID rather than a formatting string. Then, in here,
- you could use a dictionary lookup to get the message ID. This
- version returns 1, which is the base message ID in win32service.pyd.
- """
- return 1
-
- def getEventCategory(self, record):
- """
- Return the event category for the record.
-
- Override this if you want to specify your own categories. This version
- returns 0.
- """
- return 0
-
- def getEventType(self, record):
- """
- Return the event type for the record.
-
- Override this if you want to specify your own types. This version does
- a mapping using the handler's typemap attribute, which is set up in
- __init__() to a dictionary which contains mappings for DEBUG, INFO,
- WARNING, ERROR and CRITICAL. If you are using your own levels you will
- either need to override this method or place a suitable dictionary in
- the handler's typemap attribute.
- """
- return self.typemap.get(record.levelno, self.deftype)
-
- def emit(self, record):
- """
- Emit a record.
-
- Determine the message ID, event category and event type. Then
- log the message in the NT event log.
- """
- if self._welu:
- try:
- id = self.getMessageID(record)
- cat = self.getEventCategory(record)
- type = self.getEventType(record)
- msg = self.format(record)
- self._welu.ReportEvent(self.appname, id, cat, type, [msg])
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
- def close(self):
- """
- Clean up this handler.
-
- You can remove the application name from the registry as a
- source of event log entries. However, if you do this, you will
- not be able to see the events as you intended in the Event Log
- Viewer - it needs to be able to access the registry to get the
- DLL name.
- """
- #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
- logging.Handler.close(self)
-
-class HTTPHandler(logging.Handler):
- """
- A class which sends records to a Web server, using either GET or
- POST semantics.
- """
- def __init__(self, host, url, method="GET"):
- """
- Initialize the instance with the host, the request URL, and the method
- ("GET" or "POST")
- """
- logging.Handler.__init__(self)
- method = string.upper(method)
- if method not in ["GET", "POST"]:
- raise ValueError, "method must be GET or POST"
- self.host = host
- self.url = url
- self.method = method
-
- def mapLogRecord(self, record):
- """
- Default implementation of mapping the log record into a dict
- that is sent as the CGI data. Overwrite in your class.
- Contributed by Franz Glasner.
- """
- return record.__dict__
-
- def emit(self, record):
- """
- Emit a record.
-
- Send the record to the Web server as an URL-encoded dictionary
- """
- try:
- import httplib, urllib
- host = self.host
- h = httplib.HTTP(host)
- url = self.url
- data = urllib.urlencode(self.mapLogRecord(record))
- if self.method == "GET":
- if (string.find(url, '?') >= 0):
- sep = '&'
- else:
- sep = '?'
- url = url + "%c%s" % (sep, data)
- h.putrequest(self.method, url)
- # support multiple hosts on one IP address...
- # need to strip optional :port from host, if present
- i = string.find(host, ":")
- if i >= 0:
- host = host[:i]
- h.putheader("Host", host)
- if self.method == "POST":
- h.putheader("Content-type",
- "application/x-www-form-urlencoded")
- h.putheader("Content-length", str(len(data)))
- h.endheaders()
- if self.method == "POST":
- h.send(data)
- h.getreply() #can't do anything with the result
- except (KeyboardInterrupt, SystemExit):
- raise
- except:
- self.handleError(record)
-
-class BufferingHandler(logging.Handler):
- """
- A handler class which buffers logging records in memory. Whenever each
- record is added to the buffer, a check is made to see if the buffer should
- be flushed. If it should, then flush() is expected to do what's needed.
- """
- def __init__(self, capacity):
- """
- Initialize the handler with the buffer size.
- """
- logging.Handler.__init__(self)
- self.capacity = capacity
- self.buffer = []
-
- def shouldFlush(self, record):
- """
- Should the handler flush its buffer?
-
- Returns true if the buffer is up to capacity. This method can be
- overridden to implement custom flushing strategies.
- """
- return (len(self.buffer) >= self.capacity)
-
- def emit(self, record):
- """
- Emit a record.
-
- Append the record. If shouldFlush() tells us to, call flush() to process
- the buffer.
- """
- self.buffer.append(record)
- if self.shouldFlush(record):
- self.flush()
-
- def flush(self):
- """
- Override to implement custom flushing behaviour.
-
- This version just zaps the buffer to empty.
- """
- self.buffer = []
-
- def close(self):
- """
- Close the handler.
-
- This version just flushes and chains to the parent class' close().
- """
- self.flush()
- logging.Handler.close(self)
-
-class MemoryHandler(BufferingHandler):
- """
- A handler class which buffers logging records in memory, periodically
- flushing them to a target handler. Flushing occurs whenever the buffer
- is full, or when an event of a certain severity or greater is seen.
- """
- def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
- """
- Initialize the handler with the buffer size, the level at which
- flushing should occur and an optional target.
-
- Note that without a target being set either here or via setTarget(),
- a MemoryHandler is no use to anyone!
- """
- BufferingHandler.__init__(self, capacity)
- self.flushLevel = flushLevel
- self.target = target
-
- def shouldFlush(self, record):
- """
- Check for buffer full or a record at the flushLevel or higher.
- """
- return (len(self.buffer) >= self.capacity) or \
- (record.levelno >= self.flushLevel)
-
- def setTarget(self, target):
- """
- Set the target handler for this handler.
- """
- self.target = target
-
- def flush(self):
- """
- For a MemoryHandler, flushing means just sending the buffered
- records to the target, if there is one. Override if you want
- different behaviour.
- """
- if self.target:
- for record in self.buffer:
- self.target.handle(record)
- self.buffer = []
-
- def close(self):
- """
- Flush, set the target to None and lose the buffer.
- """
- self.flush()
- self.target = None
- BufferingHandler.close(self)
diff --git a/sys/lib/python/macpath.py b/sys/lib/python/macpath.py
deleted file mode 100644
index f93ceb154..000000000
--- a/sys/lib/python/macpath.py
+++ /dev/null
@@ -1,275 +0,0 @@
-"""Pathname and path-related operations for the Macintosh."""
-
-import os
-from stat import *
-
-__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
- "basename","dirname","commonprefix","getsize","getmtime",
- "getatime","getctime", "islink","exists","lexists","isdir","isfile",
- "walk","expanduser","expandvars","normpath","abspath",
- "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
- "devnull","realpath","supports_unicode_filenames"]
-
-# strings representing various path-related bits and pieces
-curdir = ':'
-pardir = '::'
-extsep = '.'
-sep = ':'
-pathsep = '\n'
-defpath = ':'
-altsep = None
-devnull = 'Dev:Null'
-
-# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
-
-def normcase(path):
- return path.lower()
-
-
-def isabs(s):
- """Return true if a path is absolute.
- On the Mac, relative paths begin with a colon,
- but as a special case, paths with no colons at all are also relative.
- Anything else is absolute (the string up to the first colon is the
- volume name)."""
-
- return ':' in s and s[0] != ':'
-
-
-def join(s, *p):
- path = s
- for t in p:
- if (not s) or isabs(t):
- path = t
- continue
- if t[:1] == ':':
- t = t[1:]
- if ':' not in path:
- path = ':' + path
- if path[-1:] != ':':
- path = path + ':'
- path = path + t
- return path
-
-
-def split(s):
- """Split a pathname into two parts: the directory leading up to the final
- bit, and the basename (the filename, without colons, in that directory).
- The result (s, t) is such that join(s, t) yields the original argument."""
-
- if ':' not in s: return '', s
- colon = 0
- for i in range(len(s)):
- if s[i] == ':': colon = i + 1
- path, file = s[:colon-1], s[colon:]
- if path and not ':' in path:
- path = path + ':'
- return path, file
-
-
-def splitext(p):
- """Split a path into root and extension.
- The extension is everything starting at the last dot in the last
- pathname component; the root is everything before that.
- It is always true that root + ext == p."""
-
- i = p.rfind('.')
- if i<=p.rfind(':'):
- return p, ''
- else:
- return p[:i], p[i:]
-
-
-def splitdrive(p):
- """Split a pathname into a drive specification and the rest of the
- path. Useful on DOS/Windows/NT; on the Mac, the drive is always
- empty (don't use the volume name -- it doesn't have the same
- syntactic and semantic oddities as DOS drive letters, such as there
- being a separate current directory per drive)."""
-
- return '', p
-
-
-# Short interfaces to split()
-
-def dirname(s): return split(s)[0]
-def basename(s): return split(s)[1]
-
-def ismount(s):
- if not isabs(s):
- return False
- components = split(s)
- return len(components) == 2 and components[1] == ''
-
-def isdir(s):
- """Return true if the pathname refers to an existing directory."""
-
- try:
- st = os.stat(s)
- except os.error:
- return 0
- return S_ISDIR(st.st_mode)
-
-
-# Get size, mtime, atime of files.
-
-def getsize(filename):
- """Return the size of a file, reported by os.stat()."""
- return os.stat(filename).st_size
-
-def getmtime(filename):
- """Return the last modification time of a file, reported by os.stat()."""
- return os.stat(filename).st_mtime
-
-def getatime(filename):
- """Return the last access time of a file, reported by os.stat()."""
- return os.stat(filename).st_atime
-
-
-def islink(s):
- """Return true if the pathname refers to a symbolic link."""
-
- try:
- import Carbon.File
- return Carbon.File.ResolveAliasFile(s, 0)[2]
- except:
- return False
-
-
-def isfile(s):
- """Return true if the pathname refers to an existing regular file."""
-
- try:
- st = os.stat(s)
- except os.error:
- return False
- return S_ISREG(st.st_mode)
-
-def getctime(filename):
- """Return the creation time of a file, reported by os.stat()."""
- return os.stat(filename).st_ctime
-
-def exists(s):
- """Test whether a path exists. Returns False for broken symbolic links"""
-
- try:
- st = os.stat(s)
- except os.error:
- return False
- return True
-
-# Is `stat`/`lstat` a meaningful difference on the Mac? This is safe in any
-# case.
-
-def lexists(path):
- """Test whether a path exists. Returns True for broken symbolic links"""
-
- try:
- st = os.lstat(path)
- except os.error:
- return False
- return True
-
-# Return the longest prefix of all list elements.
-
-def commonprefix(m):
- "Given a list of pathnames, returns the longest common leading component"
- if not m: return ''
- s1 = min(m)
- s2 = max(m)
- n = min(len(s1), len(s2))
- for i in xrange(n):
- if s1[i] != s2[i]:
- return s1[:i]
- return s1[:n]
-
-
-def expandvars(path):
- """Dummy to retain interface-compatibility with other operating systems."""
- return path
-
-
-def expanduser(path):
- """Dummy to retain interface-compatibility with other operating systems."""
- return path
-
-class norm_error(Exception):
- """Path cannot be normalized"""
-
-def normpath(s):
- """Normalize a pathname. Will return the same result for
- equivalent paths."""
-
- if ":" not in s:
- return ":"+s
-
- comps = s.split(":")
- i = 1
- while i < len(comps)-1:
- if comps[i] == "" and comps[i-1] != "":
- if i > 1:
- del comps[i-1:i+1]
- i = i - 1
- else:
- # best way to handle this is to raise an exception
- raise norm_error, 'Cannot use :: immediately after volume name'
- else:
- i = i + 1
-
- s = ":".join(comps)
-
- # remove trailing ":" except for ":" and "Volume:"
- if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
- s = s[:-1]
- return s
-
-
-def walk(top, func, arg):
- """Directory tree walk with callback function.
-
- For each directory in the directory tree rooted at top (including top
- itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
- dirname is the name of the directory, and fnames a list of the names of
- the files and subdirectories in dirname (excluding '.' and '..'). func
- may modify the fnames list in-place (e.g. via del or slice assignment),
- and walk will only recurse into the subdirectories whose names remain in
- fnames; this can be used to implement a filter, or to impose a specific
- order of visiting. No semantics are defined for, or required of, arg,
- beyond that arg is always passed to func. It can be used, e.g., to pass
- a filename pattern, or a mutable object designed to accumulate
- statistics. Passing None for arg is common."""
-
- try:
- names = os.listdir(top)
- except os.error:
- return
- func(arg, top, names)
- for name in names:
- name = join(top, name)
- if isdir(name) and not islink(name):
- walk(name, func, arg)
-
-
-def abspath(path):
- """Return an absolute path."""
- if not isabs(path):
- path = join(os.getcwd(), path)
- return normpath(path)
-
-# realpath is a no-op on systems without islink support
-def realpath(path):
- path = abspath(path)
- try:
- import Carbon.File
- except ImportError:
- return path
- if not path:
- return path
- components = path.split(':')
- path = components[0] + ':'
- for c in components[1:]:
- path = join(path, c)
- path = Carbon.File.FSResolveAliasFile(path, 1)[0].as_pathname()
- return path
-
-supports_unicode_filenames = False
diff --git a/sys/lib/python/macurl2path.py b/sys/lib/python/macurl2path.py
deleted file mode 100644
index 4c5ae6457..000000000
--- a/sys/lib/python/macurl2path.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""Macintosh-specific module for conversion between pathnames and URLs.
-
-Do not import directly; use urllib instead."""
-
-import urllib
-import os
-
-__all__ = ["url2pathname","pathname2url"]
-
-def url2pathname(pathname):
- """OS-specific conversion from a relative URL of the 'file' scheme
- to a file system path; not recommended for general use."""
- #
- # XXXX The .. handling should be fixed...
- #
- tp = urllib.splittype(pathname)[0]
- if tp and tp != 'file':
- raise RuntimeError, 'Cannot convert non-local URL to pathname'
- # Turn starting /// into /, an empty hostname means current host
- if pathname[:3] == '///':
- pathname = pathname[2:]
- elif pathname[:2] == '//':
- raise RuntimeError, 'Cannot convert non-local URL to pathname'
- components = pathname.split('/')
- # Remove . and embedded ..
- i = 0
- while i < len(components):
- if components[i] == '.':
- del components[i]
- elif components[i] == '..' and i > 0 and \
- components[i-1] not in ('', '..'):
- del components[i-1:i+1]
- i = i-1
- elif components[i] == '' and i > 0 and components[i-1] != '':
- del components[i]
- else:
- i = i+1
- if not components[0]:
- # Absolute unix path, don't start with colon
- rv = ':'.join(components[1:])
- else:
- # relative unix path, start with colon. First replace
- # leading .. by empty strings (giving ::file)
- i = 0
- while i < len(components) and components[i] == '..':
- components[i] = ''
- i = i + 1
- rv = ':' + ':'.join(components)
- # and finally unquote slashes and other funny characters
- return urllib.unquote(rv)
-
-def pathname2url(pathname):
- """OS-specific conversion from a file system path to a relative URL
- of the 'file' scheme; not recommended for general use."""
- if '/' in pathname:
- raise RuntimeError, "Cannot convert pathname containing slashes"
- components = pathname.split(':')
- # Remove empty first and/or last component
- if components[0] == '':
- del components[0]
- if components[-1] == '':
- del components[-1]
- # Replace empty string ('::') by .. (will result in '/../' later)
- for i in range(len(components)):
- if components[i] == '':
- components[i] = '..'
- # Truncate names longer than 31 bytes
- components = map(_pncomp2url, components)
-
- if os.path.isabs(pathname):
- return '/' + '/'.join(components)
- else:
- return '/'.join(components)
-
-def _pncomp2url(component):
- component = urllib.quote(component[:31], safe='') # We want to quote slashes
- return component
-
-def test():
- for url in ["index.html",
- "bar/index.html",
- "/foo/bar/index.html",
- "/foo/bar/",
- "/"]:
- print '%r -> %r' % (url, url2pathname(url))
- for path in ["drive:",
- "drive:dir:",
- "drive:dir:file",
- "drive:file",
- "file",
- ":file",
- ":dir:",
- ":dir:file"]:
- print '%r -> %r' % (path, pathname2url(path))
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/mailbox.py b/sys/lib/python/mailbox.py
deleted file mode 100755
index 3e5d0b4e1..000000000
--- a/sys/lib/python/mailbox.py
+++ /dev/null
@@ -1,2090 +0,0 @@
-#! /usr/bin/env python
-
-"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
-
-# Notes for authors of new mailbox subclasses:
-#
-# Remember to fsync() changes to disk before closing a modified file
-# or returning from a flush() method. See functions _sync_flush() and
-# _sync_close().
-
-import sys
-import os
-import time
-import calendar
-import socket
-import errno
-import copy
-import email
-import email.Message
-import email.Generator
-import rfc822
-import StringIO
-try:
- if sys.platform == 'os2emx':
- # OS/2 EMX fcntl() not adequate
- raise ImportError
- import fcntl
-except ImportError:
- fcntl = None
-
-__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
- 'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
- 'BabylMessage', 'MMDFMessage', 'UnixMailbox',
- 'PortableUnixMailbox', 'MmdfMailbox', 'MHMailbox', 'BabylMailbox' ]
-
-class Mailbox:
- """A group of messages in a particular place."""
-
- def __init__(self, path, factory=None, create=True):
- """Initialize a Mailbox instance."""
- self._path = os.path.abspath(os.path.expanduser(path))
- self._factory = factory
-
- def add(self, message):
- """Add message and return assigned key."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def remove(self, key):
- """Remove the keyed message; raise KeyError if it doesn't exist."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def __delitem__(self, key):
- self.remove(key)
-
- def discard(self, key):
- """If the keyed message exists, remove it."""
- try:
- self.remove(key)
- except KeyError:
- pass
-
- def __setitem__(self, key, message):
- """Replace the keyed message; raise KeyError if it doesn't exist."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def get(self, key, default=None):
- """Return the keyed message, or default if it doesn't exist."""
- try:
- return self.__getitem__(key)
- except KeyError:
- return default
-
- def __getitem__(self, key):
- """Return the keyed message; raise KeyError if it doesn't exist."""
- if not self._factory:
- return self.get_message(key)
- else:
- return self._factory(self.get_file(key))
-
- def get_message(self, key):
- """Return a Message representation or raise a KeyError."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def get_string(self, key):
- """Return a string representation or raise a KeyError."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def get_file(self, key):
- """Return a file-like representation or raise a KeyError."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def iterkeys(self):
- """Return an iterator over keys."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def keys(self):
- """Return a list of keys."""
- return list(self.iterkeys())
-
- def itervalues(self):
- """Return an iterator over all messages."""
- for key in self.iterkeys():
- try:
- value = self[key]
- except KeyError:
- continue
- yield value
-
- def __iter__(self):
- return self.itervalues()
-
- def values(self):
- """Return a list of messages. Memory intensive."""
- return list(self.itervalues())
-
- def iteritems(self):
- """Return an iterator over (key, message) tuples."""
- for key in self.iterkeys():
- try:
- value = self[key]
- except KeyError:
- continue
- yield (key, value)
-
- def items(self):
- """Return a list of (key, message) tuples. Memory intensive."""
- return list(self.iteritems())
-
- def has_key(self, key):
- """Return True if the keyed message exists, False otherwise."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def __contains__(self, key):
- return self.has_key(key)
-
- def __len__(self):
- """Return a count of messages in the mailbox."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def clear(self):
- """Delete all messages."""
- for key in self.iterkeys():
- self.discard(key)
-
- def pop(self, key, default=None):
- """Delete the keyed message and return it, or default."""
- try:
- result = self[key]
- except KeyError:
- return default
- self.discard(key)
- return result
-
- def popitem(self):
- """Delete an arbitrary (key, message) pair and return it."""
- for key in self.iterkeys():
- return (key, self.pop(key)) # This is only run once.
- else:
- raise KeyError('No messages in mailbox')
-
- def update(self, arg=None):
- """Change the messages that correspond to certain keys."""
- if hasattr(arg, 'iteritems'):
- source = arg.iteritems()
- elif hasattr(arg, 'items'):
- source = arg.items()
- else:
- source = arg
- bad_key = False
- for key, message in source:
- try:
- self[key] = message
- except KeyError:
- bad_key = True
- if bad_key:
- raise KeyError('No message with key(s)')
-
- def flush(self):
- """Write any pending changes to the disk."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def lock(self):
- """Lock the mailbox."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def unlock(self):
- """Unlock the mailbox if it is locked."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def close(self):
- """Flush and close the mailbox."""
- raise NotImplementedError('Method must be implemented by subclass')
-
- def _dump_message(self, message, target, mangle_from_=False):
- # Most files are opened in binary mode to allow predictable seeking.
- # To get native line endings on disk, the user-friendly \n line endings
- # used in strings and by email.Message are translated here.
- """Dump message contents to target file."""
- if isinstance(message, email.Message.Message):
- buffer = StringIO.StringIO()
- gen = email.Generator.Generator(buffer, mangle_from_, 0)
- gen.flatten(message)
- buffer.seek(0)
- target.write(buffer.read().replace('\n', os.linesep))
- elif isinstance(message, str):
- if mangle_from_:
- message = message.replace('\nFrom ', '\n>From ')
- message = message.replace('\n', os.linesep)
- target.write(message)
- elif hasattr(message, 'read'):
- while True:
- line = message.readline()
- if line == '':
- break
- if mangle_from_ and line.startswith('From '):
- line = '>From ' + line[5:]
- line = line.replace('\n', os.linesep)
- target.write(line)
- else:
- raise TypeError('Invalid message type: %s' % type(message))
-
-
-class Maildir(Mailbox):
- """A qmail-style Maildir mailbox."""
-
- colon = ':'
-
- def __init__(self, dirname, factory=rfc822.Message, create=True):
- """Initialize a Maildir instance."""
- Mailbox.__init__(self, dirname, factory, create)
- if not os.path.exists(self._path):
- if create:
- os.mkdir(self._path, 0700)
- os.mkdir(os.path.join(self._path, 'tmp'), 0700)
- os.mkdir(os.path.join(self._path, 'new'), 0700)
- os.mkdir(os.path.join(self._path, 'cur'), 0700)
- else:
- raise NoSuchMailboxError(self._path)
- self._toc = {}
-
- def add(self, message):
- """Add message and return assigned key."""
- tmp_file = self._create_tmp()
- try:
- self._dump_message(message, tmp_file)
- finally:
- _sync_close(tmp_file)
- if isinstance(message, MaildirMessage):
- subdir = message.get_subdir()
- suffix = self.colon + message.get_info()
- if suffix == self.colon:
- suffix = ''
- else:
- subdir = 'new'
- suffix = ''
- uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
- dest = os.path.join(self._path, subdir, uniq + suffix)
- try:
- if hasattr(os, 'link'):
- os.link(tmp_file.name, dest)
- os.remove(tmp_file.name)
- else:
- os.rename(tmp_file.name, dest)
- except OSError, e:
- os.remove(tmp_file.name)
- if e.errno == errno.EEXIST:
- raise ExternalClashError('Name clash with existing message: %s'
- % dest)
- else:
- raise
- if isinstance(message, MaildirMessage):
- os.utime(dest, (os.path.getatime(dest), message.get_date()))
- return uniq
-
- def remove(self, key):
- """Remove the keyed message; raise KeyError if it doesn't exist."""
- os.remove(os.path.join(self._path, self._lookup(key)))
-
- def discard(self, key):
- """If the keyed message exists, remove it."""
- # This overrides an inapplicable implementation in the superclass.
- try:
- self.remove(key)
- except KeyError:
- pass
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
-
- def __setitem__(self, key, message):
- """Replace the keyed message; raise KeyError if it doesn't exist."""
- old_subpath = self._lookup(key)
- temp_key = self.add(message)
- temp_subpath = self._lookup(temp_key)
- if isinstance(message, MaildirMessage):
- # temp's subdir and suffix were specified by message.
- dominant_subpath = temp_subpath
- else:
- # temp's subdir and suffix were defaults from add().
- dominant_subpath = old_subpath
- subdir = os.path.dirname(dominant_subpath)
- if self.colon in dominant_subpath:
- suffix = self.colon + dominant_subpath.split(self.colon)[-1]
- else:
- suffix = ''
- self.discard(key)
- new_path = os.path.join(self._path, subdir, key + suffix)
- os.rename(os.path.join(self._path, temp_subpath), new_path)
- if isinstance(message, MaildirMessage):
- os.utime(new_path, (os.path.getatime(new_path),
- message.get_date()))
-
- def get_message(self, key):
- """Return a Message representation or raise a KeyError."""
- subpath = self._lookup(key)
- f = open(os.path.join(self._path, subpath), 'r')
- try:
- msg = MaildirMessage(f)
- finally:
- f.close()
- subdir, name = os.path.split(subpath)
- msg.set_subdir(subdir)
- if self.colon in name:
- msg.set_info(name.split(self.colon)[-1])
- msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
- return msg
-
- def get_string(self, key):
- """Return a string representation or raise a KeyError."""
- f = open(os.path.join(self._path, self._lookup(key)), 'r')
- try:
- return f.read()
- finally:
- f.close()
-
- def get_file(self, key):
- """Return a file-like representation or raise a KeyError."""
- f = open(os.path.join(self._path, self._lookup(key)), 'rb')
- return _ProxyFile(f)
-
- def iterkeys(self):
- """Return an iterator over keys."""
- self._refresh()
- for key in self._toc:
- try:
- self._lookup(key)
- except KeyError:
- continue
- yield key
-
- def has_key(self, key):
- """Return True if the keyed message exists, False otherwise."""
- self._refresh()
- return key in self._toc
-
- def __len__(self):
- """Return a count of messages in the mailbox."""
- self._refresh()
- return len(self._toc)
-
- def flush(self):
- """Write any pending changes to disk."""
- return # Maildir changes are always written immediately.
-
- def lock(self):
- """Lock the mailbox."""
- return
-
- def unlock(self):
- """Unlock the mailbox if it is locked."""
- return
-
- def close(self):
- """Flush and close the mailbox."""
- return
-
- def list_folders(self):
- """Return a list of folder names."""
- result = []
- for entry in os.listdir(self._path):
- if len(entry) > 1 and entry[0] == '.' and \
- os.path.isdir(os.path.join(self._path, entry)):
- result.append(entry[1:])
- return result
-
- def get_folder(self, folder):
- """Return a Maildir instance for the named folder."""
- return Maildir(os.path.join(self._path, '.' + folder),
- factory=self._factory,
- create=False)
-
- def add_folder(self, folder):
- """Create a folder and return a Maildir instance representing it."""
- path = os.path.join(self._path, '.' + folder)
- result = Maildir(path, factory=self._factory)
- maildirfolder_path = os.path.join(path, 'maildirfolder')
- if not os.path.exists(maildirfolder_path):
- os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY))
- return result
-
- def remove_folder(self, folder):
- """Delete the named folder, which must be empty."""
- path = os.path.join(self._path, '.' + folder)
- for entry in os.listdir(os.path.join(path, 'new')) + \
- os.listdir(os.path.join(path, 'cur')):
- if len(entry) < 1 or entry[0] != '.':
- raise NotEmptyError('Folder contains message(s): %s' % folder)
- for entry in os.listdir(path):
- if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
- os.path.isdir(os.path.join(path, entry)):
- raise NotEmptyError("Folder contains subdirectory '%s': %s" %
- (folder, entry))
- for root, dirs, files in os.walk(path, topdown=False):
- for entry in files:
- os.remove(os.path.join(root, entry))
- for entry in dirs:
- os.rmdir(os.path.join(root, entry))
- os.rmdir(path)
-
- def clean(self):
- """Delete old files in "tmp"."""
- now = time.time()
- for entry in os.listdir(os.path.join(self._path, 'tmp')):
- path = os.path.join(self._path, 'tmp', entry)
- if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
- os.remove(path)
-
- _count = 1 # This is used to generate unique file names.
-
- def _create_tmp(self):
- """Create a file in the tmp subdirectory and open and return it."""
- now = time.time()
- hostname = socket.gethostname()
- if '/' in hostname:
- hostname = hostname.replace('/', r'\057')
- if ':' in hostname:
- hostname = hostname.replace(':', r'\072')
- uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
- Maildir._count, hostname)
- path = os.path.join(self._path, 'tmp', uniq)
- try:
- os.stat(path)
- except OSError, e:
- if e.errno == errno.ENOENT:
- Maildir._count += 1
- try:
- return _create_carefully(path)
- except OSError, e:
- if e.errno != errno.EEXIST:
- raise
- else:
- raise
-
- # Fall through to here if stat succeeded or open raised EEXIST.
- raise ExternalClashError('Name clash prevented file creation: %s' %
- path)
-
- def _refresh(self):
- """Update table of contents mapping."""
- self._toc = {}
- for subdir in ('new', 'cur'):
- for entry in os.listdir(os.path.join(self._path, subdir)):
- uniq = entry.split(self.colon)[0]
- self._toc[uniq] = os.path.join(subdir, entry)
-
- def _lookup(self, key):
- """Use TOC to return subpath for given key, or raise a KeyError."""
- try:
- if os.path.exists(os.path.join(self._path, self._toc[key])):
- return self._toc[key]
- except KeyError:
- pass
- self._refresh()
- try:
- return self._toc[key]
- except KeyError:
- raise KeyError('No message with key: %s' % key)
-
- # This method is for backward compatibility only.
- def next(self):
- """Return the next message in a one-time iteration."""
- if not hasattr(self, '_onetime_keys'):
- self._onetime_keys = self.iterkeys()
- while True:
- try:
- return self[self._onetime_keys.next()]
- except StopIteration:
- return None
- except KeyError:
- continue
-
-
-class _singlefileMailbox(Mailbox):
- """A single-file mailbox."""
-
- def __init__(self, path, factory=None, create=True):
- """Initialize a single-file mailbox."""
- Mailbox.__init__(self, path, factory, create)
- try:
- f = open(self._path, 'rb+')
- except IOError, e:
- if e.errno == errno.ENOENT:
- if create:
- f = open(self._path, 'wb+')
- else:
- raise NoSuchMailboxError(self._path)
- elif e.errno == errno.EACCES:
- f = open(self._path, 'rb')
- else:
- raise
- self._file = f
- self._toc = None
- self._next_key = 0
- self._pending = False # No changes require rewriting the file.
- self._locked = False
-
- def add(self, message):
- """Add message and return assigned key."""
- self._lookup()
- self._toc[self._next_key] = self._append_message(message)
- self._next_key += 1
- self._pending = True
- return self._next_key - 1
-
- def remove(self, key):
- """Remove the keyed message; raise KeyError if it doesn't exist."""
- self._lookup(key)
- del self._toc[key]
- self._pending = True
-
- def __setitem__(self, key, message):
- """Replace the keyed message; raise KeyError if it doesn't exist."""
- self._lookup(key)
- self._toc[key] = self._append_message(message)
- self._pending = True
-
- def iterkeys(self):
- """Return an iterator over keys."""
- self._lookup()
- for key in self._toc.keys():
- yield key
-
- def has_key(self, key):
- """Return True if the keyed message exists, False otherwise."""
- self._lookup()
- return key in self._toc
-
- def __len__(self):
- """Return a count of messages in the mailbox."""
- self._lookup()
- return len(self._toc)
-
- def lock(self):
- """Lock the mailbox."""
- if not self._locked:
- _lock_file(self._file)
- self._locked = True
-
- def unlock(self):
- """Unlock the mailbox if it is locked."""
- if self._locked:
- _unlock_file(self._file)
- self._locked = False
-
- def flush(self):
- """Write any pending changes to disk."""
- if not self._pending:
- return
- self._lookup()
- new_file = _create_temporary(self._path)
- try:
- new_toc = {}
- self._pre_mailbox_hook(new_file)
- for key in sorted(self._toc.keys()):
- start, stop = self._toc[key]
- self._file.seek(start)
- self._pre_message_hook(new_file)
- new_start = new_file.tell()
- while True:
- buffer = self._file.read(min(4096,
- stop - self._file.tell()))
- if buffer == '':
- break
- new_file.write(buffer)
- new_toc[key] = (new_start, new_file.tell())
- self._post_message_hook(new_file)
- except:
- new_file.close()
- os.remove(new_file.name)
- raise
- _sync_close(new_file)
- # self._file is about to get replaced, so no need to sync.
- self._file.close()
- try:
- os.rename(new_file.name, self._path)
- except OSError, e:
- if e.errno == errno.EEXIST or \
- (os.name == 'os2' and e.errno == errno.EACCES):
- os.remove(self._path)
- os.rename(new_file.name, self._path)
- else:
- raise
- self._file = open(self._path, 'rb+')
- self._toc = new_toc
- self._pending = False
- if self._locked:
- _lock_file(self._file, dotlock=False)
-
- def _pre_mailbox_hook(self, f):
- """Called before writing the mailbox to file f."""
- return
-
- def _pre_message_hook(self, f):
- """Called before writing each message to file f."""
- return
-
- def _post_message_hook(self, f):
- """Called after writing each message to file f."""
- return
-
- def close(self):
- """Flush and close the mailbox."""
- self.flush()
- if self._locked:
- self.unlock()
- self._file.close() # Sync has been done by self.flush() above.
-
- def _lookup(self, key=None):
- """Return (start, stop) or raise KeyError."""
- if self._toc is None:
- self._generate_toc()
- if key is not None:
- try:
- return self._toc[key]
- except KeyError:
- raise KeyError('No message with key: %s' % key)
-
- def _append_message(self, message):
- """Append message to mailbox and return (start, stop) offsets."""
- self._file.seek(0, 2)
- self._pre_message_hook(self._file)
- offsets = self._install_message(message)
- self._post_message_hook(self._file)
- self._file.flush()
- return offsets
-
-
-
-class _mboxMMDF(_singlefileMailbox):
- """An mbox or MMDF mailbox."""
-
- _mangle_from_ = True
-
- def get_message(self, key):
- """Return a Message representation or raise a KeyError."""
- start, stop = self._lookup(key)
- self._file.seek(start)
- from_line = self._file.readline().replace(os.linesep, '')
- string = self._file.read(stop - self._file.tell())
- msg = self._message_factory(string.replace(os.linesep, '\n'))
- msg.set_from(from_line[5:])
- return msg
-
- def get_string(self, key, from_=False):
- """Return a string representation or raise a KeyError."""
- start, stop = self._lookup(key)
- self._file.seek(start)
- if not from_:
- self._file.readline()
- string = self._file.read(stop - self._file.tell())
- return string.replace(os.linesep, '\n')
-
- def get_file(self, key, from_=False):
- """Return a file-like representation or raise a KeyError."""
- start, stop = self._lookup(key)
- self._file.seek(start)
- if not from_:
- self._file.readline()
- return _PartialFile(self._file, self._file.tell(), stop)
-
- def _install_message(self, message):
- """Format a message and blindly write to self._file."""
- from_line = None
- if isinstance(message, str) and message.startswith('From '):
- newline = message.find('\n')
- if newline != -1:
- from_line = message[:newline]
- message = message[newline + 1:]
- else:
- from_line = message
- message = ''
- elif isinstance(message, _mboxMMDFMessage):
- from_line = 'From ' + message.get_from()
- elif isinstance(message, email.Message.Message):
- from_line = message.get_unixfrom() # May be None.
- if from_line is None:
- from_line = 'From MAILER-DAEMON %s' % time.asctime(time.gmtime())
- start = self._file.tell()
- self._file.write(from_line + os.linesep)
- self._dump_message(message, self._file, self._mangle_from_)
- stop = self._file.tell()
- return (start, stop)
-
-
-class mbox(_mboxMMDF):
- """A classic mbox mailbox."""
-
- _mangle_from_ = True
-
- def __init__(self, path, factory=None, create=True):
- """Initialize an mbox mailbox."""
- self._message_factory = mboxMessage
- _mboxMMDF.__init__(self, path, factory, create)
-
- def _pre_message_hook(self, f):
- """Called before writing each message to file f."""
- if f.tell() != 0:
- f.write(os.linesep)
-
- def _generate_toc(self):
- """Generate key-to-(start, stop) table of contents."""
- starts, stops = [], []
- self._file.seek(0)
- while True:
- line_pos = self._file.tell()
- line = self._file.readline()
- if line.startswith('From '):
- if len(stops) < len(starts):
- stops.append(line_pos - len(os.linesep))
- starts.append(line_pos)
- elif line == '':
- stops.append(line_pos)
- break
- self._toc = dict(enumerate(zip(starts, stops)))
- self._next_key = len(self._toc)
-
-
-class MMDF(_mboxMMDF):
- """An MMDF mailbox."""
-
- def __init__(self, path, factory=None, create=True):
- """Initialize an MMDF mailbox."""
- self._message_factory = MMDFMessage
- _mboxMMDF.__init__(self, path, factory, create)
-
- def _pre_message_hook(self, f):
- """Called before writing each message to file f."""
- f.write('\001\001\001\001' + os.linesep)
-
- def _post_message_hook(self, f):
- """Called after writing each message to file f."""
- f.write(os.linesep + '\001\001\001\001' + os.linesep)
-
- def _generate_toc(self):
- """Generate key-to-(start, stop) table of contents."""
- starts, stops = [], []
- self._file.seek(0)
- next_pos = 0
- while True:
- line_pos = next_pos
- line = self._file.readline()
- next_pos = self._file.tell()
- if line.startswith('\001\001\001\001' + os.linesep):
- starts.append(next_pos)
- while True:
- line_pos = next_pos
- line = self._file.readline()
- next_pos = self._file.tell()
- if line == '\001\001\001\001' + os.linesep:
- stops.append(line_pos - len(os.linesep))
- break
- elif line == '':
- stops.append(line_pos)
- break
- elif line == '':
- break
- self._toc = dict(enumerate(zip(starts, stops)))
- self._next_key = len(self._toc)
-
-
-class MH(Mailbox):
- """An MH mailbox."""
-
- def __init__(self, path, factory=None, create=True):
- """Initialize an MH instance."""
- Mailbox.__init__(self, path, factory, create)
- if not os.path.exists(self._path):
- if create:
- os.mkdir(self._path, 0700)
- os.close(os.open(os.path.join(self._path, '.mh_sequences'),
- os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0600))
- else:
- raise NoSuchMailboxError(self._path)
- self._locked = False
-
- def add(self, message):
- """Add message and return assigned key."""
- keys = self.keys()
- if len(keys) == 0:
- new_key = 1
- else:
- new_key = max(keys) + 1
- new_path = os.path.join(self._path, str(new_key))
- f = _create_carefully(new_path)
- try:
- if self._locked:
- _lock_file(f)
- try:
- self._dump_message(message, f)
- if isinstance(message, MHMessage):
- self._dump_sequences(message, new_key)
- finally:
- if self._locked:
- _unlock_file(f)
- finally:
- _sync_close(f)
- return new_key
-
- def remove(self, key):
- """Remove the keyed message; raise KeyError if it doesn't exist."""
- path = os.path.join(self._path, str(key))
- try:
- f = open(path, 'rb+')
- except IOError, e:
- if e.errno == errno.ENOENT:
- raise KeyError('No message with key: %s' % key)
- else:
- raise
- try:
- if self._locked:
- _lock_file(f)
- try:
- f.close()
- os.remove(os.path.join(self._path, str(key)))
- finally:
- if self._locked:
- _unlock_file(f)
- finally:
- f.close()
-
- def __setitem__(self, key, message):
- """Replace the keyed message; raise KeyError if it doesn't exist."""
- path = os.path.join(self._path, str(key))
- try:
- f = open(path, 'rb+')
- except IOError, e:
- if e.errno == errno.ENOENT:
- raise KeyError('No message with key: %s' % key)
- else:
- raise
- try:
- if self._locked:
- _lock_file(f)
- try:
- os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
- self._dump_message(message, f)
- if isinstance(message, MHMessage):
- self._dump_sequences(message, key)
- finally:
- if self._locked:
- _unlock_file(f)
- finally:
- _sync_close(f)
-
- def get_message(self, key):
- """Return a Message representation or raise a KeyError."""
- try:
- if self._locked:
- f = open(os.path.join(self._path, str(key)), 'r+')
- else:
- f = open(os.path.join(self._path, str(key)), 'r')
- except IOError, e:
- if e.errno == errno.ENOENT:
- raise KeyError('No message with key: %s' % key)
- else:
- raise
- try:
- if self._locked:
- _lock_file(f)
- try:
- msg = MHMessage(f)
- finally:
- if self._locked:
- _unlock_file(f)
- finally:
- f.close()
- for name, key_list in self.get_sequences():
- if key in key_list:
- msg.add_sequence(name)
- return msg
-
- def get_string(self, key):
- """Return a string representation or raise a KeyError."""
- try:
- if self._locked:
- f = open(os.path.join(self._path, str(key)), 'r+')
- else:
- f = open(os.path.join(self._path, str(key)), 'r')
- except IOError, e:
- if e.errno == errno.ENOENT:
- raise KeyError('No message with key: %s' % key)
- else:
- raise
- try:
- if self._locked:
- _lock_file(f)
- try:
- return f.read()
- finally:
- if self._locked:
- _unlock_file(f)
- finally:
- f.close()
-
- def get_file(self, key):
- """Return a file-like representation or raise a KeyError."""
- try:
- f = open(os.path.join(self._path, str(key)), 'rb')
- except IOError, e:
- if e.errno == errno.ENOENT:
- raise KeyError('No message with key: %s' % key)
- else:
- raise
- return _ProxyFile(f)
-
- def iterkeys(self):
- """Return an iterator over keys."""
- return iter(sorted(int(entry) for entry in os.listdir(self._path)
- if entry.isdigit()))
-
- def has_key(self, key):
- """Return True if the keyed message exists, False otherwise."""
- return os.path.exists(os.path.join(self._path, str(key)))
-
- def __len__(self):
- """Return a count of messages in the mailbox."""
- return len(list(self.iterkeys()))
-
- def lock(self):
- """Lock the mailbox."""
- if not self._locked:
- self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
- _lock_file(self._file)
- self._locked = True
-
- def unlock(self):
- """Unlock the mailbox if it is locked."""
- if self._locked:
- _unlock_file(self._file)
- _sync_close(self._file)
- del self._file
- self._locked = False
-
- def flush(self):
- """Write any pending changes to the disk."""
- return
-
- def close(self):
- """Flush and close the mailbox."""
- if self._locked:
- self.unlock()
-
- def list_folders(self):
- """Return a list of folder names."""
- result = []
- for entry in os.listdir(self._path):
- if os.path.isdir(os.path.join(self._path, entry)):
- result.append(entry)
- return result
-
- def get_folder(self, folder):
- """Return an MH instance for the named folder."""
- return MH(os.path.join(self._path, folder),
- factory=self._factory, create=False)
-
- def add_folder(self, folder):
- """Create a folder and return an MH instance representing it."""
- return MH(os.path.join(self._path, folder),
- factory=self._factory)
-
- def remove_folder(self, folder):
- """Delete the named folder, which must be empty."""
- path = os.path.join(self._path, folder)
- entries = os.listdir(path)
- if entries == ['.mh_sequences']:
- os.remove(os.path.join(path, '.mh_sequences'))
- elif entries == []:
- pass
- else:
- raise NotEmptyError('Folder not empty: %s' % self._path)
- os.rmdir(path)
-
- def get_sequences(self):
- """Return a name-to-key-list dictionary to define each sequence."""
- results = {}
- f = open(os.path.join(self._path, '.mh_sequences'), 'r')
- try:
- all_keys = set(self.keys())
- for line in f:
- try:
- name, contents = line.split(':')
- keys = set()
- for spec in contents.split():
- if spec.isdigit():
- keys.add(int(spec))
- else:
- start, stop = (int(x) for x in spec.split('-'))
- keys.update(range(start, stop + 1))
- results[name] = [key for key in sorted(keys) \
- if key in all_keys]
- if len(results[name]) == 0:
- del results[name]
- except ValueError:
- raise FormatError('Invalid sequence specification: %s' %
- line.rstrip())
- finally:
- f.close()
- return results
-
- def set_sequences(self, sequences):
- """Set sequences using the given name-to-key-list dictionary."""
- f = open(os.path.join(self._path, '.mh_sequences'), 'r+')
- try:
- os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
- for name, keys in sequences.iteritems():
- if len(keys) == 0:
- continue
- f.write('%s:' % name)
- prev = None
- completing = False
- for key in sorted(set(keys)):
- if key - 1 == prev:
- if not completing:
- completing = True
- f.write('-')
- elif completing:
- completing = False
- f.write('%s %s' % (prev, key))
- else:
- f.write(' %s' % key)
- prev = key
- if completing:
- f.write(str(prev) + '\n')
- else:
- f.write('\n')
- finally:
- _sync_close(f)
-
- def pack(self):
- """Re-name messages to eliminate numbering gaps. Invalidates keys."""
- sequences = self.get_sequences()
- prev = 0
- changes = []
- for key in self.iterkeys():
- if key - 1 != prev:
- changes.append((key, prev + 1))
- if hasattr(os, 'link'):
- os.link(os.path.join(self._path, str(key)),
- os.path.join(self._path, str(prev + 1)))
- os.unlink(os.path.join(self._path, str(key)))
- else:
- os.rename(os.path.join(self._path, str(key)),
- os.path.join(self._path, str(prev + 1)))
- prev += 1
- self._next_key = prev + 1
- if len(changes) == 0:
- return
- for name, key_list in sequences.items():
- for old, new in changes:
- if old in key_list:
- key_list[key_list.index(old)] = new
- self.set_sequences(sequences)
-
- def _dump_sequences(self, message, key):
- """Inspect a new MHMessage and update sequences appropriately."""
- pending_sequences = message.get_sequences()
- all_sequences = self.get_sequences()
- for name, key_list in all_sequences.iteritems():
- if name in pending_sequences:
- key_list.append(key)
- elif key in key_list:
- del key_list[key_list.index(key)]
- for sequence in pending_sequences:
- if sequence not in all_sequences:
- all_sequences[sequence] = [key]
- self.set_sequences(all_sequences)
-
-
-class Babyl(_singlefileMailbox):
- """An Rmail-style Babyl mailbox."""
-
- _special_labels = frozenset(('unseen', 'deleted', 'filed', 'answered',
- 'forwarded', 'edited', 'resent'))
-
- def __init__(self, path, factory=None, create=True):
- """Initialize a Babyl mailbox."""
- _singlefileMailbox.__init__(self, path, factory, create)
- self._labels = {}
-
- def add(self, message):
- """Add message and return assigned key."""
- key = _singlefileMailbox.add(self, message)
- if isinstance(message, BabylMessage):
- self._labels[key] = message.get_labels()
- return key
-
- def remove(self, key):
- """Remove the keyed message; raise KeyError if it doesn't exist."""
- _singlefileMailbox.remove(self, key)
- if key in self._labels:
- del self._labels[key]
-
- def __setitem__(self, key, message):
- """Replace the keyed message; raise KeyError if it doesn't exist."""
- _singlefileMailbox.__setitem__(self, key, message)
- if isinstance(message, BabylMessage):
- self._labels[key] = message.get_labels()
-
- def get_message(self, key):
- """Return a Message representation or raise a KeyError."""
- start, stop = self._lookup(key)
- self._file.seek(start)
- self._file.readline() # Skip '1,' line specifying labels.
- original_headers = StringIO.StringIO()
- while True:
- line = self._file.readline()
- if line == '*** EOOH ***' + os.linesep or line == '':
- break
- original_headers.write(line.replace(os.linesep, '\n'))
- visible_headers = StringIO.StringIO()
- while True:
- line = self._file.readline()
- if line == os.linesep or line == '':
- break
- visible_headers.write(line.replace(os.linesep, '\n'))
- body = self._file.read(stop - self._file.tell()).replace(os.linesep,
- '\n')
- msg = BabylMessage(original_headers.getvalue() + body)
- msg.set_visible(visible_headers.getvalue())
- if key in self._labels:
- msg.set_labels(self._labels[key])
- return msg
-
- def get_string(self, key):
- """Return a string representation or raise a KeyError."""
- start, stop = self._lookup(key)
- self._file.seek(start)
- self._file.readline() # Skip '1,' line specifying labels.
- original_headers = StringIO.StringIO()
- while True:
- line = self._file.readline()
- if line == '*** EOOH ***' + os.linesep or line == '':
- break
- original_headers.write(line.replace(os.linesep, '\n'))
- while True:
- line = self._file.readline()
- if line == os.linesep or line == '':
- break
- return original_headers.getvalue() + \
- self._file.read(stop - self._file.tell()).replace(os.linesep,
- '\n')
-
- def get_file(self, key):
- """Return a file-like representation or raise a KeyError."""
- return StringIO.StringIO(self.get_string(key).replace('\n',
- os.linesep))
-
- def get_labels(self):
- """Return a list of user-defined labels in the mailbox."""
- self._lookup()
- labels = set()
- for label_list in self._labels.values():
- labels.update(label_list)
- labels.difference_update(self._special_labels)
- return list(labels)
-
- def _generate_toc(self):
- """Generate key-to-(start, stop) table of contents."""
- starts, stops = [], []
- self._file.seek(0)
- next_pos = 0
- label_lists = []
- while True:
- line_pos = next_pos
- line = self._file.readline()
- next_pos = self._file.tell()
- if line == '\037\014' + os.linesep:
- if len(stops) < len(starts):
- stops.append(line_pos - len(os.linesep))
- starts.append(next_pos)
- labels = [label.strip() for label
- in self._file.readline()[1:].split(',')
- if label.strip() != '']
- label_lists.append(labels)
- elif line == '\037' or line == '\037' + os.linesep:
- if len(stops) < len(starts):
- stops.append(line_pos - len(os.linesep))
- elif line == '':
- stops.append(line_pos - len(os.linesep))
- break
- self._toc = dict(enumerate(zip(starts, stops)))
- self._labels = dict(enumerate(label_lists))
- self._next_key = len(self._toc)
-
- def _pre_mailbox_hook(self, f):
- """Called before writing the mailbox to file f."""
- f.write('BABYL OPTIONS:%sVersion: 5%sLabels:%s%s\037' %
- (os.linesep, os.linesep, ','.join(self.get_labels()),
- os.linesep))
-
- def _pre_message_hook(self, f):
- """Called before writing each message to file f."""
- f.write('\014' + os.linesep)
-
- def _post_message_hook(self, f):
- """Called after writing each message to file f."""
- f.write(os.linesep + '\037')
-
- def _install_message(self, message):
- """Write message contents and return (start, stop)."""
- start = self._file.tell()
- if isinstance(message, BabylMessage):
- special_labels = []
- labels = []
- for label in message.get_labels():
- if label in self._special_labels:
- special_labels.append(label)
- else:
- labels.append(label)
- self._file.write('1')
- for label in special_labels:
- self._file.write(', ' + label)
- self._file.write(',,')
- for label in labels:
- self._file.write(' ' + label + ',')
- self._file.write(os.linesep)
- else:
- self._file.write('1,,' + os.linesep)
- if isinstance(message, email.Message.Message):
- orig_buffer = StringIO.StringIO()
- orig_generator = email.Generator.Generator(orig_buffer, False, 0)
- orig_generator.flatten(message)
- orig_buffer.seek(0)
- while True:
- line = orig_buffer.readline()
- self._file.write(line.replace('\n', os.linesep))
- if line == '\n' or line == '':
- break
- self._file.write('*** EOOH ***' + os.linesep)
- if isinstance(message, BabylMessage):
- vis_buffer = StringIO.StringIO()
- vis_generator = email.Generator.Generator(vis_buffer, False, 0)
- vis_generator.flatten(message.get_visible())
- while True:
- line = vis_buffer.readline()
- self._file.write(line.replace('\n', os.linesep))
- if line == '\n' or line == '':
- break
- else:
- orig_buffer.seek(0)
- while True:
- line = orig_buffer.readline()
- self._file.write(line.replace('\n', os.linesep))
- if line == '\n' or line == '':
- break
- while True:
- buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
- if buffer == '':
- break
- self._file.write(buffer.replace('\n', os.linesep))
- elif isinstance(message, str):
- body_start = message.find('\n\n') + 2
- if body_start - 2 != -1:
- self._file.write(message[:body_start].replace('\n',
- os.linesep))
- self._file.write('*** EOOH ***' + os.linesep)
- self._file.write(message[:body_start].replace('\n',
- os.linesep))
- self._file.write(message[body_start:].replace('\n',
- os.linesep))
- else:
- self._file.write('*** EOOH ***' + os.linesep + os.linesep)
- self._file.write(message.replace('\n', os.linesep))
- elif hasattr(message, 'readline'):
- original_pos = message.tell()
- first_pass = True
- while True:
- line = message.readline()
- self._file.write(line.replace('\n', os.linesep))
- if line == '\n' or line == '':
- self._file.write('*** EOOH ***' + os.linesep)
- if first_pass:
- first_pass = False
- message.seek(original_pos)
- else:
- break
- while True:
- buffer = message.read(4096) # Buffer size is arbitrary.
- if buffer == '':
- break
- self._file.write(buffer.replace('\n', os.linesep))
- else:
- raise TypeError('Invalid message type: %s' % type(message))
- stop = self._file.tell()
- return (start, stop)
-
-
-class Message(email.Message.Message):
- """Message with mailbox-format-specific properties."""
-
- def __init__(self, message=None):
- """Initialize a Message instance."""
- if isinstance(message, email.Message.Message):
- self._become_message(copy.deepcopy(message))
- if isinstance(message, Message):
- message._explain_to(self)
- elif isinstance(message, str):
- self._become_message(email.message_from_string(message))
- elif hasattr(message, "read"):
- self._become_message(email.message_from_file(message))
- elif message is None:
- email.Message.Message.__init__(self)
- else:
- raise TypeError('Invalid message type: %s' % type(message))
-
- def _become_message(self, message):
- """Assume the non-format-specific state of message."""
- for name in ('_headers', '_unixfrom', '_payload', '_charset',
- 'preamble', 'epilogue', 'defects', '_default_type'):
- self.__dict__[name] = message.__dict__[name]
-
- def _explain_to(self, message):
- """Copy format-specific state to message insofar as possible."""
- if isinstance(message, Message):
- return # There's nothing format-specific to explain.
- else:
- raise TypeError('Cannot convert to specified type')
-
-
-class MaildirMessage(Message):
- """Message with Maildir-specific properties."""
-
- def __init__(self, message=None):
- """Initialize a MaildirMessage instance."""
- self._subdir = 'new'
- self._info = ''
- self._date = time.time()
- Message.__init__(self, message)
-
- def get_subdir(self):
- """Return 'new' or 'cur'."""
- return self._subdir
-
- def set_subdir(self, subdir):
- """Set subdir to 'new' or 'cur'."""
- if subdir == 'new' or subdir == 'cur':
- self._subdir = subdir
- else:
- raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
-
- def get_flags(self):
- """Return as a string the flags that are set."""
- if self._info.startswith('2,'):
- return self._info[2:]
- else:
- return ''
-
- def set_flags(self, flags):
- """Set the given flags and unset all others."""
- self._info = '2,' + ''.join(sorted(flags))
-
- def add_flag(self, flag):
- """Set the given flag(s) without changing others."""
- self.set_flags(''.join(set(self.get_flags()) | set(flag)))
-
- def remove_flag(self, flag):
- """Unset the given string flag(s) without changing others."""
- if self.get_flags() != '':
- self.set_flags(''.join(set(self.get_flags()) - set(flag)))
-
- def get_date(self):
- """Return delivery date of message, in seconds since the epoch."""
- return self._date
-
- def set_date(self, date):
- """Set delivery date of message, in seconds since the epoch."""
- try:
- self._date = float(date)
- except ValueError:
- raise TypeError("can't convert to float: %s" % date)
-
- def get_info(self):
- """Get the message's "info" as a string."""
- return self._info
-
- def set_info(self, info):
- """Set the message's "info" string."""
- if isinstance(info, str):
- self._info = info
- else:
- raise TypeError('info must be a string: %s' % type(info))
-
- def _explain_to(self, message):
- """Copy Maildir-specific state to message insofar as possible."""
- if isinstance(message, MaildirMessage):
- message.set_flags(self.get_flags())
- message.set_subdir(self.get_subdir())
- message.set_date(self.get_date())
- elif isinstance(message, _mboxMMDFMessage):
- flags = set(self.get_flags())
- if 'S' in flags:
- message.add_flag('R')
- if self.get_subdir() == 'cur':
- message.add_flag('O')
- if 'T' in flags:
- message.add_flag('D')
- if 'F' in flags:
- message.add_flag('F')
- if 'R' in flags:
- message.add_flag('A')
- message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
- elif isinstance(message, MHMessage):
- flags = set(self.get_flags())
- if 'S' not in flags:
- message.add_sequence('unseen')
- if 'R' in flags:
- message.add_sequence('replied')
- if 'F' in flags:
- message.add_sequence('flagged')
- elif isinstance(message, BabylMessage):
- flags = set(self.get_flags())
- if 'S' not in flags:
- message.add_label('unseen')
- if 'T' in flags:
- message.add_label('deleted')
- if 'R' in flags:
- message.add_label('answered')
- if 'P' in flags:
- message.add_label('forwarded')
- elif isinstance(message, Message):
- pass
- else:
- raise TypeError('Cannot convert to specified type: %s' %
- type(message))
-
-
-class _mboxMMDFMessage(Message):
- """Message with mbox- or MMDF-specific properties."""
-
- def __init__(self, message=None):
- """Initialize an mboxMMDFMessage instance."""
- self.set_from('MAILER-DAEMON', True)
- if isinstance(message, email.Message.Message):
- unixfrom = message.get_unixfrom()
- if unixfrom is not None and unixfrom.startswith('From '):
- self.set_from(unixfrom[5:])
- Message.__init__(self, message)
-
- def get_from(self):
- """Return contents of "From " line."""
- return self._from
-
- def set_from(self, from_, time_=None):
- """Set "From " line, formatting and appending time_ if specified."""
- if time_ is not None:
- if time_ is True:
- time_ = time.gmtime()
- from_ += ' ' + time.asctime(time_)
- self._from = from_
-
- def get_flags(self):
- """Return as a string the flags that are set."""
- return self.get('Status', '') + self.get('X-Status', '')
-
- def set_flags(self, flags):
- """Set the given flags and unset all others."""
- flags = set(flags)
- status_flags, xstatus_flags = '', ''
- for flag in ('R', 'O'):
- if flag in flags:
- status_flags += flag
- flags.remove(flag)
- for flag in ('D', 'F', 'A'):
- if flag in flags:
- xstatus_flags += flag
- flags.remove(flag)
- xstatus_flags += ''.join(sorted(flags))
- try:
- self.replace_header('Status', status_flags)
- except KeyError:
- self.add_header('Status', status_flags)
- try:
- self.replace_header('X-Status', xstatus_flags)
- except KeyError:
- self.add_header('X-Status', xstatus_flags)
-
- def add_flag(self, flag):
- """Set the given flag(s) without changing others."""
- self.set_flags(''.join(set(self.get_flags()) | set(flag)))
-
- def remove_flag(self, flag):
- """Unset the given string flag(s) without changing others."""
- if 'Status' in self or 'X-Status' in self:
- self.set_flags(''.join(set(self.get_flags()) - set(flag)))
-
- def _explain_to(self, message):
- """Copy mbox- or MMDF-specific state to message insofar as possible."""
- if isinstance(message, MaildirMessage):
- flags = set(self.get_flags())
- if 'O' in flags:
- message.set_subdir('cur')
- if 'F' in flags:
- message.add_flag('F')
- if 'A' in flags:
- message.add_flag('R')
- if 'R' in flags:
- message.add_flag('S')
- if 'D' in flags:
- message.add_flag('T')
- del message['status']
- del message['x-status']
- maybe_date = ' '.join(self.get_from().split()[-5:])
- try:
- message.set_date(calendar.timegm(time.strptime(maybe_date,
- '%a %b %d %H:%M:%S %Y')))
- except (ValueError, OverflowError):
- pass
- elif isinstance(message, _mboxMMDFMessage):
- message.set_flags(self.get_flags())
- message.set_from(self.get_from())
- elif isinstance(message, MHMessage):
- flags = set(self.get_flags())
- if 'R' not in flags:
- message.add_sequence('unseen')
- if 'A' in flags:
- message.add_sequence('replied')
- if 'F' in flags:
- message.add_sequence('flagged')
- del message['status']
- del message['x-status']
- elif isinstance(message, BabylMessage):
- flags = set(self.get_flags())
- if 'R' not in flags:
- message.add_label('unseen')
- if 'D' in flags:
- message.add_label('deleted')
- if 'A' in flags:
- message.add_label('answered')
- del message['status']
- del message['x-status']
- elif isinstance(message, Message):
- pass
- else:
- raise TypeError('Cannot convert to specified type: %s' %
- type(message))
-
-
-class mboxMessage(_mboxMMDFMessage):
- """Message with mbox-specific properties."""
-
-
-class MHMessage(Message):
- """Message with MH-specific properties."""
-
- def __init__(self, message=None):
- """Initialize an MHMessage instance."""
- self._sequences = []
- Message.__init__(self, message)
-
- def get_sequences(self):
- """Return a list of sequences that include the message."""
- return self._sequences[:]
-
- def set_sequences(self, sequences):
- """Set the list of sequences that include the message."""
- self._sequences = list(sequences)
-
- def add_sequence(self, sequence):
- """Add sequence to list of sequences including the message."""
- if isinstance(sequence, str):
- if not sequence in self._sequences:
- self._sequences.append(sequence)
- else:
- raise TypeError('sequence must be a string: %s' % type(sequence))
-
- def remove_sequence(self, sequence):
- """Remove sequence from the list of sequences including the message."""
- try:
- self._sequences.remove(sequence)
- except ValueError:
- pass
-
- def _explain_to(self, message):
- """Copy MH-specific state to message insofar as possible."""
- if isinstance(message, MaildirMessage):
- sequences = set(self.get_sequences())
- if 'unseen' in sequences:
- message.set_subdir('cur')
- else:
- message.set_subdir('cur')
- message.add_flag('S')
- if 'flagged' in sequences:
- message.add_flag('F')
- if 'replied' in sequences:
- message.add_flag('R')
- elif isinstance(message, _mboxMMDFMessage):
- sequences = set(self.get_sequences())
- if 'unseen' not in sequences:
- message.add_flag('RO')
- else:
- message.add_flag('O')
- if 'flagged' in sequences:
- message.add_flag('F')
- if 'replied' in sequences:
- message.add_flag('A')
- elif isinstance(message, MHMessage):
- for sequence in self.get_sequences():
- message.add_sequence(sequence)
- elif isinstance(message, BabylMessage):
- sequences = set(self.get_sequences())
- if 'unseen' in sequences:
- message.add_label('unseen')
- if 'replied' in sequences:
- message.add_label('answered')
- elif isinstance(message, Message):
- pass
- else:
- raise TypeError('Cannot convert to specified type: %s' %
- type(message))
-
-
-class BabylMessage(Message):
- """Message with Babyl-specific properties."""
-
- def __init__(self, message=None):
- """Initialize an BabylMessage instance."""
- self._labels = []
- self._visible = Message()
- Message.__init__(self, message)
-
- def get_labels(self):
- """Return a list of labels on the message."""
- return self._labels[:]
-
- def set_labels(self, labels):
- """Set the list of labels on the message."""
- self._labels = list(labels)
-
- def add_label(self, label):
- """Add label to list of labels on the message."""
- if isinstance(label, str):
- if label not in self._labels:
- self._labels.append(label)
- else:
- raise TypeError('label must be a string: %s' % type(label))
-
- def remove_label(self, label):
- """Remove label from the list of labels on the message."""
- try:
- self._labels.remove(label)
- except ValueError:
- pass
-
- def get_visible(self):
- """Return a Message representation of visible headers."""
- return Message(self._visible)
-
- def set_visible(self, visible):
- """Set the Message representation of visible headers."""
- self._visible = Message(visible)
-
- def update_visible(self):
- """Update and/or sensibly generate a set of visible headers."""
- for header in self._visible.keys():
- if header in self:
- self._visible.replace_header(header, self[header])
- else:
- del self._visible[header]
- for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
- if header in self and header not in self._visible:
- self._visible[header] = self[header]
-
- def _explain_to(self, message):
- """Copy Babyl-specific state to message insofar as possible."""
- if isinstance(message, MaildirMessage):
- labels = set(self.get_labels())
- if 'unseen' in labels:
- message.set_subdir('cur')
- else:
- message.set_subdir('cur')
- message.add_flag('S')
- if 'forwarded' in labels or 'resent' in labels:
- message.add_flag('P')
- if 'answered' in labels:
- message.add_flag('R')
- if 'deleted' in labels:
- message.add_flag('T')
- elif isinstance(message, _mboxMMDFMessage):
- labels = set(self.get_labels())
- if 'unseen' not in labels:
- message.add_flag('RO')
- else:
- message.add_flag('O')
- if 'deleted' in labels:
- message.add_flag('D')
- if 'answered' in labels:
- message.add_flag('A')
- elif isinstance(message, MHMessage):
- labels = set(self.get_labels())
- if 'unseen' in labels:
- message.add_sequence('unseen')
- if 'answered' in labels:
- message.add_sequence('replied')
- elif isinstance(message, BabylMessage):
- message.set_visible(self.get_visible())
- for label in self.get_labels():
- message.add_label(label)
- elif isinstance(message, Message):
- pass
- else:
- raise TypeError('Cannot convert to specified type: %s' %
- type(message))
-
-
-class MMDFMessage(_mboxMMDFMessage):
- """Message with MMDF-specific properties."""
-
-
-class _ProxyFile:
- """A read-only wrapper of a file."""
-
- def __init__(self, f, pos=None):
- """Initialize a _ProxyFile."""
- self._file = f
- if pos is None:
- self._pos = f.tell()
- else:
- self._pos = pos
-
- def read(self, size=None):
- """Read bytes."""
- return self._read(size, self._file.read)
-
- def readline(self, size=None):
- """Read a line."""
- return self._read(size, self._file.readline)
-
- def readlines(self, sizehint=None):
- """Read multiple lines."""
- result = []
- for line in self:
- result.append(line)
- if sizehint is not None:
- sizehint -= len(line)
- if sizehint <= 0:
- break
- return result
-
- def __iter__(self):
- """Iterate over lines."""
- return iter(self.readline, "")
-
- def tell(self):
- """Return the position."""
- return self._pos
-
- def seek(self, offset, whence=0):
- """Change position."""
- if whence == 1:
- self._file.seek(self._pos)
- self._file.seek(offset, whence)
- self._pos = self._file.tell()
-
- def close(self):
- """Close the file."""
- del self._file
-
- def _read(self, size, read_method):
- """Read size bytes using read_method."""
- if size is None:
- size = -1
- self._file.seek(self._pos)
- result = read_method(size)
- self._pos = self._file.tell()
- return result
-
-
-class _PartialFile(_ProxyFile):
- """A read-only wrapper of part of a file."""
-
- def __init__(self, f, start=None, stop=None):
- """Initialize a _PartialFile."""
- _ProxyFile.__init__(self, f, start)
- self._start = start
- self._stop = stop
-
- def tell(self):
- """Return the position with respect to start."""
- return _ProxyFile.tell(self) - self._start
-
- def seek(self, offset, whence=0):
- """Change position, possibly with respect to start or stop."""
- if whence == 0:
- self._pos = self._start
- whence = 1
- elif whence == 2:
- self._pos = self._stop
- whence = 1
- _ProxyFile.seek(self, offset, whence)
-
- def _read(self, size, read_method):
- """Read size bytes using read_method, honoring start and stop."""
- remaining = self._stop - self._pos
- if remaining <= 0:
- return ''
- if size is None or size < 0 or size > remaining:
- size = remaining
- return _ProxyFile._read(self, size, read_method)
-
-
-def _lock_file(f, dotlock=True):
- """Lock file f using lockf and dot locking."""
- dotlock_done = False
- try:
- if fcntl:
- try:
- fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError, e:
- if e.errno in (errno.EAGAIN, errno.EACCES):
- raise ExternalClashError('lockf: lock unavailable: %s' %
- f.name)
- else:
- raise
- if dotlock:
- try:
- pre_lock = _create_temporary(f.name + '.lock')
- pre_lock.close()
- except IOError, e:
- if e.errno == errno.EACCES:
- return # Without write access, just skip dotlocking.
- else:
- raise
- try:
- if hasattr(os, 'link'):
- os.link(pre_lock.name, f.name + '.lock')
- dotlock_done = True
- os.unlink(pre_lock.name)
- else:
- os.rename(pre_lock.name, f.name + '.lock')
- dotlock_done = True
- except OSError, e:
- if e.errno == errno.EEXIST or \
- (os.name == 'os2' and e.errno == errno.EACCES):
- os.remove(pre_lock.name)
- raise ExternalClashError('dot lock unavailable: %s' %
- f.name)
- else:
- raise
- except:
- if fcntl:
- fcntl.lockf(f, fcntl.LOCK_UN)
- if dotlock_done:
- os.remove(f.name + '.lock')
- raise
-
-def _unlock_file(f):
- """Unlock file f using lockf and dot locking."""
- if fcntl:
- fcntl.lockf(f, fcntl.LOCK_UN)
- if os.path.exists(f.name + '.lock'):
- os.remove(f.name + '.lock')
-
-def _create_carefully(path):
- """Create a file if it doesn't exist and open for reading and writing."""
- fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
- try:
- return open(path, 'rb+')
- finally:
- os.close(fd)
-
-def _create_temporary(path):
- """Create a temp file based on path and open for reading and writing."""
- return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
- socket.gethostname(),
- os.getpid()))
-
-def _sync_flush(f):
- """Ensure changes to file f are physically on disk."""
- f.flush()
- if hasattr(os, 'fsync'):
- os.fsync(f.fileno())
-
-def _sync_close(f):
- """Close file f, ensuring all changes are physically on disk."""
- _sync_flush(f)
- f.close()
-
-## Start: classes from the original module (for backward compatibility).
-
-# Note that the Maildir class, whose name is unchanged, itself offers a next()
-# method for backward compatibility.
-
-class _Mailbox:
-
- def __init__(self, fp, factory=rfc822.Message):
- self.fp = fp
- self.seekp = 0
- self.factory = factory
-
- def __iter__(self):
- return iter(self.next, None)
-
- def next(self):
- while 1:
- self.fp.seek(self.seekp)
- try:
- self._search_start()
- except EOFError:
- self.seekp = self.fp.tell()
- return None
- start = self.fp.tell()
- self._search_end()
- self.seekp = stop = self.fp.tell()
- if start != stop:
- break
- return self.factory(_PartialFile(self.fp, start, stop))
-
-# Recommended to use PortableUnixMailbox instead!
-class UnixMailbox(_Mailbox):
-
- def _search_start(self):
- while 1:
- pos = self.fp.tell()
- line = self.fp.readline()
- if not line:
- raise EOFError
- if line[:5] == 'From ' and self._isrealfromline(line):
- self.fp.seek(pos)
- return
-
- def _search_end(self):
- self.fp.readline() # Throw away header line
- while 1:
- pos = self.fp.tell()
- line = self.fp.readline()
- if not line:
- return
- if line[:5] == 'From ' and self._isrealfromline(line):
- self.fp.seek(pos)
- return
-
- # An overridable mechanism to test for From-line-ness. You can either
- # specify a different regular expression or define a whole new
- # _isrealfromline() method. Note that this only gets called for lines
- # starting with the 5 characters "From ".
- #
- # BAW: According to
- #http://home.netscape.com/eng/mozilla/2.0/relnotes/demo/content-length.html
- # the only portable, reliable way to find message delimiters in a BSD (i.e
- # Unix mailbox) style folder is to search for "\n\nFrom .*\n", or at the
- # beginning of the file, "^From .*\n". While _fromlinepattern below seems
- # like a good idea, in practice, there are too many variations for more
- # strict parsing of the line to be completely accurate.
- #
- # _strict_isrealfromline() is the old version which tries to do stricter
- # parsing of the From_ line. _portable_isrealfromline() simply returns
- # true, since it's never called if the line doesn't already start with
- # "From ".
- #
- # This algorithm, and the way it interacts with _search_start() and
- # _search_end() may not be completely correct, because it doesn't check
- # that the two characters preceding "From " are \n\n or the beginning of
- # the file. Fixing this would require a more extensive rewrite than is
- # necessary. For convenience, we've added a PortableUnixMailbox class
- # which does no checking of the format of the 'From' line.
-
- _fromlinepattern = (r"From \s*[^\s]+\s+\w\w\w\s+\w\w\w\s+\d?\d\s+"
- r"\d?\d:\d\d(:\d\d)?(\s+[^\s]+)?\s+\d\d\d\d\s*"
- r"[^\s]*\s*"
- "$")
- _regexp = None
-
- def _strict_isrealfromline(self, line):
- if not self._regexp:
- import re
- self._regexp = re.compile(self._fromlinepattern)
- return self._regexp.match(line)
-
- def _portable_isrealfromline(self, line):
- return True
-
- _isrealfromline = _strict_isrealfromline
-
-
-class PortableUnixMailbox(UnixMailbox):
- _isrealfromline = UnixMailbox._portable_isrealfromline
-
-
-class MmdfMailbox(_Mailbox):
-
- def _search_start(self):
- while 1:
- line = self.fp.readline()
- if not line:
- raise EOFError
- if line[:5] == '\001\001\001\001\n':
- return
-
- def _search_end(self):
- while 1:
- pos = self.fp.tell()
- line = self.fp.readline()
- if not line:
- return
- if line == '\001\001\001\001\n':
- self.fp.seek(pos)
- return
-
-
-class MHMailbox:
-
- def __init__(self, dirname, factory=rfc822.Message):
- import re
- pat = re.compile('^[1-9][0-9]*$')
- self.dirname = dirname
- # the three following lines could be combined into:
- # list = map(long, filter(pat.match, os.listdir(self.dirname)))
- list = os.listdir(self.dirname)
- list = filter(pat.match, list)
- list = map(long, list)
- list.sort()
- # This only works in Python 1.6 or later;
- # before that str() added 'L':
- self.boxes = map(str, list)
- self.boxes.reverse()
- self.factory = factory
-
- def __iter__(self):
- return iter(self.next, None)
-
- def next(self):
- if not self.boxes:
- return None
- fn = self.boxes.pop()
- fp = open(os.path.join(self.dirname, fn))
- msg = self.factory(fp)
- try:
- msg._mh_msgno = fn
- except (AttributeError, TypeError):
- pass
- return msg
-
-
-class BabylMailbox(_Mailbox):
-
- def _search_start(self):
- while 1:
- line = self.fp.readline()
- if not line:
- raise EOFError
- if line == '*** EOOH ***\n':
- return
-
- def _search_end(self):
- while 1:
- pos = self.fp.tell()
- line = self.fp.readline()
- if not line:
- return
- if line == '\037\014\n' or line == '\037':
- self.fp.seek(pos)
- return
-
-## End: classes from the original module (for backward compatibility).
-
-
-class Error(Exception):
- """Raised for module-specific errors."""
-
-class NoSuchMailboxError(Error):
- """The specified mailbox does not exist and won't be created."""
-
-class NotEmptyError(Error):
- """The specified mailbox is not empty and deletion was requested."""
-
-class ExternalClashError(Error):
- """Another process caused an action to fail."""
-
-class FormatError(Error):
- """A file appears to have an invalid format."""
diff --git a/sys/lib/python/mailcap.py b/sys/lib/python/mailcap.py
deleted file mode 100644
index b2ddacd04..000000000
--- a/sys/lib/python/mailcap.py
+++ /dev/null
@@ -1,255 +0,0 @@
-"""Mailcap file handling. See RFC 1524."""
-
-import os
-
-__all__ = ["getcaps","findmatch"]
-
-# Part 1: top-level interface.
-
-def getcaps():
- """Return a dictionary containing the mailcap database.
-
- The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
- to a list of dictionaries corresponding to mailcap entries. The list
- collects all the entries for that MIME type from all available mailcap
- files. Each dictionary contains key-value pairs for that MIME type,
- where the viewing command is stored with the key "view".
-
- """
- caps = {}
- for mailcap in listmailcapfiles():
- try:
- fp = open(mailcap, 'r')
- except IOError:
- continue
- morecaps = readmailcapfile(fp)
- fp.close()
- for key, value in morecaps.iteritems():
- if not key in caps:
- caps[key] = value
- else:
- caps[key] = caps[key] + value
- return caps
-
-def listmailcapfiles():
- """Return a list of all mailcap files found on the system."""
- # XXX Actually, this is Unix-specific
- if 'MAILCAPS' in os.environ:
- str = os.environ['MAILCAPS']
- mailcaps = str.split(':')
- else:
- if 'HOME' in os.environ:
- home = os.environ['HOME']
- else:
- # Don't bother with getpwuid()
- home = '.' # Last resort
- mailcaps = [home + '/.mailcap', '/etc/mailcap',
- '/usr/etc/mailcap', '/usr/local/etc/mailcap']
- return mailcaps
-
-
-# Part 2: the parser.
-
-def readmailcapfile(fp):
- """Read a mailcap file and return a dictionary keyed by MIME type.
-
- Each MIME type is mapped to an entry consisting of a list of
- dictionaries; the list will contain more than one such dictionary
- if a given MIME type appears more than once in the mailcap file.
- Each dictionary contains key-value pairs for that MIME type, where
- the viewing command is stored with the key "view".
- """
- caps = {}
- while 1:
- line = fp.readline()
- if not line: break
- # Ignore comments and blank lines
- if line[0] == '#' or line.strip() == '':
- continue
- nextline = line
- # Join continuation lines
- while nextline[-2:] == '\\\n':
- nextline = fp.readline()
- if not nextline: nextline = '\n'
- line = line[:-2] + nextline
- # Parse the line
- key, fields = parseline(line)
- if not (key and fields):
- continue
- # Normalize the key
- types = key.split('/')
- for j in range(len(types)):
- types[j] = types[j].strip()
- key = '/'.join(types).lower()
- # Update the database
- if key in caps:
- caps[key].append(fields)
- else:
- caps[key] = [fields]
- return caps
-
-def parseline(line):
- """Parse one entry in a mailcap file and return a dictionary.
-
- The viewing command is stored as the value with the key "view",
- and the rest of the fields produce key-value pairs in the dict.
- """
- fields = []
- i, n = 0, len(line)
- while i < n:
- field, i = parsefield(line, i, n)
- fields.append(field)
- i = i+1 # Skip semicolon
- if len(fields) < 2:
- return None, None
- key, view, rest = fields[0], fields[1], fields[2:]
- fields = {'view': view}
- for field in rest:
- i = field.find('=')
- if i < 0:
- fkey = field
- fvalue = ""
- else:
- fkey = field[:i].strip()
- fvalue = field[i+1:].strip()
- if fkey in fields:
- # Ignore it
- pass
- else:
- fields[fkey] = fvalue
- return key, fields
-
-def parsefield(line, i, n):
- """Separate one key-value pair in a mailcap entry."""
- start = i
- while i < n:
- c = line[i]
- if c == ';':
- break
- elif c == '\\':
- i = i+2
- else:
- i = i+1
- return line[start:i].strip(), i
-
-
-# Part 3: using the database.
-
-def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
- """Find a match for a mailcap entry.
-
- Return a tuple containing the command line, and the mailcap entry
- used; (None, None) if no match is found. This may invoke the
- 'test' command of several matching entries before deciding which
- entry to use.
-
- """
- entries = lookup(caps, MIMEtype, key)
- # XXX This code should somehow check for the needsterminal flag.
- for e in entries:
- if 'test' in e:
- test = subst(e['test'], filename, plist)
- if test and os.system(test) != 0:
- continue
- command = subst(e[key], MIMEtype, filename, plist)
- return command, e
- return None, None
-
-def lookup(caps, MIMEtype, key=None):
- entries = []
- if MIMEtype in caps:
- entries = entries + caps[MIMEtype]
- MIMEtypes = MIMEtype.split('/')
- MIMEtype = MIMEtypes[0] + '/*'
- if MIMEtype in caps:
- entries = entries + caps[MIMEtype]
- if key is not None:
- entries = filter(lambda e, key=key: key in e, entries)
- return entries
-
-def subst(field, MIMEtype, filename, plist=[]):
- # XXX Actually, this is Unix-specific
- res = ''
- i, n = 0, len(field)
- while i < n:
- c = field[i]; i = i+1
- if c != '%':
- if c == '\\':
- c = field[i:i+1]; i = i+1
- res = res + c
- else:
- c = field[i]; i = i+1
- if c == '%':
- res = res + c
- elif c == 's':
- res = res + filename
- elif c == 't':
- res = res + MIMEtype
- elif c == '{':
- start = i
- while i < n and field[i] != '}':
- i = i+1
- name = field[start:i]
- i = i+1
- res = res + findparam(name, plist)
- # XXX To do:
- # %n == number of parts if type is multipart/*
- # %F == list of alternating type and filename for parts
- else:
- res = res + '%' + c
- return res
-
-def findparam(name, plist):
- name = name.lower() + '='
- n = len(name)
- for p in plist:
- if p[:n].lower() == name:
- return p[n:]
- return ''
-
-
-# Part 4: test program.
-
-def test():
- import sys
- caps = getcaps()
- if not sys.argv[1:]:
- show(caps)
- return
- for i in range(1, len(sys.argv), 2):
- args = sys.argv[i:i+2]
- if len(args) < 2:
- print "usage: mailcap [MIMEtype file] ..."
- return
- MIMEtype = args[0]
- file = args[1]
- command, e = findmatch(caps, MIMEtype, 'view', file)
- if not command:
- print "No viewer found for", type
- else:
- print "Executing:", command
- sts = os.system(command)
- if sts:
- print "Exit status:", sts
-
-def show(caps):
- print "Mailcap files:"
- for fn in listmailcapfiles(): print "\t" + fn
- print
- if not caps: caps = getcaps()
- print "Mailcap entries:"
- print
- ckeys = caps.keys()
- ckeys.sort()
- for type in ckeys:
- print type
- entries = caps[type]
- for e in entries:
- keys = e.keys()
- keys.sort()
- for k in keys:
- print " %-15s" % k, e[k]
- print
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/markupbase.py b/sys/lib/python/markupbase.py
deleted file mode 100644
index 24808d185..000000000
--- a/sys/lib/python/markupbase.py
+++ /dev/null
@@ -1,392 +0,0 @@
-"""Shared support for scanning document type declarations in HTML and XHTML.
-
-This module is used as a foundation for the HTMLParser and sgmllib
-modules (indirectly, for htmllib as well). It has no documented
-public API and should not be used directly.
-
-"""
-
-import re
-
-_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
-_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
-_commentclose = re.compile(r'--\s*>')
-_markedsectionclose = re.compile(r']\s*]\s*>')
-
-# An analysis of the MS-Word extensions is available at
-# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
-
-_msmarkedsectionclose = re.compile(r']\s*>')
-
-del re
-
-
-class ParserBase:
- """Parser base class which provides some common support methods used
- by the SGML/HTML and XHTML parsers."""
-
- def __init__(self):
- if self.__class__ is ParserBase:
- raise RuntimeError(
- "markupbase.ParserBase must be subclassed")
-
- def error(self, message):
- raise NotImplementedError(
- "subclasses of ParserBase must override error()")
-
- def reset(self):
- self.lineno = 1
- self.offset = 0
-
- def getpos(self):
- """Return current line number and offset."""
- return self.lineno, self.offset
-
- # Internal -- update line number and offset. This should be
- # called for each piece of data exactly once, in order -- in other
- # words the concatenation of all the input strings to this
- # function should be exactly the entire input.
- def updatepos(self, i, j):
- if i >= j:
- return j
- rawdata = self.rawdata
- nlines = rawdata.count("\n", i, j)
- if nlines:
- self.lineno = self.lineno + nlines
- pos = rawdata.rindex("\n", i, j) # Should not fail
- self.offset = j-(pos+1)
- else:
- self.offset = self.offset + j-i
- return j
-
- _decl_otherchars = ''
-
- # Internal -- parse declaration (for use by subclasses).
- def parse_declaration(self, i):
- # This is some sort of declaration; in "HTML as
- # deployed," this should only be the document type
- # declaration ("<!DOCTYPE html...>").
- # ISO 8879:1986, however, has more complex
- # declaration syntax for elements in <!...>, including:
- # --comment--
- # [marked section]
- # name in the following list: ENTITY, DOCTYPE, ELEMENT,
- # ATTLIST, NOTATION, SHORTREF, USEMAP,
- # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
- rawdata = self.rawdata
- j = i + 2
- assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
- if rawdata[j:j+1] == ">":
- # the empty comment <!>
- return j + 1
- if rawdata[j:j+1] in ("-", ""):
- # Start of comment followed by buffer boundary,
- # or just a buffer boundary.
- return -1
- # A simple, practical version could look like: ((name|stringlit) S*) + '>'
- n = len(rawdata)
- if rawdata[j:j+2] == '--': #comment
- # Locate --.*-- as the body of the comment
- return self.parse_comment(i)
- elif rawdata[j] == '[': #marked section
- # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
- # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
- # Note that this is extended by Microsoft Office "Save as Web" function
- # to include [if...] and [endif].
- return self.parse_marked_section(i)
- else: #all other declaration elements
- decltype, j = self._scan_name(j, i)
- if j < 0:
- return j
- if decltype == "doctype":
- self._decl_otherchars = ''
- while j < n:
- c = rawdata[j]
- if c == ">":
- # end of declaration syntax
- data = rawdata[i+2:j]
- if decltype == "doctype":
- self.handle_decl(data)
- else:
- self.unknown_decl(data)
- return j + 1
- if c in "\"'":
- m = _declstringlit_match(rawdata, j)
- if not m:
- return -1 # incomplete
- j = m.end()
- elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
- name, j = self._scan_name(j, i)
- elif c in self._decl_otherchars:
- j = j + 1
- elif c == "[":
- # this could be handled in a separate doctype parser
- if decltype == "doctype":
- j = self._parse_doctype_subset(j + 1, i)
- elif decltype in ("attlist", "linktype", "link", "element"):
- # must tolerate []'d groups in a content model in an element declaration
- # also in data attribute specifications of attlist declaration
- # also link type declaration subsets in linktype declarations
- # also link attribute specification lists in link declarations
- self.error("unsupported '[' char in %s declaration" % decltype)
- else:
- self.error("unexpected '[' char in declaration")
- else:
- self.error(
- "unexpected %r char in declaration" % rawdata[j])
- if j < 0:
- return j
- return -1 # incomplete
-
- # Internal -- parse a marked section
- # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
- def parse_marked_section(self, i, report=1):
- rawdata= self.rawdata
- assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
- sectName, j = self._scan_name( i+3, i )
- if j < 0:
- return j
- if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
- # look for standard ]]> ending
- match= _markedsectionclose.search(rawdata, i+3)
- elif sectName in ("if", "else", "endif"):
- # look for MS Office ]> ending
- match= _msmarkedsectionclose.search(rawdata, i+3)
- else:
- self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
- if not match:
- return -1
- if report:
- j = match.start(0)
- self.unknown_decl(rawdata[i+3: j])
- return match.end(0)
-
- # Internal -- parse comment, return length or -1 if not terminated
- def parse_comment(self, i, report=1):
- rawdata = self.rawdata
- if rawdata[i:i+4] != '<!--':
- self.error('unexpected call to parse_comment()')
- match = _commentclose.search(rawdata, i+4)
- if not match:
- return -1
- if report:
- j = match.start(0)
- self.handle_comment(rawdata[i+4: j])
- return match.end(0)
-
- # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
- # returning the index just past any whitespace following the trailing ']'.
- def _parse_doctype_subset(self, i, declstartpos):
- rawdata = self.rawdata
- n = len(rawdata)
- j = i
- while j < n:
- c = rawdata[j]
- if c == "<":
- s = rawdata[j:j+2]
- if s == "<":
- # end of buffer; incomplete
- return -1
- if s != "<!":
- self.updatepos(declstartpos, j + 1)
- self.error("unexpected char in internal subset (in %r)" % s)
- if (j + 2) == n:
- # end of buffer; incomplete
- return -1
- if (j + 4) > n:
- # end of buffer; incomplete
- return -1
- if rawdata[j:j+4] == "<!--":
- j = self.parse_comment(j, report=0)
- if j < 0:
- return j
- continue
- name, j = self._scan_name(j + 2, declstartpos)
- if j == -1:
- return -1
- if name not in ("attlist", "element", "entity", "notation"):
- self.updatepos(declstartpos, j + 2)
- self.error(
- "unknown declaration %r in internal subset" % name)
- # handle the individual names
- meth = getattr(self, "_parse_doctype_" + name)
- j = meth(j, declstartpos)
- if j < 0:
- return j
- elif c == "%":
- # parameter entity reference
- if (j + 1) == n:
- # end of buffer; incomplete
- return -1
- s, j = self._scan_name(j + 1, declstartpos)
- if j < 0:
- return j
- if rawdata[j] == ";":
- j = j + 1
- elif c == "]":
- j = j + 1
- while j < n and rawdata[j].isspace():
- j = j + 1
- if j < n:
- if rawdata[j] == ">":
- return j
- self.updatepos(declstartpos, j)
- self.error("unexpected char after internal subset")
- else:
- return -1
- elif c.isspace():
- j = j + 1
- else:
- self.updatepos(declstartpos, j)
- self.error("unexpected char %r in internal subset" % c)
- # end of buffer reached
- return -1
-
- # Internal -- scan past <!ELEMENT declarations
- def _parse_doctype_element(self, i, declstartpos):
- name, j = self._scan_name(i, declstartpos)
- if j == -1:
- return -1
- # style content model; just skip until '>'
- rawdata = self.rawdata
- if '>' in rawdata[j:]:
- return rawdata.find(">", j) + 1
- return -1
-
- # Internal -- scan past <!ATTLIST declarations
- def _parse_doctype_attlist(self, i, declstartpos):
- rawdata = self.rawdata
- name, j = self._scan_name(i, declstartpos)
- c = rawdata[j:j+1]
- if c == "":
- return -1
- if c == ">":
- return j + 1
- while 1:
- # scan a series of attribute descriptions; simplified:
- # name type [value] [#constraint]
- name, j = self._scan_name(j, declstartpos)
- if j < 0:
- return j
- c = rawdata[j:j+1]
- if c == "":
- return -1
- if c == "(":
- # an enumerated type; look for ')'
- if ")" in rawdata[j:]:
- j = rawdata.find(")", j) + 1
- else:
- return -1
- while rawdata[j:j+1].isspace():
- j = j + 1
- if not rawdata[j:]:
- # end of buffer, incomplete
- return -1
- else:
- name, j = self._scan_name(j, declstartpos)
- c = rawdata[j:j+1]
- if not c:
- return -1
- if c in "'\"":
- m = _declstringlit_match(rawdata, j)
- if m:
- j = m.end()
- else:
- return -1
- c = rawdata[j:j+1]
- if not c:
- return -1
- if c == "#":
- if rawdata[j:] == "#":
- # end of buffer
- return -1
- name, j = self._scan_name(j + 1, declstartpos)
- if j < 0:
- return j
- c = rawdata[j:j+1]
- if not c:
- return -1
- if c == '>':
- # all done
- return j + 1
-
- # Internal -- scan past <!NOTATION declarations
- def _parse_doctype_notation(self, i, declstartpos):
- name, j = self._scan_name(i, declstartpos)
- if j < 0:
- return j
- rawdata = self.rawdata
- while 1:
- c = rawdata[j:j+1]
- if not c:
- # end of buffer; incomplete
- return -1
- if c == '>':
- return j + 1
- if c in "'\"":
- m = _declstringlit_match(rawdata, j)
- if not m:
- return -1
- j = m.end()
- else:
- name, j = self._scan_name(j, declstartpos)
- if j < 0:
- return j
-
- # Internal -- scan past <!ENTITY declarations
- def _parse_doctype_entity(self, i, declstartpos):
- rawdata = self.rawdata
- if rawdata[i:i+1] == "%":
- j = i + 1
- while 1:
- c = rawdata[j:j+1]
- if not c:
- return -1
- if c.isspace():
- j = j + 1
- else:
- break
- else:
- j = i
- name, j = self._scan_name(j, declstartpos)
- if j < 0:
- return j
- while 1:
- c = self.rawdata[j:j+1]
- if not c:
- return -1
- if c in "'\"":
- m = _declstringlit_match(rawdata, j)
- if m:
- j = m.end()
- else:
- return -1 # incomplete
- elif c == ">":
- return j + 1
- else:
- name, j = self._scan_name(j, declstartpos)
- if j < 0:
- return j
-
- # Internal -- scan a name token and the new position and the token, or
- # return -1 if we've reached the end of the buffer.
- def _scan_name(self, i, declstartpos):
- rawdata = self.rawdata
- n = len(rawdata)
- if i == n:
- return None, -1
- m = _declname_match(rawdata, i)
- if m:
- s = m.group()
- name = s.strip()
- if (i + len(s)) == n:
- return None, -1 # end of buffer
- return name.lower(), m.end()
- else:
- self.updatepos(declstartpos, i)
- self.error("expected name token at %r"
- % rawdata[declstartpos:declstartpos+20])
-
- # To be overridden -- handlers for unknown objects
- def unknown_decl(self, data):
- pass
diff --git a/sys/lib/python/md5.py b/sys/lib/python/md5.py
deleted file mode 100644
index a23167852..000000000
--- a/sys/lib/python/md5.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# $Id: md5.py 39316 2005-08-21 18:45:59Z greg $
-#
-# Copyright (C) 2005 Gregory P. Smith (greg@electricrain.com)
-# Licensed to PSF under a Contributor Agreement.
-
-from hashlib import md5
-new = md5
-
-blocksize = 1 # legacy value (wrong in any useful sense)
-digest_size = 16
diff --git a/sys/lib/python/mercurial/__init__.py b/sys/lib/python/mercurial/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/sys/lib/python/mercurial/__init__.py
+++ /dev/null
diff --git a/sys/lib/python/mercurial/ancestor.py b/sys/lib/python/mercurial/ancestor.py
deleted file mode 100644
index 56464283b..000000000
--- a/sys/lib/python/mercurial/ancestor.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# ancestor.py - generic DAG ancestor algorithm for mercurial
-#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import heapq
-
-def ancestor(a, b, pfunc):
- """
- return the least common ancestor of nodes a and b or None if there
- is no such ancestor.
-
- pfunc must return a list of parent vertices
- """
-
- if a == b:
- return a
-
- # find depth from root of all ancestors
- parentcache = {}
- visit = [a, b]
- depth = {}
- while visit:
- vertex = visit[-1]
- pl = pfunc(vertex)
- parentcache[vertex] = pl
- if not pl:
- depth[vertex] = 0
- visit.pop()
- else:
- for p in pl:
- if p == a or p == b: # did we find a or b as a parent?
- return p # we're done
- if p not in depth:
- visit.append(p)
- if visit[-1] == vertex:
- depth[vertex] = min([depth[p] for p in pl]) - 1
- visit.pop()
-
- # traverse ancestors in order of decreasing distance from root
- def ancestors(vertex):
- h = [(depth[vertex], vertex)]
- seen = set()
- while h:
- d, n = heapq.heappop(h)
- if n not in seen:
- seen.add(n)
- yield (d, n)
- for p in parentcache[n]:
- heapq.heappush(h, (depth[p], p))
-
- def generations(vertex):
- sg, s = None, set()
- for g, v in ancestors(vertex):
- if g != sg:
- if sg:
- yield sg, s
- sg, s = g, set((v,))
- else:
- s.add(v)
- yield sg, s
-
- x = generations(a)
- y = generations(b)
- gx = x.next()
- gy = y.next()
-
- # increment each ancestor list until it is closer to root than
- # the other, or they match
- try:
- while 1:
- if gx[0] == gy[0]:
- for v in gx[1]:
- if v in gy[1]:
- return v
- gy = y.next()
- gx = x.next()
- elif gx[0] > gy[0]:
- gy = y.next()
- else:
- gx = x.next()
- except StopIteration:
- return None
diff --git a/sys/lib/python/mercurial/archival.py b/sys/lib/python/mercurial/archival.py
deleted file mode 100644
index 17093ce0f..000000000
--- a/sys/lib/python/mercurial/archival.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# archival.py - revision archival for mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-from node import hex
-import util
-import cStringIO, os, stat, tarfile, time, zipfile
-import zlib, gzip
-
-def tidyprefix(dest, prefix, suffixes):
- '''choose prefix to use for names in archive. make sure prefix is
- safe for consumers.'''
-
- if prefix:
- prefix = util.normpath(prefix)
- else:
- if not isinstance(dest, str):
- raise ValueError('dest must be string if no prefix')
- prefix = os.path.basename(dest)
- lower = prefix.lower()
- for sfx in suffixes:
- if lower.endswith(sfx):
- prefix = prefix[:-len(sfx)]
- break
- lpfx = os.path.normpath(util.localpath(prefix))
- prefix = util.pconvert(lpfx)
- if not prefix.endswith('/'):
- prefix += '/'
- if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
- raise util.Abort(_('archive prefix contains illegal components'))
- return prefix
-
-class tarit(object):
- '''write archive to tar file or stream. can write uncompressed,
- or compress with gzip or bzip2.'''
-
- class GzipFileWithTime(gzip.GzipFile):
-
- def __init__(self, *args, **kw):
- timestamp = None
- if 'timestamp' in kw:
- timestamp = kw.pop('timestamp')
- if timestamp is None:
- self.timestamp = time.time()
- else:
- self.timestamp = timestamp
- gzip.GzipFile.__init__(self, *args, **kw)
-
- def _write_gzip_header(self):
- self.fileobj.write('\037\213') # magic header
- self.fileobj.write('\010') # compression method
- # Python 2.6 deprecates self.filename
- fname = getattr(self, 'name', None) or self.filename
- flags = 0
- if fname:
- flags = gzip.FNAME
- self.fileobj.write(chr(flags))
- gzip.write32u(self.fileobj, long(self.timestamp))
- self.fileobj.write('\002')
- self.fileobj.write('\377')
- if fname:
- self.fileobj.write(fname + '\000')
-
- def __init__(self, dest, prefix, mtime, kind=''):
- self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
- '.tgz', '.tbz2'])
- self.mtime = mtime
-
- def taropen(name, mode, fileobj=None):
- if kind == 'gz':
- mode = mode[0]
- if not fileobj:
- fileobj = open(name, mode + 'b')
- gzfileobj = self.GzipFileWithTime(name, mode + 'b',
- zlib.Z_BEST_COMPRESSION,
- fileobj, timestamp=mtime)
- return tarfile.TarFile.taropen(name, mode, gzfileobj)
- else:
- return tarfile.open(name, mode + kind, fileobj)
-
- if isinstance(dest, str):
- self.z = taropen(dest, mode='w:')
- else:
- # Python 2.5-2.5.1 have a regression that requires a name arg
- self.z = taropen(name='', mode='w|', fileobj=dest)
-
- def addfile(self, name, mode, islink, data):
- i = tarfile.TarInfo(self.prefix + name)
- i.mtime = self.mtime
- i.size = len(data)
- if islink:
- i.type = tarfile.SYMTYPE
- i.mode = 0777
- i.linkname = data
- data = None
- i.size = 0
- else:
- i.mode = mode
- data = cStringIO.StringIO(data)
- self.z.addfile(i, data)
-
- def done(self):
- self.z.close()
-
-class tellable(object):
- '''provide tell method for zipfile.ZipFile when writing to http
- response file object.'''
-
- def __init__(self, fp):
- self.fp = fp
- self.offset = 0
-
- def __getattr__(self, key):
- return getattr(self.fp, key)
-
- def write(self, s):
- self.fp.write(s)
- self.offset += len(s)
-
- def tell(self):
- return self.offset
-
-class zipit(object):
- '''write archive to zip file or stream. can write uncompressed,
- or compressed with deflate.'''
-
- def __init__(self, dest, prefix, mtime, compress=True):
- self.prefix = tidyprefix(dest, prefix, ('.zip',))
- if not isinstance(dest, str):
- try:
- dest.tell()
- except (AttributeError, IOError):
- dest = tellable(dest)
- self.z = zipfile.ZipFile(dest, 'w',
- compress and zipfile.ZIP_DEFLATED or
- zipfile.ZIP_STORED)
- self.date_time = time.gmtime(mtime)[:6]
-
- def addfile(self, name, mode, islink, data):
- i = zipfile.ZipInfo(self.prefix + name, self.date_time)
- i.compress_type = self.z.compression
- # unzip will not honor unix file modes unless file creator is
- # set to unix (id 3).
- i.create_system = 3
- ftype = stat.S_IFREG
- if islink:
- mode = 0777
- ftype = stat.S_IFLNK
- i.external_attr = (mode | ftype) << 16L
- self.z.writestr(i, data)
-
- def done(self):
- self.z.close()
-
-class fileit(object):
- '''write archive as files in directory.'''
-
- def __init__(self, name, prefix, mtime):
- if prefix:
- raise util.Abort(_('cannot give prefix when archiving to files'))
- self.basedir = name
- self.opener = util.opener(self.basedir)
-
- def addfile(self, name, mode, islink, data):
- if islink:
- self.opener.symlink(data, name)
- return
- f = self.opener(name, "w", atomictemp=True)
- f.write(data)
- f.rename()
- destfile = os.path.join(self.basedir, name)
- os.chmod(destfile, mode)
-
- def done(self):
- pass
-
-archivers = {
- 'files': fileit,
- 'tar': tarit,
- 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
- 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
- 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
- 'zip': zipit,
- }
-
-def archive(repo, dest, node, kind, decode=True, matchfn=None,
- prefix=None, mtime=None):
- '''create archive of repo as it was at node.
-
- dest can be name of directory, name of archive file, or file
- object to write archive to.
-
- kind is type of archive to create.
-
- decode tells whether to put files through decode filters from
- hgrc.
-
- matchfn is function to filter names of files to write to archive.
-
- prefix is name of path to put before every archive member.'''
-
- def write(name, mode, islink, getdata):
- if matchfn and not matchfn(name): return
- data = getdata()
- if decode:
- data = repo.wwritedata(name, data)
- archiver.addfile(name, mode, islink, data)
-
- if kind not in archivers:
- raise util.Abort(_("unknown archive type '%s'") % kind)
-
- ctx = repo[node]
- archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
-
- if repo.ui.configbool("ui", "archivemeta", True):
- write('.hg_archival.txt', 0644, False,
- lambda: 'repo: %s\nnode: %s\n' % (
- hex(repo.changelog.node(0)), hex(node)))
- for f in ctx:
- ff = ctx.flags(f)
- write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
- archiver.done()
diff --git a/sys/lib/python/mercurial/base85.c b/sys/lib/python/mercurial/base85.c
deleted file mode 100644
index 3e6c0614c..000000000
--- a/sys/lib/python/mercurial/base85.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- base85 codec
-
- Copyright 2006 Brendan Cully <brendan@kublai.com>
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-
- Largely based on git's implementation
-*/
-
-#include <Python.h>
-
-static const char b85chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~";
-static char b85dec[256];
-
-static void
-b85prep(void)
-{
- int i;
-
- memset(b85dec, 0, sizeof(b85dec));
- for (i = 0; i < sizeof(b85chars); i++)
- b85dec[(int)(b85chars[i])] = i + 1;
-}
-
-static PyObject *
-b85encode(PyObject *self, PyObject *args)
-{
- const unsigned char *text;
- PyObject *out;
- char *dst;
- int len, olen, i;
- unsigned int acc, val, ch;
- int pad = 0;
-
- if (!PyArg_ParseTuple(args, "s#|i", &text, &len, &pad))
- return NULL;
-
- if (pad)
- olen = ((len + 3) / 4 * 5) - 3;
- else {
- olen = len % 4;
- if (olen)
- olen++;
- olen += len / 4 * 5;
- }
- if (!(out = PyString_FromStringAndSize(NULL, olen + 3)))
- return NULL;
-
- dst = PyString_AS_STRING(out);
-
- while (len) {
- acc = 0;
- for (i = 24; i >= 0; i -= 8) {
- ch = *text++;
- acc |= ch << i;
- if (--len == 0)
- break;
- }
- for (i = 4; i >= 0; i--) {
- val = acc % 85;
- acc /= 85;
- dst[i] = b85chars[val];
- }
- dst += 5;
- }
-
- if (!pad)
- _PyString_Resize(&out, olen);
-
- return out;
-}
-
-static PyObject *
-b85decode(PyObject *self, PyObject *args)
-{
- PyObject *out;
- const char *text;
- char *dst;
- int len, i, j, olen, c, cap;
- unsigned int acc;
-
- if (!PyArg_ParseTuple(args, "s#", &text, &len))
- return NULL;
-
- olen = len / 5 * 4;
- i = len % 5;
- if (i)
- olen += i - 1;
- if (!(out = PyString_FromStringAndSize(NULL, olen)))
- return NULL;
-
- dst = PyString_AS_STRING(out);
-
- i = 0;
- while (i < len)
- {
- acc = 0;
- cap = len - i - 1;
- if (cap > 4)
- cap = 4;
- for (j = 0; j < cap; i++, j++)
- {
- c = b85dec[(int)*text++] - 1;
- if (c < 0)
- return PyErr_Format(PyExc_ValueError, "Bad base85 character at position %d", i);
- acc = acc * 85 + c;
- }
- if (i++ < len)
- {
- c = b85dec[(int)*text++] - 1;
- if (c < 0)
- return PyErr_Format(PyExc_ValueError, "Bad base85 character at position %d", i);
- /* overflow detection: 0xffffffff == "|NsC0",
- * "|NsC" == 0x03030303 */
- if (acc > 0x03030303 || (acc *= 85) > 0xffffffff - c)
- return PyErr_Format(PyExc_ValueError, "Bad base85 sequence at position %d", i);
- acc += c;
- }
-
- cap = olen < 4 ? olen : 4;
- olen -= cap;
- for (j = 0; j < 4 - cap; j++)
- acc *= 85;
- if (cap && cap < 4)
- acc += 0xffffff >> (cap - 1) * 8;
- for (j = 0; j < cap; j++)
- {
- acc = (acc << 8) | (acc >> 24);
- *dst++ = acc;
- }
- }
-
- return out;
-}
-
-static char base85_doc[] = "Base85 Data Encoding";
-
-static PyMethodDef methods[] = {
- {"b85encode", b85encode, METH_VARARGS,
- "Encode text in base85.\n\n"
- "If the second parameter is true, pad the result to a multiple of "
- "five characters.\n"},
- {"b85decode", b85decode, METH_VARARGS, "Decode base85 text.\n"},
- {NULL, NULL}
-};
-
-PyMODINIT_FUNC initbase85(void)
-{
- Py_InitModule3("base85", methods, base85_doc);
-
- b85prep();
-}
diff --git a/sys/lib/python/mercurial/bdiff.c b/sys/lib/python/mercurial/bdiff.c
deleted file mode 100644
index 60d3c633b..000000000
--- a/sys/lib/python/mercurial/bdiff.c
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- bdiff.c - efficient binary diff extension for Mercurial
-
- Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-
- Based roughly on Python difflib
-*/
-
-#include <Python.h>
-#include <stdlib.h>
-#include <string.h>
-#include <limits.h>
-
-#if defined __hpux || defined __SUNPRO_C || defined _AIX
-# define inline
-#endif
-
-#ifdef __linux
-# define inline __inline
-#endif
-
-#ifdef _WIN32
-#ifdef _MSC_VER
-#define inline __inline
-typedef unsigned long uint32_t;
-#else
-#include <stdint.h>
-#endif
-static uint32_t htonl(uint32_t x)
-{
- return ((x & 0x000000ffUL) << 24) |
- ((x & 0x0000ff00UL) << 8) |
- ((x & 0x00ff0000UL) >> 8) |
- ((x & 0xff000000UL) >> 24);
-}
-#else
-#include <sys/types.h>
-#if defined __BEOS__ && !defined __HAIKU__
-#include <ByteOrder.h>
-#else
-#include <arpa/inet.h>
-#endif
-#include <inttypes.h>
-#endif
-
-struct line {
- int h, len, n, e;
- const char *l;
-};
-
-struct pos {
- int pos, len;
-};
-
-struct hunk {
- int a1, a2, b1, b2;
-};
-
-struct hunklist {
- struct hunk *base, *head;
-};
-
-int splitlines(const char *a, int len, struct line **lr)
-{
- int h, i;
- const char *p, *b = a;
- const char * const plast = a + len - 1;
- struct line *l;
-
- /* count the lines */
- i = 1; /* extra line for sentinel */
- for (p = a; p < a + len; p++)
- if (*p == '\n' || p == plast)
- i++;
-
- *lr = l = (struct line *)malloc(sizeof(struct line) * i);
- if (!l)
- return -1;
-
- /* build the line array and calculate hashes */
- h = 0;
- for (p = a; p < a + len; p++) {
- /* Leonid Yuriev's hash */
- h = (h * 1664525) + *p + 1013904223;
-
- if (*p == '\n' || p == plast) {
- l->h = h;
- h = 0;
- l->len = p - b + 1;
- l->l = b;
- l->n = INT_MAX;
- l++;
- b = p + 1;
- }
- }
-
- /* set up a sentinel */
- l->h = l->len = 0;
- l->l = a + len;
- return i - 1;
-}
-
-int inline cmp(struct line *a, struct line *b)
-{
- return a->h != b->h || a->len != b->len || memcmp(a->l, b->l, a->len);
-}
-
-static int equatelines(struct line *a, int an, struct line *b, int bn)
-{
- int i, j, buckets = 1, t, scale;
- struct pos *h = NULL;
-
- /* build a hash table of the next highest power of 2 */
- while (buckets < bn + 1)
- buckets *= 2;
-
- /* try to allocate a large hash table to avoid collisions */
- for (scale = 4; scale; scale /= 2) {
- h = (struct pos *)malloc(scale * buckets * sizeof(struct pos));
- if (h)
- break;
- }
-
- if (!h)
- return 0;
-
- buckets = buckets * scale - 1;
-
- /* clear the hash table */
- for (i = 0; i <= buckets; i++) {
- h[i].pos = INT_MAX;
- h[i].len = 0;
- }
-
- /* add lines to the hash table chains */
- for (i = bn - 1; i >= 0; i--) {
- /* find the equivalence class */
- for (j = b[i].h & buckets; h[j].pos != INT_MAX;
- j = (j + 1) & buckets)
- if (!cmp(b + i, b + h[j].pos))
- break;
-
- /* add to the head of the equivalence class */
- b[i].n = h[j].pos;
- b[i].e = j;
- h[j].pos = i;
- h[j].len++; /* keep track of popularity */
- }
-
- /* compute popularity threshold */
- t = (bn >= 4000) ? bn / 1000 : bn + 1;
-
- /* match items in a to their equivalence class in b */
- for (i = 0; i < an; i++) {
- /* find the equivalence class */
- for (j = a[i].h & buckets; h[j].pos != INT_MAX;
- j = (j + 1) & buckets)
- if (!cmp(a + i, b + h[j].pos))
- break;
-
- a[i].e = j; /* use equivalence class for quick compare */
- if (h[j].len <= t)
- a[i].n = h[j].pos; /* point to head of match list */
- else
- a[i].n = INT_MAX; /* too popular */
- }
-
- /* discard hash tables */
- free(h);
- return 1;
-}
-
-static int longest_match(struct line *a, struct line *b, struct pos *pos,
- int a1, int a2, int b1, int b2, int *omi, int *omj)
-{
- int mi = a1, mj = b1, mk = 0, mb = 0, i, j, k;
-
- for (i = a1; i < a2; i++) {
- /* skip things before the current block */
- for (j = a[i].n; j < b1; j = b[j].n)
- ;
-
- /* loop through all lines match a[i] in b */
- for (; j < b2; j = b[j].n) {
- /* does this extend an earlier match? */
- if (i > a1 && j > b1 && pos[j - 1].pos == i - 1)
- k = pos[j - 1].len + 1;
- else
- k = 1;
- pos[j].pos = i;
- pos[j].len = k;
-
- /* best match so far? */
- if (k > mk) {
- mi = i;
- mj = j;
- mk = k;
- }
- }
- }
-
- if (mk) {
- mi = mi - mk + 1;
- mj = mj - mk + 1;
- }
-
- /* expand match to include neighboring popular lines */
- while (mi - mb > a1 && mj - mb > b1 &&
- a[mi - mb - 1].e == b[mj - mb - 1].e)
- mb++;
- while (mi + mk < a2 && mj + mk < b2 &&
- a[mi + mk].e == b[mj + mk].e)
- mk++;
-
- *omi = mi - mb;
- *omj = mj - mb;
-
- return mk + mb;
-}
-
-static void recurse(struct line *a, struct line *b, struct pos *pos,
- int a1, int a2, int b1, int b2, struct hunklist *l)
-{
- int i, j, k;
-
- /* find the longest match in this chunk */
- k = longest_match(a, b, pos, a1, a2, b1, b2, &i, &j);
- if (!k)
- return;
-
- /* and recurse on the remaining chunks on either side */
- recurse(a, b, pos, a1, i, b1, j, l);
- l->head->a1 = i;
- l->head->a2 = i + k;
- l->head->b1 = j;
- l->head->b2 = j + k;
- l->head++;
- recurse(a, b, pos, i + k, a2, j + k, b2, l);
-}
-
-static struct hunklist diff(struct line *a, int an, struct line *b, int bn)
-{
- struct hunklist l;
- struct hunk *curr;
- struct pos *pos;
- int t;
-
- /* allocate and fill arrays */
- t = equatelines(a, an, b, bn);
- pos = (struct pos *)calloc(bn ? bn : 1, sizeof(struct pos));
- /* we can't have more matches than lines in the shorter file */
- l.head = l.base = (struct hunk *)malloc(sizeof(struct hunk) *
- ((an<bn ? an:bn) + 1));
-
- if (pos && l.base && t) {
- /* generate the matching block list */
- recurse(a, b, pos, 0, an, 0, bn, &l);
- l.head->a1 = l.head->a2 = an;
- l.head->b1 = l.head->b2 = bn;
- l.head++;
- }
-
- free(pos);
-
- /* normalize the hunk list, try to push each hunk towards the end */
- for (curr = l.base; curr != l.head; curr++) {
- struct hunk *next = curr+1;
- int shift = 0;
-
- if (next == l.head)
- break;
-
- if (curr->a2 == next->a1)
- while (curr->a2+shift < an && curr->b2+shift < bn
- && !cmp(a+curr->a2+shift, b+curr->b2+shift))
- shift++;
- else if (curr->b2 == next->b1)
- while (curr->b2+shift < bn && curr->a2+shift < an
- && !cmp(b+curr->b2+shift, a+curr->a2+shift))
- shift++;
- if (!shift)
- continue;
- curr->b2 += shift;
- next->b1 += shift;
- curr->a2 += shift;
- next->a1 += shift;
- }
-
- return l;
-}
-
-static PyObject *blocks(PyObject *self, PyObject *args)
-{
- PyObject *sa, *sb, *rl = NULL, *m;
- struct line *a, *b;
- struct hunklist l = {NULL, NULL};
- struct hunk *h;
- int an, bn, pos = 0;
-
- if (!PyArg_ParseTuple(args, "SS:bdiff", &sa, &sb))
- return NULL;
-
- an = splitlines(PyString_AsString(sa), PyString_Size(sa), &a);
- bn = splitlines(PyString_AsString(sb), PyString_Size(sb), &b);
- if (!a || !b)
- goto nomem;
-
- l = diff(a, an, b, bn);
- rl = PyList_New(l.head - l.base);
- if (!l.head || !rl)
- goto nomem;
-
- for (h = l.base; h != l.head; h++) {
- m = Py_BuildValue("iiii", h->a1, h->a2, h->b1, h->b2);
- PyList_SetItem(rl, pos, m);
- pos++;
- }
-
-nomem:
- free(a);
- free(b);
- free(l.base);
- return rl ? rl : PyErr_NoMemory();
-}
-
-static PyObject *bdiff(PyObject *self, PyObject *args)
-{
- char *sa, *sb;
- PyObject *result = NULL;
- struct line *al, *bl;
- struct hunklist l = {NULL, NULL};
- struct hunk *h;
- char encode[12], *rb;
- int an, bn, len = 0, la, lb;
-
- if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb))
- return NULL;
-
- an = splitlines(sa, la, &al);
- bn = splitlines(sb, lb, &bl);
- if (!al || !bl)
- goto nomem;
-
- l = diff(al, an, bl, bn);
- if (!l.head)
- goto nomem;
-
- /* calculate length of output */
- la = lb = 0;
- for (h = l.base; h != l.head; h++) {
- if (h->a1 != la || h->b1 != lb)
- len += 12 + bl[h->b1].l - bl[lb].l;
- la = h->a2;
- lb = h->b2;
- }
-
- result = PyString_FromStringAndSize(NULL, len);
- if (!result)
- goto nomem;
-
- /* build binary patch */
- rb = PyString_AsString(result);
- la = lb = 0;
-
- for (h = l.base; h != l.head; h++) {
- if (h->a1 != la || h->b1 != lb) {
- len = bl[h->b1].l - bl[lb].l;
- *(uint32_t *)(encode) = htonl(al[la].l - al->l);
- *(uint32_t *)(encode + 4) = htonl(al[h->a1].l - al->l);
- *(uint32_t *)(encode + 8) = htonl(len);
- memcpy(rb, encode, 12);
- memcpy(rb + 12, bl[lb].l, len);
- rb += 12 + len;
- }
- la = h->a2;
- lb = h->b2;
- }
-
-nomem:
- free(al);
- free(bl);
- free(l.base);
- return result ? result : PyErr_NoMemory();
-}
-
-static char mdiff_doc[] = "Efficient binary diff.";
-
-static PyMethodDef methods[] = {
- {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
- {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
- {NULL, NULL}
-};
-
-PyMODINIT_FUNC initbdiff(void)
-{
- Py_InitModule3("bdiff", methods, mdiff_doc);
-}
-
diff --git a/sys/lib/python/mercurial/bundlerepo.py b/sys/lib/python/mercurial/bundlerepo.py
deleted file mode 100644
index 14d74e1e5..000000000
--- a/sys/lib/python/mercurial/bundlerepo.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# bundlerepo.py - repository class for viewing uncompressed bundles
-#
-# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-"""Repository class for viewing uncompressed bundles.
-
-This provides a read-only repository interface to bundles as if they
-were part of the actual repository.
-"""
-
-from node import nullid
-from i18n import _
-import os, struct, bz2, zlib, tempfile, shutil
-import changegroup, util, mdiff
-import localrepo, changelog, manifest, filelog, revlog, error
-
-class bundlerevlog(revlog.revlog):
- def __init__(self, opener, indexfile, bundlefile,
- linkmapper=None):
- # How it works:
- # to retrieve a revision, we need to know the offset of
- # the revision in the bundlefile (an opened file).
- #
- # We store this offset in the index (start), to differentiate a
- # rev in the bundle and from a rev in the revlog, we check
- # len(index[r]). If the tuple is bigger than 7, it is a bundle
- # (it is bigger since we store the node to which the delta is)
- #
- revlog.revlog.__init__(self, opener, indexfile)
- self.bundlefile = bundlefile
- self.basemap = {}
- def chunkpositer():
- for chunk in changegroup.chunkiter(bundlefile):
- pos = bundlefile.tell()
- yield chunk, pos - len(chunk)
- n = len(self)
- prev = None
- for chunk, start in chunkpositer():
- size = len(chunk)
- if size < 80:
- raise util.Abort(_("invalid changegroup"))
- start += 80
- size -= 80
- node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
- if node in self.nodemap:
- prev = node
- continue
- for p in (p1, p2):
- if not p in self.nodemap:
- raise error.LookupError(p1, self.indexfile,
- _("unknown parent"))
- if linkmapper is None:
- link = n
- else:
- link = linkmapper(cs)
-
- if not prev:
- prev = p1
- # start, size, full unc. size, base (unused), link, p1, p2, node
- e = (revlog.offset_type(start, 0), size, -1, -1, link,
- self.rev(p1), self.rev(p2), node)
- self.basemap[n] = prev
- self.index.insert(-1, e)
- self.nodemap[node] = n
- prev = node
- n += 1
-
- def bundle(self, rev):
- """is rev from the bundle"""
- if rev < 0:
- return False
- return rev in self.basemap
- def bundlebase(self, rev): return self.basemap[rev]
- def chunk(self, rev, df=None, cachelen=4096):
- # Warning: in case of bundle, the diff is against bundlebase,
- # not against rev - 1
- # XXX: could use some caching
- if not self.bundle(rev):
- return revlog.revlog.chunk(self, rev, df)
- self.bundlefile.seek(self.start(rev))
- return self.bundlefile.read(self.length(rev))
-
- def revdiff(self, rev1, rev2):
- """return or calculate a delta between two revisions"""
- if self.bundle(rev1) and self.bundle(rev2):
- # hot path for bundle
- revb = self.rev(self.bundlebase(rev2))
- if revb == rev1:
- return self.chunk(rev2)
- elif not self.bundle(rev1) and not self.bundle(rev2):
- return revlog.revlog.revdiff(self, rev1, rev2)
-
- return mdiff.textdiff(self.revision(self.node(rev1)),
- self.revision(self.node(rev2)))
-
- def revision(self, node):
- """return an uncompressed revision of a given"""
- if node == nullid: return ""
-
- text = None
- chain = []
- iter_node = node
- rev = self.rev(iter_node)
- # reconstruct the revision if it is from a changegroup
- while self.bundle(rev):
- if self._cache and self._cache[0] == iter_node:
- text = self._cache[2]
- break
- chain.append(rev)
- iter_node = self.bundlebase(rev)
- rev = self.rev(iter_node)
- if text is None:
- text = revlog.revlog.revision(self, iter_node)
-
- while chain:
- delta = self.chunk(chain.pop())
- text = mdiff.patches(text, [delta])
-
- p1, p2 = self.parents(node)
- if node != revlog.hash(text, p1, p2):
- raise error.RevlogError(_("integrity check failed on %s:%d")
- % (self.datafile, self.rev(node)))
-
- self._cache = (node, self.rev(node), text)
- return text
-
- def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
- raise NotImplementedError
- def addgroup(self, revs, linkmapper, transaction):
- raise NotImplementedError
- def strip(self, rev, minlink):
- raise NotImplementedError
- def checksize(self):
- raise NotImplementedError
-
-class bundlechangelog(bundlerevlog, changelog.changelog):
- def __init__(self, opener, bundlefile):
- changelog.changelog.__init__(self, opener)
- bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
-
-class bundlemanifest(bundlerevlog, manifest.manifest):
- def __init__(self, opener, bundlefile, linkmapper):
- manifest.manifest.__init__(self, opener)
- bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
- linkmapper)
-
-class bundlefilelog(bundlerevlog, filelog.filelog):
- def __init__(self, opener, path, bundlefile, linkmapper):
- filelog.filelog.__init__(self, opener, path)
- bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
- linkmapper)
-
-class bundlerepository(localrepo.localrepository):
- def __init__(self, ui, path, bundlename):
- self._tempparent = None
- try:
- localrepo.localrepository.__init__(self, ui, path)
- except error.RepoError:
- self._tempparent = tempfile.mkdtemp()
- localrepo.instance(ui, self._tempparent, 1)
- localrepo.localrepository.__init__(self, ui, self._tempparent)
-
- if path:
- self._url = 'bundle:' + path + '+' + bundlename
- else:
- self._url = 'bundle:' + bundlename
-
- self.tempfile = None
- self.bundlefile = open(bundlename, "rb")
- header = self.bundlefile.read(6)
- if not header.startswith("HG"):
- raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
- elif not header.startswith("HG10"):
- raise util.Abort(_("%s: unknown bundle version") % bundlename)
- elif (header == "HG10BZ") or (header == "HG10GZ"):
- fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
- suffix=".hg10un", dir=self.path)
- self.tempfile = temp
- fptemp = os.fdopen(fdtemp, 'wb')
- def generator(f):
- if header == "HG10BZ":
- zd = bz2.BZ2Decompressor()
- zd.decompress("BZ")
- elif header == "HG10GZ":
- zd = zlib.decompressobj()
- for chunk in f:
- yield zd.decompress(chunk)
- gen = generator(util.filechunkiter(self.bundlefile, 4096))
-
- try:
- fptemp.write("HG10UN")
- for chunk in gen:
- fptemp.write(chunk)
- finally:
- fptemp.close()
- self.bundlefile.close()
-
- self.bundlefile = open(self.tempfile, "rb")
- # seek right after the header
- self.bundlefile.seek(6)
- elif header == "HG10UN":
- # nothing to do
- pass
- else:
- raise util.Abort(_("%s: unknown bundle compression type")
- % bundlename)
- # dict with the mapping 'filename' -> position in the bundle
- self.bundlefilespos = {}
-
- @util.propertycache
- def changelog(self):
- c = bundlechangelog(self.sopener, self.bundlefile)
- self.manstart = self.bundlefile.tell()
- return c
-
- @util.propertycache
- def manifest(self):
- self.bundlefile.seek(self.manstart)
- m = bundlemanifest(self.sopener, self.bundlefile, self.changelog.rev)
- self.filestart = self.bundlefile.tell()
- return m
-
- @util.propertycache
- def manstart(self):
- self.changelog
- return self.manstart
-
- @util.propertycache
- def filestart(self):
- self.manifest
- return self.filestart
-
- def url(self):
- return self._url
-
- def file(self, f):
- if not self.bundlefilespos:
- self.bundlefile.seek(self.filestart)
- while 1:
- chunk = changegroup.getchunk(self.bundlefile)
- if not chunk:
- break
- self.bundlefilespos[chunk] = self.bundlefile.tell()
- for c in changegroup.chunkiter(self.bundlefile):
- pass
-
- if f[0] == '/':
- f = f[1:]
- if f in self.bundlefilespos:
- self.bundlefile.seek(self.bundlefilespos[f])
- return bundlefilelog(self.sopener, f, self.bundlefile,
- self.changelog.rev)
- else:
- return filelog.filelog(self.sopener, f)
-
- def close(self):
- """Close assigned bundle file immediately."""
- self.bundlefile.close()
-
- def __del__(self):
- bundlefile = getattr(self, 'bundlefile', None)
- if bundlefile and not bundlefile.closed:
- bundlefile.close()
- tempfile = getattr(self, 'tempfile', None)
- if tempfile is not None:
- os.unlink(tempfile)
- if self._tempparent:
- shutil.rmtree(self._tempparent, True)
-
- def cancopy(self):
- return False
-
- def getcwd(self):
- return os.getcwd() # always outside the repo
-
-def instance(ui, path, create):
- if create:
- raise util.Abort(_('cannot create new bundle repository'))
- parentpath = ui.config("bundle", "mainreporoot", "")
- if parentpath:
- # Try to make the full path relative so we get a nice, short URL.
- # In particular, we don't want temp dir names in test outputs.
- cwd = os.getcwd()
- if parentpath == cwd:
- parentpath = ''
- else:
- cwd = os.path.join(cwd,'')
- if parentpath.startswith(cwd):
- parentpath = parentpath[len(cwd):]
- path = util.drop_scheme('file', path)
- if path.startswith('bundle:'):
- path = util.drop_scheme('bundle', path)
- s = path.split("+", 1)
- if len(s) == 1:
- repopath, bundlename = parentpath, s[0]
- else:
- repopath, bundlename = s
- else:
- repopath, bundlename = parentpath, path
- return bundlerepository(ui, repopath, bundlename)
diff --git a/sys/lib/python/mercurial/byterange.py b/sys/lib/python/mercurial/byterange.py
deleted file mode 100644
index f833e8270..000000000
--- a/sys/lib/python/mercurial/byterange.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the
-# Free Software Foundation, Inc.,
-# 59 Temple Place, Suite 330,
-# Boston, MA 02111-1307 USA
-
-# This file is part of urlgrabber, a high-level cross-protocol url-grabber
-# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
-
-# $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
-
-import os
-import stat
-import urllib
-import urllib2
-import email.Utils
-
-try:
- from cStringIO import StringIO
-except ImportError, msg:
- from StringIO import StringIO
-
-class RangeError(IOError):
- """Error raised when an unsatisfiable range is requested."""
- pass
-
-class HTTPRangeHandler(urllib2.BaseHandler):
- """Handler that enables HTTP Range headers.
-
- This was extremely simple. The Range header is a HTTP feature to
- begin with so all this class does is tell urllib2 that the
- "206 Partial Content" reponse from the HTTP server is what we
- expected.
-
- Example:
- import urllib2
- import byterange
-
- range_handler = range.HTTPRangeHandler()
- opener = urllib2.build_opener(range_handler)
-
- # install it
- urllib2.install_opener(opener)
-
- # create Request and set Range header
- req = urllib2.Request('http://www.python.org/')
- req.header['Range'] = 'bytes=30-50'
- f = urllib2.urlopen(req)
- """
-
- def http_error_206(self, req, fp, code, msg, hdrs):
- # 206 Partial Content Response
- r = urllib.addinfourl(fp, hdrs, req.get_full_url())
- r.code = code
- r.msg = msg
- return r
-
- def http_error_416(self, req, fp, code, msg, hdrs):
- # HTTP's Range Not Satisfiable error
- raise RangeError('Requested Range Not Satisfiable')
-
-class RangeableFileObject:
- """File object wrapper to enable raw range handling.
- This was implemented primarilary for handling range
- specifications for file:// urls. This object effectively makes
- a file object look like it consists only of a range of bytes in
- the stream.
-
- Examples:
- # expose 10 bytes, starting at byte position 20, from
- # /etc/aliases.
- >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
- # seek seeks within the range (to position 23 in this case)
- >>> fo.seek(3)
- # tell tells where your at _within the range_ (position 3 in
- # this case)
- >>> fo.tell()
- # read EOFs if an attempt is made to read past the last
- # byte in the range. the following will return only 7 bytes.
- >>> fo.read(30)
- """
-
- def __init__(self, fo, rangetup):
- """Create a RangeableFileObject.
- fo -- a file like object. only the read() method need be
- supported but supporting an optimized seek() is
- preferable.
- rangetup -- a (firstbyte,lastbyte) tuple specifying the range
- to work over.
- The file object provided is assumed to be at byte offset 0.
- """
- self.fo = fo
- (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
- self.realpos = 0
- self._do_seek(self.firstbyte)
-
- def __getattr__(self, name):
- """This effectively allows us to wrap at the instance level.
- Any attribute not found in _this_ object will be searched for
- in self.fo. This includes methods."""
- if hasattr(self.fo, name):
- return getattr(self.fo, name)
- raise AttributeError(name)
-
- def tell(self):
- """Return the position within the range.
- This is different from fo.seek in that position 0 is the
- first byte position of the range tuple. For example, if
- this object was created with a range tuple of (500,899),
- tell() will return 0 when at byte position 500 of the file.
- """
- return (self.realpos - self.firstbyte)
-
- def seek(self, offset, whence=0):
- """Seek within the byte range.
- Positioning is identical to that described under tell().
- """
- assert whence in (0, 1, 2)
- if whence == 0: # absolute seek
- realoffset = self.firstbyte + offset
- elif whence == 1: # relative seek
- realoffset = self.realpos + offset
- elif whence == 2: # absolute from end of file
- # XXX: are we raising the right Error here?
- raise IOError('seek from end of file not supported.')
-
- # do not allow seek past lastbyte in range
- if self.lastbyte and (realoffset >= self.lastbyte):
- realoffset = self.lastbyte
-
- self._do_seek(realoffset - self.realpos)
-
- def read(self, size=-1):
- """Read within the range.
- This method will limit the size read based on the range.
- """
- size = self._calc_read_size(size)
- rslt = self.fo.read(size)
- self.realpos += len(rslt)
- return rslt
-
- def readline(self, size=-1):
- """Read lines within the range.
- This method will limit the size read based on the range.
- """
- size = self._calc_read_size(size)
- rslt = self.fo.readline(size)
- self.realpos += len(rslt)
- return rslt
-
- def _calc_read_size(self, size):
- """Handles calculating the amount of data to read based on
- the range.
- """
- if self.lastbyte:
- if size > -1:
- if ((self.realpos + size) >= self.lastbyte):
- size = (self.lastbyte - self.realpos)
- else:
- size = (self.lastbyte - self.realpos)
- return size
-
- def _do_seek(self, offset):
- """Seek based on whether wrapped object supports seek().
- offset is relative to the current position (self.realpos).
- """
- assert offset >= 0
- if not hasattr(self.fo, 'seek'):
- self._poor_mans_seek(offset)
- else:
- self.fo.seek(self.realpos + offset)
- self.realpos += offset
-
- def _poor_mans_seek(self, offset):
- """Seek by calling the wrapped file objects read() method.
- This is used for file like objects that do not have native
- seek support. The wrapped objects read() method is called
- to manually seek to the desired position.
- offset -- read this number of bytes from the wrapped
- file object.
- raise RangeError if we encounter EOF before reaching the
- specified offset.
- """
- pos = 0
- bufsize = 1024
- while pos < offset:
- if (pos + bufsize) > offset:
- bufsize = offset - pos
- buf = self.fo.read(bufsize)
- if len(buf) != bufsize:
- raise RangeError('Requested Range Not Satisfiable')
- pos += bufsize
-
-class FileRangeHandler(urllib2.FileHandler):
- """FileHandler subclass that adds Range support.
- This class handles Range headers exactly like an HTTP
- server would.
- """
- def open_local_file(self, req):
- import mimetypes
- import email
- host = req.get_host()
- file = req.get_selector()
- localfile = urllib.url2pathname(file)
- stats = os.stat(localfile)
- size = stats[stat.ST_SIZE]
- modified = email.Utils.formatdate(stats[stat.ST_MTIME])
- mtype = mimetypes.guess_type(file)[0]
- if host:
- host, port = urllib.splitport(host)
- if port or socket.gethostbyname(host) not in self.get_names():
- raise urllib2.URLError('file not on local host')
- fo = open(localfile,'rb')
- brange = req.headers.get('Range', None)
- brange = range_header_to_tuple(brange)
- assert brange != ()
- if brange:
- (fb, lb) = brange
- if lb == '':
- lb = size
- if fb < 0 or fb > size or lb > size:
- raise RangeError('Requested Range Not Satisfiable')
- size = (lb - fb)
- fo = RangeableFileObject(fo, (fb, lb))
- headers = email.message_from_string(
- 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
- (mtype or 'text/plain', size, modified))
- return urllib.addinfourl(fo, headers, 'file:'+file)
-
-
-# FTP Range Support
-# Unfortunately, a large amount of base FTP code had to be copied
-# from urllib and urllib2 in order to insert the FTP REST command.
-# Code modifications for range support have been commented as
-# follows:
-# -- range support modifications start/end here
-
-from urllib import splitport, splituser, splitpasswd, splitattr, \
- unquote, addclosehook, addinfourl
-import ftplib
-import socket
-import sys
-import mimetypes
-import email
-
-class FTPRangeHandler(urllib2.FTPHandler):
- def ftp_open(self, req):
- host = req.get_host()
- if not host:
- raise IOError('ftp error', 'no host given')
- host, port = splitport(host)
- if port is None:
- port = ftplib.FTP_PORT
-
- # username/password handling
- user, host = splituser(host)
- if user:
- user, passwd = splitpasswd(user)
- else:
- passwd = None
- host = unquote(host)
- user = unquote(user or '')
- passwd = unquote(passwd or '')
-
- try:
- host = socket.gethostbyname(host)
- except socket.error, msg:
- raise urllib2.URLError(msg)
- path, attrs = splitattr(req.get_selector())
- dirs = path.split('/')
- dirs = map(unquote, dirs)
- dirs, file = dirs[:-1], dirs[-1]
- if dirs and not dirs[0]:
- dirs = dirs[1:]
- try:
- fw = self.connect_ftp(user, passwd, host, port, dirs)
- type = file and 'I' or 'D'
- for attr in attrs:
- attr, value = splitattr(attr)
- if attr.lower() == 'type' and \
- value in ('a', 'A', 'i', 'I', 'd', 'D'):
- type = value.upper()
-
- # -- range support modifications start here
- rest = None
- range_tup = range_header_to_tuple(req.headers.get('Range', None))
- assert range_tup != ()
- if range_tup:
- (fb, lb) = range_tup
- if fb > 0:
- rest = fb
- # -- range support modifications end here
-
- fp, retrlen = fw.retrfile(file, type, rest)
-
- # -- range support modifications start here
- if range_tup:
- (fb, lb) = range_tup
- if lb == '':
- if retrlen is None or retrlen == 0:
- raise RangeError('Requested Range Not Satisfiable due to unobtainable file length.')
- lb = retrlen
- retrlen = lb - fb
- if retrlen < 0:
- # beginning of range is larger than file
- raise RangeError('Requested Range Not Satisfiable')
- else:
- retrlen = lb - fb
- fp = RangeableFileObject(fp, (0, retrlen))
- # -- range support modifications end here
-
- headers = ""
- mtype = mimetypes.guess_type(req.get_full_url())[0]
- if mtype:
- headers += "Content-Type: %s\n" % mtype
- if retrlen is not None and retrlen >= 0:
- headers += "Content-Length: %d\n" % retrlen
- headers = email.message_from_string(headers)
- return addinfourl(fp, headers, req.get_full_url())
- except ftplib.all_errors, msg:
- raise IOError('ftp error', msg), sys.exc_info()[2]
-
- def connect_ftp(self, user, passwd, host, port, dirs):
- fw = ftpwrapper(user, passwd, host, port, dirs)
- return fw
-
-class ftpwrapper(urllib.ftpwrapper):
- # range support note:
- # this ftpwrapper code is copied directly from
- # urllib. The only enhancement is to add the rest
- # argument and pass it on to ftp.ntransfercmd
- def retrfile(self, file, type, rest=None):
- self.endtransfer()
- if type in ('d', 'D'):
- cmd = 'TYPE A'
- isdir = 1
- else:
- cmd = 'TYPE ' + type
- isdir = 0
- try:
- self.ftp.voidcmd(cmd)
- except ftplib.all_errors:
- self.init()
- self.ftp.voidcmd(cmd)
- conn = None
- if file and not isdir:
- # Use nlst to see if the file exists at all
- try:
- self.ftp.nlst(file)
- except ftplib.error_perm, reason:
- raise IOError('ftp error', reason), sys.exc_info()[2]
- # Restore the transfer mode!
- self.ftp.voidcmd(cmd)
- # Try to retrieve as a file
- try:
- cmd = 'RETR ' + file
- conn = self.ftp.ntransfercmd(cmd, rest)
- except ftplib.error_perm, reason:
- if str(reason).startswith('501'):
- # workaround for REST not supported error
- fp, retrlen = self.retrfile(file, type)
- fp = RangeableFileObject(fp, (rest,''))
- return (fp, retrlen)
- elif not str(reason).startswith('550'):
- raise IOError('ftp error', reason), sys.exc_info()[2]
- if not conn:
- # Set transfer mode to ASCII!
- self.ftp.voidcmd('TYPE A')
- # Try a directory listing
- if file:
- cmd = 'LIST ' + file
- else:
- cmd = 'LIST'
- conn = self.ftp.ntransfercmd(cmd)
- self.busy = 1
- # Pass back both a suitably decorated object and a retrieval length
- return (addclosehook(conn[0].makefile('rb'),
- self.endtransfer), conn[1])
-
-
-####################################################################
-# Range Tuple Functions
-# XXX: These range tuple functions might go better in a class.
-
-_rangere = None
-def range_header_to_tuple(range_header):
- """Get a (firstbyte,lastbyte) tuple from a Range header value.
-
- Range headers have the form "bytes=<firstbyte>-<lastbyte>". This
- function pulls the firstbyte and lastbyte values and returns
- a (firstbyte,lastbyte) tuple. If lastbyte is not specified in
- the header value, it is returned as an empty string in the
- tuple.
-
- Return None if range_header is None
- Return () if range_header does not conform to the range spec
- pattern.
-
- """
- global _rangere
- if range_header is None:
- return None
- if _rangere is None:
- import re
- _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
- match = _rangere.match(range_header)
- if match:
- tup = range_tuple_normalize(match.group(1, 2))
- if tup and tup[1]:
- tup = (tup[0], tup[1]+1)
- return tup
- return ()
-
-def range_tuple_to_header(range_tup):
- """Convert a range tuple to a Range header value.
- Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None
- if no range is needed.
- """
- if range_tup is None:
- return None
- range_tup = range_tuple_normalize(range_tup)
- if range_tup:
- if range_tup[1]:
- range_tup = (range_tup[0], range_tup[1] - 1)
- return 'bytes=%s-%s' % range_tup
-
-def range_tuple_normalize(range_tup):
- """Normalize a (first_byte,last_byte) range tuple.
- Return a tuple whose first element is guaranteed to be an int
- and whose second element will be '' (meaning: the last byte) or
- an int. Finally, return None if the normalized tuple == (0,'')
- as that is equivelant to retrieving the entire file.
- """
- if range_tup is None:
- return None
- # handle first byte
- fb = range_tup[0]
- if fb in (None, ''):
- fb = 0
- else:
- fb = int(fb)
- # handle last byte
- try:
- lb = range_tup[1]
- except IndexError:
- lb = ''
- else:
- if lb is None:
- lb = ''
- elif lb != '':
- lb = int(lb)
- # check if range is over the entire file
- if (fb, lb) == (0, ''):
- return None
- # check that the range is valid
- if lb < fb:
- raise RangeError('Invalid byte range: %s-%s' % (fb, lb))
- return (fb, lb)
diff --git a/sys/lib/python/mercurial/changegroup.py b/sys/lib/python/mercurial/changegroup.py
deleted file mode 100644
index a4ada4eb6..000000000
--- a/sys/lib/python/mercurial/changegroup.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# changegroup.py - Mercurial changegroup manipulation functions
-#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import util
-import struct, os, bz2, zlib, tempfile
-
-def getchunk(source):
- """get a chunk from a changegroup"""
- d = source.read(4)
- if not d:
- return ""
- l = struct.unpack(">l", d)[0]
- if l <= 4:
- return ""
- d = source.read(l - 4)
- if len(d) < l - 4:
- raise util.Abort(_("premature EOF reading chunk"
- " (got %d bytes, expected %d)")
- % (len(d), l - 4))
- return d
-
-def chunkiter(source):
- """iterate through the chunks in source"""
- while 1:
- c = getchunk(source)
- if not c:
- break
- yield c
-
-def chunkheader(length):
- """build a changegroup chunk header"""
- return struct.pack(">l", length + 4)
-
-def closechunk():
- return struct.pack(">l", 0)
-
-class nocompress(object):
- def compress(self, x):
- return x
- def flush(self):
- return ""
-
-bundletypes = {
- "": ("", nocompress),
- "HG10UN": ("HG10UN", nocompress),
- "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
- "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
-}
-
-# hgweb uses this list to communicate its preferred type
-bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
-
-def writebundle(cg, filename, bundletype):
- """Write a bundle file and return its filename.
-
- Existing files will not be overwritten.
- If no filename is specified, a temporary file is created.
- bz2 compression can be turned off.
- The bundle file will be deleted in case of errors.
- """
-
- fh = None
- cleanup = None
- try:
- if filename:
- fh = open(filename, "wb")
- else:
- fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
- fh = os.fdopen(fd, "wb")
- cleanup = filename
-
- header, compressor = bundletypes[bundletype]
- fh.write(header)
- z = compressor()
-
- # parse the changegroup data, otherwise we will block
- # in case of sshrepo because we don't know the end of the stream
-
- # an empty chunkiter is the end of the changegroup
- # a changegroup has at least 2 chunkiters (changelog and manifest).
- # after that, an empty chunkiter is the end of the changegroup
- empty = False
- count = 0
- while not empty or count <= 2:
- empty = True
- count += 1
- for chunk in chunkiter(cg):
- empty = False
- fh.write(z.compress(chunkheader(len(chunk))))
- pos = 0
- while pos < len(chunk):
- next = pos + 2**20
- fh.write(z.compress(chunk[pos:next]))
- pos = next
- fh.write(z.compress(closechunk()))
- fh.write(z.flush())
- cleanup = None
- return filename
- finally:
- if fh is not None:
- fh.close()
- if cleanup is not None:
- os.unlink(cleanup)
-
-def unbundle(header, fh):
- if header == 'HG10UN':
- return fh
- elif not header.startswith('HG'):
- # old client with uncompressed bundle
- def generator(f):
- yield header
- for chunk in f:
- yield chunk
- elif header == 'HG10GZ':
- def generator(f):
- zd = zlib.decompressobj()
- for chunk in f:
- yield zd.decompress(chunk)
- elif header == 'HG10BZ':
- def generator(f):
- zd = bz2.BZ2Decompressor()
- zd.decompress("BZ")
- for chunk in util.filechunkiter(f, 4096):
- yield zd.decompress(chunk)
- return util.chunkbuffer(generator(fh))
-
-def readbundle(fh, fname):
- header = fh.read(6)
- if not header.startswith('HG'):
- raise util.Abort(_('%s: not a Mercurial bundle file') % fname)
- if not header.startswith('HG10'):
- raise util.Abort(_('%s: unknown bundle version') % fname)
- elif header not in bundletypes:
- raise util.Abort(_('%s: unknown bundle compression type') % fname)
- return unbundle(header, fh)
diff --git a/sys/lib/python/mercurial/changelog.py b/sys/lib/python/mercurial/changelog.py
deleted file mode 100644
index f99479022..000000000
--- a/sys/lib/python/mercurial/changelog.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# changelog.py - changelog class for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import bin, hex, nullid
-from i18n import _
-import util, error, revlog, encoding
-
-def _string_escape(text):
- """
- >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
- >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
- >>> s
- 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
- >>> res = _string_escape(s)
- >>> s == res.decode('string_escape')
- True
- """
- # subset of the string_escape codec
- text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
- return text.replace('\0', '\\0')
-
-def decodeextra(text):
- extra = {}
- for l in text.split('\0'):
- if l:
- k, v = l.decode('string_escape').split(':', 1)
- extra[k] = v
- return extra
-
-def encodeextra(d):
- # keys must be sorted to produce a deterministic changelog entry
- items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
- return "\0".join(items)
-
-class appender(object):
- '''the changelog index must be updated last on disk, so we use this class
- to delay writes to it'''
- def __init__(self, fp, buf):
- self.data = buf
- self.fp = fp
- self.offset = fp.tell()
- self.size = util.fstat(fp).st_size
-
- def end(self):
- return self.size + len("".join(self.data))
- def tell(self):
- return self.offset
- def flush(self):
- pass
- def close(self):
- self.fp.close()
-
- def seek(self, offset, whence=0):
- '''virtual file offset spans real file and data'''
- if whence == 0:
- self.offset = offset
- elif whence == 1:
- self.offset += offset
- elif whence == 2:
- self.offset = self.end() + offset
- if self.offset < self.size:
- self.fp.seek(self.offset)
-
- def read(self, count=-1):
- '''only trick here is reads that span real file and data'''
- ret = ""
- if self.offset < self.size:
- s = self.fp.read(count)
- ret = s
- self.offset += len(s)
- if count > 0:
- count -= len(s)
- if count != 0:
- doff = self.offset - self.size
- self.data.insert(0, "".join(self.data))
- del self.data[1:]
- s = self.data[0][doff:doff+count]
- self.offset += len(s)
- ret += s
- return ret
-
- def write(self, s):
- self.data.append(str(s))
- self.offset += len(s)
-
-def delayopener(opener, target, divert, buf):
- def o(name, mode='r'):
- if name != target:
- return opener(name, mode)
- if divert:
- return opener(name + ".a", mode.replace('a', 'w'))
- # otherwise, divert to memory
- return appender(opener(name, mode), buf)
- return o
-
-class changelog(revlog.revlog):
- def __init__(self, opener):
- revlog.revlog.__init__(self, opener, "00changelog.i")
- self._realopener = opener
- self._delayed = False
- self._divert = False
-
- def delayupdate(self):
- "delay visibility of index updates to other readers"
- self._delayed = True
- self._divert = (len(self) == 0)
- self._delaybuf = []
- self.opener = delayopener(self._realopener, self.indexfile,
- self._divert, self._delaybuf)
-
- def finalize(self, tr):
- "finalize index updates"
- self._delayed = False
- self.opener = self._realopener
- # move redirected index data back into place
- if self._divert:
- n = self.opener(self.indexfile + ".a").name
- util.rename(n, n[:-2])
- elif self._delaybuf:
- fp = self.opener(self.indexfile, 'a')
- fp.write("".join(self._delaybuf))
- fp.close()
- self._delaybuf = []
- # split when we're done
- self.checkinlinesize(tr)
-
- def readpending(self, file):
- r = revlog.revlog(self.opener, file)
- self.index = r.index
- self.nodemap = r.nodemap
- self._chunkcache = r._chunkcache
-
- def writepending(self):
- "create a file containing the unfinalized state for pretxnchangegroup"
- if self._delaybuf:
- # make a temporary copy of the index
- fp1 = self._realopener(self.indexfile)
- fp2 = self._realopener(self.indexfile + ".a", "w")
- fp2.write(fp1.read())
- # add pending data
- fp2.write("".join(self._delaybuf))
- fp2.close()
- # switch modes so finalize can simply rename
- self._delaybuf = []
- self._divert = True
-
- if self._divert:
- return True
-
- return False
-
- def checkinlinesize(self, tr, fp=None):
- if not self._delayed:
- revlog.revlog.checkinlinesize(self, tr, fp)
-
- def read(self, node):
- """
- format used:
- nodeid\n : manifest node in ascii
- user\n : user, no \n or \r allowed
- time tz extra\n : date (time is int or float, timezone is int)
- : extra is metadatas, encoded and separated by '\0'
- : older versions ignore it
- files\n\n : files modified by the cset, no \n or \r allowed
- (.*) : comment (free text, ideally utf-8)
-
- changelog v0 doesn't use extra
- """
- text = self.revision(node)
- if not text:
- return (nullid, "", (0, 0), [], "", {'branch': 'default'})
- last = text.index("\n\n")
- desc = encoding.tolocal(text[last + 2:])
- l = text[:last].split('\n')
- manifest = bin(l[0])
- user = encoding.tolocal(l[1])
-
- extra_data = l[2].split(' ', 2)
- if len(extra_data) != 3:
- time = float(extra_data.pop(0))
- try:
- # various tools did silly things with the time zone field.
- timezone = int(extra_data[0])
- except:
- timezone = 0
- extra = {}
- else:
- time, timezone, extra = extra_data
- time, timezone = float(time), int(timezone)
- extra = decodeextra(extra)
- if not extra.get('branch'):
- extra['branch'] = 'default'
- files = l[3:]
- return (manifest, user, (time, timezone), files, desc, extra)
-
- def add(self, manifest, files, desc, transaction, p1, p2,
- user, date=None, extra={}):
- user = user.strip()
- # An empty username or a username with a "\n" will make the
- # revision text contain two "\n\n" sequences -> corrupt
- # repository since read cannot unpack the revision.
- if not user:
- raise error.RevlogError(_("empty username"))
- if "\n" in user:
- raise error.RevlogError(_("username %s contains a newline")
- % repr(user))
-
- # strip trailing whitespace and leading and trailing empty lines
- desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
-
- user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
-
- if date:
- parseddate = "%d %d" % util.parsedate(date)
- else:
- parseddate = "%d %d" % util.makedate()
- if extra and extra.get("branch") in ("default", ""):
- del extra["branch"]
- if extra:
- extra = encodeextra(extra)
- parseddate = "%s %s" % (parseddate, extra)
- l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
- text = "\n".join(l)
- return self.addrevision(text, transaction, len(self), p1, p2)
diff --git a/sys/lib/python/mercurial/cmdutil.py b/sys/lib/python/mercurial/cmdutil.py
deleted file mode 100644
index 1c58d6bdc..000000000
--- a/sys/lib/python/mercurial/cmdutil.py
+++ /dev/null
@@ -1,1254 +0,0 @@
-# cmdutil.py - help for command processing in mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import hex, nullid, nullrev, short
-from i18n import _
-import os, sys, errno, re, glob
-import mdiff, bdiff, util, templater, patch, error, encoding
-import match as _match
-
-revrangesep = ':'
-
-def findpossible(cmd, table, strict=False):
- """
- Return cmd -> (aliases, command table entry)
- for each matching command.
- Return debug commands (or their aliases) only if no normal command matches.
- """
- choice = {}
- debugchoice = {}
- for e in table.keys():
- aliases = e.lstrip("^").split("|")
- found = None
- if cmd in aliases:
- found = cmd
- elif not strict:
- for a in aliases:
- if a.startswith(cmd):
- found = a
- break
- if found is not None:
- if aliases[0].startswith("debug") or found.startswith("debug"):
- debugchoice[found] = (aliases, table[e])
- else:
- choice[found] = (aliases, table[e])
-
- if not choice and debugchoice:
- choice = debugchoice
-
- return choice
-
-def findcmd(cmd, table, strict=True):
- """Return (aliases, command table entry) for command string."""
- choice = findpossible(cmd, table, strict)
-
- if cmd in choice:
- return choice[cmd]
-
- if len(choice) > 1:
- clist = choice.keys()
- clist.sort()
- raise error.AmbiguousCommand(cmd, clist)
-
- if choice:
- return choice.values()[0]
-
- raise error.UnknownCommand(cmd)
-
-def bail_if_changed(repo):
- if repo.dirstate.parents()[1] != nullid:
- raise util.Abort(_('outstanding uncommitted merge'))
- modified, added, removed, deleted = repo.status()[:4]
- if modified or added or removed or deleted:
- raise util.Abort(_("outstanding uncommitted changes"))
-
-def logmessage(opts):
- """ get the log message according to -m and -l option """
- message = opts.get('message')
- logfile = opts.get('logfile')
-
- if message and logfile:
- raise util.Abort(_('options --message and --logfile are mutually '
- 'exclusive'))
- if not message and logfile:
- try:
- if logfile == '-':
- message = sys.stdin.read()
- else:
- message = open(logfile).read()
- except IOError, inst:
- raise util.Abort(_("can't read commit message '%s': %s") %
- (logfile, inst.strerror))
- return message
-
-def loglimit(opts):
- """get the log limit according to option -l/--limit"""
- limit = opts.get('limit')
- if limit:
- try:
- limit = int(limit)
- except ValueError:
- raise util.Abort(_('limit must be a positive integer'))
- if limit <= 0: raise util.Abort(_('limit must be positive'))
- else:
- limit = sys.maxint
- return limit
-
-def remoteui(src, opts):
- 'build a remote ui from ui or repo and opts'
- if hasattr(src, 'baseui'): # looks like a repository
- dst = src.baseui.copy() # drop repo-specific config
- src = src.ui # copy target options from repo
- else: # assume it's a global ui object
- dst = src.copy() # keep all global options
-
- # copy ssh-specific options
- for o in 'ssh', 'remotecmd':
- v = opts.get(o) or src.config('ui', o)
- if v:
- dst.setconfig("ui", o, v)
- # copy bundle-specific options
- r = src.config('bundle', 'mainreporoot')
- if r:
- dst.setconfig('bundle', 'mainreporoot', r)
-
- return dst
-
-def revpair(repo, revs):
- '''return pair of nodes, given list of revisions. second item can
- be None, meaning use working dir.'''
-
- def revfix(repo, val, defval):
- if not val and val != 0 and defval is not None:
- val = defval
- return repo.lookup(val)
-
- if not revs:
- return repo.dirstate.parents()[0], None
- end = None
- if len(revs) == 1:
- if revrangesep in revs[0]:
- start, end = revs[0].split(revrangesep, 1)
- start = revfix(repo, start, 0)
- end = revfix(repo, end, len(repo) - 1)
- else:
- start = revfix(repo, revs[0], None)
- elif len(revs) == 2:
- if revrangesep in revs[0] or revrangesep in revs[1]:
- raise util.Abort(_('too many revisions specified'))
- start = revfix(repo, revs[0], None)
- end = revfix(repo, revs[1], None)
- else:
- raise util.Abort(_('too many revisions specified'))
- return start, end
-
-def revrange(repo, revs):
- """Yield revision as strings from a list of revision specifications."""
-
- def revfix(repo, val, defval):
- if not val and val != 0 and defval is not None:
- return defval
- return repo.changelog.rev(repo.lookup(val))
-
- seen, l = set(), []
- for spec in revs:
- if revrangesep in spec:
- start, end = spec.split(revrangesep, 1)
- start = revfix(repo, start, 0)
- end = revfix(repo, end, len(repo) - 1)
- step = start > end and -1 or 1
- for rev in xrange(start, end+step, step):
- if rev in seen:
- continue
- seen.add(rev)
- l.append(rev)
- else:
- rev = revfix(repo, spec, None)
- if rev in seen:
- continue
- seen.add(rev)
- l.append(rev)
-
- return l
-
-def make_filename(repo, pat, node,
- total=None, seqno=None, revwidth=None, pathname=None):
- node_expander = {
- 'H': lambda: hex(node),
- 'R': lambda: str(repo.changelog.rev(node)),
- 'h': lambda: short(node),
- }
- expander = {
- '%': lambda: '%',
- 'b': lambda: os.path.basename(repo.root),
- }
-
- try:
- if node:
- expander.update(node_expander)
- if node:
- expander['r'] = (lambda:
- str(repo.changelog.rev(node)).zfill(revwidth or 0))
- if total is not None:
- expander['N'] = lambda: str(total)
- if seqno is not None:
- expander['n'] = lambda: str(seqno)
- if total is not None and seqno is not None:
- expander['n'] = lambda: str(seqno).zfill(len(str(total)))
- if pathname is not None:
- expander['s'] = lambda: os.path.basename(pathname)
- expander['d'] = lambda: os.path.dirname(pathname) or '.'
- expander['p'] = lambda: pathname
-
- newname = []
- patlen = len(pat)
- i = 0
- while i < patlen:
- c = pat[i]
- if c == '%':
- i += 1
- c = pat[i]
- c = expander[c]()
- newname.append(c)
- i += 1
- return ''.join(newname)
- except KeyError, inst:
- raise util.Abort(_("invalid format spec '%%%s' in output filename") %
- inst.args[0])
-
-def make_file(repo, pat, node=None,
- total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
-
- writable = 'w' in mode or 'a' in mode
-
- if not pat or pat == '-':
- return writable and sys.stdout or sys.stdin
- if hasattr(pat, 'write') and writable:
- return pat
- if hasattr(pat, 'read') and 'r' in mode:
- return pat
- return open(make_filename(repo, pat, node, total, seqno, revwidth,
- pathname),
- mode)
-
-def expandpats(pats):
- if not util.expandglobs:
- return list(pats)
- ret = []
- for p in pats:
- kind, name = _match._patsplit(p, None)
- if kind is None:
- try:
- globbed = glob.glob(name)
- except re.error:
- globbed = [name]
- if globbed:
- ret.extend(globbed)
- continue
- ret.append(p)
- return ret
-
-def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
- if not globbed and default == 'relpath':
- pats = expandpats(pats or [])
- m = _match.match(repo.root, repo.getcwd(), pats,
- opts.get('include'), opts.get('exclude'), default)
- def badfn(f, msg):
- repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
- m.bad = badfn
- return m
-
-def matchall(repo):
- return _match.always(repo.root, repo.getcwd())
-
-def matchfiles(repo, files):
- return _match.exact(repo.root, repo.getcwd(), files)
-
-def findrenames(repo, added, removed, threshold):
- '''find renamed files -- yields (before, after, score) tuples'''
- ctx = repo['.']
- for a in added:
- aa = repo.wread(a)
- bestname, bestscore = None, threshold
- for r in removed:
- if r not in ctx:
- continue
- rr = ctx.filectx(r).data()
-
- # bdiff.blocks() returns blocks of matching lines
- # count the number of bytes in each
- equal = 0
- alines = mdiff.splitnewlines(aa)
- matches = bdiff.blocks(aa, rr)
- for x1,x2,y1,y2 in matches:
- for line in alines[x1:x2]:
- equal += len(line)
-
- lengths = len(aa) + len(rr)
- if lengths:
- myscore = equal*2.0 / lengths
- if myscore >= bestscore:
- bestname, bestscore = r, myscore
- if bestname:
- yield bestname, a, bestscore
-
-def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
- if dry_run is None:
- dry_run = opts.get('dry_run')
- if similarity is None:
- similarity = float(opts.get('similarity') or 0)
- # we'd use status here, except handling of symlinks and ignore is tricky
- added, unknown, deleted, removed = [], [], [], []
- audit_path = util.path_auditor(repo.root)
- m = match(repo, pats, opts)
- for abs in repo.walk(m):
- target = repo.wjoin(abs)
- good = True
- try:
- audit_path(abs)
- except:
- good = False
- rel = m.rel(abs)
- exact = m.exact(abs)
- if good and abs not in repo.dirstate:
- unknown.append(abs)
- if repo.ui.verbose or not exact:
- repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
- elif repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
- or (os.path.isdir(target) and not os.path.islink(target))):
- deleted.append(abs)
- if repo.ui.verbose or not exact:
- repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
- # for finding renames
- elif repo.dirstate[abs] == 'r':
- removed.append(abs)
- elif repo.dirstate[abs] == 'a':
- added.append(abs)
- if not dry_run:
- repo.remove(deleted)
- repo.add(unknown)
- if similarity > 0:
- for old, new, score in findrenames(repo, added + unknown,
- removed + deleted, similarity):
- if repo.ui.verbose or not m.exact(old) or not m.exact(new):
- repo.ui.status(_('recording removal of %s as rename to %s '
- '(%d%% similar)\n') %
- (m.rel(old), m.rel(new), score * 100))
- if not dry_run:
- repo.copy(old, new)
-
-def copy(ui, repo, pats, opts, rename=False):
- # called with the repo lock held
- #
- # hgsep => pathname that uses "/" to separate directories
- # ossep => pathname that uses os.sep to separate directories
- cwd = repo.getcwd()
- targets = {}
- after = opts.get("after")
- dryrun = opts.get("dry_run")
-
- def walkpat(pat):
- srcs = []
- m = match(repo, [pat], opts, globbed=True)
- for abs in repo.walk(m):
- state = repo.dirstate[abs]
- rel = m.rel(abs)
- exact = m.exact(abs)
- if state in '?r':
- if exact and state == '?':
- ui.warn(_('%s: not copying - file is not managed\n') % rel)
- if exact and state == 'r':
- ui.warn(_('%s: not copying - file has been marked for'
- ' remove\n') % rel)
- continue
- # abs: hgsep
- # rel: ossep
- srcs.append((abs, rel, exact))
- return srcs
-
- # abssrc: hgsep
- # relsrc: ossep
- # otarget: ossep
- def copyfile(abssrc, relsrc, otarget, exact):
- abstarget = util.canonpath(repo.root, cwd, otarget)
- reltarget = repo.pathto(abstarget, cwd)
- target = repo.wjoin(abstarget)
- src = repo.wjoin(abssrc)
- state = repo.dirstate[abstarget]
-
- # check for collisions
- prevsrc = targets.get(abstarget)
- if prevsrc is not None:
- ui.warn(_('%s: not overwriting - %s collides with %s\n') %
- (reltarget, repo.pathto(abssrc, cwd),
- repo.pathto(prevsrc, cwd)))
- return
-
- # check for overwrites
- exists = os.path.exists(target)
- if not after and exists or after and state in 'mn':
- if not opts['force']:
- ui.warn(_('%s: not overwriting - file exists\n') %
- reltarget)
- return
-
- if after:
- if not exists:
- return
- elif not dryrun:
- try:
- if exists:
- os.unlink(target)
- targetdir = os.path.dirname(target) or '.'
- if not os.path.isdir(targetdir):
- os.makedirs(targetdir)
- util.copyfile(src, target)
- except IOError, inst:
- if inst.errno == errno.ENOENT:
- ui.warn(_('%s: deleted in working copy\n') % relsrc)
- else:
- ui.warn(_('%s: cannot copy - %s\n') %
- (relsrc, inst.strerror))
- return True # report a failure
-
- if ui.verbose or not exact:
- if rename:
- ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
- else:
- ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
-
- targets[abstarget] = abssrc
-
- # fix up dirstate
- origsrc = repo.dirstate.copied(abssrc) or abssrc
- if abstarget == origsrc: # copying back a copy?
- if state not in 'mn' and not dryrun:
- repo.dirstate.normallookup(abstarget)
- else:
- if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
- if not ui.quiet:
- ui.warn(_("%s has not been committed yet, so no copy "
- "data will be stored for %s.\n")
- % (repo.pathto(origsrc, cwd), reltarget))
- if repo.dirstate[abstarget] in '?r' and not dryrun:
- repo.add([abstarget])
- elif not dryrun:
- repo.copy(origsrc, abstarget)
-
- if rename and not dryrun:
- repo.remove([abssrc], not after)
-
- # pat: ossep
- # dest ossep
- # srcs: list of (hgsep, hgsep, ossep, bool)
- # return: function that takes hgsep and returns ossep
- def targetpathfn(pat, dest, srcs):
- if os.path.isdir(pat):
- abspfx = util.canonpath(repo.root, cwd, pat)
- abspfx = util.localpath(abspfx)
- if destdirexists:
- striplen = len(os.path.split(abspfx)[0])
- else:
- striplen = len(abspfx)
- if striplen:
- striplen += len(os.sep)
- res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
- elif destdirexists:
- res = lambda p: os.path.join(dest,
- os.path.basename(util.localpath(p)))
- else:
- res = lambda p: dest
- return res
-
- # pat: ossep
- # dest ossep
- # srcs: list of (hgsep, hgsep, ossep, bool)
- # return: function that takes hgsep and returns ossep
- def targetpathafterfn(pat, dest, srcs):
- if _match.patkind(pat):
- # a mercurial pattern
- res = lambda p: os.path.join(dest,
- os.path.basename(util.localpath(p)))
- else:
- abspfx = util.canonpath(repo.root, cwd, pat)
- if len(abspfx) < len(srcs[0][0]):
- # A directory. Either the target path contains the last
- # component of the source path or it does not.
- def evalpath(striplen):
- score = 0
- for s in srcs:
- t = os.path.join(dest, util.localpath(s[0])[striplen:])
- if os.path.exists(t):
- score += 1
- return score
-
- abspfx = util.localpath(abspfx)
- striplen = len(abspfx)
- if striplen:
- striplen += len(os.sep)
- if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
- score = evalpath(striplen)
- striplen1 = len(os.path.split(abspfx)[0])
- if striplen1:
- striplen1 += len(os.sep)
- if evalpath(striplen1) > score:
- striplen = striplen1
- res = lambda p: os.path.join(dest,
- util.localpath(p)[striplen:])
- else:
- # a file
- if destdirexists:
- res = lambda p: os.path.join(dest,
- os.path.basename(util.localpath(p)))
- else:
- res = lambda p: dest
- return res
-
-
- pats = expandpats(pats)
- if not pats:
- raise util.Abort(_('no source or destination specified'))
- if len(pats) == 1:
- raise util.Abort(_('no destination specified'))
- dest = pats.pop()
- destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
- if not destdirexists:
- if len(pats) > 1 or _match.patkind(pats[0]):
- raise util.Abort(_('with multiple sources, destination must be an '
- 'existing directory'))
- if util.endswithsep(dest):
- raise util.Abort(_('destination %s is not a directory') % dest)
-
- tfn = targetpathfn
- if after:
- tfn = targetpathafterfn
- copylist = []
- for pat in pats:
- srcs = walkpat(pat)
- if not srcs:
- continue
- copylist.append((tfn(pat, dest, srcs), srcs))
- if not copylist:
- raise util.Abort(_('no files to copy'))
-
- errors = 0
- for targetpath, srcs in copylist:
- for abssrc, relsrc, exact in srcs:
- if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
- errors += 1
-
- if errors:
- ui.warn(_('(consider using --after)\n'))
-
- return errors
-
-def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None):
- '''Run a command as a service.'''
-
- if opts['daemon'] and not opts['daemon_pipefds']:
- rfd, wfd = os.pipe()
- args = sys.argv[:]
- args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
- # Don't pass --cwd to the child process, because we've already
- # changed directory.
- for i in xrange(1,len(args)):
- if args[i].startswith('--cwd='):
- del args[i]
- break
- elif args[i].startswith('--cwd'):
- del args[i:i+2]
- break
- pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
- args[0], args)
- os.close(wfd)
- os.read(rfd, 1)
- if parentfn:
- return parentfn(pid)
- else:
- os._exit(0)
-
- if initfn:
- initfn()
-
- if opts['pid_file']:
- fp = open(opts['pid_file'], 'w')
- fp.write(str(os.getpid()) + '\n')
- fp.close()
-
- if opts['daemon_pipefds']:
- rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
- os.close(rfd)
- try:
- os.setsid()
- except AttributeError:
- pass
- os.write(wfd, 'y')
- os.close(wfd)
- sys.stdout.flush()
- sys.stderr.flush()
-
- nullfd = os.open(util.nulldev, os.O_RDWR)
- logfilefd = nullfd
- if logfile:
- logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
- os.dup2(nullfd, 0)
- os.dup2(logfilefd, 1)
- os.dup2(logfilefd, 2)
- if nullfd not in (0, 1, 2):
- os.close(nullfd)
- if logfile and logfilefd not in (0, 1, 2):
- os.close(logfilefd)
-
- if runfn:
- return runfn()
-
-class changeset_printer(object):
- '''show changeset information when templating not requested.'''
-
- def __init__(self, ui, repo, patch, diffopts, buffered):
- self.ui = ui
- self.repo = repo
- self.buffered = buffered
- self.patch = patch
- self.diffopts = diffopts
- self.header = {}
- self.hunk = {}
- self.lastheader = None
-
- def flush(self, rev):
- if rev in self.header:
- h = self.header[rev]
- if h != self.lastheader:
- self.lastheader = h
- self.ui.write(h)
- del self.header[rev]
- if rev in self.hunk:
- self.ui.write(self.hunk[rev])
- del self.hunk[rev]
- return 1
- return 0
-
- def show(self, ctx, copies=(), **props):
- if self.buffered:
- self.ui.pushbuffer()
- self._show(ctx, copies, props)
- self.hunk[ctx.rev()] = self.ui.popbuffer()
- else:
- self._show(ctx, copies, props)
-
- def _show(self, ctx, copies, props):
- '''show a single changeset or file revision'''
- changenode = ctx.node()
- rev = ctx.rev()
-
- if self.ui.quiet:
- self.ui.write("%d:%s\n" % (rev, short(changenode)))
- return
-
- log = self.repo.changelog
- changes = log.read(changenode)
- date = util.datestr(changes[2])
- extra = changes[5]
- branch = extra.get("branch")
-
- hexfunc = self.ui.debugflag and hex or short
-
- parents = [(p, hexfunc(log.node(p)))
- for p in self._meaningful_parentrevs(log, rev)]
-
- self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))
-
- # don't show the default branch name
- if branch != 'default':
- branch = encoding.tolocal(branch)
- self.ui.write(_("branch: %s\n") % branch)
- for tag in self.repo.nodetags(changenode):
- self.ui.write(_("tag: %s\n") % tag)
- for parent in parents:
- self.ui.write(_("parent: %d:%s\n") % parent)
-
- if self.ui.debugflag:
- self.ui.write(_("manifest: %d:%s\n") %
- (self.repo.manifest.rev(changes[0]), hex(changes[0])))
- self.ui.write(_("user: %s\n") % changes[1])
- self.ui.write(_("date: %s\n") % date)
-
- if self.ui.debugflag:
- files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
- for key, value in zip([_("files:"), _("files+:"), _("files-:")],
- files):
- if value:
- self.ui.write("%-12s %s\n" % (key, " ".join(value)))
- elif changes[3] and self.ui.verbose:
- self.ui.write(_("files: %s\n") % " ".join(changes[3]))
- if copies and self.ui.verbose:
- copies = ['%s (%s)' % c for c in copies]
- self.ui.write(_("copies: %s\n") % ' '.join(copies))
-
- if extra and self.ui.debugflag:
- for key, value in sorted(extra.items()):
- self.ui.write(_("extra: %s=%s\n")
- % (key, value.encode('string_escape')))
-
- description = changes[4].strip()
- if description:
- if self.ui.verbose:
- self.ui.write(_("description:\n"))
- self.ui.write(description)
- self.ui.write("\n\n")
- else:
- self.ui.write(_("summary: %s\n") %
- description.splitlines()[0])
- self.ui.write("\n")
-
- self.showpatch(changenode)
-
- def showpatch(self, node):
- if self.patch:
- prev = self.repo.changelog.parents(node)[0]
- chunks = patch.diff(self.repo, prev, node, match=self.patch,
- opts=patch.diffopts(self.ui, self.diffopts))
- for chunk in chunks:
- self.ui.write(chunk)
- self.ui.write("\n")
-
- def _meaningful_parentrevs(self, log, rev):
- """Return list of meaningful (or all if debug) parentrevs for rev.
-
- For merges (two non-nullrev revisions) both parents are meaningful.
- Otherwise the first parent revision is considered meaningful if it
- is not the preceding revision.
- """
- parents = log.parentrevs(rev)
- if not self.ui.debugflag and parents[1] == nullrev:
- if parents[0] >= rev - 1:
- parents = []
- else:
- parents = [parents[0]]
- return parents
-
-
-class changeset_templater(changeset_printer):
- '''format changeset information.'''
-
- def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
- changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
- formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
- self.t = templater.templater(mapfile, {'formatnode': formatnode},
- cache={
- 'parent': '{rev}:{node|formatnode} ',
- 'manifest': '{rev}:{node|formatnode}',
- 'filecopy': '{name} ({source})'})
-
- def use_template(self, t):
- '''set template string to use'''
- self.t.cache['changeset'] = t
-
- def _meaningful_parentrevs(self, ctx):
- """Return list of meaningful (or all if debug) parentrevs for rev.
- """
- parents = ctx.parents()
- if len(parents) > 1:
- return parents
- if self.ui.debugflag:
- return [parents[0], self.repo['null']]
- if parents[0].rev() >= ctx.rev() - 1:
- return []
- return parents
-
- def _show(self, ctx, copies, props):
- '''show a single changeset or file revision'''
-
- def showlist(name, values, plural=None, **args):
- '''expand set of values.
- name is name of key in template map.
- values is list of strings or dicts.
- plural is plural of name, if not simply name + 's'.
-
- expansion works like this, given name 'foo'.
-
- if values is empty, expand 'no_foos'.
-
- if 'foo' not in template map, return values as a string,
- joined by space.
-
- expand 'start_foos'.
-
- for each value, expand 'foo'. if 'last_foo' in template
- map, expand it instead of 'foo' for last key.
-
- expand 'end_foos'.
- '''
- if plural: names = plural
- else: names = name + 's'
- if not values:
- noname = 'no_' + names
- if noname in self.t:
- yield self.t(noname, **args)
- return
- if name not in self.t:
- if isinstance(values[0], str):
- yield ' '.join(values)
- else:
- for v in values:
- yield dict(v, **args)
- return
- startname = 'start_' + names
- if startname in self.t:
- yield self.t(startname, **args)
- vargs = args.copy()
- def one(v, tag=name):
- try:
- vargs.update(v)
- except (AttributeError, ValueError):
- try:
- for a, b in v:
- vargs[a] = b
- except ValueError:
- vargs[name] = v
- return self.t(tag, **vargs)
- lastname = 'last_' + name
- if lastname in self.t:
- last = values.pop()
- else:
- last = None
- for v in values:
- yield one(v)
- if last is not None:
- yield one(last, tag=lastname)
- endname = 'end_' + names
- if endname in self.t:
- yield self.t(endname, **args)
-
- def showbranches(**args):
- branch = ctx.branch()
- if branch != 'default':
- branch = encoding.tolocal(branch)
- return showlist('branch', [branch], plural='branches', **args)
-
- def showparents(**args):
- parents = [[('rev', p.rev()), ('node', p.hex())]
- for p in self._meaningful_parentrevs(ctx)]
- return showlist('parent', parents, **args)
-
- def showtags(**args):
- return showlist('tag', ctx.tags(), **args)
-
- def showextras(**args):
- for key, value in sorted(ctx.extra().items()):
- args = args.copy()
- args.update(dict(key=key, value=value))
- yield self.t('extra', **args)
-
- def showcopies(**args):
- c = [{'name': x[0], 'source': x[1]} for x in copies]
- return showlist('file_copy', c, plural='file_copies', **args)
-
- files = []
- def getfiles():
- if not files:
- files[:] = self.repo.status(ctx.parents()[0].node(),
- ctx.node())[:3]
- return files
- def showfiles(**args):
- return showlist('file', ctx.files(), **args)
- def showmods(**args):
- return showlist('file_mod', getfiles()[0], **args)
- def showadds(**args):
- return showlist('file_add', getfiles()[1], **args)
- def showdels(**args):
- return showlist('file_del', getfiles()[2], **args)
- def showmanifest(**args):
- args = args.copy()
- args.update(dict(rev=self.repo.manifest.rev(ctx.changeset()[0]),
- node=hex(ctx.changeset()[0])))
- return self.t('manifest', **args)
-
- def showdiffstat(**args):
- diff = patch.diff(self.repo, ctx.parents()[0].node(), ctx.node())
- files, adds, removes = 0, 0, 0
- for i in patch.diffstatdata(util.iterlines(diff)):
- files += 1
- adds += i[1]
- removes += i[2]
- return '%s: +%s/-%s' % (files, adds, removes)
-
- defprops = {
- 'author': ctx.user(),
- 'branches': showbranches,
- 'date': ctx.date(),
- 'desc': ctx.description().strip(),
- 'file_adds': showadds,
- 'file_dels': showdels,
- 'file_mods': showmods,
- 'files': showfiles,
- 'file_copies': showcopies,
- 'manifest': showmanifest,
- 'node': ctx.hex(),
- 'parents': showparents,
- 'rev': ctx.rev(),
- 'tags': showtags,
- 'extras': showextras,
- 'diffstat': showdiffstat,
- }
- props = props.copy()
- props.update(defprops)
-
- # find correct templates for current mode
-
- tmplmodes = [
- (True, None),
- (self.ui.verbose, 'verbose'),
- (self.ui.quiet, 'quiet'),
- (self.ui.debugflag, 'debug'),
- ]
-
- types = {'header': '', 'changeset': 'changeset'}
- for mode, postfix in tmplmodes:
- for type in types:
- cur = postfix and ('%s_%s' % (type, postfix)) or type
- if mode and cur in self.t:
- types[type] = cur
-
- try:
-
- # write header
- if types['header']:
- h = templater.stringify(self.t(types['header'], **props))
- if self.buffered:
- self.header[ctx.rev()] = h
- else:
- self.ui.write(h)
-
- # write changeset metadata, then patch if requested
- key = types['changeset']
- self.ui.write(templater.stringify(self.t(key, **props)))
- self.showpatch(ctx.node())
-
- except KeyError, inst:
- msg = _("%s: no key named '%s'")
- raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
- except SyntaxError, inst:
- raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
-
-def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
- """show one changeset using template or regular display.
-
- Display format will be the first non-empty hit of:
- 1. option 'template'
- 2. option 'style'
- 3. [ui] setting 'logtemplate'
- 4. [ui] setting 'style'
- If all of these values are either the unset or the empty string,
- regular display via changeset_printer() is done.
- """
- # options
- patch = False
- if opts.get('patch'):
- patch = matchfn or matchall(repo)
-
- tmpl = opts.get('template')
- style = None
- if tmpl:
- tmpl = templater.parsestring(tmpl, quoted=False)
- else:
- style = opts.get('style')
-
- # ui settings
- if not (tmpl or style):
- tmpl = ui.config('ui', 'logtemplate')
- if tmpl:
- tmpl = templater.parsestring(tmpl)
- else:
- style = ui.config('ui', 'style')
-
- if not (tmpl or style):
- return changeset_printer(ui, repo, patch, opts, buffered)
-
- mapfile = None
- if style and not tmpl:
- mapfile = style
- if not os.path.split(mapfile)[0]:
- mapname = (templater.templatepath('map-cmdline.' + mapfile)
- or templater.templatepath(mapfile))
- if mapname: mapfile = mapname
-
- try:
- t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
- except SyntaxError, inst:
- raise util.Abort(inst.args[0])
- if tmpl: t.use_template(tmpl)
- return t
-
-def finddate(ui, repo, date):
- """Find the tipmost changeset that matches the given date spec"""
- df = util.matchdate(date)
- get = util.cachefunc(lambda r: repo[r].changeset())
- changeiter, matchfn = walkchangerevs(ui, repo, [], get, {'rev':None})
- results = {}
- for st, rev, fns in changeiter:
- if st == 'add':
- d = get(rev)[2]
- if df(d[0]):
- results[rev] = d
- elif st == 'iter':
- if rev in results:
- ui.status(_("Found revision %s from %s\n") %
- (rev, util.datestr(results[rev])))
- return str(rev)
-
- raise util.Abort(_("revision matching date not found"))
-
-def walkchangerevs(ui, repo, pats, change, opts):
- '''Iterate over files and the revs in which they changed.
-
- Callers most commonly need to iterate backwards over the history
- in which they are interested. Doing so has awful (quadratic-looking)
- performance, so we use iterators in a "windowed" way.
-
- We walk a window of revisions in the desired order. Within the
- window, we first walk forwards to gather data, then in the desired
- order (usually backwards) to display it.
-
- This function returns an (iterator, matchfn) tuple. The iterator
- yields 3-tuples. They will be of one of the following forms:
-
- "window", incrementing, lastrev: stepping through a window,
- positive if walking forwards through revs, last rev in the
- sequence iterated over - use to reset state for the current window
-
- "add", rev, fns: out-of-order traversal of the given filenames
- fns, which changed during revision rev - use to gather data for
- possible display
-
- "iter", rev, None: in-order traversal of the revs earlier iterated
- over with "add" - use to display data'''
-
- def increasing_windows(start, end, windowsize=8, sizelimit=512):
- if start < end:
- while start < end:
- yield start, min(windowsize, end-start)
- start += windowsize
- if windowsize < sizelimit:
- windowsize *= 2
- else:
- while start > end:
- yield start, min(windowsize, start-end-1)
- start -= windowsize
- if windowsize < sizelimit:
- windowsize *= 2
-
- m = match(repo, pats, opts)
- follow = opts.get('follow') or opts.get('follow_first')
-
- if not len(repo):
- return [], m
-
- if follow:
- defrange = '%s:0' % repo['.'].rev()
- else:
- defrange = '-1:0'
- revs = revrange(repo, opts['rev'] or [defrange])
- wanted = set()
- slowpath = m.anypats() or (m.files() and opts.get('removed'))
- fncache = {}
-
- if not slowpath and not m.files():
- # No files, no patterns. Display all revs.
- wanted = set(revs)
- copies = []
- if not slowpath:
- # Only files, no patterns. Check the history of each file.
- def filerevgen(filelog, node):
- cl_count = len(repo)
- if node is None:
- last = len(filelog) - 1
- else:
- last = filelog.rev(node)
- for i, window in increasing_windows(last, nullrev):
- revs = []
- for j in xrange(i - window, i + 1):
- n = filelog.node(j)
- revs.append((filelog.linkrev(j),
- follow and filelog.renamed(n)))
- for rev in reversed(revs):
- # only yield rev for which we have the changelog, it can
- # happen while doing "hg log" during a pull or commit
- if rev[0] < cl_count:
- yield rev
- def iterfiles():
- for filename in m.files():
- yield filename, None
- for filename_node in copies:
- yield filename_node
- minrev, maxrev = min(revs), max(revs)
- for file_, node in iterfiles():
- filelog = repo.file(file_)
- if not len(filelog):
- if node is None:
- # A zero count may be a directory or deleted file, so
- # try to find matching entries on the slow path.
- if follow:
- raise util.Abort(_('cannot follow nonexistent file: "%s"') % file_)
- slowpath = True
- break
- else:
- ui.warn(_('%s:%s copy source revision cannot be found!\n')
- % (file_, short(node)))
- continue
- for rev, copied in filerevgen(filelog, node):
- if rev <= maxrev:
- if rev < minrev:
- break
- fncache.setdefault(rev, [])
- fncache[rev].append(file_)
- wanted.add(rev)
- if follow and copied:
- copies.append(copied)
- if slowpath:
- if follow:
- raise util.Abort(_('can only follow copies/renames for explicit '
- 'filenames'))
-
- # The slow path checks files modified in every changeset.
- def changerevgen():
- for i, window in increasing_windows(len(repo) - 1, nullrev):
- for j in xrange(i - window, i + 1):
- yield j, change(j)[3]
-
- for rev, changefiles in changerevgen():
- matches = filter(m, changefiles)
- if matches:
- fncache[rev] = matches
- wanted.add(rev)
-
- class followfilter(object):
- def __init__(self, onlyfirst=False):
- self.startrev = nullrev
- self.roots = []
- self.onlyfirst = onlyfirst
-
- def match(self, rev):
- def realparents(rev):
- if self.onlyfirst:
- return repo.changelog.parentrevs(rev)[0:1]
- else:
- return filter(lambda x: x != nullrev,
- repo.changelog.parentrevs(rev))
-
- if self.startrev == nullrev:
- self.startrev = rev
- return True
-
- if rev > self.startrev:
- # forward: all descendants
- if not self.roots:
- self.roots.append(self.startrev)
- for parent in realparents(rev):
- if parent in self.roots:
- self.roots.append(rev)
- return True
- else:
- # backwards: all parents
- if not self.roots:
- self.roots.extend(realparents(self.startrev))
- if rev in self.roots:
- self.roots.remove(rev)
- self.roots.extend(realparents(rev))
- return True
-
- return False
-
- # it might be worthwhile to do this in the iterator if the rev range
- # is descending and the prune args are all within that range
- for rev in opts.get('prune', ()):
- rev = repo.changelog.rev(repo.lookup(rev))
- ff = followfilter()
- stop = min(revs[0], revs[-1])
- for x in xrange(rev, stop-1, -1):
- if ff.match(x):
- wanted.discard(x)
-
- def iterate():
- if follow and not m.files():
- ff = followfilter(onlyfirst=opts.get('follow_first'))
- def want(rev):
- return ff.match(rev) and rev in wanted
- else:
- def want(rev):
- return rev in wanted
-
- for i, window in increasing_windows(0, len(revs)):
- yield 'window', revs[0] < revs[-1], revs[-1]
- nrevs = [rev for rev in revs[i:i+window] if want(rev)]
- for rev in sorted(nrevs):
- fns = fncache.get(rev)
- if not fns:
- def fns_generator():
- for f in change(rev)[3]:
- if m(f):
- yield f
- fns = fns_generator()
- yield 'add', rev, fns
- for rev in nrevs:
- yield 'iter', rev, None
- return iterate(), m
-
-def commit(ui, repo, commitfunc, pats, opts):
- '''commit the specified files or all outstanding changes'''
- date = opts.get('date')
- if date:
- opts['date'] = util.parsedate(date)
- message = logmessage(opts)
-
- # extract addremove carefully -- this function can be called from a command
- # that doesn't support addremove
- if opts.get('addremove'):
- addremove(repo, pats, opts)
-
- return commitfunc(ui, repo, message, match(repo, pats, opts), opts)
-
-def commiteditor(repo, ctx, subs):
- if ctx.description():
- return ctx.description()
- return commitforceeditor(repo, ctx, subs)
-
-def commitforceeditor(repo, ctx, subs):
- edittext = []
- modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
- if ctx.description():
- edittext.append(ctx.description())
- edittext.append("")
- edittext.append("") # Empty line between message and comments.
- edittext.append(_("HG: Enter commit message."
- " Lines beginning with 'HG:' are removed."))
- edittext.append(_("HG: Leave message empty to abort commit."))
- edittext.append("HG: --")
- edittext.append(_("HG: user: %s") % ctx.user())
- if ctx.p2():
- edittext.append(_("HG: branch merge"))
- if ctx.branch():
- edittext.append(_("HG: branch '%s'")
- % encoding.tolocal(ctx.branch()))
- edittext.extend([_("HG: subrepo %s") % s for s in subs])
- edittext.extend([_("HG: added %s") % f for f in added])
- edittext.extend([_("HG: changed %s") % f for f in modified])
- edittext.extend([_("HG: removed %s") % f for f in removed])
- if not added and not modified and not removed:
- edittext.append(_("HG: no files changed"))
- edittext.append("")
- # run editor in the repository root
- olddir = os.getcwd()
- os.chdir(repo.root)
- text = repo.ui.edit("\n".join(edittext), ctx.user())
- text = re.sub("(?m)^HG:.*\n", "", text)
- os.chdir(olddir)
-
- if not text.strip():
- raise util.Abort(_("empty commit message"))
-
- return text
diff --git a/sys/lib/python/mercurial/commands.py b/sys/lib/python/mercurial/commands.py
deleted file mode 100644
index 2dfd3def0..000000000
--- a/sys/lib/python/mercurial/commands.py
+++ /dev/null
@@ -1,3565 +0,0 @@
-# commands.py - command processing for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import hex, nullid, nullrev, short
-from lock import release
-from i18n import _, gettext
-import os, re, sys, subprocess, difflib, time, tempfile
-import hg, util, revlog, bundlerepo, extensions, copies, context, error
-import patch, help, mdiff, url, encoding
-import archival, changegroup, cmdutil, sshserver, hbisect
-from hgweb import server
-import merge as merge_
-import minirst
-
-# Commands start here, listed alphabetically
-
-def add(ui, repo, *pats, **opts):
- """add the specified files on the next commit
-
- Schedule files to be version controlled and added to the
- repository.
-
- The files will be added to the repository at the next commit. To
- undo an add before that, see hg forget.
-
- If no names are given, add all files to the repository.
- """
-
- bad = []
- exacts = {}
- names = []
- m = cmdutil.match(repo, pats, opts)
- oldbad = m.bad
- m.bad = lambda x,y: bad.append(x) or oldbad(x,y)
-
- for f in repo.walk(m):
- exact = m.exact(f)
- if exact or f not in repo.dirstate:
- names.append(f)
- if ui.verbose or not exact:
- ui.status(_('adding %s\n') % m.rel(f))
- if not opts.get('dry_run'):
- bad += [f for f in repo.add(names) if f in m.files()]
- return bad and 1 or 0
-
-def addremove(ui, repo, *pats, **opts):
- """add all new files, delete all missing files
-
- Add all new files and remove all missing files from the
- repository.
-
- New files are ignored if they match any of the patterns in
- .hgignore. As with add, these changes take effect at the next
- commit.
-
- Use the -s/--similarity option to detect renamed files. With a
- parameter greater than 0, this compares every removed file with
- every added file and records those similar enough as renames. This
- option takes a percentage between 0 (disabled) and 100 (files must
- be identical) as its parameter. Detecting renamed files this way
- can be expensive.
- """
- try:
- sim = float(opts.get('similarity') or 0)
- except ValueError:
- raise util.Abort(_('similarity must be a number'))
- if sim < 0 or sim > 100:
- raise util.Abort(_('similarity must be between 0 and 100'))
- return cmdutil.addremove(repo, pats, opts, similarity=sim/100.)
-
-def annotate(ui, repo, *pats, **opts):
- """show changeset information by line for each file
-
- List changes in files, showing the revision id responsible for
- each line
-
- This command is useful for discovering when a change was made and
- by whom.
-
- Without the -a/--text option, annotate will avoid processing files
- it detects as binary. With -a, annotate will annotate the file
- anyway, although the results will probably be neither useful
- nor desirable.
- """
- datefunc = ui.quiet and util.shortdate or util.datestr
- getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
-
- if not pats:
- raise util.Abort(_('at least one filename or pattern is required'))
-
- opmap = [('user', lambda x: ui.shortuser(x[0].user())),
- ('number', lambda x: str(x[0].rev())),
- ('changeset', lambda x: short(x[0].node())),
- ('date', getdate),
- ('follow', lambda x: x[0].path()),
- ]
-
- if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
- and not opts.get('follow')):
- opts['number'] = 1
-
- linenumber = opts.get('line_number') is not None
- if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
- raise util.Abort(_('at least one of -n/-c is required for -l'))
-
- funcmap = [func for op, func in opmap if opts.get(op)]
- if linenumber:
- lastfunc = funcmap[-1]
- funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
-
- ctx = repo[opts.get('rev')]
-
- m = cmdutil.match(repo, pats, opts)
- for abs in ctx.walk(m):
- fctx = ctx[abs]
- if not opts.get('text') and util.binary(fctx.data()):
- ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
- continue
-
- lines = fctx.annotate(follow=opts.get('follow'),
- linenumber=linenumber)
- pieces = []
-
- for f in funcmap:
- l = [f(n) for n, dummy in lines]
- if l:
- ml = max(map(len, l))
- pieces.append(["%*s" % (ml, x) for x in l])
-
- if pieces:
- for p, l in zip(zip(*pieces), lines):
- ui.write("%s: %s" % (" ".join(p), l[1]))
-
-def archive(ui, repo, dest, **opts):
- '''create an unversioned archive of a repository revision
-
- By default, the revision used is the parent of the working
- directory; use -r/--rev to specify a different revision.
-
- To specify the type of archive to create, use -t/--type. Valid
- types are::
-
- "files" (default): a directory full of files
- "tar": tar archive, uncompressed
- "tbz2": tar archive, compressed using bzip2
- "tgz": tar archive, compressed using gzip
- "uzip": zip archive, uncompressed
- "zip": zip archive, compressed using deflate
-
- The exact name of the destination archive or directory is given
- using a format string; see 'hg help export' for details.
-
- Each member added to an archive file has a directory prefix
- prepended. Use -p/--prefix to specify a format string for the
- prefix. The default is the basename of the archive, with suffixes
- removed.
- '''
-
- ctx = repo[opts.get('rev')]
- if not ctx:
- raise util.Abort(_('no working directory: please specify a revision'))
- node = ctx.node()
- dest = cmdutil.make_filename(repo, dest, node)
- if os.path.realpath(dest) == repo.root:
- raise util.Abort(_('repository root cannot be destination'))
- matchfn = cmdutil.match(repo, [], opts)
- kind = opts.get('type') or 'files'
- prefix = opts.get('prefix')
- if dest == '-':
- if kind == 'files':
- raise util.Abort(_('cannot archive plain files to stdout'))
- dest = sys.stdout
- if not prefix: prefix = os.path.basename(repo.root) + '-%h'
- prefix = cmdutil.make_filename(repo, prefix, node)
- archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
- matchfn, prefix)
-
-def backout(ui, repo, node=None, rev=None, **opts):
- '''reverse effect of earlier changeset
-
- Commit the backed out changes as a new changeset. The new
- changeset is a child of the backed out changeset.
-
- If you backout a changeset other than the tip, a new head is
- created. This head will be the new tip and you should merge this
- backout changeset with another head.
-
- The --merge option remembers the parent of the working directory
- before starting the backout, then merges the new head with that
- changeset afterwards. This saves you from doing the merge by hand.
- The result of this merge is not committed, as with a normal merge.
-
- See 'hg help dates' for a list of formats valid for -d/--date.
- '''
- if rev and node:
- raise util.Abort(_("please specify just one revision"))
-
- if not rev:
- rev = node
-
- if not rev:
- raise util.Abort(_("please specify a revision to backout"))
-
- date = opts.get('date')
- if date:
- opts['date'] = util.parsedate(date)
-
- cmdutil.bail_if_changed(repo)
- node = repo.lookup(rev)
-
- op1, op2 = repo.dirstate.parents()
- a = repo.changelog.ancestor(op1, node)
- if a != node:
- raise util.Abort(_('cannot backout change on a different branch'))
-
- p1, p2 = repo.changelog.parents(node)
- if p1 == nullid:
- raise util.Abort(_('cannot backout a change with no parents'))
- if p2 != nullid:
- if not opts.get('parent'):
- raise util.Abort(_('cannot backout a merge changeset without '
- '--parent'))
- p = repo.lookup(opts['parent'])
- if p not in (p1, p2):
- raise util.Abort(_('%s is not a parent of %s') %
- (short(p), short(node)))
- parent = p
- else:
- if opts.get('parent'):
- raise util.Abort(_('cannot use --parent on non-merge changeset'))
- parent = p1
-
- # the backout should appear on the same branch
- branch = repo.dirstate.branch()
- hg.clean(repo, node, show_stats=False)
- repo.dirstate.setbranch(branch)
- revert_opts = opts.copy()
- revert_opts['date'] = None
- revert_opts['all'] = True
- revert_opts['rev'] = hex(parent)
- revert_opts['no_backup'] = None
- revert(ui, repo, **revert_opts)
- commit_opts = opts.copy()
- commit_opts['addremove'] = False
- if not commit_opts['message'] and not commit_opts['logfile']:
- # we don't translate commit messages
- commit_opts['message'] = "Backed out changeset %s" % short(node)
- commit_opts['force_editor'] = True
- commit(ui, repo, **commit_opts)
- def nice(node):
- return '%d:%s' % (repo.changelog.rev(node), short(node))
- ui.status(_('changeset %s backs out changeset %s\n') %
- (nice(repo.changelog.tip()), nice(node)))
- if op1 != node:
- hg.clean(repo, op1, show_stats=False)
- if opts.get('merge'):
- ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
- hg.merge(repo, hex(repo.changelog.tip()))
- else:
- ui.status(_('the backout changeset is a new head - '
- 'do not forget to merge\n'))
- ui.status(_('(use "backout --merge" '
- 'if you want to auto-merge)\n'))
-
-def bisect(ui, repo, rev=None, extra=None, command=None,
- reset=None, good=None, bad=None, skip=None, noupdate=None):
- """subdivision search of changesets
-
- This command helps to find changesets which introduce problems. To
- use, mark the earliest changeset you know exhibits the problem as
- bad, then mark the latest changeset which is free from the problem
- as good. Bisect will update your working directory to a revision
- for testing (unless the -U/--noupdate option is specified). Once
- you have performed tests, mark the working directory as good or
- bad, and bisect will either update to another candidate changeset
- or announce that it has found the bad revision.
-
- As a shortcut, you can also use the revision argument to mark a
- revision as good or bad without checking it out first.
-
- If you supply a command, it will be used for automatic bisection.
- Its exit status will be used to mark revisions as good or bad:
- status 0 means good, 125 means to skip the revision, 127
- (command not found) will abort the bisection, and any other
- non-zero exit status means the revision is bad.
- """
- def print_result(nodes, good):
- displayer = cmdutil.show_changeset(ui, repo, {})
- if len(nodes) == 1:
- # narrowed it down to a single revision
- if good:
- ui.write(_("The first good revision is:\n"))
- else:
- ui.write(_("The first bad revision is:\n"))
- displayer.show(repo[nodes[0]])
- else:
- # multiple possible revisions
- if good:
- ui.write(_("Due to skipped revisions, the first "
- "good revision could be any of:\n"))
- else:
- ui.write(_("Due to skipped revisions, the first "
- "bad revision could be any of:\n"))
- for n in nodes:
- displayer.show(repo[n])
-
- def check_state(state, interactive=True):
- if not state['good'] or not state['bad']:
- if (good or bad or skip or reset) and interactive:
- return
- if not state['good']:
- raise util.Abort(_('cannot bisect (no known good revisions)'))
- else:
- raise util.Abort(_('cannot bisect (no known bad revisions)'))
- return True
-
- # backward compatibility
- if rev in "good bad reset init".split():
- ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
- cmd, rev, extra = rev, extra, None
- if cmd == "good":
- good = True
- elif cmd == "bad":
- bad = True
- else:
- reset = True
- elif extra or good + bad + skip + reset + bool(command) > 1:
- raise util.Abort(_('incompatible arguments'))
-
- if reset:
- p = repo.join("bisect.state")
- if os.path.exists(p):
- os.unlink(p)
- return
-
- state = hbisect.load_state(repo)
-
- if command:
- commandpath = util.find_exe(command)
- if commandpath is None:
- raise util.Abort(_("cannot find executable: %s") % command)
- changesets = 1
- try:
- while changesets:
- # update state
- status = subprocess.call([commandpath])
- if status == 125:
- transition = "skip"
- elif status == 0:
- transition = "good"
- # status < 0 means process was killed
- elif status == 127:
- raise util.Abort(_("failed to execute %s") % command)
- elif status < 0:
- raise util.Abort(_("%s killed") % command)
- else:
- transition = "bad"
- ctx = repo[rev or '.']
- state[transition].append(ctx.node())
- ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
- check_state(state, interactive=False)
- # bisect
- nodes, changesets, good = hbisect.bisect(repo.changelog, state)
- # update to next check
- cmdutil.bail_if_changed(repo)
- hg.clean(repo, nodes[0], show_stats=False)
- finally:
- hbisect.save_state(repo, state)
- return print_result(nodes, not status)
-
- # update state
- node = repo.lookup(rev or '.')
- if good:
- state['good'].append(node)
- elif bad:
- state['bad'].append(node)
- elif skip:
- state['skip'].append(node)
-
- hbisect.save_state(repo, state)
-
- if not check_state(state):
- return
-
- # actually bisect
- nodes, changesets, good = hbisect.bisect(repo.changelog, state)
- if changesets == 0:
- print_result(nodes, good)
- else:
- assert len(nodes) == 1 # only a single node can be tested next
- node = nodes[0]
- # compute the approximate number of remaining tests
- tests, size = 0, 2
- while size <= changesets:
- tests, size = tests + 1, size * 2
- rev = repo.changelog.rev(node)
- ui.write(_("Testing changeset %d:%s "
- "(%d changesets remaining, ~%d tests)\n")
- % (rev, short(node), changesets, tests))
- if not noupdate:
- cmdutil.bail_if_changed(repo)
- return hg.clean(repo, node)
-
-def branch(ui, repo, label=None, **opts):
- """set or show the current branch name
-
- With no argument, show the current branch name. With one argument,
- set the working directory branch name (the branch will not exist
- in the repository until the next commit). Standard practice
- recommends that primary development take place on the 'default'
- branch.
-
- Unless -f/--force is specified, branch will not let you set a
- branch name that already exists, even if it's inactive.
-
- Use -C/--clean to reset the working directory branch to that of
- the parent of the working directory, negating a previous branch
- change.
-
- Use the command 'hg update' to switch to an existing branch. Use
- 'hg commit --close-branch' to mark this branch as closed.
- """
-
- if opts.get('clean'):
- label = repo[None].parents()[0].branch()
- repo.dirstate.setbranch(label)
- ui.status(_('reset working directory to branch %s\n') % label)
- elif label:
- if not opts.get('force') and label in repo.branchtags():
- if label not in [p.branch() for p in repo.parents()]:
- raise util.Abort(_('a branch of the same name already exists'
- ' (use --force to override)'))
- repo.dirstate.setbranch(encoding.fromlocal(label))
- ui.status(_('marked working directory as branch %s\n') % label)
- else:
- ui.write("%s\n" % encoding.tolocal(repo.dirstate.branch()))
-
-def branches(ui, repo, active=False, closed=False):
- """list repository named branches
-
- List the repository's named branches, indicating which ones are
- inactive. If -c/--closed is specified, also list branches which have
- been marked closed (see hg commit --close-branch).
-
- If -a/--active is specified, only show active branches. A branch
- is considered active if it contains repository heads.
-
- Use the command 'hg update' to switch to an existing branch.
- """
-
- hexfunc = ui.debugflag and hex or short
- activebranches = [encoding.tolocal(repo[n].branch())
- for n in repo.heads()]
- def testactive(tag, node):
- realhead = tag in activebranches
- open = node in repo.branchheads(tag, closed=False)
- return realhead and open
- branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag)
- for tag, node in repo.branchtags().items()],
- reverse=True)
-
- for isactive, node, tag in branches:
- if (not active) or isactive:
- if ui.quiet:
- ui.write("%s\n" % tag)
- else:
- hn = repo.lookup(node)
- if isactive:
- notice = ''
- elif hn not in repo.branchheads(tag, closed=False):
- if not closed:
- continue
- notice = ' (closed)'
- else:
- notice = ' (inactive)'
- rev = str(node).rjust(31 - encoding.colwidth(tag))
- data = tag, rev, hexfunc(hn), notice
- ui.write("%s %s:%s%s\n" % data)
-
-def bundle(ui, repo, fname, dest=None, **opts):
- """create a changegroup file
-
- Generate a compressed changegroup file collecting changesets not
- known to be in another repository.
-
- If no destination repository is specified the destination is
- assumed to have all the nodes specified by one or more --base
- parameters. To create a bundle containing all changesets, use
- -a/--all (or --base null).
-
- You can change compression method with the -t/--type option.
- The available compression methods are: none, bzip2, and
- gzip (by default, bundles are compressed using bzip2).
-
- The bundle file can then be transferred using conventional means
- and applied to another repository with the unbundle or pull
- command. This is useful when direct push and pull are not
- available or when exporting an entire repository is undesirable.
-
- Applying bundles preserves all changeset contents including
- permissions, copy/rename information, and revision history.
- """
- revs = opts.get('rev') or None
- if revs:
- revs = [repo.lookup(rev) for rev in revs]
- if opts.get('all'):
- base = ['null']
- else:
- base = opts.get('base')
- if base:
- if dest:
- raise util.Abort(_("--base is incompatible with specifying "
- "a destination"))
- base = [repo.lookup(rev) for rev in base]
- # create the right base
- # XXX: nodesbetween / changegroup* should be "fixed" instead
- o = []
- has = set((nullid,))
- for n in base:
- has.update(repo.changelog.reachable(n))
- if revs:
- visit = list(revs)
- else:
- visit = repo.changelog.heads()
- seen = {}
- while visit:
- n = visit.pop(0)
- parents = [p for p in repo.changelog.parents(n) if p not in has]
- if len(parents) == 0:
- o.insert(0, n)
- else:
- for p in parents:
- if p not in seen:
- seen[p] = 1
- visit.append(p)
- else:
- dest, revs, checkout = hg.parseurl(
- ui.expandpath(dest or 'default-push', dest or 'default'), revs)
- other = hg.repository(cmdutil.remoteui(repo, opts), dest)
- o = repo.findoutgoing(other, force=opts.get('force'))
-
- if revs:
- cg = repo.changegroupsubset(o, revs, 'bundle')
- else:
- cg = repo.changegroup(o, 'bundle')
-
- bundletype = opts.get('type', 'bzip2').lower()
- btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
- bundletype = btypes.get(bundletype)
- if bundletype not in changegroup.bundletypes:
- raise util.Abort(_('unknown bundle type specified with --type'))
-
- changegroup.writebundle(cg, fname, bundletype)
-
-def cat(ui, repo, file1, *pats, **opts):
- """output the current or given revision of files
-
- Print the specified files as they were at the given revision. If
- no revision is given, the parent of the working directory is used,
- or tip if no revision is checked out.
-
- Output may be to a file, in which case the name of the file is
- given using a format string. The formatting rules are the same as
- for the export command, with the following additions::
-
- %s basename of file being printed
- %d dirname of file being printed, or '.' if in repository root
- %p root-relative path name of file being printed
- """
- ctx = repo[opts.get('rev')]
- err = 1
- m = cmdutil.match(repo, (file1,) + pats, opts)
- for abs in ctx.walk(m):
- fp = cmdutil.make_file(repo, opts.get('output'), ctx.node(), pathname=abs)
- data = ctx[abs].data()
- if opts.get('decode'):
- data = repo.wwritedata(abs, data)
- fp.write(data)
- err = 0
- return err
-
-def clone(ui, source, dest=None, **opts):
- """make a copy of an existing repository
-
- Create a copy of an existing repository in a new directory.
-
- If no destination directory name is specified, it defaults to the
- basename of the source.
-
- The location of the source is added to the new repository's
- .hg/hgrc file, as the default to be used for future pulls.
-
- If you use the -r/--rev option to clone up to a specific revision,
- no subsequent revisions (including subsequent tags) will be
- present in the cloned repository. This option implies --pull, even
- on local repositories.
-
- By default, clone will check out the head of the 'default' branch.
- If the -U/--noupdate option is used, the new clone will contain
- only a repository (.hg) and no working copy (the working copy
- parent is the null revision).
-
- See 'hg help urls' for valid source format details.
-
- It is possible to specify an ssh:// URL as the destination, but no
- .hg/hgrc and working directory will be created on the remote side.
- Please see 'hg help urls' for important details about ssh:// URLs.
-
- For efficiency, hardlinks are used for cloning whenever the source
- and destination are on the same filesystem (note this applies only
- to the repository data, not to the checked out files). Some
- filesystems, such as AFS, implement hardlinking incorrectly, but
- do not report errors. In these cases, use the --pull option to
- avoid hardlinking.
-
- In some cases, you can clone repositories and checked out files
- using full hardlinks with ::
-
- $ cp -al REPO REPOCLONE
-
- This is the fastest way to clone, but it is not always safe. The
- operation is not atomic (making sure REPO is not modified during
- the operation is up to you) and you have to make sure your editor
- breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
- this is not compatible with certain extensions that place their
- metadata under the .hg directory, such as mq.
- """
- hg.clone(cmdutil.remoteui(ui, opts), source, dest,
- pull=opts.get('pull'),
- stream=opts.get('uncompressed'),
- rev=opts.get('rev'),
- update=not opts.get('noupdate'))
-
-def commit(ui, repo, *pats, **opts):
- """commit the specified files or all outstanding changes
-
- Commit changes to the given files into the repository. Unlike a
- centralized RCS, this operation is a local operation. See hg push
- for a way to actively distribute your changes.
-
- If a list of files is omitted, all changes reported by "hg status"
- will be committed.
-
- If you are committing the result of a merge, do not provide any
- filenames or -I/-X filters.
-
- If no commit message is specified, the configured editor is
- started to prompt you for a message.
-
- See 'hg help dates' for a list of formats valid for -d/--date.
- """
- extra = {}
- if opts.get('close_branch'):
- extra['close'] = 1
- e = cmdutil.commiteditor
- if opts.get('force_editor'):
- e = cmdutil.commitforceeditor
-
- def commitfunc(ui, repo, message, match, opts):
- return repo.commit(message, opts.get('user'), opts.get('date'), match,
- editor=e, extra=extra)
-
- node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
- if not node:
- ui.status(_("nothing changed\n"))
- return
- cl = repo.changelog
- rev = cl.rev(node)
- parents = cl.parentrevs(rev)
- if rev - 1 in parents:
- # one of the parents was the old tip
- pass
- elif (parents == (nullrev, nullrev) or
- len(cl.heads(cl.node(parents[0]))) > 1 and
- (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
- ui.status(_('created new head\n'))
-
- if ui.debugflag:
- ui.write(_('committed changeset %d:%s\n') % (rev, hex(node)))
- elif ui.verbose:
- ui.write(_('committed changeset %d:%s\n') % (rev, short(node)))
-
-def copy(ui, repo, *pats, **opts):
- """mark files as copied for the next commit
-
- Mark dest as having copies of source files. If dest is a
- directory, copies are put in that directory. If dest is a file,
- the source must be a single file.
-
- By default, this command copies the contents of files as they
- exist in the working directory. If invoked with -A/--after, the
- operation is recorded, but no copying is performed.
-
- This command takes effect with the next commit. To undo a copy
- before that, see hg revert.
- """
- wlock = repo.wlock(False)
- try:
- return cmdutil.copy(ui, repo, pats, opts)
- finally:
- wlock.release()
-
-def debugancestor(ui, repo, *args):
- """find the ancestor revision of two revisions in a given index"""
- if len(args) == 3:
- index, rev1, rev2 = args
- r = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
- lookup = r.lookup
- elif len(args) == 2:
- if not repo:
- raise util.Abort(_("There is no Mercurial repository here "
- "(.hg not found)"))
- rev1, rev2 = args
- r = repo.changelog
- lookup = repo.lookup
- else:
- raise util.Abort(_('either two or three arguments required'))
- a = r.ancestor(lookup(rev1), lookup(rev2))
- ui.write("%d:%s\n" % (r.rev(a), hex(a)))
-
-def debugcommands(ui, cmd='', *args):
- for cmd, vals in sorted(table.iteritems()):
- cmd = cmd.split('|')[0].strip('^')
- opts = ', '.join([i[1] for i in vals[1]])
- ui.write('%s: %s\n' % (cmd, opts))
-
-def debugcomplete(ui, cmd='', **opts):
- """returns the completion list associated with the given command"""
-
- if opts.get('options'):
- options = []
- otables = [globalopts]
- if cmd:
- aliases, entry = cmdutil.findcmd(cmd, table, False)
- otables.append(entry[1])
- for t in otables:
- for o in t:
- if o[0]:
- options.append('-%s' % o[0])
- options.append('--%s' % o[1])
- ui.write("%s\n" % "\n".join(options))
- return
-
- cmdlist = cmdutil.findpossible(cmd, table)
- if ui.verbose:
- cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
- ui.write("%s\n" % "\n".join(sorted(cmdlist)))
-
-def debugfsinfo(ui, path = "."):
- open('.debugfsinfo', 'w').write('')
- ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
- ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
- ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
- and 'yes' or 'no'))
- os.unlink('.debugfsinfo')
-
-def debugrebuildstate(ui, repo, rev="tip"):
- """rebuild the dirstate as it would look like for the given revision"""
- ctx = repo[rev]
- wlock = repo.wlock()
- try:
- repo.dirstate.rebuild(ctx.node(), ctx.manifest())
- finally:
- wlock.release()
-
-def debugcheckstate(ui, repo):
- """validate the correctness of the current dirstate"""
- parent1, parent2 = repo.dirstate.parents()
- m1 = repo[parent1].manifest()
- m2 = repo[parent2].manifest()
- errors = 0
- for f in repo.dirstate:
- state = repo.dirstate[f]
- if state in "nr" and f not in m1:
- ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
- errors += 1
- if state in "a" and f in m1:
- ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
- errors += 1
- if state in "m" and f not in m1 and f not in m2:
- ui.warn(_("%s in state %s, but not in either manifest\n") %
- (f, state))
- errors += 1
- for f in m1:
- state = repo.dirstate[f]
- if state not in "nrm":
- ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
- errors += 1
- if errors:
- error = _(".hg/dirstate inconsistent with current parent's manifest")
- raise util.Abort(error)
-
-def showconfig(ui, repo, *values, **opts):
- """show combined config settings from all hgrc files
-
- With no arguments, print names and values of all config items.
-
- With one argument of the form section.name, print just the value
- of that config item.
-
- With multiple arguments, print names and values of all config
- items with matching section names.
-
- With --debug, the source (filename and line number) is printed
- for each config item.
- """
-
- untrusted = bool(opts.get('untrusted'))
- if values:
- if len([v for v in values if '.' in v]) > 1:
- raise util.Abort(_('only one config item permitted'))
- for section, name, value in ui.walkconfig(untrusted=untrusted):
- sectname = section + '.' + name
- if values:
- for v in values:
- if v == section:
- ui.debug('%s: ' %
- ui.configsource(section, name, untrusted))
- ui.write('%s=%s\n' % (sectname, value))
- elif v == sectname:
- ui.debug('%s: ' %
- ui.configsource(section, name, untrusted))
- ui.write(value, '\n')
- else:
- ui.debug('%s: ' %
- ui.configsource(section, name, untrusted))
- ui.write('%s=%s\n' % (sectname, value))
-
-def debugsetparents(ui, repo, rev1, rev2=None):
- """manually set the parents of the current working directory
-
- This is useful for writing repository conversion tools, but should
- be used with care.
- """
-
- if not rev2:
- rev2 = hex(nullid)
-
- wlock = repo.wlock()
- try:
- repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2))
- finally:
- wlock.release()
-
-def debugstate(ui, repo, nodates=None):
- """show the contents of the current dirstate"""
- timestr = ""
- showdate = not nodates
- for file_, ent in sorted(repo.dirstate._map.iteritems()):
- if showdate:
- if ent[3] == -1:
- # Pad or slice to locale representation
- locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(0)))
- timestr = 'unset'
- timestr = timestr[:locale_len] + ' '*(locale_len - len(timestr))
- else:
- timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]))
- if ent[1] & 020000:
- mode = 'lnk'
- else:
- mode = '%3o' % (ent[1] & 0777)
- ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
- for f in repo.dirstate.copies():
- ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
-
-def debugsub(ui, repo, rev=None):
- if rev == '':
- rev = None
- for k,v in sorted(repo[rev].substate.items()):
- ui.write('path %s\n' % k)
- ui.write(' source %s\n' % v[0])
- ui.write(' revision %s\n' % v[1])
-
-def debugdata(ui, file_, rev):
- """dump the contents of a data file revision"""
- r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_[:-2] + ".i")
- try:
- ui.write(r.revision(r.lookup(rev)))
- except KeyError:
- raise util.Abort(_('invalid revision identifier %s') % rev)
-
-def debugdate(ui, date, range=None, **opts):
- """parse and display a date"""
- if opts["extended"]:
- d = util.parsedate(date, util.extendeddateformats)
- else:
- d = util.parsedate(date)
- ui.write("internal: %s %s\n" % d)
- ui.write("standard: %s\n" % util.datestr(d))
- if range:
- m = util.matchdate(range)
- ui.write("match: %s\n" % m(d[0]))
-
-def debugindex(ui, file_):
- """dump the contents of an index file"""
- r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
- ui.write(" rev offset length base linkrev"
- " nodeid p1 p2\n")
- for i in r:
- node = r.node(i)
- try:
- pp = r.parents(node)
- except:
- pp = [nullid, nullid]
- ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
- i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
- short(node), short(pp[0]), short(pp[1])))
-
-def debugindexdot(ui, file_):
- """dump an index DAG as a graphviz dot file"""
- r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
- ui.write("digraph G {\n")
- for i in r:
- node = r.node(i)
- pp = r.parents(node)
- ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
- if pp[1] != nullid:
- ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
- ui.write("}\n")
-
-def debuginstall(ui):
- '''test Mercurial installation'''
-
- def writetemp(contents):
- (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
- f = os.fdopen(fd, "wb")
- f.write(contents)
- f.close()
- return name
-
- problems = 0
-
- # encoding
- ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
- try:
- encoding.fromlocal("test")
- except util.Abort, inst:
- ui.write(" %s\n" % inst)
- ui.write(_(" (check that your locale is properly set)\n"))
- problems += 1
-
- # compiled modules
- ui.status(_("Checking extensions...\n"))
- try:
- import bdiff, mpatch, base85
- except Exception, inst:
- ui.write(" %s\n" % inst)
- ui.write(_(" One or more extensions could not be found"))
- ui.write(_(" (check that you compiled the extensions)\n"))
- problems += 1
-
- # templates
- ui.status(_("Checking templates...\n"))
- try:
- import templater
- templater.templater(templater.templatepath("map-cmdline.default"))
- except Exception, inst:
- ui.write(" %s\n" % inst)
- ui.write(_(" (templates seem to have been installed incorrectly)\n"))
- problems += 1
-
- # patch
- ui.status(_("Checking patch...\n"))
- patchproblems = 0
- a = "1\n2\n3\n4\n"
- b = "1\n2\n3\ninsert\n4\n"
- fa = writetemp(a)
- d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
- os.path.basename(fa))
- fd = writetemp(d)
-
- files = {}
- try:
- patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
- except util.Abort, e:
- ui.write(_(" patch call failed:\n"))
- ui.write(" " + str(e) + "\n")
- patchproblems += 1
- else:
- if list(files) != [os.path.basename(fa)]:
- ui.write(_(" unexpected patch output!\n"))
- patchproblems += 1
- a = open(fa).read()
- if a != b:
- ui.write(_(" patch test failed!\n"))
- patchproblems += 1
-
- if patchproblems:
- if ui.config('ui', 'patch'):
- ui.write(_(" (Current patch tool may be incompatible with patch,"
- " or misconfigured. Please check your .hgrc file)\n"))
- else:
- ui.write(_(" Internal patcher failure, please report this error"
- " to http://mercurial.selenic.com/bts/\n"))
- problems += patchproblems
-
- os.unlink(fa)
- os.unlink(fd)
-
- # editor
- ui.status(_("Checking commit editor...\n"))
- editor = ui.geteditor()
- cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
- if not cmdpath:
- if editor == 'vi':
- ui.write(_(" No commit editor set and can't find vi in PATH\n"))
- ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
- else:
- ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
- ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
- problems += 1
-
- # check username
- ui.status(_("Checking username...\n"))
- user = os.environ.get("HGUSER")
- if user is None:
- user = ui.config("ui", "username")
- if user is None:
- user = os.environ.get("EMAIL")
- if not user:
- ui.warn(" ")
- ui.username()
- ui.write(_(" (specify a username in your .hgrc file)\n"))
-
- if not problems:
- ui.status(_("No problems detected\n"))
- else:
- ui.write(_("%s problems detected,"
- " please check your install!\n") % problems)
-
- return problems
-
-def debugrename(ui, repo, file1, *pats, **opts):
- """dump rename information"""
-
- ctx = repo[opts.get('rev')]
- m = cmdutil.match(repo, (file1,) + pats, opts)
- for abs in ctx.walk(m):
- fctx = ctx[abs]
- o = fctx.filelog().renamed(fctx.filenode())
- rel = m.rel(abs)
- if o:
- ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
- else:
- ui.write(_("%s not renamed\n") % rel)
-
-def debugwalk(ui, repo, *pats, **opts):
- """show how files match on given patterns"""
- m = cmdutil.match(repo, pats, opts)
- items = list(repo.walk(m))
- if not items:
- return
- fmt = 'f %%-%ds %%-%ds %%s' % (
- max([len(abs) for abs in items]),
- max([len(m.rel(abs)) for abs in items]))
- for abs in items:
- line = fmt % (abs, m.rel(abs), m.exact(abs) and 'exact' or '')
- ui.write("%s\n" % line.rstrip())
-
-def diff(ui, repo, *pats, **opts):
- """diff repository (or selected files)
-
- Show differences between revisions for the specified files.
-
- Differences between files are shown using the unified diff format.
-
- NOTE: diff may generate unexpected results for merges, as it will
- default to comparing against the working directory's first parent
- changeset if no revisions are specified.
-
- When two revision arguments are given, then changes are shown
- between those revisions. If only one revision is specified then
- that revision is compared to the working directory, and, when no
- revisions are specified, the working directory files are compared
- to its parent.
-
- Without the -a/--text option, diff will avoid generating diffs of
- files it detects as binary. With -a, diff will generate a diff
- anyway, probably with undesirable results.
-
- Use the -g/--git option to generate diffs in the git extended diff
- format. For more information, read 'hg help diffs'.
- """
-
- revs = opts.get('rev')
- change = opts.get('change')
-
- if revs and change:
- msg = _('cannot specify --rev and --change at the same time')
- raise util.Abort(msg)
- elif change:
- node2 = repo.lookup(change)
- node1 = repo[node2].parents()[0].node()
- else:
- node1, node2 = cmdutil.revpair(repo, revs)
-
- m = cmdutil.match(repo, pats, opts)
- it = patch.diff(repo, node1, node2, match=m, opts=patch.diffopts(ui, opts))
- for chunk in it:
- ui.write(chunk)
-
-def export(ui, repo, *changesets, **opts):
- """dump the header and diffs for one or more changesets
-
- Print the changeset header and diffs for one or more revisions.
-
- The information shown in the changeset header is: author,
- changeset hash, parent(s) and commit comment.
-
- NOTE: export may generate unexpected diff output for merge
- changesets, as it will compare the merge changeset against its
- first parent only.
-
- Output may be to a file, in which case the name of the file is
- given using a format string. The formatting rules are as follows::
-
- %% literal "%" character
- %H changeset hash (40 bytes of hexadecimal)
- %N number of patches being generated
- %R changeset revision number
- %b basename of the exporting repository
- %h short-form changeset hash (12 bytes of hexadecimal)
- %n zero-padded sequence number, starting at 1
- %r zero-padded changeset revision number
-
- Without the -a/--text option, export will avoid generating diffs
- of files it detects as binary. With -a, export will generate a
- diff anyway, probably with undesirable results.
-
- Use the -g/--git option to generate diffs in the git extended diff
- format. See 'hg help diffs' for more information.
-
- With the --switch-parent option, the diff will be against the
- second parent. It can be useful to review a merge.
- """
- if not changesets:
- raise util.Abort(_("export requires at least one changeset"))
- revs = cmdutil.revrange(repo, changesets)
- if len(revs) > 1:
- ui.note(_('exporting patches:\n'))
- else:
- ui.note(_('exporting patch:\n'))
- patch.export(repo, revs, template=opts.get('output'),
- switch_parent=opts.get('switch_parent'),
- opts=patch.diffopts(ui, opts))
-
-def forget(ui, repo, *pats, **opts):
- """forget the specified files on the next commit
-
- Mark the specified files so they will no longer be tracked
- after the next commit.
-
- This only removes files from the current branch, not from the
- entire project history, and it does not delete them from the
- working directory.
-
- To undo a forget before the next commit, see hg add.
- """
-
- if not pats:
- raise util.Abort(_('no files specified'))
-
- m = cmdutil.match(repo, pats, opts)
- s = repo.status(match=m, clean=True)
- forget = sorted(s[0] + s[1] + s[3] + s[6])
-
- for f in m.files():
- if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
- ui.warn(_('not removing %s: file is already untracked\n')
- % m.rel(f))
-
- for f in forget:
- if ui.verbose or not m.exact(f):
- ui.status(_('removing %s\n') % m.rel(f))
-
- repo.remove(forget, unlink=False)
-
-def grep(ui, repo, pattern, *pats, **opts):
- """search for a pattern in specified files and revisions
-
- Search revisions of files for a regular expression.
-
- This command behaves differently than Unix grep. It only accepts
- Python/Perl regexps. It searches repository history, not the
- working directory. It always prints the revision number in which a
- match appears.
-
- By default, grep only prints output for the first revision of a
- file in which it finds a match. To get it to print every revision
- that contains a change in match status ("-" for a match that
- becomes a non-match, or "+" for a non-match that becomes a match),
- use the --all flag.
- """
- reflags = 0
- if opts.get('ignore_case'):
- reflags |= re.I
- try:
- regexp = re.compile(pattern, reflags)
- except Exception, inst:
- ui.warn(_("grep: invalid match pattern: %s\n") % inst)
- return None
- sep, eol = ':', '\n'
- if opts.get('print0'):
- sep = eol = '\0'
-
- getfile = util.lrucachefunc(repo.file)
-
- def matchlines(body):
- begin = 0
- linenum = 0
- while True:
- match = regexp.search(body, begin)
- if not match:
- break
- mstart, mend = match.span()
- linenum += body.count('\n', begin, mstart) + 1
- lstart = body.rfind('\n', begin, mstart) + 1 or begin
- begin = body.find('\n', mend) + 1 or len(body)
- lend = begin - 1
- yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
-
- class linestate(object):
- def __init__(self, line, linenum, colstart, colend):
- self.line = line
- self.linenum = linenum
- self.colstart = colstart
- self.colend = colend
-
- def __hash__(self):
- return hash((self.linenum, self.line))
-
- def __eq__(self, other):
- return self.line == other.line
-
- matches = {}
- copies = {}
- def grepbody(fn, rev, body):
- matches[rev].setdefault(fn, [])
- m = matches[rev][fn]
- for lnum, cstart, cend, line in matchlines(body):
- s = linestate(line, lnum, cstart, cend)
- m.append(s)
-
- def difflinestates(a, b):
- sm = difflib.SequenceMatcher(None, a, b)
- for tag, alo, ahi, blo, bhi in sm.get_opcodes():
- if tag == 'insert':
- for i in xrange(blo, bhi):
- yield ('+', b[i])
- elif tag == 'delete':
- for i in xrange(alo, ahi):
- yield ('-', a[i])
- elif tag == 'replace':
- for i in xrange(alo, ahi):
- yield ('-', a[i])
- for i in xrange(blo, bhi):
- yield ('+', b[i])
-
- def display(fn, r, pstates, states):
- datefunc = ui.quiet and util.shortdate or util.datestr
- found = False
- filerevmatches = {}
- if opts.get('all'):
- iter = difflinestates(pstates, states)
- else:
- iter = [('', l) for l in states]
- for change, l in iter:
- cols = [fn, str(r)]
- if opts.get('line_number'):
- cols.append(str(l.linenum))
- if opts.get('all'):
- cols.append(change)
- if opts.get('user'):
- cols.append(ui.shortuser(get(r)[1]))
- if opts.get('date'):
- cols.append(datefunc(get(r)[2]))
- if opts.get('files_with_matches'):
- c = (fn, r)
- if c in filerevmatches:
- continue
- filerevmatches[c] = 1
- else:
- cols.append(l.line)
- ui.write(sep.join(cols), eol)
- found = True
- return found
-
- skip = {}
- revfiles = {}
- get = util.cachefunc(lambda r: repo[r].changeset())
- changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
- found = False
- follow = opts.get('follow')
- for st, rev, fns in changeiter:
- if st == 'window':
- matches.clear()
- revfiles.clear()
- elif st == 'add':
- ctx = repo[rev]
- pctx = ctx.parents()[0]
- parent = pctx.rev()
- matches.setdefault(rev, {})
- matches.setdefault(parent, {})
- files = revfiles.setdefault(rev, [])
- for fn in fns:
- flog = getfile(fn)
- try:
- fnode = ctx.filenode(fn)
- except error.LookupError:
- continue
-
- copied = flog.renamed(fnode)
- copy = follow and copied and copied[0]
- if copy:
- copies.setdefault(rev, {})[fn] = copy
- if fn in skip:
- if copy:
- skip[copy] = True
- continue
- files.append(fn)
-
- if not matches[rev].has_key(fn):
- grepbody(fn, rev, flog.read(fnode))
-
- pfn = copy or fn
- if not matches[parent].has_key(pfn):
- try:
- fnode = pctx.filenode(pfn)
- grepbody(pfn, parent, flog.read(fnode))
- except error.LookupError:
- pass
- elif st == 'iter':
- parent = repo[rev].parents()[0].rev()
- for fn in sorted(revfiles.get(rev, [])):
- states = matches[rev][fn]
- copy = copies.get(rev, {}).get(fn)
- if fn in skip:
- if copy:
- skip[copy] = True
- continue
- pstates = matches.get(parent, {}).get(copy or fn, [])
- if pstates or states:
- r = display(fn, rev, pstates, states)
- found = found or r
- if r and not opts.get('all'):
- skip[fn] = True
- if copy:
- skip[copy] = True
-
-def heads(ui, repo, *branchrevs, **opts):
- """show current repository heads or show branch heads
-
- With no arguments, show all repository head changesets.
-
- Repository "heads" are changesets that don't have child
- changesets. They are where development generally takes place and
- are the usual targets for update and merge operations.
-
- If one or more REV is given, the "branch heads" will be shown for
- the named branch associated with that revision. The name of the
- branch is called the revision's branch tag.
-
- Branch heads are revisions on a given named branch that do not have
- any descendants on the same branch. A branch head could be a true head
- or it could be the last changeset on a branch before a new branch
- was created. If none of the branch heads are true heads, the branch
- is considered inactive. If -c/--closed is specified, also show branch
- heads marked closed (see hg commit --close-branch).
-
- If STARTREV is specified only those heads (or branch heads) that
- are descendants of STARTREV will be displayed.
- """
- if opts.get('rev'):
- start = repo.lookup(opts['rev'])
- else:
- start = None
- closed = opts.get('closed')
- hideinactive, _heads = opts.get('active'), None
- if not branchrevs:
- if closed:
- raise error.Abort(_('you must specify a branch to use --closed'))
- # Assume we're looking repo-wide heads if no revs were specified.
- heads = repo.heads(start)
- else:
- if hideinactive:
- _heads = repo.heads(start)
- heads = []
- visitedset = set()
- for branchrev in branchrevs:
- branch = repo[branchrev].branch()
- if branch in visitedset:
- continue
- visitedset.add(branch)
- bheads = repo.branchheads(branch, start, closed=closed)
- if not bheads:
- if not opts.get('rev'):
- ui.warn(_("no open branch heads on branch %s\n") % branch)
- elif branch != branchrev:
- ui.warn(_("no changes on branch %s containing %s are "
- "reachable from %s\n")
- % (branch, branchrev, opts.get('rev')))
- else:
- ui.warn(_("no changes on branch %s are reachable from %s\n")
- % (branch, opts.get('rev')))
- if hideinactive:
- bheads = [bhead for bhead in bheads if bhead in _heads]
- heads.extend(bheads)
- if not heads:
- return 1
- displayer = cmdutil.show_changeset(ui, repo, opts)
- for n in heads:
- displayer.show(repo[n])
-
-def help_(ui, name=None, with_version=False):
- """show help for a given topic or a help overview
-
- With no arguments, print a list of commands with short help messages.
-
- Given a topic, extension, or command name, print help for that
- topic."""
- option_lists = []
- textwidth = util.termwidth() - 2
-
- def addglobalopts(aliases):
- if ui.verbose:
- option_lists.append((_("global options:"), globalopts))
- if name == 'shortlist':
- option_lists.append((_('use "hg help" for the full list '
- 'of commands'), ()))
- else:
- if name == 'shortlist':
- msg = _('use "hg help" for the full list of commands '
- 'or "hg -v" for details')
- elif aliases:
- msg = _('use "hg -v help%s" to show aliases and '
- 'global options') % (name and " " + name or "")
- else:
- msg = _('use "hg -v help %s" to show global options') % name
- option_lists.append((msg, ()))
-
- def helpcmd(name):
- if with_version:
- version_(ui)
- ui.write('\n')
-
- try:
- aliases, i = cmdutil.findcmd(name, table, False)
- except error.AmbiguousCommand, inst:
- # py3k fix: except vars can't be used outside the scope of the
- # except block, nor can be used inside a lambda. python issue4617
- prefix = inst.args[0]
- select = lambda c: c.lstrip('^').startswith(prefix)
- helplist(_('list of commands:\n\n'), select)
- return
-
- # synopsis
- if len(i) > 2:
- if i[2].startswith('hg'):
- ui.write("%s\n" % i[2])
- else:
- ui.write('hg %s %s\n' % (aliases[0], i[2]))
- else:
- ui.write('hg %s\n' % aliases[0])
-
- # aliases
- if not ui.quiet and len(aliases) > 1:
- ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
-
- # description
- doc = gettext(i[0].__doc__)
- if not doc:
- doc = _("(no help text available)")
- if ui.quiet:
- doc = doc.splitlines()[0]
- ui.write("\n%s\n" % minirst.format(doc, textwidth))
-
- if not ui.quiet:
- # options
- if i[1]:
- option_lists.append((_("options:\n"), i[1]))
-
- addglobalopts(False)
-
- def helplist(header, select=None):
- h = {}
- cmds = {}
- for c, e in table.iteritems():
- f = c.split("|", 1)[0]
- if select and not select(f):
- continue
- if (not select and name != 'shortlist' and
- e[0].__module__ != __name__):
- continue
- if name == "shortlist" and not f.startswith("^"):
- continue
- f = f.lstrip("^")
- if not ui.debugflag and f.startswith("debug"):
- continue
- doc = e[0].__doc__
- if doc and 'DEPRECATED' in doc and not ui.verbose:
- continue
- doc = gettext(doc)
- if not doc:
- doc = _("(no help text available)")
- h[f] = doc.splitlines()[0].rstrip()
- cmds[f] = c.lstrip("^")
-
- if not h:
- ui.status(_('no commands defined\n'))
- return
-
- ui.status(header)
- fns = sorted(h)
- m = max(map(len, fns))
- for f in fns:
- if ui.verbose:
- commands = cmds[f].replace("|",", ")
- ui.write(" %s:\n %s\n"%(commands, h[f]))
- else:
- ui.write(' %-*s %s\n' % (m, f, util.wrap(h[f], m + 4)))
-
- if name != 'shortlist':
- exts, maxlength = extensions.enabled()
- text = help.listexts(_('enabled extensions:'), exts, maxlength)
- if text:
- ui.write("\n%s\n" % minirst.format(text, textwidth))
-
- if not ui.quiet:
- addglobalopts(True)
-
- def helptopic(name):
- for names, header, doc in help.helptable:
- if name in names:
- break
- else:
- raise error.UnknownCommand(name)
-
- # description
- if not doc:
- doc = _("(no help text available)")
- if hasattr(doc, '__call__'):
- doc = doc()
-
- ui.write("%s\n\n" % header)
- ui.write("%s\n" % minirst.format(doc, textwidth))
-
- def helpext(name):
- try:
- mod = extensions.find(name)
- except KeyError:
- raise error.UnknownCommand(name)
-
- doc = gettext(mod.__doc__) or _('no help text available')
- if '\n' not in doc:
- head, tail = doc, ""
- else:
- head, tail = doc.split('\n', 1)
- ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head))
- if tail:
- ui.write(minirst.format(tail, textwidth))
- ui.status('\n\n')
-
- try:
- ct = mod.cmdtable
- except AttributeError:
- ct = {}
-
- modcmds = set([c.split('|', 1)[0] for c in ct])
- helplist(_('list of commands:\n\n'), modcmds.__contains__)
-
- if name and name != 'shortlist':
- i = None
- for f in (helptopic, helpcmd, helpext):
- try:
- f(name)
- i = None
- break
- except error.UnknownCommand, inst:
- i = inst
- if i:
- raise i
-
- else:
- # program name
- if ui.verbose or with_version:
- version_(ui)
- else:
- ui.status(_("Mercurial Distributed SCM\n"))
- ui.status('\n')
-
- # list of commands
- if name == "shortlist":
- header = _('basic commands:\n\n')
- else:
- header = _('list of commands:\n\n')
-
- helplist(header)
-
- # list all option lists
- opt_output = []
- for title, options in option_lists:
- opt_output.append(("\n%s" % title, None))
- for shortopt, longopt, default, desc in options:
- if "DEPRECATED" in desc and not ui.verbose: continue
- opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
- longopt and " --%s" % longopt),
- "%s%s" % (desc,
- default
- and _(" (default: %s)") % default
- or "")))
-
- if not name:
- ui.write(_("\nadditional help topics:\n\n"))
- topics = []
- for names, header, doc in help.helptable:
- names = [(-len(name), name) for name in names]
- names.sort()
- topics.append((names[0][1], header))
- topics_len = max([len(s[0]) for s in topics])
- for t, desc in topics:
- ui.write(" %-*s %s\n" % (topics_len, t, desc))
-
- if opt_output:
- opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
- for first, second in opt_output:
- if second:
- second = util.wrap(second, opts_len + 3)
- ui.write(" %-*s %s\n" % (opts_len, first, second))
- else:
- ui.write("%s\n" % first)
-
-def identify(ui, repo, source=None,
- rev=None, num=None, id=None, branch=None, tags=None):
- """identify the working copy or specified revision
-
- With no revision, print a summary of the current state of the
- repository.
-
- Specifying a path to a repository root or Mercurial bundle will
- cause lookup to operate on that repository/bundle.
-
- This summary identifies the repository state using one or two
- parent hash identifiers, followed by a "+" if there are
- uncommitted changes in the working directory, a list of tags for
- this revision and a branch name for non-default branches.
- """
-
- if not repo and not source:
- raise util.Abort(_("There is no Mercurial repository here "
- "(.hg not found)"))
-
- hexfunc = ui.debugflag and hex or short
- default = not (num or id or branch or tags)
- output = []
-
- revs = []
- if source:
- source, revs, checkout = hg.parseurl(ui.expandpath(source), [])
- repo = hg.repository(ui, source)
-
- if not repo.local():
- if not rev and revs:
- rev = revs[0]
- if not rev:
- rev = "tip"
- if num or branch or tags:
- raise util.Abort(
- "can't query remote revision number, branch, or tags")
- output = [hexfunc(repo.lookup(rev))]
- elif not rev:
- ctx = repo[None]
- parents = ctx.parents()
- changed = False
- if default or id or num:
- changed = ctx.files() + ctx.deleted()
- if default or id:
- output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
- (changed) and "+" or "")]
- if num:
- output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
- (changed) and "+" or ""))
- else:
- ctx = repo[rev]
- if default or id:
- output = [hexfunc(ctx.node())]
- if num:
- output.append(str(ctx.rev()))
-
- if repo.local() and default and not ui.quiet:
- b = encoding.tolocal(ctx.branch())
- if b != 'default':
- output.append("(%s)" % b)
-
- # multiple tags for a single parent separated by '/'
- t = "/".join(ctx.tags())
- if t:
- output.append(t)
-
- if branch:
- output.append(encoding.tolocal(ctx.branch()))
-
- if tags:
- output.extend(ctx.tags())
-
- ui.write("%s\n" % ' '.join(output))
-
-def import_(ui, repo, patch1, *patches, **opts):
- """import an ordered set of patches
-
- Import a list of patches and commit them individually.
-
- If there are outstanding changes in the working directory, import
- will abort unless given the -f/--force flag.
-
- You can import a patch straight from a mail message. Even patches
- as attachments work (to use the body part, it must have type
- text/plain or text/x-patch). From and Subject headers of email
- message are used as default committer and commit message. All
- text/plain body parts before first diff are added to commit
- message.
-
- If the imported patch was generated by hg export, user and
- description from patch override values from message headers and
- body. Values given on command line with -m/--message and -u/--user
- override these.
-
- If --exact is specified, import will set the working directory to
- the parent of each patch before applying it, and will abort if the
- resulting changeset has a different ID than the one recorded in
- the patch. This may happen due to character set problems or other
- deficiencies in the text patch format.
-
- With -s/--similarity, hg will attempt to discover renames and
- copies in the patch in the same way as 'addremove'.
-
- To read a patch from standard input, use "-" as the patch name. If
- a URL is specified, the patch will be downloaded from it.
- See 'hg help dates' for a list of formats valid for -d/--date.
- """
- patches = (patch1,) + patches
-
- date = opts.get('date')
- if date:
- opts['date'] = util.parsedate(date)
-
- try:
- sim = float(opts.get('similarity') or 0)
- except ValueError:
- raise util.Abort(_('similarity must be a number'))
- if sim < 0 or sim > 100:
- raise util.Abort(_('similarity must be between 0 and 100'))
-
- if opts.get('exact') or not opts.get('force'):
- cmdutil.bail_if_changed(repo)
-
- d = opts["base"]
- strip = opts["strip"]
- wlock = lock = None
- try:
- wlock = repo.wlock()
- lock = repo.lock()
- for p in patches:
- pf = os.path.join(d, p)
-
- if pf == '-':
- ui.status(_("applying patch from stdin\n"))
- pf = sys.stdin
- else:
- ui.status(_("applying %s\n") % p)
- pf = url.open(ui, pf)
- data = patch.extract(ui, pf)
- tmpname, message, user, date, branch, nodeid, p1, p2 = data
-
- if tmpname is None:
- raise util.Abort(_('no diffs found'))
-
- try:
- cmdline_message = cmdutil.logmessage(opts)
- if cmdline_message:
- # pickup the cmdline msg
- message = cmdline_message
- elif message:
- # pickup the patch msg
- message = message.strip()
- else:
- # launch the editor
- message = None
- ui.debug(_('message:\n%s\n') % message)
-
- wp = repo.parents()
- if opts.get('exact'):
- if not nodeid or not p1:
- raise util.Abort(_('not a Mercurial patch'))
- p1 = repo.lookup(p1)
- p2 = repo.lookup(p2 or hex(nullid))
-
- if p1 != wp[0].node():
- hg.clean(repo, p1)
- repo.dirstate.setparents(p1, p2)
- elif p2:
- try:
- p1 = repo.lookup(p1)
- p2 = repo.lookup(p2)
- if p1 == wp[0].node():
- repo.dirstate.setparents(p1, p2)
- except error.RepoError:
- pass
- if opts.get('exact') or opts.get('import_branch'):
- repo.dirstate.setbranch(branch or 'default')
-
- files = {}
- try:
- patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
- files=files, eolmode=None)
- finally:
- files = patch.updatedir(ui, repo, files, similarity=sim/100.)
- if not opts.get('no_commit'):
- m = cmdutil.matchfiles(repo, files or [])
- n = repo.commit(message, opts.get('user') or user,
- opts.get('date') or date, match=m,
- editor=cmdutil.commiteditor)
- if opts.get('exact'):
- if hex(n) != nodeid:
- repo.rollback()
- raise util.Abort(_('patch is damaged'
- ' or loses information'))
- # Force a dirstate write so that the next transaction
- # backups an up-do-date file.
- repo.dirstate.write()
- finally:
- os.unlink(tmpname)
- finally:
- release(lock, wlock)
-
-def incoming(ui, repo, source="default", **opts):
- """show new changesets found in source
-
- Show new changesets found in the specified path/URL or the default
- pull location. These are the changesets that would have been pulled
- if a pull at the time you issued this command.
-
- For remote repository, using --bundle avoids downloading the
- changesets twice if the incoming is followed by a pull.
-
- See pull for valid source format details.
- """
- limit = cmdutil.loglimit(opts)
- source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
- other = hg.repository(cmdutil.remoteui(repo, opts), source)
- ui.status(_('comparing with %s\n') % url.hidepassword(source))
- if revs:
- revs = [other.lookup(rev) for rev in revs]
- common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
- force=opts["force"])
- if not incoming:
- try:
- os.unlink(opts["bundle"])
- except:
- pass
- ui.status(_("no changes found\n"))
- return 1
-
- cleanup = None
- try:
- fname = opts["bundle"]
- if fname or not other.local():
- # create a bundle (uncompressed if other repo is not local)
-
- if revs is None and other.capable('changegroupsubset'):
- revs = rheads
-
- if revs is None:
- cg = other.changegroup(incoming, "incoming")
- else:
- cg = other.changegroupsubset(incoming, revs, 'incoming')
- bundletype = other.local() and "HG10BZ" or "HG10UN"
- fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
- # keep written bundle?
- if opts["bundle"]:
- cleanup = None
- if not other.local():
- # use the created uncompressed bundlerepo
- other = bundlerepo.bundlerepository(ui, repo.root, fname)
-
- o = other.changelog.nodesbetween(incoming, revs)[0]
- if opts.get('newest_first'):
- o.reverse()
- displayer = cmdutil.show_changeset(ui, other, opts)
- count = 0
- for n in o:
- if count >= limit:
- break
- parents = [p for p in other.changelog.parents(n) if p != nullid]
- if opts.get('no_merges') and len(parents) == 2:
- continue
- count += 1
- displayer.show(other[n])
- finally:
- if hasattr(other, 'close'):
- other.close()
- if cleanup:
- os.unlink(cleanup)
-
-def init(ui, dest=".", **opts):
- """create a new repository in the given directory
-
- Initialize a new repository in the given directory. If the given
- directory does not exist, it will be created.
-
- If no directory is given, the current directory is used.
-
- It is possible to specify an ssh:// URL as the destination.
- See 'hg help urls' for more information.
- """
- hg.repository(cmdutil.remoteui(ui, opts), dest, create=1)
-
-def locate(ui, repo, *pats, **opts):
- """locate files matching specific patterns
-
- Print files under Mercurial control in the working directory whose
- names match the given patterns.
-
- By default, this command searches all directories in the working
- directory. To search just the current directory and its
- subdirectories, use "--include .".
-
- If no patterns are given to match, this command prints the names
- of all files under Mercurial control in the working directory.
-
- If you want to feed the output of this command into the "xargs"
- command, use the -0 option to both this command and "xargs". This
- will avoid the problem of "xargs" treating single filenames that
- contain whitespace as multiple filenames.
- """
- end = opts.get('print0') and '\0' or '\n'
- rev = opts.get('rev') or None
-
- ret = 1
- m = cmdutil.match(repo, pats, opts, default='relglob')
- m.bad = lambda x,y: False
- for abs in repo[rev].walk(m):
- if not rev and abs not in repo.dirstate:
- continue
- if opts.get('fullpath'):
- ui.write(repo.wjoin(abs), end)
- else:
- ui.write(((pats and m.rel(abs)) or abs), end)
- ret = 0
-
- return ret
-
-def log(ui, repo, *pats, **opts):
- """show revision history of entire repository or files
-
- Print the revision history of the specified files or the entire
- project.
-
- File history is shown without following rename or copy history of
- files. Use -f/--follow with a filename to follow history across
- renames and copies. --follow without a filename will only show
- ancestors or descendants of the starting revision. --follow-first
- only follows the first parent of merge revisions.
-
- If no revision range is specified, the default is tip:0 unless
- --follow is set, in which case the working directory parent is
- used as the starting revision.
-
- See 'hg help dates' for a list of formats valid for -d/--date.
-
- By default this command prints revision number and changeset id,
- tags, non-trivial parents, user, date and time, and a summary for
- each commit. When the -v/--verbose switch is used, the list of
- changed files and full commit message are shown.
-
- NOTE: log -p/--patch may generate unexpected diff output for merge
- changesets, as it will only compare the merge changeset against
- its first parent. Also, only files different from BOTH parents
- will appear in files:.
- """
-
- get = util.cachefunc(lambda r: repo[r].changeset())
- changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
-
- limit = cmdutil.loglimit(opts)
- count = 0
-
- if opts.get('copies') and opts.get('rev'):
- endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
- else:
- endrev = len(repo)
- rcache = {}
- ncache = {}
- def getrenamed(fn, rev):
- '''looks up all renames for a file (up to endrev) the first
- time the file is given. It indexes on the changerev and only
- parses the manifest if linkrev != changerev.
- Returns rename info for fn at changerev rev.'''
- if fn not in rcache:
- rcache[fn] = {}
- ncache[fn] = {}
- fl = repo.file(fn)
- for i in fl:
- node = fl.node(i)
- lr = fl.linkrev(i)
- renamed = fl.renamed(node)
- rcache[fn][lr] = renamed
- if renamed:
- ncache[fn][node] = renamed
- if lr >= endrev:
- break
- if rev in rcache[fn]:
- return rcache[fn][rev]
-
- # If linkrev != rev (i.e. rev not found in rcache) fallback to
- # filectx logic.
-
- try:
- return repo[rev][fn].renamed()
- except error.LookupError:
- pass
- return None
-
- df = False
- if opts["date"]:
- df = util.matchdate(opts["date"])
-
- only_branches = opts.get('only_branch')
-
- displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
- for st, rev, fns in changeiter:
- if st == 'add':
- parents = [p for p in repo.changelog.parentrevs(rev)
- if p != nullrev]
- if opts.get('no_merges') and len(parents) == 2:
- continue
- if opts.get('only_merges') and len(parents) != 2:
- continue
-
- if only_branches:
- revbranch = get(rev)[5]['branch']
- if revbranch not in only_branches:
- continue
-
- if df:
- changes = get(rev)
- if not df(changes[2][0]):
- continue
-
- if opts.get('keyword'):
- changes = get(rev)
- miss = 0
- for k in [kw.lower() for kw in opts['keyword']]:
- if not (k in changes[1].lower() or
- k in changes[4].lower() or
- k in " ".join(changes[3]).lower()):
- miss = 1
- break
- if miss:
- continue
-
- if opts['user']:
- changes = get(rev)
- if not [k for k in opts['user'] if k in changes[1]]:
- continue
-
- copies = []
- if opts.get('copies') and rev:
- for fn in get(rev)[3]:
- rename = getrenamed(fn, rev)
- if rename:
- copies.append((fn, rename[0]))
- displayer.show(context.changectx(repo, rev), copies=copies)
- elif st == 'iter':
- if count == limit: break
- if displayer.flush(rev):
- count += 1
-
-def manifest(ui, repo, node=None, rev=None):
- """output the current or given revision of the project manifest
-
- Print a list of version controlled files for the given revision.
- If no revision is given, the first parent of the working directory
- is used, or the null revision if no revision is checked out.
-
- With -v, print file permissions, symlink and executable bits.
- With --debug, print file revision hashes.
- """
-
- if rev and node:
- raise util.Abort(_("please specify just one revision"))
-
- if not node:
- node = rev
-
- decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
- ctx = repo[node]
- for f in ctx:
- if ui.debugflag:
- ui.write("%40s " % hex(ctx.manifest()[f]))
- if ui.verbose:
- ui.write(decor[ctx.flags(f)])
- ui.write("%s\n" % f)
-
-def merge(ui, repo, node=None, **opts):
- """merge working directory with another revision
-
- The current working directory is updated with all changes made in
- the requested revision since the last common predecessor revision.
-
- Files that changed between either parent are marked as changed for
- the next commit and a commit must be performed before any further
- updates to the repository are allowed. The next commit will have
- two parents.
-
- If no revision is specified, the working directory's parent is a
- head revision, and the current branch contains exactly one other
- head, the other head is merged with by default. Otherwise, an
- explicit revision with which to merge with must be provided.
- """
-
- if opts.get('rev') and node:
- raise util.Abort(_("please specify just one revision"))
- if not node:
- node = opts.get('rev')
-
- if not node:
- branch = repo.changectx(None).branch()
- bheads = repo.branchheads(branch)
- if len(bheads) > 2:
- raise util.Abort(_("branch '%s' has %d heads - "
- "please merge with an explicit rev") %
- (branch, len(bheads)))
-
- parent = repo.dirstate.parents()[0]
- if len(bheads) == 1:
- if len(repo.heads()) > 1:
- raise util.Abort(_("branch '%s' has one head - "
- "please merge with an explicit rev") %
- branch)
- msg = _('there is nothing to merge')
- if parent != repo.lookup(repo[None].branch()):
- msg = _('%s - use "hg update" instead') % msg
- raise util.Abort(msg)
-
- if parent not in bheads:
- raise util.Abort(_('working dir not at a head rev - '
- 'use "hg update" or merge with an explicit rev'))
- node = parent == bheads[0] and bheads[-1] or bheads[0]
-
- if opts.get('preview'):
- p1 = repo['.']
- p2 = repo[node]
- common = p1.ancestor(p2)
- roots, heads = [common.node()], [p2.node()]
- displayer = cmdutil.show_changeset(ui, repo, opts)
- for node in repo.changelog.nodesbetween(roots=roots, heads=heads)[0]:
- displayer.show(repo[node])
- return 0
-
- return hg.merge(repo, node, force=opts.get('force'))
-
-def outgoing(ui, repo, dest=None, **opts):
- """show changesets not found in destination
-
- Show changesets not found in the specified destination repository
- or the default push location. These are the changesets that would
- be pushed if a push was requested.
-
- See pull for valid destination format details.
- """
- limit = cmdutil.loglimit(opts)
- dest, revs, checkout = hg.parseurl(
- ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
- if revs:
- revs = [repo.lookup(rev) for rev in revs]
-
- other = hg.repository(cmdutil.remoteui(repo, opts), dest)
- ui.status(_('comparing with %s\n') % url.hidepassword(dest))
- o = repo.findoutgoing(other, force=opts.get('force'))
- if not o:
- ui.status(_("no changes found\n"))
- return 1
- o = repo.changelog.nodesbetween(o, revs)[0]
- if opts.get('newest_first'):
- o.reverse()
- displayer = cmdutil.show_changeset(ui, repo, opts)
- count = 0
- for n in o:
- if count >= limit:
- break
- parents = [p for p in repo.changelog.parents(n) if p != nullid]
- if opts.get('no_merges') and len(parents) == 2:
- continue
- count += 1
- displayer.show(repo[n])
-
-def parents(ui, repo, file_=None, **opts):
- """show the parents of the working directory or revision
-
- Print the working directory's parent revisions. If a revision is
- given via -r/--rev, the parent of that revision will be printed.
- If a file argument is given, the revision in which the file was
- last changed (before the working directory revision or the
- argument to --rev if given) is printed.
- """
- rev = opts.get('rev')
- if rev:
- ctx = repo[rev]
- else:
- ctx = repo[None]
-
- if file_:
- m = cmdutil.match(repo, (file_,), opts)
- if m.anypats() or len(m.files()) != 1:
- raise util.Abort(_('can only specify an explicit filename'))
- file_ = m.files()[0]
- filenodes = []
- for cp in ctx.parents():
- if not cp:
- continue
- try:
- filenodes.append(cp.filenode(file_))
- except error.LookupError:
- pass
- if not filenodes:
- raise util.Abort(_("'%s' not found in manifest!") % file_)
- fl = repo.file(file_)
- p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
- else:
- p = [cp.node() for cp in ctx.parents()]
-
- displayer = cmdutil.show_changeset(ui, repo, opts)
- for n in p:
- if n != nullid:
- displayer.show(repo[n])
-
-def paths(ui, repo, search=None):
- """show aliases for remote repositories
-
- Show definition of symbolic path name NAME. If no name is given,
- show definition of all available names.
-
- Path names are defined in the [paths] section of /etc/mercurial/hgrc
- and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
-
- See 'hg help urls' for more information.
- """
- if search:
- for name, path in ui.configitems("paths"):
- if name == search:
- ui.write("%s\n" % url.hidepassword(path))
- return
- ui.warn(_("not found!\n"))
- return 1
- else:
- for name, path in ui.configitems("paths"):
- ui.write("%s = %s\n" % (name, url.hidepassword(path)))
-
-def postincoming(ui, repo, modheads, optupdate, checkout):
- if modheads == 0:
- return
- if optupdate:
- if (modheads <= 1 or len(repo.branchheads()) == 1) or checkout:
- return hg.update(repo, checkout)
- else:
- ui.status(_("not updating, since new heads added\n"))
- if modheads > 1:
- ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
- else:
- ui.status(_("(run 'hg update' to get a working copy)\n"))
-
-def pull(ui, repo, source="default", **opts):
- """pull changes from the specified source
-
- Pull changes from a remote repository to a local one.
-
- This finds all changes from the repository at the specified path
- or URL and adds them to a local repository (the current one unless
- -R is specified). By default, this does not update the copy of the
- project in the working directory.
-
- Use hg incoming if you want to see what would have been added by a
- pull at the time you issued this command. If you then decide to
- added those changes to the repository, you should use pull -r X
- where X is the last changeset listed by hg incoming.
-
- If SOURCE is omitted, the 'default' path will be used.
- See 'hg help urls' for more information.
- """
- source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
- other = hg.repository(cmdutil.remoteui(repo, opts), source)
- ui.status(_('pulling from %s\n') % url.hidepassword(source))
- if revs:
- try:
- revs = [other.lookup(rev) for rev in revs]
- except error.CapabilityError:
- err = _("Other repository doesn't support revision lookup, "
- "so a rev cannot be specified.")
- raise util.Abort(err)
-
- modheads = repo.pull(other, heads=revs, force=opts.get('force'))
- return postincoming(ui, repo, modheads, opts.get('update'), checkout)
-
-def push(ui, repo, dest=None, **opts):
- """push changes to the specified destination
-
- Push changes from the local repository to the given destination.
-
- This is the symmetrical operation for pull. It moves changes from
- the current repository to a different one. If the destination is
- local this is identical to a pull in that directory from the
- current one.
-
- By default, push will refuse to run if it detects the result would
- increase the number of remote heads. This generally indicates the
- user forgot to pull and merge before pushing.
-
- If -r/--rev is used, the named revision and all its ancestors will
- be pushed to the remote repository.
-
- Please see 'hg help urls' for important details about ssh://
- URLs. If DESTINATION is omitted, a default path will be used.
- """
- dest, revs, checkout = hg.parseurl(
- ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
- other = hg.repository(cmdutil.remoteui(repo, opts), dest)
- ui.status(_('pushing to %s\n') % url.hidepassword(dest))
- if revs:
- revs = [repo.lookup(rev) for rev in revs]
-
- # push subrepos depth-first for coherent ordering
- c = repo['']
- subs = c.substate # only repos that are committed
- for s in sorted(subs):
- c.sub(s).push(opts.get('force'))
-
- r = repo.push(other, opts.get('force'), revs=revs)
- return r == 0
-
-def recover(ui, repo):
- """roll back an interrupted transaction
-
- Recover from an interrupted commit or pull.
-
- This command tries to fix the repository status after an
- interrupted operation. It should only be necessary when Mercurial
- suggests it.
- """
- if repo.recover():
- return hg.verify(repo)
- return 1
-
-def remove(ui, repo, *pats, **opts):
- """remove the specified files on the next commit
-
- Schedule the indicated files for removal from the repository.
-
- This only removes files from the current branch, not from the
- entire project history. -A/--after can be used to remove only
- files that have already been deleted, -f/--force can be used to
- force deletion, and -Af can be used to remove files from the next
- revision without deleting them from the working directory.
-
- The following table details the behavior of remove for different
- file states (columns) and option combinations (rows). The file
- states are Added [A], Clean [C], Modified [M] and Missing [!] (as
- reported by hg status). The actions are Warn, Remove (from branch)
- and Delete (from disk)::
-
- A C M !
- none W RD W R
- -f R RD RD R
- -A W W W R
- -Af R R R R
-
- This command schedules the files to be removed at the next commit.
- To undo a remove before that, see hg revert.
- """
-
- after, force = opts.get('after'), opts.get('force')
- if not pats and not after:
- raise util.Abort(_('no files specified'))
-
- m = cmdutil.match(repo, pats, opts)
- s = repo.status(match=m, clean=True)
- modified, added, deleted, clean = s[0], s[1], s[3], s[6]
-
- for f in m.files():
- if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
- ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
-
- def warn(files, reason):
- for f in files:
- ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
- % (m.rel(f), reason))
-
- if force:
- remove, forget = modified + deleted + clean, added
- elif after:
- remove, forget = deleted, []
- warn(modified + added + clean, _('still exists'))
- else:
- remove, forget = deleted + clean, []
- warn(modified, _('is modified'))
- warn(added, _('has been marked for add'))
-
- for f in sorted(remove + forget):
- if ui.verbose or not m.exact(f):
- ui.status(_('removing %s\n') % m.rel(f))
-
- repo.forget(forget)
- repo.remove(remove, unlink=not after)
-
-def rename(ui, repo, *pats, **opts):
- """rename files; equivalent of copy + remove
-
- Mark dest as copies of sources; mark sources for deletion. If dest
- is a directory, copies are put in that directory. If dest is a
- file, there can only be one source.
-
- By default, this command copies the contents of files as they
- exist in the working directory. If invoked with -A/--after, the
- operation is recorded, but no copying is performed.
-
- This command takes effect at the next commit. To undo a rename
- before that, see hg revert.
- """
- wlock = repo.wlock(False)
- try:
- return cmdutil.copy(ui, repo, pats, opts, rename=True)
- finally:
- wlock.release()
-
-def resolve(ui, repo, *pats, **opts):
- """retry file merges from a merge or update
-
- This command will cleanly retry unresolved file merges using file
- revisions preserved from the last update or merge. To attempt to
- resolve all unresolved files, use the -a/--all switch.
-
- If a conflict is resolved manually, please note that the changes
- will be overwritten if the merge is retried with resolve. The
- -m/--mark switch should be used to mark the file as resolved.
-
- This command also allows listing resolved files and manually
- indicating whether or not files are resolved. All files must be
- marked as resolved before a commit is permitted.
-
- The codes used to show the status of files are::
-
- U = unresolved
- R = resolved
- """
-
- all, mark, unmark, show = [opts.get(o) for o in 'all mark unmark list'.split()]
-
- if (show and (mark or unmark)) or (mark and unmark):
- raise util.Abort(_("too many options specified"))
- if pats and all:
- raise util.Abort(_("can't specify --all and patterns"))
- if not (all or pats or show or mark or unmark):
- raise util.Abort(_('no files or directories specified; '
- 'use --all to remerge all files'))
-
- ms = merge_.mergestate(repo)
- m = cmdutil.match(repo, pats, opts)
-
- for f in ms:
- if m(f):
- if show:
- ui.write("%s %s\n" % (ms[f].upper(), f))
- elif mark:
- ms.mark(f, "r")
- elif unmark:
- ms.mark(f, "u")
- else:
- wctx = repo[None]
- mctx = wctx.parents()[-1]
-
- # backup pre-resolve (merge uses .orig for its own purposes)
- a = repo.wjoin(f)
- util.copyfile(a, a + ".resolve")
-
- # resolve file
- ms.resolve(f, wctx, mctx)
-
- # replace filemerge's .orig file with our resolve file
- util.rename(a + ".resolve", a + ".orig")
-
-def revert(ui, repo, *pats, **opts):
- """restore individual files or directories to an earlier state
-
- (Use update -r to check out earlier revisions, revert does not
- change the working directory parents.)
-
- With no revision specified, revert the named files or directories
- to the contents they had in the parent of the working directory.
- This restores the contents of the affected files to an unmodified
- state and unschedules adds, removes, copies, and renames. If the
- working directory has two parents, you must explicitly specify the
- revision to revert to.
-
- Using the -r/--rev option, revert the given files or directories
- to their contents as of a specific revision. This can be helpful
- to "roll back" some or all of an earlier change. See 'hg help
- dates' for a list of formats valid for -d/--date.
-
- Revert modifies the working directory. It does not commit any
- changes, or change the parent of the working directory. If you
- revert to a revision other than the parent of the working
- directory, the reverted files will thus appear modified
- afterwards.
-
- If a file has been deleted, it is restored. If the executable mode
- of a file was changed, it is reset.
-
- If names are given, all files matching the names are reverted.
- If no arguments are given, no files are reverted.
-
- Modified files are saved with a .orig suffix before reverting.
- To disable these backups, use --no-backup.
- """
-
- if opts["date"]:
- if opts["rev"]:
- raise util.Abort(_("you can't specify a revision and a date"))
- opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
-
- if not pats and not opts.get('all'):
- raise util.Abort(_('no files or directories specified; '
- 'use --all to revert the whole repo'))
-
- parent, p2 = repo.dirstate.parents()
- if not opts.get('rev') and p2 != nullid:
- raise util.Abort(_('uncommitted merge - please provide a '
- 'specific revision'))
- ctx = repo[opts.get('rev')]
- node = ctx.node()
- mf = ctx.manifest()
- if node == parent:
- pmf = mf
- else:
- pmf = None
-
- # need all matching names in dirstate and manifest of target rev,
- # so have to walk both. do not print errors if files exist in one
- # but not other.
-
- names = {}
-
- wlock = repo.wlock()
- try:
- # walk dirstate.
-
- m = cmdutil.match(repo, pats, opts)
- m.bad = lambda x,y: False
- for abs in repo.walk(m):
- names[abs] = m.rel(abs), m.exact(abs)
-
- # walk target manifest.
-
- def badfn(path, msg):
- if path in names:
- return
- path_ = path + '/'
- for f in names:
- if f.startswith(path_):
- return
- ui.warn("%s: %s\n" % (m.rel(path), msg))
-
- m = cmdutil.match(repo, pats, opts)
- m.bad = badfn
- for abs in repo[node].walk(m):
- if abs not in names:
- names[abs] = m.rel(abs), m.exact(abs)
-
- m = cmdutil.matchfiles(repo, names)
- changes = repo.status(match=m)[:4]
- modified, added, removed, deleted = map(set, changes)
-
- # if f is a rename, also revert the source
- cwd = repo.getcwd()
- for f in added:
- src = repo.dirstate.copied(f)
- if src and src not in names and repo.dirstate[src] == 'r':
- removed.add(src)
- names[src] = (repo.pathto(src, cwd), True)
-
- def removeforget(abs):
- if repo.dirstate[abs] == 'a':
- return _('forgetting %s\n')
- return _('removing %s\n')
-
- revert = ([], _('reverting %s\n'))
- add = ([], _('adding %s\n'))
- remove = ([], removeforget)
- undelete = ([], _('undeleting %s\n'))
-
- disptable = (
- # dispatch table:
- # file state
- # action if in target manifest
- # action if not in target manifest
- # make backup if in target manifest
- # make backup if not in target manifest
- (modified, revert, remove, True, True),
- (added, revert, remove, True, False),
- (removed, undelete, None, False, False),
- (deleted, revert, remove, False, False),
- )
-
- for abs, (rel, exact) in sorted(names.items()):
- mfentry = mf.get(abs)
- target = repo.wjoin(abs)
- def handle(xlist, dobackup):
- xlist[0].append(abs)
- if dobackup and not opts.get('no_backup') and util.lexists(target):
- bakname = "%s.orig" % rel
- ui.note(_('saving current version of %s as %s\n') %
- (rel, bakname))
- if not opts.get('dry_run'):
- util.copyfile(target, bakname)
- if ui.verbose or not exact:
- msg = xlist[1]
- if not isinstance(msg, basestring):
- msg = msg(abs)
- ui.status(msg % rel)
- for table, hitlist, misslist, backuphit, backupmiss in disptable:
- if abs not in table: continue
- # file has changed in dirstate
- if mfentry:
- handle(hitlist, backuphit)
- elif misslist is not None:
- handle(misslist, backupmiss)
- break
- else:
- if abs not in repo.dirstate:
- if mfentry:
- handle(add, True)
- elif exact:
- ui.warn(_('file not managed: %s\n') % rel)
- continue
- # file has not changed in dirstate
- if node == parent:
- if exact: ui.warn(_('no changes needed to %s\n') % rel)
- continue
- if pmf is None:
- # only need parent manifest in this unlikely case,
- # so do not read by default
- pmf = repo[parent].manifest()
- if abs in pmf:
- if mfentry:
- # if version of file is same in parent and target
- # manifests, do nothing
- if (pmf[abs] != mfentry or
- pmf.flags(abs) != mf.flags(abs)):
- handle(revert, False)
- else:
- handle(remove, False)
-
- if not opts.get('dry_run'):
- def checkout(f):
- fc = ctx[f]
- repo.wwrite(f, fc.data(), fc.flags())
-
- audit_path = util.path_auditor(repo.root)
- for f in remove[0]:
- if repo.dirstate[f] == 'a':
- repo.dirstate.forget(f)
- continue
- audit_path(f)
- try:
- util.unlink(repo.wjoin(f))
- except OSError:
- pass
- repo.dirstate.remove(f)
-
- normal = None
- if node == parent:
- # We're reverting to our parent. If possible, we'd like status
- # to report the file as clean. We have to use normallookup for
- # merges to avoid losing information about merged/dirty files.
- if p2 != nullid:
- normal = repo.dirstate.normallookup
- else:
- normal = repo.dirstate.normal
- for f in revert[0]:
- checkout(f)
- if normal:
- normal(f)
-
- for f in add[0]:
- checkout(f)
- repo.dirstate.add(f)
-
- normal = repo.dirstate.normallookup
- if node == parent and p2 == nullid:
- normal = repo.dirstate.normal
- for f in undelete[0]:
- checkout(f)
- normal(f)
-
- finally:
- wlock.release()
-
-def rollback(ui, repo):
- """roll back the last transaction
-
- This command should be used with care. There is only one level of
- rollback, and there is no way to undo a rollback. It will also
- restore the dirstate at the time of the last transaction, losing
- any dirstate changes since that time. This command does not alter
- the working directory.
-
- Transactions are used to encapsulate the effects of all commands
- that create new changesets or propagate existing changesets into a
- repository. For example, the following commands are transactional,
- and their effects can be rolled back::
-
- commit
- import
- pull
- push (with this repository as destination)
- unbundle
-
- This command is not intended for use on public repositories. Once
- changes are visible for pull by other users, rolling a transaction
- back locally is ineffective (someone else may already have pulled
- the changes). Furthermore, a race is possible with readers of the
- repository; for example an in-progress pull from the repository
- may fail if a rollback is performed.
- """
- repo.rollback()
-
-def root(ui, repo):
- """print the root (top) of the current working directory
-
- Print the root directory of the current repository.
- """
- ui.write(repo.root + "\n")
-
-def serve(ui, repo, **opts):
- """export the repository via HTTP
-
- Start a local HTTP repository browser and pull server.
-
- By default, the server logs accesses to stdout and errors to
- stderr. Use the -A/--accesslog and -E/--errorlog options to log to
- files.
- """
-
- if opts["stdio"]:
- if repo is None:
- raise error.RepoError(_("There is no Mercurial repository here"
- " (.hg not found)"))
- s = sshserver.sshserver(ui, repo)
- s.serve_forever()
-
- baseui = repo and repo.baseui or ui
- optlist = ("name templates style address port prefix ipv6"
- " accesslog errorlog webdir_conf certificate encoding")
- for o in optlist.split():
- if opts.get(o, None):
- baseui.setconfig("web", o, str(opts[o]))
- if (repo is not None) and (repo.ui != baseui):
- repo.ui.setconfig("web", o, str(opts[o]))
-
- if repo is None and not ui.config("web", "webdir_conf"):
- raise error.RepoError(_("There is no Mercurial repository here"
- " (.hg not found)"))
-
- class service(object):
- def init(self):
- util.set_signal_handler()
- self.httpd = server.create_server(baseui, repo)
-
- if not ui.verbose: return
-
- if self.httpd.prefix:
- prefix = self.httpd.prefix.strip('/') + '/'
- else:
- prefix = ''
-
- port = ':%d' % self.httpd.port
- if port == ':80':
- port = ''
-
- bindaddr = self.httpd.addr
- if bindaddr == '0.0.0.0':
- bindaddr = '*'
- elif ':' in bindaddr: # IPv6
- bindaddr = '[%s]' % bindaddr
-
- fqaddr = self.httpd.fqaddr
- if ':' in fqaddr:
- fqaddr = '[%s]' % fqaddr
- ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
- (fqaddr, port, prefix, bindaddr, self.httpd.port))
-
- def run(self):
- self.httpd.serve_forever()
-
- service = service()
-
- cmdutil.service(opts, initfn=service.init, runfn=service.run)
-
-def status(ui, repo, *pats, **opts):
- """show changed files in the working directory
-
- Show status of files in the repository. If names are given, only
- files that match are shown. Files that are clean or ignored or
- the source of a copy/move operation, are not listed unless
- -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
- Unless options described with "show only ..." are given, the
- options -mardu are used.
-
- Option -q/--quiet hides untracked (unknown and ignored) files
- unless explicitly requested with -u/--unknown or -i/--ignored.
-
- NOTE: status may appear to disagree with diff if permissions have
- changed or a merge has occurred. The standard diff format does not
- report permission changes and diff only reports changes relative
- to one merge parent.
-
- If one revision is given, it is used as the base revision.
- If two revisions are given, the differences between them are
- shown.
-
- The codes used to show the status of files are::
-
- M = modified
- A = added
- R = removed
- C = clean
- ! = missing (deleted by non-hg command, but still tracked)
- ? = not tracked
- I = ignored
- = origin of the previous file listed as A (added)
- """
-
- node1, node2 = cmdutil.revpair(repo, opts.get('rev'))
- cwd = (pats and repo.getcwd()) or ''
- end = opts.get('print0') and '\0' or '\n'
- copy = {}
- states = 'modified added removed deleted unknown ignored clean'.split()
- show = [k for k in states if opts.get(k)]
- if opts.get('all'):
- show += ui.quiet and (states[:4] + ['clean']) or states
- if not show:
- show = ui.quiet and states[:4] or states[:5]
-
- stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
- 'ignored' in show, 'clean' in show, 'unknown' in show)
- changestates = zip(states, 'MAR!?IC', stat)
-
- if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
- ctxn = repo[nullid]
- ctx1 = repo[node1]
- ctx2 = repo[node2]
- added = stat[1]
- if node2 is None:
- added = stat[0] + stat[1] # merged?
-
- for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
- if k in added:
- copy[k] = v
- elif v in added:
- copy[v] = k
-
- for state, char, files in changestates:
- if state in show:
- format = "%s %%s%s" % (char, end)
- if opts.get('no_status'):
- format = "%%s%s" % end
-
- for f in files:
- ui.write(format % repo.pathto(f, cwd))
- if f in copy:
- ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end))
-
-def tag(ui, repo, name1, *names, **opts):
- """add one or more tags for the current or given revision
-
- Name a particular revision using <name>.
-
- Tags are used to name particular revisions of the repository and are
- very useful to compare different revisions, to go back to significant
- earlier versions or to mark branch points as releases, etc.
-
- If no revision is given, the parent of the working directory is
- used, or tip if no revision is checked out.
-
- To facilitate version control, distribution, and merging of tags,
- they are stored as a file named ".hgtags" which is managed
- similarly to other project files and can be hand-edited if
- necessary. The file '.hg/localtags' is used for local tags (not
- shared among repositories).
-
- See 'hg help dates' for a list of formats valid for -d/--date.
- """
-
- rev_ = "."
- names = (name1,) + names
- if len(names) != len(set(names)):
- raise util.Abort(_('tag names must be unique'))
- for n in names:
- if n in ['tip', '.', 'null']:
- raise util.Abort(_('the name \'%s\' is reserved') % n)
- if opts.get('rev') and opts.get('remove'):
- raise util.Abort(_("--rev and --remove are incompatible"))
- if opts.get('rev'):
- rev_ = opts['rev']
- message = opts.get('message')
- if opts.get('remove'):
- expectedtype = opts.get('local') and 'local' or 'global'
- for n in names:
- if not repo.tagtype(n):
- raise util.Abort(_('tag \'%s\' does not exist') % n)
- if repo.tagtype(n) != expectedtype:
- if expectedtype == 'global':
- raise util.Abort(_('tag \'%s\' is not a global tag') % n)
- else:
- raise util.Abort(_('tag \'%s\' is not a local tag') % n)
- rev_ = nullid
- if not message:
- # we don't translate commit messages
- message = 'Removed tag %s' % ', '.join(names)
- elif not opts.get('force'):
- for n in names:
- if n in repo.tags():
- raise util.Abort(_('tag \'%s\' already exists '
- '(use -f to force)') % n)
- if not rev_ and repo.dirstate.parents()[1] != nullid:
- raise util.Abort(_('uncommitted merge - please provide a '
- 'specific revision'))
- r = repo[rev_].node()
-
- if not message:
- # we don't translate commit messages
- message = ('Added tag %s for changeset %s' %
- (', '.join(names), short(r)))
-
- date = opts.get('date')
- if date:
- date = util.parsedate(date)
-
- repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
-
-def tags(ui, repo):
- """list repository tags
-
- This lists both regular and local tags. When the -v/--verbose
- switch is used, a third column "local" is printed for local tags.
- """
-
- hexfunc = ui.debugflag and hex or short
- tagtype = ""
-
- for t, n in reversed(repo.tagslist()):
- if ui.quiet:
- ui.write("%s\n" % t)
- continue
-
- try:
- hn = hexfunc(n)
- r = "%5d:%s" % (repo.changelog.rev(n), hn)
- except error.LookupError:
- r = " ?:%s" % hn
- else:
- spaces = " " * (30 - encoding.colwidth(t))
- if ui.verbose:
- if repo.tagtype(t) == 'local':
- tagtype = " local"
- else:
- tagtype = ""
- ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
-
-def tip(ui, repo, **opts):
- """show the tip revision
-
- The tip revision (usually just called the tip) is the changeset
- most recently added to the repository (and therefore the most
- recently changed head).
-
- If you have just made a commit, that commit will be the tip. If
- you have just pulled changes from another repository, the tip of
- that repository becomes the current tip. The "tip" tag is special
- and cannot be renamed or assigned to a different changeset.
- """
- cmdutil.show_changeset(ui, repo, opts).show(repo[len(repo) - 1])
-
-def unbundle(ui, repo, fname1, *fnames, **opts):
- """apply one or more changegroup files
-
- Apply one or more compressed changegroup files generated by the
- bundle command.
- """
- fnames = (fname1,) + fnames
-
- lock = repo.lock()
- try:
- for fname in fnames:
- f = url.open(ui, fname)
- gen = changegroup.readbundle(f, fname)
- modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
- finally:
- lock.release()
-
- return postincoming(ui, repo, modheads, opts.get('update'), None)
-
-def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
- """update working directory
-
- Update the repository's working directory to the specified
- revision, or the tip of the current branch if none is specified.
- Use null as the revision to remove the working copy (like 'hg
- clone -U').
-
- When the working directory contains no uncommitted changes, it
- will be replaced by the state of the requested revision from the
- repository. When the requested revision is on a different branch,
- the working directory will additionally be switched to that
- branch.
-
- When there are uncommitted changes, use option -C/--clean to
- discard them, forcibly replacing the state of the working
- directory with the requested revision. Alternately, use -c/--check
- to abort.
-
- When there are uncommitted changes and option -C/--clean is not
- used, and the parent revision and requested revision are on the
- same branch, and one of them is an ancestor of the other, then the
- new working directory will contain the requested revision merged
- with the uncommitted changes. Otherwise, the update will fail with
- a suggestion to use 'merge' or 'update -C' instead.
-
- If you want to update just one file to an older revision, use
- revert.
-
- See 'hg help dates' for a list of formats valid for -d/--date.
- """
- if rev and node:
- raise util.Abort(_("please specify just one revision"))
-
- if not rev:
- rev = node
-
- if not clean and check:
- # we could use dirty() but we can ignore merge and branch trivia
- c = repo[None]
- if c.modified() or c.added() or c.removed():
- raise util.Abort(_("uncommitted local changes"))
-
- if date:
- if rev:
- raise util.Abort(_("you can't specify a revision and a date"))
- rev = cmdutil.finddate(ui, repo, date)
-
- if clean or check:
- return hg.clean(repo, rev)
- else:
- return hg.update(repo, rev)
-
-def verify(ui, repo):
- """verify the integrity of the repository
-
- Verify the integrity of the current repository.
-
- This will perform an extensive check of the repository's
- integrity, validating the hashes and checksums of each entry in
- the changelog, manifest, and tracked files, as well as the
- integrity of their crosslinks and indices.
- """
- return hg.verify(repo)
-
-def version_(ui):
- """output version and copyright information"""
- ui.write(_("Mercurial Distributed SCM (version %s)\n")
- % util.version())
- ui.status(_(
- "\nCopyright (C) 2005-2009 Matt Mackall <mpm@selenic.com> and others\n"
- "This is free software; see the source for copying conditions. "
- "There is NO\nwarranty; "
- "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
- ))
-
-# Command options and aliases are listed here, alphabetically
-
-globalopts = [
- ('R', 'repository', '',
- _('repository root directory or symbolic path name')),
- ('', 'cwd', '', _('change working directory')),
- ('y', 'noninteractive', None,
- _('do not prompt, assume \'yes\' for any required answers')),
- ('q', 'quiet', None, _('suppress output')),
- ('v', 'verbose', None, _('enable additional output')),
- ('', 'config', [], _('set/override config option')),
- ('', 'debug', None, _('enable debugging output')),
- ('', 'debugger', None, _('start debugger')),
- ('', 'encoding', encoding.encoding, _('set the charset encoding')),
- ('', 'encodingmode', encoding.encodingmode,
- _('set the charset encoding mode')),
- ('', 'traceback', None, _('print traceback on exception')),
- ('', 'time', None, _('time how long the command takes')),
- ('', 'profile', None, _('print command execution profile')),
- ('', 'version', None, _('output version information and exit')),
- ('h', 'help', None, _('display help and exit')),
-]
-
-dryrunopts = [('n', 'dry-run', None,
- _('do not perform actions, just print output'))]
-
-remoteopts = [
- ('e', 'ssh', '', _('specify ssh command to use')),
- ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
-]
-
-walkopts = [
- ('I', 'include', [], _('include names matching the given patterns')),
- ('X', 'exclude', [], _('exclude names matching the given patterns')),
-]
-
-commitopts = [
- ('m', 'message', '', _('use <text> as commit message')),
- ('l', 'logfile', '', _('read commit message from <file>')),
-]
-
-commitopts2 = [
- ('d', 'date', '', _('record datecode as commit date')),
- ('u', 'user', '', _('record the specified user as committer')),
-]
-
-templateopts = [
- ('', 'style', '', _('display using template map file')),
- ('', 'template', '', _('display with template')),
-]
-
-logopts = [
- ('p', 'patch', None, _('show patch')),
- ('g', 'git', None, _('use git extended diff format')),
- ('l', 'limit', '', _('limit number of changes displayed')),
- ('M', 'no-merges', None, _('do not show merges')),
-] + templateopts
-
-diffopts = [
- ('a', 'text', None, _('treat all files as text')),
- ('g', 'git', None, _('use git extended diff format')),
- ('', 'nodates', None, _("don't include dates in diff headers"))
-]
-
-diffopts2 = [
- ('p', 'show-function', None, _('show which function each change is in')),
- ('w', 'ignore-all-space', None,
- _('ignore white space when comparing lines')),
- ('b', 'ignore-space-change', None,
- _('ignore changes in the amount of white space')),
- ('B', 'ignore-blank-lines', None,
- _('ignore changes whose lines are all blank')),
- ('U', 'unified', '', _('number of lines of context to show'))
-]
-
-similarityopts = [
- ('s', 'similarity', '',
- _('guess renamed files by similarity (0<=s<=100)'))
-]
-
-table = {
- "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
- "addremove":
- (addremove, similarityopts + walkopts + dryrunopts,
- _('[OPTION]... [FILE]...')),
- "^annotate|blame":
- (annotate,
- [('r', 'rev', '', _('annotate the specified revision')),
- ('f', 'follow', None, _('follow file copies and renames')),
- ('a', 'text', None, _('treat all files as text')),
- ('u', 'user', None, _('list the author (long with -v)')),
- ('d', 'date', None, _('list the date (short with -q)')),
- ('n', 'number', None, _('list the revision number (default)')),
- ('c', 'changeset', None, _('list the changeset')),
- ('l', 'line-number', None,
- _('show line number at the first appearance'))
- ] + walkopts,
- _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
- "archive":
- (archive,
- [('', 'no-decode', None, _('do not pass files through decoders')),
- ('p', 'prefix', '', _('directory prefix for files in archive')),
- ('r', 'rev', '', _('revision to distribute')),
- ('t', 'type', '', _('type of distribution to create')),
- ] + walkopts,
- _('[OPTION]... DEST')),
- "backout":
- (backout,
- [('', 'merge', None,
- _('merge with old dirstate parent after backout')),
- ('', 'parent', '', _('parent to choose when backing out merge')),
- ('r', 'rev', '', _('revision to backout')),
- ] + walkopts + commitopts + commitopts2,
- _('[OPTION]... [-r] REV')),
- "bisect":
- (bisect,
- [('r', 'reset', False, _('reset bisect state')),
- ('g', 'good', False, _('mark changeset good')),
- ('b', 'bad', False, _('mark changeset bad')),
- ('s', 'skip', False, _('skip testing changeset')),
- ('c', 'command', '', _('use command to check changeset state')),
- ('U', 'noupdate', False, _('do not update to target'))],
- _("[-gbsr] [-c CMD] [REV]")),
- "branch":
- (branch,
- [('f', 'force', None,
- _('set branch name even if it shadows an existing branch')),
- ('C', 'clean', None, _('reset branch name to parent branch name'))],
- _('[-fC] [NAME]')),
- "branches":
- (branches,
- [('a', 'active', False,
- _('show only branches that have unmerged heads')),
- ('c', 'closed', False,
- _('show normal and closed branches'))],
- _('[-a]')),
- "bundle":
- (bundle,
- [('f', 'force', None,
- _('run even when remote repository is unrelated')),
- ('r', 'rev', [],
- _('a changeset up to which you would like to bundle')),
- ('', 'base', [],
- _('a base changeset to specify instead of a destination')),
- ('a', 'all', None, _('bundle all changesets in the repository')),
- ('t', 'type', 'bzip2', _('bundle compression type to use')),
- ] + remoteopts,
- _('[-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
- "cat":
- (cat,
- [('o', 'output', '', _('print output to file with formatted name')),
- ('r', 'rev', '', _('print the given revision')),
- ('', 'decode', None, _('apply any matching decode filter')),
- ] + walkopts,
- _('[OPTION]... FILE...')),
- "^clone":
- (clone,
- [('U', 'noupdate', None,
- _('the clone will only contain a repository (no working copy)')),
- ('r', 'rev', [],
- _('a changeset you would like to have after cloning')),
- ('', 'pull', None, _('use pull protocol to copy metadata')),
- ('', 'uncompressed', None,
- _('use uncompressed transfer (fast over LAN)')),
- ] + remoteopts,
- _('[OPTION]... SOURCE [DEST]')),
- "^commit|ci":
- (commit,
- [('A', 'addremove', None,
- _('mark new/missing files as added/removed before committing')),
- ('', 'close-branch', None,
- _('mark a branch as closed, hiding it from the branch list')),
- ] + walkopts + commitopts + commitopts2,
- _('[OPTION]... [FILE]...')),
- "copy|cp":
- (copy,
- [('A', 'after', None, _('record a copy that has already occurred')),
- ('f', 'force', None,
- _('forcibly copy over an existing managed file')),
- ] + walkopts + dryrunopts,
- _('[OPTION]... [SOURCE]... DEST')),
- "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
- "debugcheckstate": (debugcheckstate, []),
- "debugcommands": (debugcommands, [], _('[COMMAND]')),
- "debugcomplete":
- (debugcomplete,
- [('o', 'options', None, _('show the command options'))],
- _('[-o] CMD')),
- "debugdate":
- (debugdate,
- [('e', 'extended', None, _('try extended date formats'))],
- _('[-e] DATE [RANGE]')),
- "debugdata": (debugdata, [], _('FILE REV')),
- "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
- "debugindex": (debugindex, [], _('FILE')),
- "debugindexdot": (debugindexdot, [], _('FILE')),
- "debuginstall": (debuginstall, []),
- "debugrebuildstate":
- (debugrebuildstate,
- [('r', 'rev', '', _('revision to rebuild to'))],
- _('[-r REV] [REV]')),
- "debugrename":
- (debugrename,
- [('r', 'rev', '', _('revision to debug'))],
- _('[-r REV] FILE')),
- "debugsetparents":
- (debugsetparents, [], _('REV1 [REV2]')),
- "debugstate":
- (debugstate,
- [('', 'nodates', None, _('do not display the saved mtime'))],
- _('[OPTION]...')),
- "debugsub":
- (debugsub,
- [('r', 'rev', '', _('revision to check'))],
- _('[-r REV] [REV]')),
- "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
- "^diff":
- (diff,
- [('r', 'rev', [], _('revision')),
- ('c', 'change', '', _('change made by revision'))
- ] + diffopts + diffopts2 + walkopts,
- _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
- "^export":
- (export,
- [('o', 'output', '', _('print output to file with formatted name')),
- ('', 'switch-parent', None, _('diff against the second parent'))
- ] + diffopts,
- _('[OPTION]... [-o OUTFILESPEC] REV...')),
- "^forget":
- (forget,
- [] + walkopts,
- _('[OPTION]... FILE...')),
- "grep":
- (grep,
- [('0', 'print0', None, _('end fields with NUL')),
- ('', 'all', None, _('print all revisions that match')),
- ('f', 'follow', None,
- _('follow changeset history, or file history across copies and renames')),
- ('i', 'ignore-case', None, _('ignore case when matching')),
- ('l', 'files-with-matches', None,
- _('print only filenames and revisions that match')),
- ('n', 'line-number', None, _('print matching line numbers')),
- ('r', 'rev', [], _('search in given revision range')),
- ('u', 'user', None, _('list the author (long with -v)')),
- ('d', 'date', None, _('list the date (short with -q)')),
- ] + walkopts,
- _('[OPTION]... PATTERN [FILE]...')),
- "heads":
- (heads,
- [('r', 'rev', '', _('show only heads which are descendants of REV')),
- ('a', 'active', False,
- _('show only the active branch heads from open branches')),
- ('c', 'closed', False,
- _('show normal and closed branch heads')),
- ] + templateopts,
- _('[-r STARTREV] [REV]...')),
- "help": (help_, [], _('[TOPIC]')),
- "identify|id":
- (identify,
- [('r', 'rev', '', _('identify the specified revision')),
- ('n', 'num', None, _('show local revision number')),
- ('i', 'id', None, _('show global revision id')),
- ('b', 'branch', None, _('show branch')),
- ('t', 'tags', None, _('show tags'))],
- _('[-nibt] [-r REV] [SOURCE]')),
- "import|patch":
- (import_,
- [('p', 'strip', 1,
- _('directory strip option for patch. This has the same '
- 'meaning as the corresponding patch option')),
- ('b', 'base', '', _('base path')),
- ('f', 'force', None,
- _('skip check for outstanding uncommitted changes')),
- ('', 'no-commit', None, _("don't commit, just update the working directory")),
- ('', 'exact', None,
- _('apply patch to the nodes from which it was generated')),
- ('', 'import-branch', None,
- _('use any branch information in patch (implied by --exact)'))] +
- commitopts + commitopts2 + similarityopts,
- _('[OPTION]... PATCH...')),
- "incoming|in":
- (incoming,
- [('f', 'force', None,
- _('run even when remote repository is unrelated')),
- ('n', 'newest-first', None, _('show newest record first')),
- ('', 'bundle', '', _('file to store the bundles into')),
- ('r', 'rev', [],
- _('a specific revision up to which you would like to pull')),
- ] + logopts + remoteopts,
- _('[-p] [-n] [-M] [-f] [-r REV]...'
- ' [--bundle FILENAME] [SOURCE]')),
- "^init":
- (init,
- remoteopts,
- _('[-e CMD] [--remotecmd CMD] [DEST]')),
- "locate":
- (locate,
- [('r', 'rev', '', _('search the repository as it stood at REV')),
- ('0', 'print0', None,
- _('end filenames with NUL, for use with xargs')),
- ('f', 'fullpath', None,
- _('print complete paths from the filesystem root')),
- ] + walkopts,
- _('[OPTION]... [PATTERN]...')),
- "^log|history":
- (log,
- [('f', 'follow', None,
- _('follow changeset history, or file history across copies and renames')),
- ('', 'follow-first', None,
- _('only follow the first parent of merge changesets')),
- ('d', 'date', '', _('show revisions matching date spec')),
- ('C', 'copies', None, _('show copied files')),
- ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
- ('r', 'rev', [], _('show the specified revision or range')),
- ('', 'removed', None, _('include revisions where files were removed')),
- ('m', 'only-merges', None, _('show only merges')),
- ('u', 'user', [], _('revisions committed by user')),
- ('b', 'only-branch', [],
- _('show only changesets within the given named branch')),
- ('P', 'prune', [], _('do not display revision or any of its ancestors')),
- ] + logopts + walkopts,
- _('[OPTION]... [FILE]')),
- "manifest":
- (manifest,
- [('r', 'rev', '', _('revision to display'))],
- _('[-r REV]')),
- "^merge":
- (merge,
- [('f', 'force', None, _('force a merge with outstanding changes')),
- ('r', 'rev', '', _('revision to merge')),
- ('P', 'preview', None,
- _('review revisions to merge (no merge is performed)'))],
- _('[-f] [[-r] REV]')),
- "outgoing|out":
- (outgoing,
- [('f', 'force', None,
- _('run even when remote repository is unrelated')),
- ('r', 'rev', [],
- _('a specific revision up to which you would like to push')),
- ('n', 'newest-first', None, _('show newest record first')),
- ] + logopts + remoteopts,
- _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
- "^parents":
- (parents,
- [('r', 'rev', '', _('show parents from the specified revision')),
- ] + templateopts,
- _('[-r REV] [FILE]')),
- "paths": (paths, [], _('[NAME]')),
- "^pull":
- (pull,
- [('u', 'update', None,
- _('update to new tip if changesets were pulled')),
- ('f', 'force', None,
- _('run even when remote repository is unrelated')),
- ('r', 'rev', [],
- _('a specific revision up to which you would like to pull')),
- ] + remoteopts,
- _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
- "^push":
- (push,
- [('f', 'force', None, _('force push')),
- ('r', 'rev', [],
- _('a specific revision up to which you would like to push')),
- ] + remoteopts,
- _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
- "recover": (recover, []),
- "^remove|rm":
- (remove,
- [('A', 'after', None, _('record delete for missing files')),
- ('f', 'force', None,
- _('remove (and delete) file even if added or modified')),
- ] + walkopts,
- _('[OPTION]... FILE...')),
- "rename|mv":
- (rename,
- [('A', 'after', None, _('record a rename that has already occurred')),
- ('f', 'force', None,
- _('forcibly copy over an existing managed file')),
- ] + walkopts + dryrunopts,
- _('[OPTION]... SOURCE... DEST')),
- "resolve":
- (resolve,
- [('a', 'all', None, _('remerge all unresolved files')),
- ('l', 'list', None, _('list state of files needing merge')),
- ('m', 'mark', None, _('mark files as resolved')),
- ('u', 'unmark', None, _('unmark files as resolved'))]
- + walkopts,
- _('[OPTION]... [FILE]...')),
- "revert":
- (revert,
- [('a', 'all', None, _('revert all changes when no arguments given')),
- ('d', 'date', '', _('tipmost revision matching date')),
- ('r', 'rev', '', _('revision to revert to')),
- ('', 'no-backup', None, _('do not save backup copies of files')),
- ] + walkopts + dryrunopts,
- _('[OPTION]... [-r REV] [NAME]...')),
- "rollback": (rollback, []),
- "root": (root, []),
- "^serve":
- (serve,
- [('A', 'accesslog', '', _('name of access log file to write to')),
- ('d', 'daemon', None, _('run server in background')),
- ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
- ('E', 'errorlog', '', _('name of error log file to write to')),
- ('p', 'port', 0, _('port to listen on (default: 8000)')),
- ('a', 'address', '', _('address to listen on (default: all interfaces)')),
- ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
- ('n', 'name', '',
- _('name to show in web pages (default: working directory)')),
- ('', 'webdir-conf', '', _('name of the webdir config file'
- ' (serve more than one repository)')),
- ('', 'pid-file', '', _('name of file to write process ID to')),
- ('', 'stdio', None, _('for remote clients')),
- ('t', 'templates', '', _('web templates to use')),
- ('', 'style', '', _('template style to use')),
- ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
- ('', 'certificate', '', _('SSL certificate file'))],
- _('[OPTION]...')),
- "showconfig|debugconfig":
- (showconfig,
- [('u', 'untrusted', None, _('show untrusted configuration options'))],
- _('[-u] [NAME]...')),
- "^status|st":
- (status,
- [('A', 'all', None, _('show status of all files')),
- ('m', 'modified', None, _('show only modified files')),
- ('a', 'added', None, _('show only added files')),
- ('r', 'removed', None, _('show only removed files')),
- ('d', 'deleted', None, _('show only deleted (but tracked) files')),
- ('c', 'clean', None, _('show only files without changes')),
- ('u', 'unknown', None, _('show only unknown (not tracked) files')),
- ('i', 'ignored', None, _('show only ignored files')),
- ('n', 'no-status', None, _('hide status prefix')),
- ('C', 'copies', None, _('show source of copied files')),
- ('0', 'print0', None,
- _('end filenames with NUL, for use with xargs')),
- ('', 'rev', [], _('show difference from revision')),
- ] + walkopts,
- _('[OPTION]... [FILE]...')),
- "tag":
- (tag,
- [('f', 'force', None, _('replace existing tag')),
- ('l', 'local', None, _('make the tag local')),
- ('r', 'rev', '', _('revision to tag')),
- ('', 'remove', None, _('remove a tag')),
- # -l/--local is already there, commitopts cannot be used
- ('m', 'message', '', _('use <text> as commit message')),
- ] + commitopts2,
- _('[-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
- "tags": (tags, []),
- "tip":
- (tip,
- [('p', 'patch', None, _('show patch')),
- ('g', 'git', None, _('use git extended diff format')),
- ] + templateopts,
- _('[-p]')),
- "unbundle":
- (unbundle,
- [('u', 'update', None,
- _('update to new tip if changesets were unbundled'))],
- _('[-u] FILE...')),
- "^update|up|checkout|co":
- (update,
- [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
- ('c', 'check', None, _('check for uncommitted changes')),
- ('d', 'date', '', _('tipmost revision matching date')),
- ('r', 'rev', '', _('revision'))],
- _('[-C] [-d DATE] [[-r] REV]')),
- "verify": (verify, []),
- "version": (version_, []),
-}
-
-norepo = ("clone init version help debugcommands debugcomplete debugdata"
- " debugindex debugindexdot debugdate debuginstall debugfsinfo")
-optionalrepo = ("identify paths serve showconfig debugancestor")
diff --git a/sys/lib/python/mercurial/config.py b/sys/lib/python/mercurial/config.py
deleted file mode 100644
index 08a8c071b..000000000
--- a/sys/lib/python/mercurial/config.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# config.py - configuration parsing for Mercurial
-#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import error
-import re, os
-
-class sortdict(dict):
- 'a simple sorted dictionary'
- def __init__(self, data=None):
- self._list = []
- if data:
- self.update(data)
- def copy(self):
- return sortdict(self)
- def __setitem__(self, key, val):
- if key in self:
- self._list.remove(key)
- self._list.append(key)
- dict.__setitem__(self, key, val)
- def __iter__(self):
- return self._list.__iter__()
- def update(self, src):
- for k in src:
- self[k] = src[k]
- def items(self):
- return [(k, self[k]) for k in self._list]
- def __delitem__(self, key):
- dict.__delitem__(self, key)
- self._list.remove(key)
-
-class config(object):
- def __init__(self, data=None):
- self._data = {}
- self._source = {}
- if data:
- for k in data._data:
- self._data[k] = data[k].copy()
- self._source = data._source.copy()
- def copy(self):
- return config(self)
- def __contains__(self, section):
- return section in self._data
- def __getitem__(self, section):
- return self._data.get(section, {})
- def __iter__(self):
- for d in self.sections():
- yield d
- def update(self, src):
- for s in src:
- if s not in self:
- self._data[s] = sortdict()
- self._data[s].update(src._data[s])
- self._source.update(src._source)
- def get(self, section, item, default=None):
- return self._data.get(section, {}).get(item, default)
- def source(self, section, item):
- return self._source.get((section, item), "")
- def sections(self):
- return sorted(self._data.keys())
- def items(self, section):
- return self._data.get(section, {}).items()
- def set(self, section, item, value, source=""):
- if section not in self:
- self._data[section] = sortdict()
- self._data[section][item] = value
- self._source[(section, item)] = source
-
- def parse(self, src, data, sections=None, remap=None, include=None):
- sectionre = re.compile(r'\[([^\[]+)\]')
- itemre = re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
- contre = re.compile(r'\s+(\S.*\S)')
- emptyre = re.compile(r'(;|#|\s*$)')
- unsetre = re.compile(r'%unset\s+(\S+)')
- includere = re.compile(r'%include\s+(\S.*\S)')
- section = ""
- item = None
- line = 0
- cont = False
-
- for l in data.splitlines(True):
- line += 1
- if cont:
- m = contre.match(l)
- if m:
- if sections and section not in sections:
- continue
- v = self.get(section, item) + "\n" + m.group(1)
- self.set(section, item, v, "%s:%d" % (src, line))
- continue
- item = None
- m = includere.match(l)
- if m:
- inc = m.group(1)
- base = os.path.dirname(src)
- inc = os.path.normpath(os.path.join(base, inc))
- if include:
- include(inc, remap=remap, sections=sections)
- continue
- if emptyre.match(l):
- continue
- m = sectionre.match(l)
- if m:
- section = m.group(1)
- if remap:
- section = remap.get(section, section)
- if section not in self:
- self._data[section] = sortdict()
- continue
- m = itemre.match(l)
- if m:
- item = m.group(1)
- cont = True
- if sections and section not in sections:
- continue
- self.set(section, item, m.group(2), "%s:%d" % (src, line))
- continue
- m = unsetre.match(l)
- if m:
- name = m.group(1)
- if sections and section not in sections:
- continue
- if self.get(section, name) != None:
- del self._data[section][name]
- continue
-
- raise error.ConfigError(_("config error at %s:%d: '%s'")
- % (src, line, l.rstrip()))
-
- def read(self, path, fp=None, sections=None, remap=None):
- if not fp:
- fp = open(path)
- self.parse(path, fp.read(), sections, remap, self.read)
diff --git a/sys/lib/python/mercurial/context.py b/sys/lib/python/mercurial/context.py
deleted file mode 100644
index 8ba3aee10..000000000
--- a/sys/lib/python/mercurial/context.py
+++ /dev/null
@@ -1,818 +0,0 @@
-# context.py - changeset and file context objects for mercurial
-#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import nullid, nullrev, short, hex
-from i18n import _
-import ancestor, bdiff, error, util, subrepo
-import os, errno
-
-propertycache = util.propertycache
-
-class changectx(object):
- """A changecontext object makes access to data related to a particular
- changeset convenient."""
- def __init__(self, repo, changeid=''):
- """changeid is a revision number, node, or tag"""
- if changeid == '':
- changeid = '.'
- self._repo = repo
- if isinstance(changeid, (long, int)):
- self._rev = changeid
- self._node = self._repo.changelog.node(changeid)
- else:
- self._node = self._repo.lookup(changeid)
- self._rev = self._repo.changelog.rev(self._node)
-
- def __str__(self):
- return short(self.node())
-
- def __int__(self):
- return self.rev()
-
- def __repr__(self):
- return "<changectx %s>" % str(self)
-
- def __hash__(self):
- try:
- return hash(self._rev)
- except AttributeError:
- return id(self)
-
- def __eq__(self, other):
- try:
- return self._rev == other._rev
- except AttributeError:
- return False
-
- def __ne__(self, other):
- return not (self == other)
-
- def __nonzero__(self):
- return self._rev != nullrev
-
- @propertycache
- def _changeset(self):
- return self._repo.changelog.read(self.node())
-
- @propertycache
- def _manifest(self):
- return self._repo.manifest.read(self._changeset[0])
-
- @propertycache
- def _manifestdelta(self):
- return self._repo.manifest.readdelta(self._changeset[0])
-
- @propertycache
- def _parents(self):
- p = self._repo.changelog.parentrevs(self._rev)
- if p[1] == nullrev:
- p = p[:-1]
- return [changectx(self._repo, x) for x in p]
-
- @propertycache
- def substate(self):
- return subrepo.state(self)
-
- def __contains__(self, key):
- return key in self._manifest
-
- def __getitem__(self, key):
- return self.filectx(key)
-
- def __iter__(self):
- for f in sorted(self._manifest):
- yield f
-
- def changeset(self): return self._changeset
- def manifest(self): return self._manifest
- def manifestnode(self): return self._changeset[0]
-
- def rev(self): return self._rev
- def node(self): return self._node
- def hex(self): return hex(self._node)
- def user(self): return self._changeset[1]
- def date(self): return self._changeset[2]
- def files(self): return self._changeset[3]
- def description(self): return self._changeset[4]
- def branch(self): return self._changeset[5].get("branch")
- def extra(self): return self._changeset[5]
- def tags(self): return self._repo.nodetags(self._node)
-
- def parents(self):
- """return contexts for each parent changeset"""
- return self._parents
-
- def p1(self):
- return self._parents[0]
-
- def p2(self):
- if len(self._parents) == 2:
- return self._parents[1]
- return changectx(self._repo, -1)
-
- def children(self):
- """return contexts for each child changeset"""
- c = self._repo.changelog.children(self._node)
- return [changectx(self._repo, x) for x in c]
-
- def ancestors(self):
- for a in self._repo.changelog.ancestors(self._rev):
- yield changectx(self._repo, a)
-
- def descendants(self):
- for d in self._repo.changelog.descendants(self._rev):
- yield changectx(self._repo, d)
-
- def _fileinfo(self, path):
- if '_manifest' in self.__dict__:
- try:
- return self._manifest[path], self._manifest.flags(path)
- except KeyError:
- raise error.LookupError(self._node, path,
- _('not found in manifest'))
- if '_manifestdelta' in self.__dict__ or path in self.files():
- if path in self._manifestdelta:
- return self._manifestdelta[path], self._manifestdelta.flags(path)
- node, flag = self._repo.manifest.find(self._changeset[0], path)
- if not node:
- raise error.LookupError(self._node, path,
- _('not found in manifest'))
-
- return node, flag
-
- def filenode(self, path):
- return self._fileinfo(path)[0]
-
- def flags(self, path):
- try:
- return self._fileinfo(path)[1]
- except error.LookupError:
- return ''
-
- def filectx(self, path, fileid=None, filelog=None):
- """get a file context from this changeset"""
- if fileid is None:
- fileid = self.filenode(path)
- return filectx(self._repo, path, fileid=fileid,
- changectx=self, filelog=filelog)
-
- def ancestor(self, c2):
- """
- return the ancestor context of self and c2
- """
- n = self._repo.changelog.ancestor(self._node, c2._node)
- return changectx(self._repo, n)
-
- def walk(self, match):
- fset = set(match.files())
- # for dirstate.walk, files=['.'] means "walk the whole tree".
- # follow that here, too
- fset.discard('.')
- for fn in self:
- for ffn in fset:
- # match if the file is the exact name or a directory
- if ffn == fn or fn.startswith("%s/" % ffn):
- fset.remove(ffn)
- break
- if match(fn):
- yield fn
- for fn in sorted(fset):
- if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
- yield fn
-
- def sub(self, path):
- return subrepo.subrepo(self, path)
-
-class filectx(object):
- """A filecontext object makes access to data related to a particular
- filerevision convenient."""
- def __init__(self, repo, path, changeid=None, fileid=None,
- filelog=None, changectx=None):
- """changeid can be a changeset revision, node, or tag.
- fileid can be a file revision or node."""
- self._repo = repo
- self._path = path
-
- assert (changeid is not None
- or fileid is not None
- or changectx is not None), \
- ("bad args: changeid=%r, fileid=%r, changectx=%r"
- % (changeid, fileid, changectx))
-
- if filelog:
- self._filelog = filelog
-
- if changeid is not None:
- self._changeid = changeid
- if changectx is not None:
- self._changectx = changectx
- if fileid is not None:
- self._fileid = fileid
-
- @propertycache
- def _changectx(self):
- return changectx(self._repo, self._changeid)
-
- @propertycache
- def _filelog(self):
- return self._repo.file(self._path)
-
- @propertycache
- def _changeid(self):
- if '_changectx' in self.__dict__:
- return self._changectx.rev()
- else:
- return self._filelog.linkrev(self._filerev)
-
- @propertycache
- def _filenode(self):
- if '_fileid' in self.__dict__:
- return self._filelog.lookup(self._fileid)
- else:
- return self._changectx.filenode(self._path)
-
- @propertycache
- def _filerev(self):
- return self._filelog.rev(self._filenode)
-
- @propertycache
- def _repopath(self):
- return self._path
-
- def __nonzero__(self):
- try:
- self._filenode
- return True
- except error.LookupError:
- # file is missing
- return False
-
- def __str__(self):
- return "%s@%s" % (self.path(), short(self.node()))
-
- def __repr__(self):
- return "<filectx %s>" % str(self)
-
- def __hash__(self):
- try:
- return hash((self._path, self._fileid))
- except AttributeError:
- return id(self)
-
- def __eq__(self, other):
- try:
- return (self._path == other._path
- and self._fileid == other._fileid)
- except AttributeError:
- return False
-
- def __ne__(self, other):
- return not (self == other)
-
- def filectx(self, fileid):
- '''opens an arbitrary revision of the file without
- opening a new filelog'''
- return filectx(self._repo, self._path, fileid=fileid,
- filelog=self._filelog)
-
- def filerev(self): return self._filerev
- def filenode(self): return self._filenode
- def flags(self): return self._changectx.flags(self._path)
- def filelog(self): return self._filelog
-
- def rev(self):
- if '_changectx' in self.__dict__:
- return self._changectx.rev()
- if '_changeid' in self.__dict__:
- return self._changectx.rev()
- return self._filelog.linkrev(self._filerev)
-
- def linkrev(self): return self._filelog.linkrev(self._filerev)
- def node(self): return self._changectx.node()
- def hex(self): return hex(self.node())
- def user(self): return self._changectx.user()
- def date(self): return self._changectx.date()
- def files(self): return self._changectx.files()
- def description(self): return self._changectx.description()
- def branch(self): return self._changectx.branch()
- def manifest(self): return self._changectx.manifest()
- def changectx(self): return self._changectx
-
- def data(self): return self._filelog.read(self._filenode)
- def path(self): return self._path
- def size(self): return self._filelog.size(self._filerev)
-
- def cmp(self, text): return self._filelog.cmp(self._filenode, text)
-
- def renamed(self):
- """check if file was actually renamed in this changeset revision
-
- If rename logged in file revision, we report copy for changeset only
- if file revisions linkrev points back to the changeset in question
- or both changeset parents contain different file revisions.
- """
-
- renamed = self._filelog.renamed(self._filenode)
- if not renamed:
- return renamed
-
- if self.rev() == self.linkrev():
- return renamed
-
- name = self.path()
- fnode = self._filenode
- for p in self._changectx.parents():
- try:
- if fnode == p.filenode(name):
- return None
- except error.LookupError:
- pass
- return renamed
-
- def parents(self):
- p = self._path
- fl = self._filelog
- pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
-
- r = self._filelog.renamed(self._filenode)
- if r:
- pl[0] = (r[0], r[1], None)
-
- return [filectx(self._repo, p, fileid=n, filelog=l)
- for p,n,l in pl if n != nullid]
-
- def children(self):
- # hard for renames
- c = self._filelog.children(self._filenode)
- return [filectx(self._repo, self._path, fileid=x,
- filelog=self._filelog) for x in c]
-
- def annotate(self, follow=False, linenumber=None):
- '''returns a list of tuples of (ctx, line) for each line
- in the file, where ctx is the filectx of the node where
- that line was last changed.
- This returns tuples of ((ctx, linenumber), line) for each line,
- if "linenumber" parameter is NOT "None".
- In such tuples, linenumber means one at the first appearance
- in the managed file.
- To reduce annotation cost,
- this returns fixed value(False is used) as linenumber,
- if "linenumber" parameter is "False".'''
-
- def decorate_compat(text, rev):
- return ([rev] * len(text.splitlines()), text)
-
- def without_linenumber(text, rev):
- return ([(rev, False)] * len(text.splitlines()), text)
-
- def with_linenumber(text, rev):
- size = len(text.splitlines())
- return ([(rev, i) for i in xrange(1, size + 1)], text)
-
- decorate = (((linenumber is None) and decorate_compat) or
- (linenumber and with_linenumber) or
- without_linenumber)
-
- def pair(parent, child):
- for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
- child[0][b1:b2] = parent[0][a1:a2]
- return child
-
- getlog = util.lrucachefunc(lambda x: self._repo.file(x))
- def getctx(path, fileid):
- log = path == self._path and self._filelog or getlog(path)
- return filectx(self._repo, path, fileid=fileid, filelog=log)
- getctx = util.lrucachefunc(getctx)
-
- def parents(f):
- # we want to reuse filectx objects as much as possible
- p = f._path
- if f._filerev is None: # working dir
- pl = [(n.path(), n.filerev()) for n in f.parents()]
- else:
- pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
-
- if follow:
- r = f.renamed()
- if r:
- pl[0] = (r[0], getlog(r[0]).rev(r[1]))
-
- return [getctx(p, n) for p, n in pl if n != nullrev]
-
- # use linkrev to find the first changeset where self appeared
- if self.rev() != self.linkrev():
- base = self.filectx(self.filerev())
- else:
- base = self
-
- # find all ancestors
- needed = {base: 1}
- visit = [base]
- files = [base._path]
- while visit:
- f = visit.pop(0)
- for p in parents(f):
- if p not in needed:
- needed[p] = 1
- visit.append(p)
- if p._path not in files:
- files.append(p._path)
- else:
- # count how many times we'll use this
- needed[p] += 1
-
- # sort by revision (per file) which is a topological order
- visit = []
- for f in files:
- fn = [(n.rev(), n) for n in needed if n._path == f]
- visit.extend(fn)
-
- hist = {}
- for r, f in sorted(visit):
- curr = decorate(f.data(), f)
- for p in parents(f):
- if p != nullid:
- curr = pair(hist[p], curr)
- # trim the history of unneeded revs
- needed[p] -= 1
- if not needed[p]:
- del hist[p]
- hist[f] = curr
-
- return zip(hist[f][0], hist[f][1].splitlines(True))
-
- def ancestor(self, fc2):
- """
- find the common ancestor file context, if any, of self, and fc2
- """
-
- acache = {}
-
- # prime the ancestor cache for the working directory
- for c in (self, fc2):
- if c._filerev is None:
- pl = [(n.path(), n.filenode()) for n in c.parents()]
- acache[(c._path, None)] = pl
-
- flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
- def parents(vertex):
- if vertex in acache:
- return acache[vertex]
- f, n = vertex
- if f not in flcache:
- flcache[f] = self._repo.file(f)
- fl = flcache[f]
- pl = [(f, p) for p in fl.parents(n) if p != nullid]
- re = fl.renamed(n)
- if re:
- pl.append(re)
- acache[vertex] = pl
- return pl
-
- a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
- v = ancestor.ancestor(a, b, parents)
- if v:
- f, n = v
- return filectx(self._repo, f, fileid=n, filelog=flcache[f])
-
- return None
-
-class workingctx(changectx):
- """A workingctx object makes access to data related to
- the current working directory convenient.
- parents - a pair of parent nodeids, or None to use the dirstate.
- date - any valid date string or (unixtime, offset), or None.
- user - username string, or None.
- extra - a dictionary of extra values, or None.
- changes - a list of file lists as returned by localrepo.status()
- or None to use the repository status.
- """
- def __init__(self, repo, parents=None, text="", user=None, date=None,
- extra=None, changes=None):
- self._repo = repo
- self._rev = None
- self._node = None
- self._text = text
- if date:
- self._date = util.parsedate(date)
- if user:
- self._user = user
- if parents:
- self._parents = [changectx(self._repo, p) for p in parents]
- if changes:
- self._status = list(changes)
-
- self._extra = {}
- if extra:
- self._extra = extra.copy()
- if 'branch' not in self._extra:
- branch = self._repo.dirstate.branch()
- try:
- branch = branch.decode('UTF-8').encode('UTF-8')
- except UnicodeDecodeError:
- raise util.Abort(_('branch name not in UTF-8!'))
- self._extra['branch'] = branch
- if self._extra['branch'] == '':
- self._extra['branch'] = 'default'
-
- def __str__(self):
- return str(self._parents[0]) + "+"
-
- def __nonzero__(self):
- return True
-
- def __contains__(self, key):
- return self._repo.dirstate[key] not in "?r"
-
- @propertycache
- def _manifest(self):
- """generate a manifest corresponding to the working directory"""
-
- man = self._parents[0].manifest().copy()
- copied = self._repo.dirstate.copies()
- cf = lambda x: man.flags(copied.get(x, x))
- ff = self._repo.dirstate.flagfunc(cf)
- modified, added, removed, deleted, unknown = self._status[:5]
- for i, l in (("a", added), ("m", modified), ("u", unknown)):
- for f in l:
- man[f] = man.get(copied.get(f, f), nullid) + i
- try:
- man.set(f, ff(f))
- except OSError:
- pass
-
- for f in deleted + removed:
- if f in man:
- del man[f]
-
- return man
-
- @propertycache
- def _status(self):
- return self._repo.status(unknown=True)
-
- @propertycache
- def _user(self):
- return self._repo.ui.username()
-
- @propertycache
- def _date(self):
- return util.makedate()
-
- @propertycache
- def _parents(self):
- p = self._repo.dirstate.parents()
- if p[1] == nullid:
- p = p[:-1]
- self._parents = [changectx(self._repo, x) for x in p]
- return self._parents
-
- def manifest(self): return self._manifest
-
- def user(self): return self._user or self._repo.ui.username()
- def date(self): return self._date
- def description(self): return self._text
- def files(self):
- return sorted(self._status[0] + self._status[1] + self._status[2])
-
- def modified(self): return self._status[0]
- def added(self): return self._status[1]
- def removed(self): return self._status[2]
- def deleted(self): return self._status[3]
- def unknown(self): return self._status[4]
- def clean(self): return self._status[5]
- def branch(self): return self._extra['branch']
- def extra(self): return self._extra
-
- def tags(self):
- t = []
- [t.extend(p.tags()) for p in self.parents()]
- return t
-
- def children(self):
- return []
-
- def flags(self, path):
- if '_manifest' in self.__dict__:
- try:
- return self._manifest.flags(path)
- except KeyError:
- return ''
-
- pnode = self._parents[0].changeset()[0]
- orig = self._repo.dirstate.copies().get(path, path)
- node, flag = self._repo.manifest.find(pnode, orig)
- try:
- ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
- return ff(path)
- except OSError:
- pass
-
- if not node or path in self.deleted() or path in self.removed():
- return ''
- return flag
-
- def filectx(self, path, filelog=None):
- """get a file context from the working directory"""
- return workingfilectx(self._repo, path, workingctx=self,
- filelog=filelog)
-
- def ancestor(self, c2):
- """return the ancestor context of self and c2"""
- return self._parents[0].ancestor(c2) # punt on two parents for now
-
- def walk(self, match):
- return sorted(self._repo.dirstate.walk(match, True, False))
-
- def dirty(self, missing=False):
- "check whether a working directory is modified"
-
- return (self.p2() or self.branch() != self.p1().branch() or
- self.modified() or self.added() or self.removed() or
- (missing and self.deleted()))
-
-class workingfilectx(filectx):
- """A workingfilectx object makes access to data related to a particular
- file in the working directory convenient."""
- def __init__(self, repo, path, filelog=None, workingctx=None):
- """changeid can be a changeset revision, node, or tag.
- fileid can be a file revision or node."""
- self._repo = repo
- self._path = path
- self._changeid = None
- self._filerev = self._filenode = None
-
- if filelog:
- self._filelog = filelog
- if workingctx:
- self._changectx = workingctx
-
- @propertycache
- def _changectx(self):
- return workingctx(self._repo)
-
- def __nonzero__(self):
- return True
-
- def __str__(self):
- return "%s@%s" % (self.path(), self._changectx)
-
- def data(self): return self._repo.wread(self._path)
- def renamed(self):
- rp = self._repo.dirstate.copied(self._path)
- if not rp:
- return None
- return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
-
- def parents(self):
- '''return parent filectxs, following copies if necessary'''
- def filenode(ctx, path):
- return ctx._manifest.get(path, nullid)
-
- path = self._path
- fl = self._filelog
- pcl = self._changectx._parents
- renamed = self.renamed()
-
- if renamed:
- pl = [renamed + (None,)]
- else:
- pl = [(path, filenode(pcl[0], path), fl)]
-
- for pc in pcl[1:]:
- pl.append((path, filenode(pc, path), fl))
-
- return [filectx(self._repo, p, fileid=n, filelog=l)
- for p,n,l in pl if n != nullid]
-
- def children(self):
- return []
-
- def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
- def date(self):
- t, tz = self._changectx.date()
- try:
- return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
- except OSError, err:
- if err.errno != errno.ENOENT: raise
- return (t, tz)
-
- def cmp(self, text): return self._repo.wread(self._path) == text
-
-class memctx(object):
- """Use memctx to perform in-memory commits via localrepo.commitctx().
-
- Revision information is supplied at initialization time while
- related files data and is made available through a callback
- mechanism. 'repo' is the current localrepo, 'parents' is a
- sequence of two parent revisions identifiers (pass None for every
- missing parent), 'text' is the commit message and 'files' lists
- names of files touched by the revision (normalized and relative to
- repository root).
-
- filectxfn(repo, memctx, path) is a callable receiving the
- repository, the current memctx object and the normalized path of
- requested file, relative to repository root. It is fired by the
- commit function for every file in 'files', but calls order is
- undefined. If the file is available in the revision being
- committed (updated or added), filectxfn returns a memfilectx
- object. If the file was removed, filectxfn raises an
- IOError. Moved files are represented by marking the source file
- removed and the new file added with copy information (see
- memfilectx).
-
- user receives the committer name and defaults to current
- repository username, date is the commit date in any format
- supported by util.parsedate() and defaults to current date, extra
- is a dictionary of metadata or is left empty.
- """
- def __init__(self, repo, parents, text, files, filectxfn, user=None,
- date=None, extra=None):
- self._repo = repo
- self._rev = None
- self._node = None
- self._text = text
- self._date = date and util.parsedate(date) or util.makedate()
- self._user = user
- parents = [(p or nullid) for p in parents]
- p1, p2 = parents
- self._parents = [changectx(self._repo, p) for p in (p1, p2)]
- files = sorted(set(files))
- self._status = [files, [], [], [], []]
- self._filectxfn = filectxfn
-
- self._extra = extra and extra.copy() or {}
- if 'branch' not in self._extra:
- self._extra['branch'] = 'default'
- elif self._extra.get('branch') == '':
- self._extra['branch'] = 'default'
-
- def __str__(self):
- return str(self._parents[0]) + "+"
-
- def __int__(self):
- return self._rev
-
- def __nonzero__(self):
- return True
-
- def __getitem__(self, key):
- return self.filectx(key)
-
- def p1(self): return self._parents[0]
- def p2(self): return self._parents[1]
-
- def user(self): return self._user or self._repo.ui.username()
- def date(self): return self._date
- def description(self): return self._text
- def files(self): return self.modified()
- def modified(self): return self._status[0]
- def added(self): return self._status[1]
- def removed(self): return self._status[2]
- def deleted(self): return self._status[3]
- def unknown(self): return self._status[4]
- def clean(self): return self._status[5]
- def branch(self): return self._extra['branch']
- def extra(self): return self._extra
- def flags(self, f): return self[f].flags()
-
- def parents(self):
- """return contexts for each parent changeset"""
- return self._parents
-
- def filectx(self, path, filelog=None):
- """get a file context from the working directory"""
- return self._filectxfn(self._repo, self, path)
-
-class memfilectx(object):
- """memfilectx represents an in-memory file to commit.
-
- See memctx for more details.
- """
- def __init__(self, path, data, islink, isexec, copied):
- """
- path is the normalized file path relative to repository root.
- data is the file content as a string.
- islink is True if the file is a symbolic link.
- isexec is True if the file is executable.
- copied is the source file path if current file was copied in the
- revision being committed, or None."""
- self._path = path
- self._data = data
- self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
- self._copied = None
- if copied:
- self._copied = (copied, nullid)
-
- def __nonzero__(self): return True
- def __str__(self): return "%s@%s" % (self.path(), self._changectx)
- def path(self): return self._path
- def data(self): return self._data
- def flags(self): return self._flags
- def isexec(self): return 'x' in self._flags
- def islink(self): return 'l' in self._flags
- def renamed(self): return self._copied
diff --git a/sys/lib/python/mercurial/copies.py b/sys/lib/python/mercurial/copies.py
deleted file mode 100644
index 63c80a3f6..000000000
--- a/sys/lib/python/mercurial/copies.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# copies.py - copy detection for Mercurial
-#
-# Copyright 2008 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import util
-import heapq
-
-def _nonoverlap(d1, d2, d3):
- "Return list of elements in d1 not in d2 or d3"
- return sorted([d for d in d1 if d not in d3 and d not in d2])
-
-def _dirname(f):
- s = f.rfind("/")
- if s == -1:
- return ""
- return f[:s]
-
-def _dirs(files):
- d = set()
- for f in files:
- f = _dirname(f)
- while f not in d:
- d.add(f)
- f = _dirname(f)
- return d
-
-def _findoldnames(fctx, limit):
- "find files that path was copied from, back to linkrev limit"
- old = {}
- seen = set()
- orig = fctx.path()
- visit = [(fctx, 0)]
- while visit:
- fc, depth = visit.pop()
- s = str(fc)
- if s in seen:
- continue
- seen.add(s)
- if fc.path() != orig and fc.path() not in old:
- old[fc.path()] = (depth, fc.path()) # remember depth
- if fc.rev() is not None and fc.rev() < limit:
- continue
- visit += [(p, depth - 1) for p in fc.parents()]
-
- # return old names sorted by depth
- return [o[1] for o in sorted(old.values())]
-
-def _findlimit(repo, a, b):
- "find the earliest revision that's an ancestor of a or b but not both"
- # basic idea:
- # - mark a and b with different sides
- # - if a parent's children are all on the same side, the parent is
- # on that side, otherwise it is on no side
- # - walk the graph in topological order with the help of a heap;
- # - add unseen parents to side map
- # - clear side of any parent that has children on different sides
- # - track number of interesting revs that might still be on a side
- # - track the lowest interesting rev seen
- # - quit when interesting revs is zero
-
- cl = repo.changelog
- working = len(cl) # pseudo rev for the working directory
- if a is None:
- a = working
- if b is None:
- b = working
-
- side = {a: -1, b: 1}
- visit = [-a, -b]
- heapq.heapify(visit)
- interesting = len(visit)
- limit = working
-
- while interesting:
- r = -heapq.heappop(visit)
- if r == working:
- parents = [cl.rev(p) for p in repo.dirstate.parents()]
- else:
- parents = cl.parentrevs(r)
- for p in parents:
- if p not in side:
- # first time we see p; add it to visit
- side[p] = side[r]
- if side[p]:
- interesting += 1
- heapq.heappush(visit, -p)
- elif side[p] and side[p] != side[r]:
- # p was interesting but now we know better
- side[p] = 0
- interesting -= 1
- if side[r]:
- limit = r # lowest rev visited
- interesting -= 1
- return limit
-
-def copies(repo, c1, c2, ca, checkdirs=False):
- """
- Find moves and copies between context c1 and c2
- """
- # avoid silly behavior for update from empty dir
- if not c1 or not c2 or c1 == c2:
- return {}, {}
-
- # avoid silly behavior for parent -> working dir
- if c2.node() is None and c1.node() == repo.dirstate.parents()[0]:
- return repo.dirstate.copies(), {}
-
- limit = _findlimit(repo, c1.rev(), c2.rev())
- m1 = c1.manifest()
- m2 = c2.manifest()
- ma = ca.manifest()
-
- def makectx(f, n):
- if len(n) != 20: # in a working context?
- if c1.rev() is None:
- return c1.filectx(f)
- return c2.filectx(f)
- return repo.filectx(f, fileid=n)
-
- ctx = util.lrucachefunc(makectx)
- copy = {}
- fullcopy = {}
- diverge = {}
-
- def checkcopies(f, m1, m2):
- '''check possible copies of f from m1 to m2'''
- c1 = ctx(f, m1[f])
- for of in _findoldnames(c1, limit):
- fullcopy[f] = of # remember for dir rename detection
- if of in m2: # original file not in other manifest?
- # if the original file is unchanged on the other branch,
- # no merge needed
- if m2[of] != ma.get(of):
- c2 = ctx(of, m2[of])
- ca = c1.ancestor(c2)
- # related and named changed on only one side?
- if ca and (ca.path() == f or ca.path() == c2.path()):
- if c1 != ca or c2 != ca: # merge needed?
- copy[f] = of
- elif of in ma:
- diverge.setdefault(of, []).append(f)
-
- repo.ui.debug(_(" searching for copies back to rev %d\n") % limit)
-
- u1 = _nonoverlap(m1, m2, ma)
- u2 = _nonoverlap(m2, m1, ma)
-
- if u1:
- repo.ui.debug(_(" unmatched files in local:\n %s\n")
- % "\n ".join(u1))
- if u2:
- repo.ui.debug(_(" unmatched files in other:\n %s\n")
- % "\n ".join(u2))
-
- for f in u1:
- checkcopies(f, m1, m2)
- for f in u2:
- checkcopies(f, m2, m1)
-
- diverge2 = set()
- for of, fl in diverge.items():
- if len(fl) == 1:
- del diverge[of] # not actually divergent
- else:
- diverge2.update(fl) # reverse map for below
-
- if fullcopy:
- repo.ui.debug(_(" all copies found (* = to merge, ! = divergent):\n"))
- for f in fullcopy:
- note = ""
- if f in copy: note += "*"
- if f in diverge2: note += "!"
- repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note))
- del diverge2
-
- if not fullcopy or not checkdirs:
- return copy, diverge
-
- repo.ui.debug(_(" checking for directory renames\n"))
-
- # generate a directory move map
- d1, d2 = _dirs(m1), _dirs(m2)
- invalid = set()
- dirmove = {}
-
- # examine each file copy for a potential directory move, which is
- # when all the files in a directory are moved to a new directory
- for dst, src in fullcopy.iteritems():
- dsrc, ddst = _dirname(src), _dirname(dst)
- if dsrc in invalid:
- # already seen to be uninteresting
- continue
- elif dsrc in d1 and ddst in d1:
- # directory wasn't entirely moved locally
- invalid.add(dsrc)
- elif dsrc in d2 and ddst in d2:
- # directory wasn't entirely moved remotely
- invalid.add(dsrc)
- elif dsrc in dirmove and dirmove[dsrc] != ddst:
- # files from the same directory moved to two different places
- invalid.add(dsrc)
- else:
- # looks good so far
- dirmove[dsrc + "/"] = ddst + "/"
-
- for i in invalid:
- if i in dirmove:
- del dirmove[i]
- del d1, d2, invalid
-
- if not dirmove:
- return copy, diverge
-
- for d in dirmove:
- repo.ui.debug(_(" dir %s -> %s\n") % (d, dirmove[d]))
-
- # check unaccounted nonoverlapping files against directory moves
- for f in u1 + u2:
- if f not in fullcopy:
- for d in dirmove:
- if f.startswith(d):
- # new file added in a directory that was moved, move it
- df = dirmove[d] + f[len(d):]
- if df not in copy:
- copy[f] = df
- repo.ui.debug(_(" file %s -> %s\n") % (f, copy[f]))
- break
-
- return copy, diverge
diff --git a/sys/lib/python/mercurial/demandimport.py b/sys/lib/python/mercurial/demandimport.py
deleted file mode 100644
index a620bb243..000000000
--- a/sys/lib/python/mercurial/demandimport.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# demandimport.py - global demand-loading of modules for Mercurial
-#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-'''
-demandimport - automatic demandloading of modules
-
-To enable this module, do:
-
- import demandimport; demandimport.enable()
-
-Imports of the following forms will be demand-loaded:
-
- import a, b.c
- import a.b as c
- from a import b,c # a will be loaded immediately
-
-These imports will not be delayed:
-
- from a import *
- b = __import__(a)
-'''
-
-import __builtin__
-_origimport = __import__
-
-class _demandmod(object):
- """module demand-loader and proxy"""
- def __init__(self, name, globals, locals):
- if '.' in name:
- head, rest = name.split('.', 1)
- after = [rest]
- else:
- head = name
- after = []
- object.__setattr__(self, "_data", (head, globals, locals, after))
- object.__setattr__(self, "_module", None)
- def _extend(self, name):
- """add to the list of submodules to load"""
- self._data[3].append(name)
- def _load(self):
- if not self._module:
- head, globals, locals, after = self._data
- mod = _origimport(head, globals, locals)
- # load submodules
- def subload(mod, p):
- h, t = p, None
- if '.' in p:
- h, t = p.split('.', 1)
- if not hasattr(mod, h):
- setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__))
- elif t:
- subload(getattr(mod, h), t)
-
- for x in after:
- subload(mod, x)
-
- # are we in the locals dictionary still?
- if locals and locals.get(head) == self:
- locals[head] = mod
- object.__setattr__(self, "_module", mod)
-
- def __repr__(self):
- if self._module:
- return "<proxied module '%s'>" % self._data[0]
- return "<unloaded module '%s'>" % self._data[0]
- def __call__(self, *args, **kwargs):
- raise TypeError("%s object is not callable" % repr(self))
- def __getattribute__(self, attr):
- if attr in ('_data', '_extend', '_load', '_module'):
- return object.__getattribute__(self, attr)
- self._load()
- return getattr(self._module, attr)
- def __setattr__(self, attr, val):
- self._load()
- setattr(self._module, attr, val)
-
-def _demandimport(name, globals=None, locals=None, fromlist=None, level=None):
- if not locals or name in ignore or fromlist == ('*',):
- # these cases we can't really delay
- if level is None:
- return _origimport(name, globals, locals, fromlist)
- else:
- return _origimport(name, globals, locals, fromlist, level)
- elif not fromlist:
- # import a [as b]
- if '.' in name: # a.b
- base, rest = name.split('.', 1)
- # email.__init__ loading email.mime
- if globals and globals.get('__name__', None) == base:
- return _origimport(name, globals, locals, fromlist)
- # if a is already demand-loaded, add b to its submodule list
- if base in locals:
- if isinstance(locals[base], _demandmod):
- locals[base]._extend(rest)
- return locals[base]
- return _demandmod(name, globals, locals)
- else:
- if level is not None:
- # from . import b,c,d or from .a import b,c,d
- return _origimport(name, globals, locals, fromlist, level)
- # from a import b,c,d
- mod = _origimport(name, globals, locals)
- # recurse down the module chain
- for comp in name.split('.')[1:]:
- if not hasattr(mod, comp):
- setattr(mod, comp, _demandmod(comp, mod.__dict__, mod.__dict__))
- mod = getattr(mod, comp)
- for x in fromlist:
- # set requested submodules for demand load
- if not(hasattr(mod, x)):
- setattr(mod, x, _demandmod(x, mod.__dict__, locals))
- return mod
-
-ignore = [
- '_hashlib',
- '_xmlplus',
- 'fcntl',
- 'win32com.gen_py',
- 'pythoncom',
- # imported by tarfile, not available under Windows
- 'pwd',
- 'grp',
- # imported by profile, itself imported by hotshot.stats,
- # not available under Windows
- 'resource',
- ]
-
-def enable():
- "enable global demand-loading of modules"
- __builtin__.__import__ = _demandimport
-
-def disable():
- "disable global demand-loading of modules"
- __builtin__.__import__ = _origimport
-
diff --git a/sys/lib/python/mercurial/diffhelpers.c b/sys/lib/python/mercurial/diffhelpers.c
deleted file mode 100644
index d9316ea4b..000000000
--- a/sys/lib/python/mercurial/diffhelpers.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * diffhelpers.c - helper routines for mpatch
- *
- * Copyright 2007 Chris Mason <chris.mason@oracle.com>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License v2, incorporated herein by reference.
- */
-
-#include <Python.h>
-#include <stdlib.h>
-#include <string.h>
-
-static char diffhelpers_doc[] = "Efficient diff parsing";
-static PyObject *diffhelpers_Error;
-
-
-/* fixup the last lines of a and b when the patch has no newline at eof */
-static void _fix_newline(PyObject *hunk, PyObject *a, PyObject *b)
-{
- int hunksz = PyList_Size(hunk);
- PyObject *s = PyList_GET_ITEM(hunk, hunksz-1);
- char *l = PyString_AS_STRING(s);
- int sz = PyString_GET_SIZE(s);
- int alen = PyList_Size(a);
- int blen = PyList_Size(b);
- char c = l[0];
-
- PyObject *hline = PyString_FromStringAndSize(l, sz-1);
- if (c == ' ' || c == '+') {
- PyObject *rline = PyString_FromStringAndSize(l+1, sz-2);
- PyList_SetItem(b, blen-1, rline);
- }
- if (c == ' ' || c == '-') {
- Py_INCREF(hline);
- PyList_SetItem(a, alen-1, hline);
- }
- PyList_SetItem(hunk, hunksz-1, hline);
-}
-
-/* python callable form of _fix_newline */
-static PyObject *
-fix_newline(PyObject *self, PyObject *args)
-{
- PyObject *hunk, *a, *b;
- if (!PyArg_ParseTuple(args, "OOO", &hunk, &a, &b))
- return NULL;
- _fix_newline(hunk, a, b);
- return Py_BuildValue("l", 0);
-}
-
-/*
- * read lines from fp into the hunk. The hunk is parsed into two arrays
- * a and b. a gets the old state of the text, b gets the new state
- * The control char from the hunk is saved when inserting into a, but not b
- * (for performance while deleting files)
- */
-static PyObject *
-addlines(PyObject *self, PyObject *args)
-{
-
- PyObject *fp, *hunk, *a, *b, *x;
- int i;
- int lena, lenb;
- int num;
- int todoa, todob;
- char *s, c;
- PyObject *l;
- if (!PyArg_ParseTuple(args, "OOiiOO", &fp, &hunk, &lena, &lenb, &a, &b))
- return NULL;
-
- while(1) {
- todoa = lena - PyList_Size(a);
- todob = lenb - PyList_Size(b);
- num = todoa > todob ? todoa : todob;
- if (num == 0)
- break;
- for (i = 0 ; i < num ; i++) {
- x = PyFile_GetLine(fp, 0);
- s = PyString_AS_STRING(x);
- c = *s;
- if (strcmp(s, "\\ No newline at end of file\n") == 0) {
- _fix_newline(hunk, a, b);
- continue;
- }
- if (c == '\n') {
- /* Some patches may be missing the control char
- * on empty lines. Supply a leading space. */
- Py_DECREF(x);
- x = PyString_FromString(" \n");
- }
- PyList_Append(hunk, x);
- if (c == '+') {
- l = PyString_FromString(s + 1);
- PyList_Append(b, l);
- Py_DECREF(l);
- } else if (c == '-') {
- PyList_Append(a, x);
- } else {
- l = PyString_FromString(s + 1);
- PyList_Append(b, l);
- Py_DECREF(l);
- PyList_Append(a, x);
- }
- Py_DECREF(x);
- }
- }
- return Py_BuildValue("l", 0);
-}
-
-/*
- * compare the lines in a with the lines in b. a is assumed to have
- * a control char at the start of each line, this char is ignored in the
- * compare
- */
-static PyObject *
-testhunk(PyObject *self, PyObject *args)
-{
-
- PyObject *a, *b;
- long bstart;
- int alen, blen;
- int i;
- char *sa, *sb;
-
- if (!PyArg_ParseTuple(args, "OOl", &a, &b, &bstart))
- return NULL;
- alen = PyList_Size(a);
- blen = PyList_Size(b);
- if (alen > blen - bstart) {
- return Py_BuildValue("l", -1);
- }
- for (i = 0 ; i < alen ; i++) {
- sa = PyString_AS_STRING(PyList_GET_ITEM(a, i));
- sb = PyString_AS_STRING(PyList_GET_ITEM(b, i + bstart));
- if (strcmp(sa+1, sb) != 0)
- return Py_BuildValue("l", -1);
- }
- return Py_BuildValue("l", 0);
-}
-
-static PyMethodDef methods[] = {
- {"addlines", addlines, METH_VARARGS, "add lines to a hunk\n"},
- {"fix_newline", fix_newline, METH_VARARGS, "fixup newline counters\n"},
- {"testhunk", testhunk, METH_VARARGS, "test lines in a hunk\n"},
- {NULL, NULL}
-};
-
-PyMODINIT_FUNC
-initdiffhelpers(void)
-{
- Py_InitModule3("diffhelpers", methods, diffhelpers_doc);
- diffhelpers_Error = PyErr_NewException("diffhelpers.diffhelpersError",
- NULL, NULL);
-}
-
diff --git a/sys/lib/python/mercurial/dirstate.py b/sys/lib/python/mercurial/dirstate.py
deleted file mode 100644
index c10e3a6c7..000000000
--- a/sys/lib/python/mercurial/dirstate.py
+++ /dev/null
@@ -1,601 +0,0 @@
-# dirstate.py - working directory tracking for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import nullid
-from i18n import _
-import util, ignore, osutil, parsers
-import struct, os, stat, errno
-import cStringIO, sys
-
-_unknown = ('?', 0, 0, 0)
-_format = ">cllll"
-propertycache = util.propertycache
-
-def _finddirs(path):
- pos = path.rfind('/')
- while pos != -1:
- yield path[:pos]
- pos = path.rfind('/', 0, pos)
-
-def _incdirs(dirs, path):
- for base in _finddirs(path):
- if base in dirs:
- dirs[base] += 1
- return
- dirs[base] = 1
-
-def _decdirs(dirs, path):
- for base in _finddirs(path):
- if dirs[base] > 1:
- dirs[base] -= 1
- return
- del dirs[base]
-
-class dirstate(object):
-
- def __init__(self, opener, ui, root):
- self._opener = opener
- self._root = root
- self._rootdir = os.path.join(root, '')
- self._dirty = False
- self._dirtypl = False
- self._ui = ui
-
- @propertycache
- def _map(self):
- self._read()
- return self._map
-
- @propertycache
- def _copymap(self):
- self._read()
- return self._copymap
-
- @propertycache
- def _foldmap(self):
- f = {}
- for name in self._map:
- f[os.path.normcase(name)] = name
- return f
-
- @propertycache
- def _branch(self):
- try:
- return self._opener("branch").read().strip() or "default"
- except IOError:
- return "default"
-
- @propertycache
- def _pl(self):
- try:
- st = self._opener("dirstate").read(40)
- l = len(st)
- if l == 40:
- return st[:20], st[20:40]
- elif l > 0 and l < 40:
- raise util.Abort(_('working directory state appears damaged!'))
- except IOError, err:
- if err.errno != errno.ENOENT: raise
- return [nullid, nullid]
-
- @propertycache
- def _dirs(self):
- dirs = {}
- for f,s in self._map.iteritems():
- if s[0] != 'r':
- _incdirs(dirs, f)
- return dirs
-
- @propertycache
- def _ignore(self):
- files = [self._join('.hgignore')]
- for name, path in self._ui.configitems("ui"):
- if name == 'ignore' or name.startswith('ignore.'):
- files.append(os.path.expanduser(path))
- return ignore.ignore(self._root, files, self._ui.warn)
-
- @propertycache
- def _slash(self):
- return self._ui.configbool('ui', 'slash') and os.sep != '/'
-
- @propertycache
- def _checklink(self):
- return util.checklink(self._root)
-
- @propertycache
- def _checkexec(self):
- return util.checkexec(self._root)
-
- @propertycache
- def _checkcase(self):
- return not util.checkcase(self._join('.hg'))
-
- def _join(self, f):
- # much faster than os.path.join()
- # it's safe because f is always a relative path
- return self._rootdir + f
-
- def flagfunc(self, fallback):
- if self._checklink:
- if self._checkexec:
- def f(x):
- p = self._join(x)
- if os.path.islink(p):
- return 'l'
- if util.is_exec(p):
- return 'x'
- return ''
- return f
- def f(x):
- if os.path.islink(self._join(x)):
- return 'l'
- if 'x' in fallback(x):
- return 'x'
- return ''
- return f
- if self._checkexec:
- def f(x):
- if 'l' in fallback(x):
- return 'l'
- if util.is_exec(self._join(x)):
- return 'x'
- return ''
- return f
- return fallback
-
- def getcwd(self):
- cwd = os.getcwd()
- if cwd == self._root: return ''
- # self._root ends with a path separator if self._root is '/' or 'C:\'
- rootsep = self._root
- if not util.endswithsep(rootsep):
- rootsep += os.sep
- if cwd.startswith(rootsep):
- return cwd[len(rootsep):]
- else:
- # we're outside the repo. return an absolute path.
- return cwd
-
- def pathto(self, f, cwd=None):
- if cwd is None:
- cwd = self.getcwd()
- path = util.pathto(self._root, cwd, f)
- if self._slash:
- return util.normpath(path)
- return path
-
- def __getitem__(self, key):
- ''' current states:
- n normal
- m needs merging
- r marked for removal
- a marked for addition
- ? not tracked'''
- return self._map.get(key, ("?",))[0]
-
- def __contains__(self, key):
- return key in self._map
-
- def __iter__(self):
- for x in sorted(self._map):
- yield x
-
- def parents(self):
- return self._pl
-
- def branch(self):
- return self._branch
-
- def setparents(self, p1, p2=nullid):
- self._dirty = self._dirtypl = True
- self._pl = p1, p2
-
- def setbranch(self, branch):
- self._branch = branch
- self._opener("branch", "w").write(branch + '\n')
-
- def _read(self):
- self._map = {}
- self._copymap = {}
- try:
- st = self._opener("dirstate").read()
- except IOError, err:
- if err.errno != errno.ENOENT: raise
- return
- if not st:
- return
-
- p = parsers.parse_dirstate(self._map, self._copymap, st)
- if not self._dirtypl:
- self._pl = p
-
- def invalidate(self):
- for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
- if a in self.__dict__:
- delattr(self, a)
- self._dirty = False
-
- def copy(self, source, dest):
- """Mark dest as a copy of source. Unmark dest if source is None.
- """
- if source == dest:
- return
- self._dirty = True
- if source is not None:
- self._copymap[dest] = source
- elif dest in self._copymap:
- del self._copymap[dest]
-
- def copied(self, file):
- return self._copymap.get(file, None)
-
- def copies(self):
- return self._copymap
-
- def _droppath(self, f):
- if self[f] not in "?r" and "_dirs" in self.__dict__:
- _decdirs(self._dirs, f)
-
- def _addpath(self, f, check=False):
- oldstate = self[f]
- if check or oldstate == "r":
- if '\r' in f or '\n' in f:
- raise util.Abort(
- _("'\\n' and '\\r' disallowed in filenames: %r") % f)
- if f in self._dirs:
- raise util.Abort(_('directory %r already in dirstate') % f)
- # shadows
- for d in _finddirs(f):
- if d in self._dirs:
- break
- if d in self._map and self[d] != 'r':
- raise util.Abort(
- _('file %r in dirstate clashes with %r') % (d, f))
- if oldstate in "?r" and "_dirs" in self.__dict__:
- _incdirs(self._dirs, f)
-
- def normal(self, f):
- 'mark a file normal and clean'
- self._dirty = True
- self._addpath(f)
- s = os.lstat(self._join(f))
- self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
- if f in self._copymap:
- del self._copymap[f]
-
- def normallookup(self, f):
- 'mark a file normal, but possibly dirty'
- if self._pl[1] != nullid and f in self._map:
- # if there is a merge going on and the file was either
- # in state 'm' or dirty before being removed, restore that state.
- entry = self._map[f]
- if entry[0] == 'r' and entry[2] in (-1, -2):
- source = self._copymap.get(f)
- if entry[2] == -1:
- self.merge(f)
- elif entry[2] == -2:
- self.normaldirty(f)
- if source:
- self.copy(source, f)
- return
- if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
- return
- self._dirty = True
- self._addpath(f)
- self._map[f] = ('n', 0, -1, -1)
- if f in self._copymap:
- del self._copymap[f]
-
- def normaldirty(self, f):
- 'mark a file normal, but dirty'
- self._dirty = True
- self._addpath(f)
- self._map[f] = ('n', 0, -2, -1)
- if f in self._copymap:
- del self._copymap[f]
-
- def add(self, f):
- 'mark a file added'
- self._dirty = True
- self._addpath(f, True)
- self._map[f] = ('a', 0, -1, -1)
- if f in self._copymap:
- del self._copymap[f]
-
- def remove(self, f):
- 'mark a file removed'
- self._dirty = True
- self._droppath(f)
- size = 0
- if self._pl[1] != nullid and f in self._map:
- entry = self._map[f]
- if entry[0] == 'm':
- size = -1
- elif entry[0] == 'n' and entry[2] == -2:
- size = -2
- self._map[f] = ('r', 0, size, 0)
- if size == 0 and f in self._copymap:
- del self._copymap[f]
-
- def merge(self, f):
- 'mark a file merged'
- self._dirty = True
- s = os.lstat(self._join(f))
- self._addpath(f)
- self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
- if f in self._copymap:
- del self._copymap[f]
-
- def forget(self, f):
- 'forget a file'
- self._dirty = True
- try:
- self._droppath(f)
- del self._map[f]
- except KeyError:
- self._ui.warn(_("not in dirstate: %s\n") % f)
-
- def _normalize(self, path, knownpath):
- norm_path = os.path.normcase(path)
- fold_path = self._foldmap.get(norm_path, None)
- if fold_path is None:
- if knownpath or not os.path.exists(os.path.join(self._root, path)):
- fold_path = path
- else:
- fold_path = self._foldmap.setdefault(norm_path,
- util.fspath(path, self._root))
- return fold_path
-
- def clear(self):
- self._map = {}
- if "_dirs" in self.__dict__:
- delattr(self, "_dirs");
- self._copymap = {}
- self._pl = [nullid, nullid]
- self._dirty = True
-
- def rebuild(self, parent, files):
- self.clear()
- for f in files:
- if 'x' in files.flags(f):
- self._map[f] = ('n', 0777, -1, 0)
- else:
- self._map[f] = ('n', 0666, -1, 0)
- self._pl = (parent, nullid)
- self._dirty = True
-
- def write(self):
- if not self._dirty:
- return
- st = self._opener("dirstate", "w", atomictemp=True)
-
- try:
- gran = int(self._ui.config('dirstate', 'granularity', 1))
- except ValueError:
- gran = 1
- limit = sys.maxint
- if gran > 0:
- limit = util.fstat(st).st_mtime - gran
-
- cs = cStringIO.StringIO()
- copymap = self._copymap
- pack = struct.pack
- write = cs.write
- write("".join(self._pl))
- for f, e in self._map.iteritems():
- if f in copymap:
- f = "%s\0%s" % (f, copymap[f])
- if e[3] > limit and e[0] == 'n':
- e = (e[0], 0, -1, -1)
- e = pack(_format, e[0], e[1], e[2], e[3], len(f))
- write(e)
- write(f)
- st.write(cs.getvalue())
- st.rename()
- self._dirty = self._dirtypl = False
-
- def _dirignore(self, f):
- if f == '.':
- return False
- if self._ignore(f):
- return True
- for p in _finddirs(f):
- if self._ignore(p):
- return True
- return False
-
- def walk(self, match, unknown, ignored):
- '''
- walk recursively through the directory tree, finding all files
- matched by the match function
-
- results are yielded in a tuple (filename, stat), where stat
- and st is the stat result if the file was found in the directory.
- '''
-
- def fwarn(f, msg):
- self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
- return False
-
- def badtype(mode):
- kind = _('unknown')
- if stat.S_ISCHR(mode): kind = _('character device')
- elif stat.S_ISBLK(mode): kind = _('block device')
- elif stat.S_ISFIFO(mode): kind = _('fifo')
- elif stat.S_ISSOCK(mode): kind = _('socket')
- elif stat.S_ISDIR(mode): kind = _('directory')
- return _('unsupported file type (type is %s)') % kind
-
- ignore = self._ignore
- dirignore = self._dirignore
- if ignored:
- ignore = util.never
- dirignore = util.never
- elif not unknown:
- # if unknown and ignored are False, skip step 2
- ignore = util.always
- dirignore = util.always
-
- matchfn = match.matchfn
- badfn = match.bad
- dmap = self._map
- normpath = util.normpath
- listdir = osutil.listdir
- lstat = os.lstat
- getkind = stat.S_IFMT
- dirkind = stat.S_IFDIR
- regkind = stat.S_IFREG
- lnkkind = stat.S_IFLNK
- join = self._join
- work = []
- wadd = work.append
-
- if self._checkcase:
- normalize = self._normalize
- else:
- normalize = lambda x, y: x
-
- exact = skipstep3 = False
- if matchfn == match.exact: # match.exact
- exact = True
- dirignore = util.always # skip step 2
- elif match.files() and not match.anypats(): # match.match, no patterns
- skipstep3 = True
-
- files = set(match.files())
- if not files or '.' in files:
- files = ['']
- results = {'.hg': None}
-
- # step 1: find all explicit files
- for ff in sorted(files):
- nf = normalize(normpath(ff), False)
- if nf in results:
- continue
-
- try:
- st = lstat(join(nf))
- kind = getkind(st.st_mode)
- if kind == dirkind:
- skipstep3 = False
- if nf in dmap:
- #file deleted on disk but still in dirstate
- results[nf] = None
- match.dir(nf)
- if not dirignore(nf):
- wadd(nf)
- elif kind == regkind or kind == lnkkind:
- results[nf] = st
- else:
- badfn(ff, badtype(kind))
- if nf in dmap:
- results[nf] = None
- except OSError, inst:
- if nf in dmap: # does it exactly match a file?
- results[nf] = None
- else: # does it match a directory?
- prefix = nf + "/"
- for fn in dmap:
- if fn.startswith(prefix):
- match.dir(nf)
- skipstep3 = False
- break
- else:
- badfn(ff, inst.strerror)
-
- # step 2: visit subdirectories
- while work:
- nd = work.pop()
- skip = None
- if nd == '.':
- nd = ''
- else:
- skip = '.hg'
- try:
- entries = listdir(join(nd), stat=True, skip=skip)
- except OSError, inst:
- if inst.errno == errno.EACCES:
- fwarn(nd, inst.strerror)
- continue
- raise
- for f, kind, st in entries:
- nf = normalize(nd and (nd + "/" + f) or f, True)
- if nf not in results:
- if kind == dirkind:
- if not ignore(nf):
- match.dir(nf)
- wadd(nf)
- if nf in dmap and matchfn(nf):
- results[nf] = None
- elif kind == regkind or kind == lnkkind:
- if nf in dmap:
- if matchfn(nf):
- results[nf] = st
- elif matchfn(nf) and not ignore(nf):
- results[nf] = st
- elif nf in dmap and matchfn(nf):
- results[nf] = None
-
- # step 3: report unseen items in the dmap hash
- if not skipstep3 and not exact:
- visit = sorted([f for f in dmap if f not in results and matchfn(f)])
- for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
- if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
- st = None
- results[nf] = st
-
- del results['.hg']
- return results
-
- def status(self, match, ignored, clean, unknown):
- listignored, listclean, listunknown = ignored, clean, unknown
- lookup, modified, added, unknown, ignored = [], [], [], [], []
- removed, deleted, clean = [], [], []
-
- dmap = self._map
- ladd = lookup.append
- madd = modified.append
- aadd = added.append
- uadd = unknown.append
- iadd = ignored.append
- radd = removed.append
- dadd = deleted.append
- cadd = clean.append
-
- for fn, st in self.walk(match, listunknown, listignored).iteritems():
- if fn not in dmap:
- if (listignored or match.exact(fn)) and self._dirignore(fn):
- if listignored:
- iadd(fn)
- elif listunknown:
- uadd(fn)
- continue
-
- state, mode, size, time = dmap[fn]
-
- if not st and state in "nma":
- dadd(fn)
- elif state == 'n':
- if (size >= 0 and
- (size != st.st_size
- or ((mode ^ st.st_mode) & 0100 and self._checkexec))
- or size == -2
- or fn in self._copymap):
- madd(fn)
- elif time != int(st.st_mtime):
- ladd(fn)
- elif listclean:
- cadd(fn)
- elif state == 'm':
- madd(fn)
- elif state == 'a':
- aadd(fn)
- elif state == 'r':
- radd(fn)
-
- return (lookup, modified, added, removed, deleted, unknown, ignored,
- clean)
diff --git a/sys/lib/python/mercurial/dispatch.py b/sys/lib/python/mercurial/dispatch.py
deleted file mode 100644
index a01cf8204..000000000
--- a/sys/lib/python/mercurial/dispatch.py
+++ /dev/null
@@ -1,501 +0,0 @@
-# dispatch.py - command dispatching for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import os, sys, atexit, signal, pdb, socket, errno, shlex, time
-import util, commands, hg, fancyopts, extensions, hook, error
-import cmdutil, encoding
-import ui as _ui
-
-def run():
- "run the command in sys.argv"
- sys.exit(dispatch(sys.argv[1:]))
-
-def dispatch(args):
- "run the command specified in args"
- try:
- u = _ui.ui()
- if '--traceback' in args:
- u.setconfig('ui', 'traceback', 'on')
- except util.Abort, inst:
- sys.stderr.write(_("abort: %s\n") % inst)
- return -1
- return _runcatch(u, args)
-
-def _runcatch(ui, args):
- def catchterm(*args):
- raise error.SignalInterrupt
-
- for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
- num = getattr(signal, name, None)
- if num: signal.signal(num, catchterm)
-
- try:
- try:
- # enter the debugger before command execution
- if '--debugger' in args:
- pdb.set_trace()
- try:
- return _dispatch(ui, args)
- finally:
- ui.flush()
- except:
- # enter the debugger when we hit an exception
- if '--debugger' in args:
- pdb.post_mortem(sys.exc_info()[2])
- ui.traceback()
- raise
-
- # Global exception handling, alphabetically
- # Mercurial-specific first, followed by built-in and library exceptions
- except error.AmbiguousCommand, inst:
- ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
- (inst.args[0], " ".join(inst.args[1])))
- except error.ConfigError, inst:
- ui.warn(_("hg: %s\n") % inst.args[0])
- except error.LockHeld, inst:
- if inst.errno == errno.ETIMEDOUT:
- reason = _('timed out waiting for lock held by %s') % inst.locker
- else:
- reason = _('lock held by %s') % inst.locker
- ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
- except error.LockUnavailable, inst:
- ui.warn(_("abort: could not lock %s: %s\n") %
- (inst.desc or inst.filename, inst.strerror))
- except error.ParseError, inst:
- if inst.args[0]:
- ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
- commands.help_(ui, inst.args[0])
- else:
- ui.warn(_("hg: %s\n") % inst.args[1])
- commands.help_(ui, 'shortlist')
- except error.RepoError, inst:
- ui.warn(_("abort: %s!\n") % inst)
- except error.ResponseError, inst:
- ui.warn(_("abort: %s") % inst.args[0])
- if not isinstance(inst.args[1], basestring):
- ui.warn(" %r\n" % (inst.args[1],))
- elif not inst.args[1]:
- ui.warn(_(" empty string\n"))
- else:
- ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
- except error.RevlogError, inst:
- ui.warn(_("abort: %s!\n") % inst)
- except error.SignalInterrupt:
- ui.warn(_("killed!\n"))
- except error.UnknownCommand, inst:
- ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
- commands.help_(ui, 'shortlist')
- except util.Abort, inst:
- ui.warn(_("abort: %s\n") % inst)
- except ImportError, inst:
- m = str(inst).split()[-1]
- ui.warn(_("abort: could not import module %s!\n") % m)
- if m in "mpatch bdiff".split():
- ui.warn(_("(did you forget to compile extensions?)\n"))
- elif m in "zlib".split():
- ui.warn(_("(is your Python install correct?)\n"))
- except IOError, inst:
- if hasattr(inst, "code"):
- ui.warn(_("abort: %s\n") % inst)
- elif hasattr(inst, "reason"):
- try: # usually it is in the form (errno, strerror)
- reason = inst.reason.args[1]
- except: # it might be anything, for example a string
- reason = inst.reason
- ui.warn(_("abort: error: %s\n") % reason)
- elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE:
- if ui.debugflag:
- ui.warn(_("broken pipe\n"))
- elif getattr(inst, "strerror", None):
- if getattr(inst, "filename", None):
- ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
- else:
- ui.warn(_("abort: %s\n") % inst.strerror)
- else:
- raise
- except OSError, inst:
- if getattr(inst, "filename", None):
- ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
- else:
- ui.warn(_("abort: %s\n") % inst.strerror)
- except KeyboardInterrupt:
- try:
- ui.warn(_("interrupted!\n"))
- except IOError, inst:
- if inst.errno == errno.EPIPE:
- if ui.debugflag:
- ui.warn(_("\nbroken pipe\n"))
- else:
- raise
- except MemoryError:
- ui.warn(_("abort: out of memory\n"))
- except SystemExit, inst:
- # Commands shouldn't sys.exit directly, but give a return code.
- # Just in case catch this and and pass exit code to caller.
- return inst.code
- except socket.error, inst:
- ui.warn(_("abort: %s\n") % inst.args[-1])
- except:
- ui.warn(_("** unknown exception encountered, details follow\n"))
- ui.warn(_("** report bug details to "
- "http://mercurial.selenic.com/bts/\n"))
- ui.warn(_("** or mercurial@selenic.com\n"))
- ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
- % util.version())
- ui.warn(_("** Extensions loaded: %s\n")
- % ", ".join([x[0] for x in extensions.extensions()]))
- raise
-
- return -1
-
-def _findrepo(p):
- while not os.path.isdir(os.path.join(p, ".hg")):
- oldp, p = p, os.path.dirname(p)
- if p == oldp:
- return None
-
- return p
-
-def aliasargs(fn):
- if hasattr(fn, 'args'):
- return fn.args
- return []
-
-class cmdalias(object):
- def __init__(self, name, definition, cmdtable):
- self.name = name
- self.definition = definition
- self.args = []
- self.opts = []
- self.help = ''
- self.norepo = True
-
- try:
- cmdutil.findcmd(self.name, cmdtable, True)
- self.shadows = True
- except error.UnknownCommand:
- self.shadows = False
-
- if not self.definition:
- def fn(ui, *args):
- ui.warn(_("no definition for alias '%s'\n") % self.name)
- return 1
- self.fn = fn
-
- return
-
- args = shlex.split(self.definition)
- cmd = args.pop(0)
- opts = []
- help = ''
-
- try:
- self.fn, self.opts, self.help = cmdutil.findcmd(cmd, cmdtable, False)[1]
- self.args = aliasargs(self.fn) + args
- if cmd not in commands.norepo.split(' '):
- self.norepo = False
- except error.UnknownCommand:
- def fn(ui, *args):
- ui.warn(_("alias '%s' resolves to unknown command '%s'\n") \
- % (self.name, cmd))
- return 1
- self.fn = fn
- except error.AmbiguousCommand:
- def fn(ui, *args):
- ui.warn(_("alias '%s' resolves to ambiguous command '%s'\n") \
- % (self.name, cmd))
- return 1
- self.fn = fn
-
- def __call__(self, ui, *args, **opts):
- if self.shadows:
- ui.debug(_("alias '%s' shadows command\n") % self.name)
-
- return self.fn(ui, *args, **opts)
-
-def addaliases(ui, cmdtable):
- # aliases are processed after extensions have been loaded, so they
- # may use extension commands. Aliases can also use other alias definitions,
- # but only if they have been defined prior to the current definition.
- for alias, definition in ui.configitems('alias'):
- aliasdef = cmdalias(alias, definition, cmdtable)
- cmdtable[alias] = (aliasdef, aliasdef.opts, aliasdef.help)
- if aliasdef.norepo:
- commands.norepo += ' %s' % alias
-
-def _parse(ui, args):
- options = {}
- cmdoptions = {}
-
- try:
- args = fancyopts.fancyopts(args, commands.globalopts, options)
- except fancyopts.getopt.GetoptError, inst:
- raise error.ParseError(None, inst)
-
- if args:
- cmd, args = args[0], args[1:]
- aliases, i = cmdutil.findcmd(cmd, commands.table,
- ui.config("ui", "strict"))
- cmd = aliases[0]
- args = aliasargs(i[0]) + args
- defaults = ui.config("defaults", cmd)
- if defaults:
- args = shlex.split(defaults) + args
- c = list(i[1])
- else:
- cmd = None
- c = []
-
- # combine global options into local
- for o in commands.globalopts:
- c.append((o[0], o[1], options[o[1]], o[3]))
-
- try:
- args = fancyopts.fancyopts(args, c, cmdoptions, True)
- except fancyopts.getopt.GetoptError, inst:
- raise error.ParseError(cmd, inst)
-
- # separate global options back out
- for o in commands.globalopts:
- n = o[1]
- options[n] = cmdoptions[n]
- del cmdoptions[n]
-
- return (cmd, cmd and i[0] or None, args, options, cmdoptions)
-
-def _parseconfig(ui, config):
- """parse the --config options from the command line"""
- for cfg in config:
- try:
- name, value = cfg.split('=', 1)
- section, name = name.split('.', 1)
- if not section or not name:
- raise IndexError
- ui.setconfig(section, name, value)
- except (IndexError, ValueError):
- raise util.Abort(_('malformed --config option: %s') % cfg)
-
-def _earlygetopt(aliases, args):
- """Return list of values for an option (or aliases).
-
- The values are listed in the order they appear in args.
- The options and values are removed from args.
- """
- try:
- argcount = args.index("--")
- except ValueError:
- argcount = len(args)
- shortopts = [opt for opt in aliases if len(opt) == 2]
- values = []
- pos = 0
- while pos < argcount:
- if args[pos] in aliases:
- if pos + 1 >= argcount:
- # ignore and let getopt report an error if there is no value
- break
- del args[pos]
- values.append(args.pop(pos))
- argcount -= 2
- elif args[pos][:2] in shortopts:
- # short option can have no following space, e.g. hg log -Rfoo
- values.append(args.pop(pos)[2:])
- argcount -= 1
- else:
- pos += 1
- return values
-
-def runcommand(lui, repo, cmd, fullargs, ui, options, d):
- # run pre-hook, and abort if it fails
- ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs))
- if ret:
- return ret
- ret = _runcommand(ui, options, cmd, d)
- # run post-hook, passing command result
- hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
- result = ret)
- return ret
-
-_loaded = set()
-def _dispatch(ui, args):
- # read --config before doing anything else
- # (e.g. to change trust settings for reading .hg/hgrc)
- _parseconfig(ui, _earlygetopt(['--config'], args))
-
- # check for cwd
- cwd = _earlygetopt(['--cwd'], args)
- if cwd:
- os.chdir(cwd[-1])
-
- # read the local repository .hgrc into a local ui object
- path = _findrepo(os.getcwd()) or ""
- if not path:
- lui = ui
- if path:
- try:
- lui = ui.copy()
- lui.readconfig(os.path.join(path, ".hg", "hgrc"))
- except IOError:
- pass
-
- # now we can expand paths, even ones in .hg/hgrc
- rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
- if rpath:
- path = lui.expandpath(rpath[-1])
- lui = ui.copy()
- lui.readconfig(os.path.join(path, ".hg", "hgrc"))
-
- extensions.loadall(lui)
- for name, module in extensions.extensions():
- if name in _loaded:
- continue
-
- # setup extensions
- # TODO this should be generalized to scheme, where extensions can
- # redepend on other extensions. then we should toposort them, and
- # do initialization in correct order
- extsetup = getattr(module, 'extsetup', None)
- if extsetup:
- extsetup()
-
- cmdtable = getattr(module, 'cmdtable', {})
- overrides = [cmd for cmd in cmdtable if cmd in commands.table]
- if overrides:
- ui.warn(_("extension '%s' overrides commands: %s\n")
- % (name, " ".join(overrides)))
- commands.table.update(cmdtable)
- _loaded.add(name)
-
- addaliases(lui, commands.table)
-
- # check for fallback encoding
- fallback = lui.config('ui', 'fallbackencoding')
- if fallback:
- encoding.fallbackencoding = fallback
-
- fullargs = args
- cmd, func, args, options, cmdoptions = _parse(lui, args)
-
- if options["config"]:
- raise util.Abort(_("Option --config may not be abbreviated!"))
- if options["cwd"]:
- raise util.Abort(_("Option --cwd may not be abbreviated!"))
- if options["repository"]:
- raise util.Abort(_(
- "Option -R has to be separated from other options (e.g. not -qR) "
- "and --repository may only be abbreviated as --repo!"))
-
- if options["encoding"]:
- encoding.encoding = options["encoding"]
- if options["encodingmode"]:
- encoding.encodingmode = options["encodingmode"]
- if options["time"]:
- def get_times():
- t = os.times()
- if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
- t = (t[0], t[1], t[2], t[3], time.clock())
- return t
- s = get_times()
- def print_time():
- t = get_times()
- ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
- (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
- atexit.register(print_time)
-
- if options['verbose'] or options['debug'] or options['quiet']:
- ui.setconfig('ui', 'verbose', str(bool(options['verbose'])))
- ui.setconfig('ui', 'debug', str(bool(options['debug'])))
- ui.setconfig('ui', 'quiet', str(bool(options['quiet'])))
- if options['traceback']:
- ui.setconfig('ui', 'traceback', 'on')
- if options['noninteractive']:
- ui.setconfig('ui', 'interactive', 'off')
-
- if options['help']:
- return commands.help_(ui, cmd, options['version'])
- elif options['version']:
- return commands.version_(ui)
- elif not cmd:
- return commands.help_(ui, 'shortlist')
-
- repo = None
- if cmd not in commands.norepo.split():
- try:
- repo = hg.repository(ui, path=path)
- ui = repo.ui
- if not repo.local():
- raise util.Abort(_("repository '%s' is not local") % path)
- ui.setconfig("bundle", "mainreporoot", repo.root)
- except error.RepoError:
- if cmd not in commands.optionalrepo.split():
- if args and not path: # try to infer -R from command args
- repos = map(_findrepo, args)
- guess = repos[0]
- if guess and repos.count(guess) == len(repos):
- return _dispatch(ui, ['--repository', guess] + fullargs)
- if not path:
- raise error.RepoError(_("There is no Mercurial repository"
- " here (.hg not found)"))
- raise
- args.insert(0, repo)
- elif rpath:
- ui.warn("warning: --repository ignored\n")
-
- d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
- return runcommand(lui, repo, cmd, fullargs, ui, options, d)
-
-def _runcommand(ui, options, cmd, cmdfunc):
- def checkargs():
- try:
- return cmdfunc()
- except error.SignatureError:
- raise error.ParseError(cmd, _("invalid arguments"))
-
- if options['profile']:
- format = ui.config('profiling', 'format', default='text')
-
- if not format in ['text', 'kcachegrind']:
- ui.warn(_("unrecognized profiling format '%s'"
- " - Ignored\n") % format)
- format = 'text'
-
- output = ui.config('profiling', 'output')
-
- if output:
- path = os.path.expanduser(output)
- path = ui.expandpath(path)
- ostream = open(path, 'wb')
- else:
- ostream = sys.stderr
-
- try:
- from mercurial import lsprof
- except ImportError:
- raise util.Abort(_(
- 'lsprof not available - install from '
- 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
- p = lsprof.Profiler()
- p.enable(subcalls=True)
- try:
- return checkargs()
- finally:
- p.disable()
-
- if format == 'kcachegrind':
- import lsprofcalltree
- calltree = lsprofcalltree.KCacheGrind(p)
- calltree.output(ostream)
- else:
- # format == 'text'
- stats = lsprof.Stats(p.getstats())
- stats.sort()
- stats.pprint(top=10, file=ostream, climit=5)
-
- if output:
- ostream.close()
- else:
- return checkargs()
diff --git a/sys/lib/python/mercurial/encoding.py b/sys/lib/python/mercurial/encoding.py
deleted file mode 100644
index b286cc865..000000000
--- a/sys/lib/python/mercurial/encoding.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# encoding.py - character transcoding support for Mercurial
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import error
-import sys, unicodedata, locale, os
-
-_encodingfixup = {'646': 'ascii', 'ANSI_X3.4-1968': 'ascii'}
-
-try:
- encoding = os.environ.get("HGENCODING")
- if sys.platform == 'darwin' and not encoding:
- # On darwin, getpreferredencoding ignores the locale environment and
- # always returns mac-roman. We override this if the environment is
- # not C (has been customized by the user).
- locale.setlocale(locale.LC_CTYPE, '')
- encoding = locale.getlocale()[1]
- if not encoding:
- encoding = locale.getpreferredencoding() or 'ascii'
- encoding = _encodingfixup.get(encoding, encoding)
-except locale.Error:
- encoding = 'ascii'
-encodingmode = os.environ.get("HGENCODINGMODE", "strict")
-fallbackencoding = 'ISO-8859-1'
-
-def tolocal(s):
- """
- Convert a string from internal UTF-8 to local encoding
-
- All internal strings should be UTF-8 but some repos before the
- implementation of locale support may contain latin1 or possibly
- other character sets. We attempt to decode everything strictly
- using UTF-8, then Latin-1, and failing that, we use UTF-8 and
- replace unknown characters.
- """
- for e in ('UTF-8', fallbackencoding):
- try:
- u = s.decode(e) # attempt strict decoding
- return u.encode(encoding, "replace")
- except LookupError, k:
- raise error.Abort("%s, please check your locale settings" % k)
- except UnicodeDecodeError:
- pass
- u = s.decode("utf-8", "replace") # last ditch
- return u.encode(encoding, "replace")
-
-def fromlocal(s):
- """
- Convert a string from the local character encoding to UTF-8
-
- We attempt to decode strings using the encoding mode set by
- HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
- characters will cause an error message. Other modes include
- 'replace', which replaces unknown characters with a special
- Unicode character, and 'ignore', which drops the character.
- """
- try:
- return s.decode(encoding, encodingmode).encode("utf-8")
- except UnicodeDecodeError, inst:
- sub = s[max(0, inst.start-10):inst.start+10]
- raise error.Abort("decoding near '%s': %s!" % (sub, inst))
- except LookupError, k:
- raise error.Abort("%s, please check your locale settings" % k)
-
-def colwidth(s):
- "Find the column width of a UTF-8 string for display"
- d = s.decode(encoding, 'replace')
- if hasattr(unicodedata, 'east_asian_width'):
- w = unicodedata.east_asian_width
- return sum([w(c) in 'WF' and 2 or 1 for c in d])
- return len(d)
-
diff --git a/sys/lib/python/mercurial/error.py b/sys/lib/python/mercurial/error.py
deleted file mode 100644
index f4ed42e33..000000000
--- a/sys/lib/python/mercurial/error.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# error.py - Mercurial exceptions
-#
-# Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-"""Mercurial exceptions.
-
-This allows us to catch exceptions at higher levels without forcing
-imports.
-"""
-
-# Do not import anything here, please
-
-class RevlogError(Exception):
- pass
-
-class LookupError(RevlogError, KeyError):
- def __init__(self, name, index, message):
- self.name = name
- if isinstance(name, str) and len(name) == 20:
- from node import short
- name = short(name)
- RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
-
- def __str__(self):
- return RevlogError.__str__(self)
-
-class ParseError(Exception):
- """Exception raised on errors in parsing the command line."""
-
-class ConfigError(Exception):
- 'Exception raised when parsing config files'
-
-class RepoError(Exception):
- pass
-
-class CapabilityError(RepoError):
- pass
-
-class LockError(IOError):
- def __init__(self, errno, strerror, filename, desc):
- IOError.__init__(self, errno, strerror, filename)
- self.desc = desc
-
-class LockHeld(LockError):
- def __init__(self, errno, filename, desc, locker):
- LockError.__init__(self, errno, 'Lock held', filename, desc)
- self.locker = locker
-
-class LockUnavailable(LockError):
- pass
-
-class ResponseError(Exception):
- """Raised to print an error with part of output and exit."""
-
-class UnknownCommand(Exception):
- """Exception raised if command is not in the command table."""
-
-class AmbiguousCommand(Exception):
- """Exception raised if command shortcut matches more than one command."""
-
-# derived from KeyboardInterrupt to simplify some breakout code
-class SignalInterrupt(KeyboardInterrupt):
- """Exception raised on SIGTERM and SIGHUP."""
-
-class SignatureError(Exception):
- pass
-
-class Abort(Exception):
- """Raised if a command needs to print an error and exit."""
diff --git a/sys/lib/python/mercurial/extensions.py b/sys/lib/python/mercurial/extensions.py
deleted file mode 100644
index 30da1ebd1..000000000
--- a/sys/lib/python/mercurial/extensions.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# extensions.py - extension handling for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import imp, os
-import util, cmdutil, help
-from i18n import _, gettext
-
-_extensions = {}
-_order = []
-
-def extensions():
- for name in _order:
- module = _extensions[name]
- if module:
- yield name, module
-
-def find(name):
- '''return module with given extension name'''
- try:
- return _extensions[name]
- except KeyError:
- for k, v in _extensions.iteritems():
- if k.endswith('.' + name) or k.endswith('/' + name):
- return v
- raise KeyError(name)
-
-def loadpath(path, module_name):
- module_name = module_name.replace('.', '_')
- path = os.path.expanduser(path)
- if os.path.isdir(path):
- # module/__init__.py style
- d, f = os.path.split(path.rstrip('/'))
- fd, fpath, desc = imp.find_module(f, [d])
- return imp.load_module(module_name, fd, fpath, desc)
- else:
- return imp.load_source(module_name, path)
-
-def load(ui, name, path):
- if name.startswith('hgext.') or name.startswith('hgext/'):
- shortname = name[6:]
- else:
- shortname = name
- if shortname in _extensions:
- return
- _extensions[shortname] = None
- if path:
- # the module will be loaded in sys.modules
- # choose an unique name so that it doesn't
- # conflicts with other modules
- mod = loadpath(path, 'hgext.%s' % name)
- else:
- def importh(name):
- mod = __import__(name)
- components = name.split('.')
- for comp in components[1:]:
- mod = getattr(mod, comp)
- return mod
- try:
- mod = importh("hgext.%s" % name)
- except ImportError:
- mod = importh(name)
- _extensions[shortname] = mod
- _order.append(shortname)
-
- uisetup = getattr(mod, 'uisetup', None)
- if uisetup:
- uisetup(ui)
-
-def loadall(ui):
- result = ui.configitems("extensions")
- for (name, path) in result:
- if path:
- if path[0] == '!':
- continue
- try:
- load(ui, name, path)
- except KeyboardInterrupt:
- raise
- except Exception, inst:
- if path:
- ui.warn(_("*** failed to import extension %s from %s: %s\n")
- % (name, path, inst))
- else:
- ui.warn(_("*** failed to import extension %s: %s\n")
- % (name, inst))
- if ui.traceback():
- return 1
-
-def wrapcommand(table, command, wrapper):
- aliases, entry = cmdutil.findcmd(command, table)
- for alias, e in table.iteritems():
- if e is entry:
- key = alias
- break
-
- origfn = entry[0]
- def wrap(*args, **kwargs):
- return util.checksignature(wrapper)(
- util.checksignature(origfn), *args, **kwargs)
-
- wrap.__doc__ = getattr(origfn, '__doc__')
- wrap.__module__ = getattr(origfn, '__module__')
-
- newentry = list(entry)
- newentry[0] = wrap
- table[key] = tuple(newentry)
- return entry
-
-def wrapfunction(container, funcname, wrapper):
- def wrap(*args, **kwargs):
- return wrapper(origfn, *args, **kwargs)
-
- origfn = getattr(container, funcname)
- setattr(container, funcname, wrap)
- return origfn
-
-def disabled():
- '''find disabled extensions from hgext
- returns a dict of {name: desc}, and the max name length'''
-
- import hgext
- extpath = os.path.dirname(os.path.abspath(hgext.__file__))
-
- try: # might not be a filesystem path
- files = os.listdir(extpath)
- except OSError:
- return None, 0
-
- exts = {}
- maxlength = 0
- for e in files:
-
- if e.endswith('.py'):
- name = e.rsplit('.', 1)[0]
- path = os.path.join(extpath, e)
- else:
- name = e
- path = os.path.join(extpath, e, '__init__.py')
- if not os.path.exists(path):
- continue
-
- if name in exts or name in _order or name == '__init__':
- continue
-
- try:
- file = open(path)
- except IOError:
- continue
- else:
- doc = help.moduledoc(file)
- file.close()
-
- if doc: # extracting localized synopsis
- exts[name] = gettext(doc).splitlines()[0]
- else:
- exts[name] = _('(no help text available)')
-
- if len(name) > maxlength:
- maxlength = len(name)
-
- return exts, maxlength
-
-def enabled():
- '''return a dict of {name: desc} of extensions, and the max name length'''
- exts = {}
- maxlength = 0
- exthelps = []
- for ename, ext in extensions():
- doc = (gettext(ext.__doc__) or _('(no help text available)'))
- ename = ename.split('.')[-1]
- maxlength = max(len(ename), maxlength)
- exts[ename] = doc.splitlines()[0].strip()
-
- return exts, maxlength
diff --git a/sys/lib/python/mercurial/fancyopts.py b/sys/lib/python/mercurial/fancyopts.py
deleted file mode 100644
index 5acf0143d..000000000
--- a/sys/lib/python/mercurial/fancyopts.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# fancyopts.py - better command line parsing
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import getopt
-
-def gnugetopt(args, options, longoptions):
- """Parse options mostly like getopt.gnu_getopt.
-
- This is different from getopt.gnu_getopt in that an argument of - will
- become an argument of - instead of vanishing completely.
- """
- extraargs = []
- if '--' in args:
- stopindex = args.index('--')
- extraargs = args[stopindex+1:]
- args = args[:stopindex]
- opts, parseargs = getopt.getopt(args, options, longoptions)
- args = []
- while parseargs:
- arg = parseargs.pop(0)
- if arg and arg[0] == '-' and len(arg) > 1:
- parseargs.insert(0, arg)
- topts, newparseargs = getopt.getopt(parseargs, options, longoptions)
- opts = opts + topts
- parseargs = newparseargs
- else:
- args.append(arg)
- args.extend(extraargs)
- return opts, args
-
-
-def fancyopts(args, options, state, gnu=False):
- """
- read args, parse options, and store options in state
-
- each option is a tuple of:
-
- short option or ''
- long option
- default value
- description
-
- option types include:
-
- boolean or none - option sets variable in state to true
- string - parameter string is stored in state
- list - parameter string is added to a list
- integer - parameter strings is stored as int
- function - call function with parameter
-
- non-option args are returned
- """
- namelist = []
- shortlist = ''
- argmap = {}
- defmap = {}
-
- for short, name, default, comment in options:
- # convert opts to getopt format
- oname = name
- name = name.replace('-', '_')
-
- argmap['-' + short] = argmap['--' + oname] = name
- defmap[name] = default
-
- # copy defaults to state
- if isinstance(default, list):
- state[name] = default[:]
- elif hasattr(default, '__call__'):
- state[name] = None
- else:
- state[name] = default
-
- # does it take a parameter?
- if not (default is None or default is True or default is False):
- if short: short += ':'
- if oname: oname += '='
- if short:
- shortlist += short
- if name:
- namelist.append(oname)
-
- # parse arguments
- if gnu:
- parse = gnugetopt
- else:
- parse = getopt.getopt
- opts, args = parse(args, shortlist, namelist)
-
- # transfer result to state
- for opt, val in opts:
- name = argmap[opt]
- t = type(defmap[name])
- if t is type(fancyopts):
- state[name] = defmap[name](val)
- elif t is type(1):
- state[name] = int(val)
- elif t is type(''):
- state[name] = val
- elif t is type([]):
- state[name].append(val)
- elif t is type(None) or t is type(False):
- state[name] = True
-
- # return unparsed args
- return args
diff --git a/sys/lib/python/mercurial/filelog.py b/sys/lib/python/mercurial/filelog.py
deleted file mode 100644
index 01ca4f1b4..000000000
--- a/sys/lib/python/mercurial/filelog.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# filelog.py - file history class for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import revlog
-
-class filelog(revlog.revlog):
- def __init__(self, opener, path):
- revlog.revlog.__init__(self, opener,
- "/".join(("data", path + ".i")))
-
- def read(self, node):
- t = self.revision(node)
- if not t.startswith('\1\n'):
- return t
- s = t.index('\1\n', 2)
- return t[s+2:]
-
- def _readmeta(self, node):
- t = self.revision(node)
- if not t.startswith('\1\n'):
- return {}
- s = t.index('\1\n', 2)
- mt = t[2:s]
- m = {}
- for l in mt.splitlines():
- k, v = l.split(": ", 1)
- m[k] = v
- return m
-
- def add(self, text, meta, transaction, link, p1=None, p2=None):
- if meta or text.startswith('\1\n'):
- mt = ""
- if meta:
- mt = ["%s: %s\n" % (k, v) for k, v in meta.iteritems()]
- text = "\1\n%s\1\n%s" % ("".join(mt), text)
- return self.addrevision(text, transaction, link, p1, p2)
-
- def renamed(self, node):
- if self.parents(node)[0] != revlog.nullid:
- return False
- m = self._readmeta(node)
- if m and "copy" in m:
- return (m["copy"], revlog.bin(m["copyrev"]))
- return False
-
- def size(self, rev):
- """return the size of a given revision"""
-
- # for revisions with renames, we have to go the slow way
- node = self.node(rev)
- if self.renamed(node):
- return len(self.read(node))
-
- return revlog.revlog.size(self, rev)
-
- def cmp(self, node, text):
- """compare text with a given file revision"""
-
- # for renames, we have to go the slow way
- if self.renamed(node):
- t2 = self.read(node)
- return t2 != text
-
- return revlog.revlog.cmp(self, node, text)
diff --git a/sys/lib/python/mercurial/filemerge.py b/sys/lib/python/mercurial/filemerge.py
deleted file mode 100644
index 5431f8a96..000000000
--- a/sys/lib/python/mercurial/filemerge.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# filemerge.py - file-level merge handling for Mercurial
-#
-# Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import short
-from i18n import _
-import util, simplemerge, match
-import os, tempfile, re, filecmp
-
-def _toolstr(ui, tool, part, default=""):
- return ui.config("merge-tools", tool + "." + part, default)
-
-def _toolbool(ui, tool, part, default=False):
- return ui.configbool("merge-tools", tool + "." + part, default)
-
-_internal = ['internal:' + s
- for s in 'fail local other merge prompt dump'.split()]
-
-def _findtool(ui, tool):
- if tool in _internal:
- return tool
- k = _toolstr(ui, tool, "regkey")
- if k:
- p = util.lookup_reg(k, _toolstr(ui, tool, "regname"))
- if p:
- p = util.find_exe(p + _toolstr(ui, tool, "regappend"))
- if p:
- return p
- return util.find_exe(_toolstr(ui, tool, "executable", tool))
-
-def _picktool(repo, ui, path, binary, symlink):
- def check(tool, pat, symlink, binary):
- tmsg = tool
- if pat:
- tmsg += " specified for " + pat
- if not _findtool(ui, tool):
- if pat: # explicitly requested tool deserves a warning
- ui.warn(_("couldn't find merge tool %s\n") % tmsg)
- else: # configured but non-existing tools are more silent
- ui.note(_("couldn't find merge tool %s\n") % tmsg)
- elif symlink and not _toolbool(ui, tool, "symlink"):
- ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
- elif binary and not _toolbool(ui, tool, "binary"):
- ui.warn(_("tool %s can't handle binary\n") % tmsg)
- elif not util.gui() and _toolbool(ui, tool, "gui"):
- ui.warn(_("tool %s requires a GUI\n") % tmsg)
- else:
- return True
- return False
-
- # HGMERGE takes precedence
- hgmerge = os.environ.get("HGMERGE")
- if hgmerge:
- return (hgmerge, hgmerge)
-
- # then patterns
- for pat, tool in ui.configitems("merge-patterns"):
- mf = match.match(repo.root, '', [pat])
- if mf(path) and check(tool, pat, symlink, False):
- toolpath = _findtool(ui, tool)
- return (tool, '"' + toolpath + '"')
-
- # then merge tools
- tools = {}
- for k,v in ui.configitems("merge-tools"):
- t = k.split('.')[0]
- if t not in tools:
- tools[t] = int(_toolstr(ui, t, "priority", "0"))
- names = tools.keys()
- tools = sorted([(-p,t) for t,p in tools.items()])
- uimerge = ui.config("ui", "merge")
- if uimerge:
- if uimerge not in names:
- return (uimerge, uimerge)
- tools.insert(0, (None, uimerge)) # highest priority
- tools.append((None, "hgmerge")) # the old default, if found
- for p,t in tools:
- if check(t, None, symlink, binary):
- toolpath = _findtool(ui, t)
- return (t, '"' + toolpath + '"')
- # internal merge as last resort
- return (not (symlink or binary) and "internal:merge" or None, None)
-
-def _eoltype(data):
- "Guess the EOL type of a file"
- if '\0' in data: # binary
- return None
- if '\r\n' in data: # Windows
- return '\r\n'
- if '\r' in data: # Old Mac
- return '\r'
- if '\n' in data: # UNIX
- return '\n'
- return None # unknown
-
-def _matcheol(file, origfile):
- "Convert EOL markers in a file to match origfile"
- tostyle = _eoltype(open(origfile, "rb").read())
- if tostyle:
- data = open(file, "rb").read()
- style = _eoltype(data)
- if style:
- newdata = data.replace(style, tostyle)
- if newdata != data:
- open(file, "wb").write(newdata)
-
-def filemerge(repo, mynode, orig, fcd, fco, fca):
- """perform a 3-way merge in the working directory
-
- mynode = parent node before merge
- orig = original local filename before merge
- fco = other file context
- fca = ancestor file context
- fcd = local file context for current/destination file
- """
-
- def temp(prefix, ctx):
- pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
- (fd, name) = tempfile.mkstemp(prefix=pre)
- data = repo.wwritedata(ctx.path(), ctx.data())
- f = os.fdopen(fd, "wb")
- f.write(data)
- f.close()
- return name
-
- def isbin(ctx):
- try:
- return util.binary(ctx.data())
- except IOError:
- return False
-
- if not fco.cmp(fcd.data()): # files identical?
- return None
-
- ui = repo.ui
- fd = fcd.path()
- binary = isbin(fcd) or isbin(fco) or isbin(fca)
- symlink = 'l' in fcd.flags() + fco.flags()
- tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
- ui.debug(_("picked tool '%s' for %s (binary %s symlink %s)\n") %
- (tool, fd, binary, symlink))
-
- if not tool or tool == 'internal:prompt':
- tool = "internal:local"
- if ui.promptchoice(_(" no tool found to merge %s\n"
- "keep (l)ocal or take (o)ther?") % fd,
- (_("&Local"), _("&Other")), 0):
- tool = "internal:other"
- if tool == "internal:local":
- return 0
- if tool == "internal:other":
- repo.wwrite(fd, fco.data(), fco.flags())
- return 0
- if tool == "internal:fail":
- return 1
-
- # do the actual merge
- a = repo.wjoin(fd)
- b = temp("base", fca)
- c = temp("other", fco)
- out = ""
- back = a + ".orig"
- util.copyfile(a, back)
-
- if orig != fco.path():
- ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
- else:
- ui.status(_("merging %s\n") % fd)
-
- ui.debug(_("my %s other %s ancestor %s\n") % (fcd, fco, fca))
-
- # do we attempt to simplemerge first?
- if _toolbool(ui, tool, "premerge", not (binary or symlink)):
- r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
- if not r:
- ui.debug(_(" premerge successful\n"))
- os.unlink(back)
- os.unlink(b)
- os.unlink(c)
- return 0
- util.copyfile(back, a) # restore from backup and try again
-
- env = dict(HG_FILE=fd,
- HG_MY_NODE=short(mynode),
- HG_OTHER_NODE=str(fco.changectx()),
- HG_MY_ISLINK='l' in fcd.flags(),
- HG_OTHER_ISLINK='l' in fco.flags(),
- HG_BASE_ISLINK='l' in fca.flags())
-
- if tool == "internal:merge":
- r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
- elif tool == 'internal:dump':
- a = repo.wjoin(fd)
- util.copyfile(a, a + ".local")
- repo.wwrite(fd + ".other", fco.data(), fco.flags())
- repo.wwrite(fd + ".base", fca.data(), fca.flags())
- return 1 # unresolved
- else:
- args = _toolstr(ui, tool, "args", '$local $base $other')
- if "$output" in args:
- out, a = a, back # read input from backup, write to original
- replace = dict(local=a, base=b, other=c, output=out)
- args = re.sub("\$(local|base|other|output)",
- lambda x: '"%s"' % replace[x.group()[1:]], args)
- r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env)
-
- if not r and _toolbool(ui, tool, "checkconflicts"):
- if re.match("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data()):
- r = 1
-
- if not r and _toolbool(ui, tool, "checkchanged"):
- if filecmp.cmp(repo.wjoin(fd), back):
- if ui.promptchoice(_(" output file %s appears unchanged\n"
- "was merge successful (yn)?") % fd,
- (_("&Yes"), _("&No")), 1):
- r = 1
-
- if _toolbool(ui, tool, "fixeol"):
- _matcheol(repo.wjoin(fd), back)
-
- if r:
- ui.warn(_("merging %s failed!\n") % fd)
- else:
- os.unlink(back)
-
- os.unlink(b)
- os.unlink(c)
- return r
diff --git a/sys/lib/python/mercurial/graphmod.py b/sys/lib/python/mercurial/graphmod.py
deleted file mode 100644
index e8f9573c0..000000000
--- a/sys/lib/python/mercurial/graphmod.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Revision graph generator for Mercurial
-#
-# Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
-# Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-"""supports walking the history as DAGs suitable for graphical output
-
-The most basic format we use is that of::
-
- (id, type, data, [parentids])
-
-The node and parent ids are arbitrary integers which identify a node in the
-context of the graph returned. Type is a constant specifying the node type.
-Data depends on type.
-"""
-
-from mercurial.node import nullrev
-
-CHANGESET = 'C'
-
-def revisions(repo, start, stop):
- """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
-
- This generator function walks through the revision history from revision
- start to revision stop (which must be less than or equal to start). It
- returns a tuple for each node. The node and parent ids are arbitrary
- integers which identify a node in the context of the graph returned.
- """
- cur = start
- while cur >= stop:
- ctx = repo[cur]
- parents = [p.rev() for p in ctx.parents() if p.rev() != nullrev]
- yield (cur, CHANGESET, ctx, sorted(parents))
- cur -= 1
-
-def filerevs(repo, path, start, stop):
- """file cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
-
- This generator function walks through the revision history of a single
- file from revision start down to revision stop.
- """
- filerev = len(repo.file(path)) - 1
- while filerev >= 0:
- fctx = repo.filectx(path, fileid=filerev)
- parents = [f.linkrev() for f in fctx.parents() if f.path() == path]
- rev = fctx.rev()
- if rev <= start:
- yield (rev, CHANGESET, fctx, sorted(parents))
- if rev <= stop:
- break
- filerev -= 1
-
-def nodes(repo, nodes):
- """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
-
- This generator function walks the given nodes. It only returns parents
- that are in nodes, too.
- """
- include = set(nodes)
- for node in nodes:
- ctx = repo[node]
- parents = [p.rev() for p in ctx.parents() if p.node() in include]
- yield (ctx.rev(), CHANGESET, ctx, sorted(parents))
-
-def colored(dag):
- """annotates a DAG with colored edge information
-
- For each DAG node this function emits tuples::
-
- (id, type, data, (col, color), [(col, nextcol, color)])
-
- with the following new elements:
-
- - Tuple (col, color) with column and color index for the current node
- - A list of tuples indicating the edges between the current node and its
- parents.
- """
- seen = []
- colors = {}
- newcolor = 1
- for (cur, type, data, parents) in dag:
-
- # Compute seen and next
- if cur not in seen:
- seen.append(cur) # new head
- colors[cur] = newcolor
- newcolor += 1
-
- col = seen.index(cur)
- color = colors.pop(cur)
- next = seen[:]
-
- # Add parents to next
- addparents = [p for p in parents if p not in next]
- next[col:col + 1] = addparents
-
- # Set colors for the parents
- for i, p in enumerate(addparents):
- if not i:
- colors[p] = color
- else:
- colors[p] = newcolor
- newcolor += 1
-
- # Add edges to the graph
- edges = []
- for ecol, eid in enumerate(seen):
- if eid in next:
- edges.append((ecol, next.index(eid), colors[eid]))
- elif eid == cur:
- for p in parents:
- edges.append((ecol, next.index(p), colors[p]))
-
- # Yield and move on
- yield (cur, type, data, (col, color), edges)
- seen = next
diff --git a/sys/lib/python/mercurial/hbisect.py b/sys/lib/python/mercurial/hbisect.py
deleted file mode 100644
index 5f6389a20..000000000
--- a/sys/lib/python/mercurial/hbisect.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# changelog bisection for mercurial
-#
-# Copyright 2007 Matt Mackall
-# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
-#
-# Inspired by git bisect, extension skeleton taken from mq.py.
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os
-from i18n import _
-from node import short, hex
-import util
-
-def bisect(changelog, state):
- """find the next node (if any) for testing during a bisect search.
- returns a (nodes, number, good) tuple.
-
- 'nodes' is the final result of the bisect if 'number' is 0.
- Otherwise 'number' indicates the remaining possible candidates for
- the search and 'nodes' contains the next bisect target.
- 'good' is True if bisect is searching for a first good changeset, False
- if searching for a first bad one.
- """
-
- clparents = changelog.parentrevs
- skip = set([changelog.rev(n) for n in state['skip']])
-
- def buildancestors(bad, good):
- # only the earliest bad revision matters
- badrev = min([changelog.rev(n) for n in bad])
- goodrevs = [changelog.rev(n) for n in good]
- # build ancestors array
- ancestors = [[]] * (len(changelog) + 1) # an extra for [-1]
-
- # clear good revs from array
- for node in goodrevs:
- ancestors[node] = None
- for rev in xrange(len(changelog), -1, -1):
- if ancestors[rev] is None:
- for prev in clparents(rev):
- ancestors[prev] = None
-
- if ancestors[badrev] is None:
- return badrev, None
- return badrev, ancestors
-
- good = 0
- badrev, ancestors = buildancestors(state['bad'], state['good'])
- if not ancestors: # looking for bad to good transition?
- good = 1
- badrev, ancestors = buildancestors(state['good'], state['bad'])
- bad = changelog.node(badrev)
- if not ancestors: # now we're confused
- raise util.Abort(_("Inconsistent state, %s:%s is good and bad")
- % (badrev, short(bad)))
-
- # build children dict
- children = {}
- visit = [badrev]
- candidates = []
- while visit:
- rev = visit.pop(0)
- if ancestors[rev] == []:
- candidates.append(rev)
- for prev in clparents(rev):
- if prev != -1:
- if prev in children:
- children[prev].append(rev)
- else:
- children[prev] = [rev]
- visit.append(prev)
-
- candidates.sort()
- # have we narrowed it down to one entry?
- # or have all other possible candidates besides 'bad' have been skipped?
- tot = len(candidates)
- unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
- if tot == 1 or not unskipped:
- return ([changelog.node(rev) for rev in candidates], 0, good)
- perfect = tot // 2
-
- # find the best node to test
- best_rev = None
- best_len = -1
- poison = set()
- for rev in candidates:
- if rev in poison:
- # poison children
- poison.update(children.get(rev, []))
- continue
-
- a = ancestors[rev] or [rev]
- ancestors[rev] = None
-
- x = len(a) # number of ancestors
- y = tot - x # number of non-ancestors
- value = min(x, y) # how good is this test?
- if value > best_len and rev not in skip:
- best_len = value
- best_rev = rev
- if value == perfect: # found a perfect candidate? quit early
- break
-
- if y < perfect and rev not in skip: # all downhill from here?
- # poison children
- poison.update(children.get(rev, []))
- continue
-
- for c in children.get(rev, []):
- if ancestors[c]:
- ancestors[c] = list(set(ancestors[c] + a))
- else:
- ancestors[c] = a + [c]
-
- assert best_rev is not None
- best_node = changelog.node(best_rev)
-
- return ([best_node], tot, good)
-
-
-def load_state(repo):
- state = {'good': [], 'bad': [], 'skip': []}
- if os.path.exists(repo.join("bisect.state")):
- for l in repo.opener("bisect.state"):
- kind, node = l[:-1].split()
- node = repo.lookup(node)
- if kind not in state:
- raise util.Abort(_("unknown bisect kind %s") % kind)
- state[kind].append(node)
- return state
-
-
-def save_state(repo, state):
- f = repo.opener("bisect.state", "w", atomictemp=True)
- wlock = repo.wlock()
- try:
- for kind in state:
- for node in state[kind]:
- f.write("%s %s\n" % (kind, hex(node)))
- f.rename()
- finally:
- wlock.release()
-
diff --git a/sys/lib/python/mercurial/help.py b/sys/lib/python/mercurial/help.py
deleted file mode 100644
index a553ac15e..000000000
--- a/sys/lib/python/mercurial/help.py
+++ /dev/null
@@ -1,527 +0,0 @@
-# help.py - help data for mercurial
-#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import extensions, util
-
-
-def moduledoc(file):
- '''return the top-level python documentation for the given file
-
- Loosely inspired by pydoc.source_synopsis(), but rewritten to handle \'''
- as well as """ and to return the whole text instead of just the synopsis'''
- result = []
-
- line = file.readline()
- while line[:1] == '#' or not line.strip():
- line = file.readline()
- if not line: break
-
- start = line[:3]
- if start == '"""' or start == "'''":
- line = line[3:]
- while line:
- if line.rstrip().endswith(start):
- line = line.split(start)[0]
- if line:
- result.append(line)
- break
- elif not line:
- return None # unmatched delimiter
- result.append(line)
- line = file.readline()
- else:
- return None
-
- return ''.join(result)
-
-def listexts(header, exts, maxlength):
- '''return a text listing of the given extensions'''
- if not exts:
- return ''
- result = '\n%s\n\n' % header
- for name, desc in sorted(exts.iteritems()):
- result += ' %-*s %s\n' % (maxlength + 2, ':%s:' % name, desc)
- return result
-
-def extshelp():
- doc = _(r'''
- Mercurial has the ability to add new features through the use of
- extensions. Extensions may add new commands, add options to
- existing commands, change the default behavior of commands, or
- implement hooks.
-
- Extensions are not loaded by default for a variety of reasons:
- they can increase startup overhead; they may be meant for advanced
- usage only; they may provide potentially dangerous abilities (such
- as letting you destroy or modify history); they might not be ready
- for prime time; or they may alter some usual behaviors of stock
- Mercurial. It is thus up to the user to activate extensions as
- needed.
-
- To enable the "foo" extension, either shipped with Mercurial or in
- the Python search path, create an entry for it in your hgrc, like
- this::
-
- [extensions]
- foo =
-
- You may also specify the full path to an extension::
-
- [extensions]
- myfeature = ~/.hgext/myfeature.py
-
- To explicitly disable an extension enabled in an hgrc of broader
- scope, prepend its path with !::
-
- [extensions]
- # disabling extension bar residing in /path/to/extension/bar.py
- hgext.bar = !/path/to/extension/bar.py
- # ditto, but no path was supplied for extension baz
- hgext.baz = !
- ''')
-
- exts, maxlength = extensions.enabled()
- doc += listexts(_('enabled extensions:'), exts, maxlength)
-
- exts, maxlength = extensions.disabled()
- doc += listexts(_('disabled extensions:'), exts, maxlength)
-
- return doc
-
-helptable = (
- (["dates"], _("Date Formats"),
- _(r'''
- Some commands allow the user to specify a date, e.g.:
-
- - backout, commit, import, tag: Specify the commit date.
- - log, revert, update: Select revision(s) by date.
-
- Many date formats are valid. Here are some examples::
-
- "Wed Dec 6 13:18:29 2006" (local timezone assumed)
- "Dec 6 13:18 -0600" (year assumed, time offset provided)
- "Dec 6 13:18 UTC" (UTC and GMT are aliases for +0000)
- "Dec 6" (midnight)
- "13:18" (today assumed)
- "3:39" (3:39AM assumed)
- "3:39pm" (15:39)
- "2006-12-06 13:18:29" (ISO 8601 format)
- "2006-12-6 13:18"
- "2006-12-6"
- "12-6"
- "12/6"
- "12/6/6" (Dec 6 2006)
-
- Lastly, there is Mercurial's internal format::
-
- "1165432709 0" (Wed Dec 6 13:18:29 2006 UTC)
-
- This is the internal representation format for dates. unixtime is
- the number of seconds since the epoch (1970-01-01 00:00 UTC).
- offset is the offset of the local timezone, in seconds west of UTC
- (negative if the timezone is east of UTC).
-
- The log command also accepts date ranges::
-
- "<{datetime}" - at or before a given date/time
- ">{datetime}" - on or after a given date/time
- "{datetime} to {datetime}" - a date range, inclusive
- "-{days}" - within a given number of days of today
- ''')),
-
- (["patterns"], _("File Name Patterns"),
- _(r'''
- Mercurial accepts several notations for identifying one or more
- files at a time.
-
- By default, Mercurial treats filenames as shell-style extended
- glob patterns.
-
- Alternate pattern notations must be specified explicitly.
-
- To use a plain path name without any pattern matching, start it
- with "path:". These path names must completely match starting at
- the current repository root.
-
- To use an extended glob, start a name with "glob:". Globs are
- rooted at the current directory; a glob such as "``*.c``" will
- only match files in the current directory ending with ".c".
-
- The supported glob syntax extensions are "``**``" to match any
- string across path separators and "{a,b}" to mean "a or b".
-
- To use a Perl/Python regular expression, start a name with "re:".
- Regexp pattern matching is anchored at the root of the repository.
-
- Plain examples::
-
- path:foo/bar a name bar in a directory named foo in the root
- of the repository
- path:path:name a file or directory named "path:name"
-
- Glob examples::
-
- glob:*.c any name ending in ".c" in the current directory
- *.c any name ending in ".c" in the current directory
- **.c any name ending in ".c" in any subdirectory of the
- current directory including itself.
- foo/*.c any name ending in ".c" in the directory foo
- foo/**.c any name ending in ".c" in any subdirectory of foo
- including itself.
-
- Regexp examples::
-
- re:.*\.c$ any name ending in ".c", anywhere in the repository
-
- ''')),
-
- (['environment', 'env'], _('Environment Variables'),
- _(r'''
-HG
- Path to the 'hg' executable, automatically passed when running
- hooks, extensions or external tools. If unset or empty, this is
- the hg executable's name if it's frozen, or an executable named
- 'hg' (with %PATHEXT% [defaulting to COM/EXE/BAT/CMD] extensions on
- Windows) is searched.
-
-HGEDITOR
- This is the name of the editor to run when committing. See EDITOR.
-
- (deprecated, use .hgrc)
-
-HGENCODING
- This overrides the default locale setting detected by Mercurial.
- This setting is used to convert data including usernames,
- changeset descriptions, tag names, and branches. This setting can
- be overridden with the --encoding command-line option.
-
-HGENCODINGMODE
- This sets Mercurial's behavior for handling unknown characters
- while transcoding user input. The default is "strict", which
- causes Mercurial to abort if it can't map a character. Other
- settings include "replace", which replaces unknown characters, and
- "ignore", which drops them. This setting can be overridden with
- the --encodingmode command-line option.
-
-HGMERGE
- An executable to use for resolving merge conflicts. The program
- will be executed with three arguments: local file, remote file,
- ancestor file.
-
- (deprecated, use .hgrc)
-
-HGRCPATH
- A list of files or directories to search for hgrc files. Item
- separator is ":" on Unix, ";" on Windows. If HGRCPATH is not set,
- platform default search path is used. If empty, only the .hg/hgrc
- from the current repository is read.
-
- For each element in HGRCPATH:
-
- - if it's a directory, all files ending with .rc are added
- - otherwise, the file itself will be added
-
-HGUSER
- This is the string used as the author of a commit. If not set,
- available values will be considered in this order:
-
- - HGUSER (deprecated)
- - hgrc files from the HGRCPATH
- - EMAIL
- - interactive prompt
- - LOGNAME (with '@hostname' appended)
-
- (deprecated, use .hgrc)
-
-EMAIL
- May be used as the author of a commit; see HGUSER.
-
-LOGNAME
- May be used as the author of a commit; see HGUSER.
-
-VISUAL
- This is the name of the editor to use when committing. See EDITOR.
-
-EDITOR
- Sometimes Mercurial needs to open a text file in an editor for a
- user to modify, for example when writing commit messages. The
- editor it uses is determined by looking at the environment
- variables HGEDITOR, VISUAL and EDITOR, in that order. The first
- non-empty one is chosen. If all of them are empty, the editor
- defaults to 'vi'.
-
-PYTHONPATH
- This is used by Python to find imported modules and may need to be
- set appropriately if this Mercurial is not installed system-wide.
- ''')),
-
- (['revs', 'revisions'], _('Specifying Single Revisions'),
- _(r'''
- Mercurial supports several ways to specify individual revisions.
-
- A plain integer is treated as a revision number. Negative integers
- are treated as sequential offsets from the tip, with -1 denoting
- the tip, -2 denoting the revision prior to the tip, and so forth.
-
- A 40-digit hexadecimal string is treated as a unique revision
- identifier.
-
- A hexadecimal string less than 40 characters long is treated as a
- unique revision identifier and is referred to as a short-form
- identifier. A short-form identifier is only valid if it is the
- prefix of exactly one full-length identifier.
-
- Any other string is treated as a tag or branch name. A tag name is
- a symbolic name associated with a revision identifier. A branch
- name denotes the tipmost revision of that branch. Tag and branch
- names must not contain the ":" character.
-
- The reserved name "tip" is a special tag that always identifies
- the most recent revision.
-
- The reserved name "null" indicates the null revision. This is the
- revision of an empty repository, and the parent of revision 0.
-
- The reserved name "." indicates the working directory parent. If
- no working directory is checked out, it is equivalent to null. If
- an uncommitted merge is in progress, "." is the revision of the
- first parent.
- ''')),
-
- (['mrevs', 'multirevs'], _('Specifying Multiple Revisions'),
- _(r'''
- When Mercurial accepts more than one revision, they may be
- specified individually, or provided as a topologically continuous
- range, separated by the ":" character.
-
- The syntax of range notation is [BEGIN]:[END], where BEGIN and END
- are revision identifiers. Both BEGIN and END are optional. If
- BEGIN is not specified, it defaults to revision number 0. If END
- is not specified, it defaults to the tip. The range ":" thus means
- "all revisions".
-
- If BEGIN is greater than END, revisions are treated in reverse
- order.
-
- A range acts as a closed interval. This means that a range of 3:5
- gives 3, 4 and 5. Similarly, a range of 9:6 gives 9, 8, 7, and 6.
- ''')),
-
- (['diffs'], _('Diff Formats'),
- _(r'''
- Mercurial's default format for showing changes between two
- versions of a file is compatible with the unified format of GNU
- diff, which can be used by GNU patch and many other standard
- tools.
-
- While this standard format is often enough, it does not encode the
- following information:
-
- - executable status and other permission bits
- - copy or rename information
- - changes in binary files
- - creation or deletion of empty files
-
- Mercurial also supports the extended diff format from the git VCS
- which addresses these limitations. The git diff format is not
- produced by default because a few widespread tools still do not
- understand this format.
-
- This means that when generating diffs from a Mercurial repository
- (e.g. with "hg export"), you should be careful about things like
- file copies and renames or other things mentioned above, because
- when applying a standard diff to a different repository, this
- extra information is lost. Mercurial's internal operations (like
- push and pull) are not affected by this, because they use an
- internal binary format for communicating changes.
-
- To make Mercurial produce the git extended diff format, use the
- --git option available for many commands, or set 'git = True' in
- the [diff] section of your hgrc. You do not need to set this
- option when importing diffs in this format or using them in the mq
- extension.
- ''')),
- (['templating', 'templates'], _('Template Usage'),
- _(r'''
- Mercurial allows you to customize output of commands through
- templates. You can either pass in a template from the command
- line, via the --template option, or select an existing
- template-style (--style).
-
- You can customize output for any "log-like" command: log,
- outgoing, incoming, tip, parents, heads and glog.
-
- Three styles are packaged with Mercurial: default (the style used
- when no explicit preference is passed), compact and changelog.
- Usage::
-
- $ hg log -r1 --style changelog
-
- A template is a piece of text, with markup to invoke variable
- expansion::
-
- $ hg log -r1 --template "{node}\n"
- b56ce7b07c52de7d5fd79fb89701ea538af65746
-
- Strings in curly braces are called keywords. The availability of
- keywords depends on the exact context of the templater. These
- keywords are usually available for templating a log-like command:
-
- :author: String. The unmodified author of the changeset.
- :branches: String. The name of the branch on which the changeset
- was committed. Will be empty if the branch name was
- default.
- :date: Date information. The date when the changeset was
- committed.
- :desc: String. The text of the changeset description.
- :diffstat: String. Statistics of changes with the following
- format: "modified files: +added/-removed lines"
- :files: List of strings. All files modified, added, or removed
- by this changeset.
- :file_adds: List of strings. Files added by this changeset.
- :file_mods: List of strings. Files modified by this changeset.
- :file_dels: List of strings. Files removed by this changeset.
- :node: String. The changeset identification hash, as a
- 40-character hexadecimal string.
- :parents: List of strings. The parents of the changeset.
- :rev: Integer. The repository-local changeset revision
- number.
- :tags: List of strings. Any tags associated with the
- changeset.
-
- The "date" keyword does not produce human-readable output. If you
- want to use a date in your output, you can use a filter to process
- it. Filters are functions which return a string based on the input
- variable. You can also use a chain of filters to get the desired
- output::
-
- $ hg tip --template "{date|isodate}\n"
- 2008-08-21 18:22 +0000
-
- List of filters:
-
- :addbreaks: Any text. Add an XHTML "<br />" tag before the end of
- every line except the last.
- :age: Date. Returns a human-readable date/time difference
- between the given date/time and the current
- date/time.
- :basename: Any text. Treats the text as a path, and returns the
- last component of the path after splitting by the
- path separator (ignoring trailing separators). For
- example, "foo/bar/baz" becomes "baz" and "foo/bar//"
- becomes "bar".
- :stripdir: Treat the text as path and strip a directory level,
- if possible. For example, "foo" and "foo/bar" becomes
- "foo".
- :date: Date. Returns a date in a Unix date format, including
- the timezone: "Mon Sep 04 15:13:13 2006 0700".
- :domain: Any text. Finds the first string that looks like an
- email address, and extracts just the domain
- component. Example: 'User <user@example.com>' becomes
- 'example.com'.
- :email: Any text. Extracts the first string that looks like
- an email address. Example: 'User <user@example.com>'
- becomes 'user@example.com'.
- :escape: Any text. Replaces the special XML/XHTML characters
- "&", "<" and ">" with XML entities.
- :fill68: Any text. Wraps the text to fit in 68 columns.
- :fill76: Any text. Wraps the text to fit in 76 columns.
- :firstline: Any text. Returns the first line of text.
- :nonempty: Any text. Returns '(none)' if the string is empty.
- :hgdate: Date. Returns the date as a pair of numbers:
- "1157407993 25200" (Unix timestamp, timezone offset).
- :isodate: Date. Returns the date in ISO 8601 format.
- :localdate: Date. Converts a date to local date.
- :obfuscate: Any text. Returns the input text rendered as a
- sequence of XML entities.
- :person: Any text. Returns the text before an email address.
- :rfc822date: Date. Returns a date using the same format used in
- email headers.
- :short: Changeset hash. Returns the short form of a changeset
- hash, i.e. a 12-byte hexadecimal string.
- :shortdate: Date. Returns a date like "2006-09-18".
- :strip: Any text. Strips all leading and trailing whitespace.
- :tabindent: Any text. Returns the text, with every line except
- the first starting with a tab character.
- :urlescape: Any text. Escapes all "special" characters. For
- example, "foo bar" becomes "foo%20bar".
- :user: Any text. Returns the user portion of an email
- address.
- ''')),
-
- (['urls'], _('URL Paths'),
- _(r'''
- Valid URLs are of the form::
-
- local/filesystem/path[#revision]
- file://local/filesystem/path[#revision]
- http://[user[:pass]@]host[:port]/[path][#revision]
- https://[user[:pass]@]host[:port]/[path][#revision]
- ssh://[user[:pass]@]host[:port]/[path][#revision]
-
- Paths in the local filesystem can either point to Mercurial
- repositories or to bundle files (as created by 'hg bundle' or 'hg
- incoming --bundle').
-
- An optional identifier after # indicates a particular branch, tag,
- or changeset to use from the remote repository. See also 'hg help
- revisions'.
-
- Some features, such as pushing to http:// and https:// URLs are
- only possible if the feature is explicitly enabled on the remote
- Mercurial server.
-
- Some notes about using SSH with Mercurial:
-
- - SSH requires an accessible shell account on the destination
- machine and a copy of hg in the remote path or specified with as
- remotecmd.
- - path is relative to the remote user's home directory by default.
- Use an extra slash at the start of a path to specify an absolute
- path::
-
- ssh://example.com//tmp/repository
-
- - Mercurial doesn't use its own compression via SSH; the right
- thing to do is to configure it in your ~/.ssh/config, e.g.::
-
- Host *.mylocalnetwork.example.com
- Compression no
- Host *
- Compression yes
-
- Alternatively specify "ssh -C" as your ssh command in your hgrc
- or with the --ssh command line option.
-
- These URLs can all be stored in your hgrc with path aliases under
- the [paths] section like so::
-
- [paths]
- alias1 = URL1
- alias2 = URL2
- ...
-
- You can then use the alias for any command that uses a URL (for
- example 'hg pull alias1' would pull from the 'alias1' path).
-
- Two path aliases are special because they are used as defaults
- when you do not provide the URL to a command:
-
- default:
- When you create a repository with hg clone, the clone command
- saves the location of the source repository as the new
- repository's 'default' path. This is then used when you omit
- path from push- and pull-like commands (including incoming and
- outgoing).
-
- default-push:
- The push command will look for a path named 'default-push', and
- prefer it over 'default' if both are defined.
- ''')),
- (["extensions"], _("Using additional features"), extshelp),
-)
diff --git a/sys/lib/python/mercurial/hg.py b/sys/lib/python/mercurial/hg.py
deleted file mode 100644
index 504bc1256..000000000
--- a/sys/lib/python/mercurial/hg.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# hg.py - repository classes for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-from lock import release
-import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo
-import lock, util, extensions, error
-import merge as _merge
-import verify as _verify
-import errno, os, shutil
-
-def _local(path):
- return (os.path.isfile(util.drop_scheme('file', path)) and
- bundlerepo or localrepo)
-
-def parseurl(url, revs=[]):
- '''parse url#branch, returning url, branch + revs'''
-
- if '#' not in url:
- return url, (revs or None), revs and revs[-1] or None
-
- url, branch = url.split('#', 1)
- checkout = revs and revs[-1] or branch
- return url, (revs or []) + [branch], checkout
-
-schemes = {
- 'bundle': bundlerepo,
- 'file': _local,
- 'http': httprepo,
- 'https': httprepo,
- 'ssh': sshrepo,
- 'static-http': statichttprepo,
-}
-
-def _lookup(path):
- scheme = 'file'
- if path:
- c = path.find(':')
- if c > 0:
- scheme = path[:c]
- thing = schemes.get(scheme) or schemes['file']
- try:
- return thing(path)
- except TypeError:
- return thing
-
-def islocal(repo):
- '''return true if repo or path is local'''
- if isinstance(repo, str):
- try:
- return _lookup(repo).islocal(repo)
- except AttributeError:
- return False
- return repo.local()
-
-def repository(ui, path='', create=False):
- """return a repository object for the specified path"""
- repo = _lookup(path).instance(ui, path, create)
- ui = getattr(repo, "ui", ui)
- for name, module in extensions.extensions():
- hook = getattr(module, 'reposetup', None)
- if hook:
- hook(ui, repo)
- return repo
-
-def defaultdest(source):
- '''return default destination of clone if none is given'''
- return os.path.basename(os.path.normpath(source))
-
-def localpath(path):
- if path.startswith('file://localhost/'):
- return path[16:]
- if path.startswith('file://'):
- return path[7:]
- if path.startswith('file:'):
- return path[5:]
- return path
-
-def share(ui, source, dest=None, update=True):
- '''create a shared repository'''
-
- if not islocal(source):
- raise util.Abort(_('can only share local repositories'))
-
- if not dest:
- dest = os.path.basename(source)
- else:
- dest = ui.expandpath(dest)
-
- if isinstance(source, str):
- origsource = ui.expandpath(source)
- source, rev, checkout = parseurl(origsource, '')
- srcrepo = repository(ui, source)
- else:
- srcrepo = source
- origsource = source = srcrepo.url()
- checkout = None
-
- sharedpath = srcrepo.sharedpath # if our source is already sharing
-
- root = os.path.realpath(dest)
- roothg = os.path.join(root, '.hg')
-
- if os.path.exists(roothg):
- raise util.Abort(_('destination already exists'))
-
- if not os.path.isdir(root):
- os.mkdir(root)
- os.mkdir(roothg)
-
- requirements = ''
- try:
- requirements = srcrepo.opener('requires').read()
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
-
- requirements += 'shared\n'
- file(os.path.join(roothg, 'requires'), 'w').write(requirements)
- file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath)
-
- default = srcrepo.ui.config('paths', 'default')
- if default:
- f = file(os.path.join(roothg, 'hgrc'), 'w')
- f.write('[paths]\ndefault = %s\n' % default)
- f.close()
-
- r = repository(ui, root)
-
- if update:
- r.ui.status(_("updating working directory\n"))
- if update is not True:
- checkout = update
- for test in (checkout, 'default', 'tip'):
- try:
- uprev = r.lookup(test)
- break
- except LookupError:
- continue
- _update(r, uprev)
-
-def clone(ui, source, dest=None, pull=False, rev=None, update=True,
- stream=False):
- """Make a copy of an existing repository.
-
- Create a copy of an existing repository in a new directory. The
- source and destination are URLs, as passed to the repository
- function. Returns a pair of repository objects, the source and
- newly created destination.
-
- The location of the source is added to the new repository's
- .hg/hgrc file, as the default to be used for future pulls and
- pushes.
-
- If an exception is raised, the partly cloned/updated destination
- repository will be deleted.
-
- Arguments:
-
- source: repository object or URL
-
- dest: URL of destination repository to create (defaults to base
- name of source repository)
-
- pull: always pull from source repository, even in local case
-
- stream: stream raw data uncompressed from repository (fast over
- LAN, slow over WAN)
-
- rev: revision to clone up to (implies pull=True)
-
- update: update working directory after clone completes, if
- destination is local repository (True means update to default rev,
- anything else is treated as a revision)
- """
-
- if isinstance(source, str):
- origsource = ui.expandpath(source)
- source, rev, checkout = parseurl(origsource, rev)
- src_repo = repository(ui, source)
- else:
- src_repo = source
- origsource = source = src_repo.url()
- checkout = rev and rev[-1] or None
-
- if dest is None:
- dest = defaultdest(source)
- ui.status(_("destination directory: %s\n") % dest)
- else:
- dest = ui.expandpath(dest)
-
- dest = localpath(dest)
- source = localpath(source)
-
- if os.path.exists(dest):
- if not os.path.isdir(dest):
- raise util.Abort(_("destination '%s' already exists") % dest)
- elif os.listdir(dest):
- raise util.Abort(_("destination '%s' is not empty") % dest)
-
- class DirCleanup(object):
- def __init__(self, dir_):
- self.rmtree = shutil.rmtree
- self.dir_ = dir_
- def close(self):
- self.dir_ = None
- def cleanup(self):
- if self.dir_:
- self.rmtree(self.dir_, True)
-
- src_lock = dest_lock = dir_cleanup = None
- try:
- if islocal(dest):
- dir_cleanup = DirCleanup(dest)
-
- abspath = origsource
- copy = False
- if src_repo.cancopy() and islocal(dest):
- abspath = os.path.abspath(util.drop_scheme('file', origsource))
- copy = not pull and not rev
-
- if copy:
- try:
- # we use a lock here because if we race with commit, we
- # can end up with extra data in the cloned revlogs that's
- # not pointed to by changesets, thus causing verify to
- # fail
- src_lock = src_repo.lock(wait=False)
- except error.LockError:
- copy = False
-
- if copy:
- src_repo.hook('preoutgoing', throw=True, source='clone')
- hgdir = os.path.realpath(os.path.join(dest, ".hg"))
- if not os.path.exists(dest):
- os.mkdir(dest)
- else:
- # only clean up directories we create ourselves
- dir_cleanup.dir_ = hgdir
- try:
- dest_path = hgdir
- os.mkdir(dest_path)
- except OSError, inst:
- if inst.errno == errno.EEXIST:
- dir_cleanup.close()
- raise util.Abort(_("destination '%s' already exists")
- % dest)
- raise
-
- for f in src_repo.store.copylist():
- src = os.path.join(src_repo.path, f)
- dst = os.path.join(dest_path, f)
- dstbase = os.path.dirname(dst)
- if dstbase and not os.path.exists(dstbase):
- os.mkdir(dstbase)
- if os.path.exists(src):
- if dst.endswith('data'):
- # lock to avoid premature writing to the target
- dest_lock = lock.lock(os.path.join(dstbase, "lock"))
- util.copyfiles(src, dst)
-
- # we need to re-init the repo after manually copying the data
- # into it
- dest_repo = repository(ui, dest)
- src_repo.hook('outgoing', source='clone', node='0'*40)
- else:
- try:
- dest_repo = repository(ui, dest, create=True)
- except OSError, inst:
- if inst.errno == errno.EEXIST:
- dir_cleanup.close()
- raise util.Abort(_("destination '%s' already exists")
- % dest)
- raise
-
- revs = None
- if rev:
- if 'lookup' not in src_repo.capabilities:
- raise util.Abort(_("src repository does not support "
- "revision lookup and so doesn't "
- "support clone by revision"))
- revs = [src_repo.lookup(r) for r in rev]
- checkout = revs[0]
- if dest_repo.local():
- dest_repo.clone(src_repo, heads=revs, stream=stream)
- elif src_repo.local():
- src_repo.push(dest_repo, revs=revs)
- else:
- raise util.Abort(_("clone from remote to remote not supported"))
-
- if dir_cleanup:
- dir_cleanup.close()
-
- if dest_repo.local():
- fp = dest_repo.opener("hgrc", "w", text=True)
- fp.write("[paths]\n")
- fp.write("default = %s\n" % abspath)
- fp.close()
-
- dest_repo.ui.setconfig('paths', 'default', abspath)
-
- if update:
- dest_repo.ui.status(_("updating working directory\n"))
- if update is not True:
- checkout = update
- for test in (checkout, 'default', 'tip'):
- try:
- uprev = dest_repo.lookup(test)
- break
- except:
- continue
- _update(dest_repo, uprev)
-
- return src_repo, dest_repo
- finally:
- release(src_lock, dest_lock)
- if dir_cleanup is not None:
- dir_cleanup.cleanup()
-
-def _showstats(repo, stats):
- stats = ((stats[0], _("updated")),
- (stats[1], _("merged")),
- (stats[2], _("removed")),
- (stats[3], _("unresolved")))
- note = ", ".join([_("%d files %s") % s for s in stats])
- repo.ui.status("%s\n" % note)
-
-def update(repo, node):
- """update the working directory to node, merging linear changes"""
- stats = _merge.update(repo, node, False, False, None)
- _showstats(repo, stats)
- if stats[3]:
- repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
- return stats[3] > 0
-
-# naming conflict in clone()
-_update = update
-
-def clean(repo, node, show_stats=True):
- """forcibly switch the working directory to node, clobbering changes"""
- stats = _merge.update(repo, node, False, True, None)
- if show_stats: _showstats(repo, stats)
- return stats[3] > 0
-
-def merge(repo, node, force=None, remind=True):
- """branch merge with node, resolving changes"""
- stats = _merge.update(repo, node, True, force, False)
- _showstats(repo, stats)
- if stats[3]:
- repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
- "or 'hg up --clean' to abandon\n"))
- elif remind:
- repo.ui.status(_("(branch merge, don't forget to commit)\n"))
- return stats[3] > 0
-
-def revert(repo, node, choose):
- """revert changes to revision in node without updating dirstate"""
- return _merge.update(repo, node, False, True, choose)[3] > 0
-
-def verify(repo):
- """verify the consistency of a repository"""
- return _verify.verify(repo)
diff --git a/sys/lib/python/mercurial/hgweb/__init__.py b/sys/lib/python/mercurial/hgweb/__init__.py
deleted file mode 100644
index 3cb3e95e2..000000000
--- a/sys/lib/python/mercurial/hgweb/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# hgweb/__init__.py - web interface to a mercurial repository
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import hgweb_mod, hgwebdir_mod
-
-def hgweb(*args, **kwargs):
- return hgweb_mod.hgweb(*args, **kwargs)
-
-def hgwebdir(*args, **kwargs):
- return hgwebdir_mod.hgwebdir(*args, **kwargs)
-
diff --git a/sys/lib/python/mercurial/hgweb/common.py b/sys/lib/python/mercurial/hgweb/common.py
deleted file mode 100644
index effa3d1f4..000000000
--- a/sys/lib/python/mercurial/hgweb/common.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import errno, mimetypes, os
-
-HTTP_OK = 200
-HTTP_BAD_REQUEST = 400
-HTTP_UNAUTHORIZED = 401
-HTTP_FORBIDDEN = 403
-HTTP_NOT_FOUND = 404
-HTTP_METHOD_NOT_ALLOWED = 405
-HTTP_SERVER_ERROR = 500
-
-class ErrorResponse(Exception):
- def __init__(self, code, message=None, headers=[]):
- Exception.__init__(self)
- self.code = code
- self.headers = headers
- if message is not None:
- self.message = message
- else:
- self.message = _statusmessage(code)
-
-def _statusmessage(code):
- from BaseHTTPServer import BaseHTTPRequestHandler
- responses = BaseHTTPRequestHandler.responses
- return responses.get(code, ('Error', 'Unknown error'))[0]
-
-def statusmessage(code):
- return '%d %s' % (code, _statusmessage(code))
-
-def get_mtime(repo_path):
- store_path = os.path.join(repo_path, ".hg")
- if not os.path.isdir(os.path.join(store_path, "data")):
- store_path = os.path.join(store_path, "store")
- cl_path = os.path.join(store_path, "00changelog.i")
- if os.path.exists(cl_path):
- return os.stat(cl_path).st_mtime
- else:
- return os.stat(store_path).st_mtime
-
-def staticfile(directory, fname, req):
- """return a file inside directory with guessed Content-Type header
-
- fname always uses '/' as directory separator and isn't allowed to
- contain unusual path components.
- Content-Type is guessed using the mimetypes module.
- Return an empty string if fname is illegal or file not found.
-
- """
- parts = fname.split('/')
- for part in parts:
- if (part in ('', os.curdir, os.pardir) or
- os.sep in part or os.altsep is not None and os.altsep in part):
- return ""
- fpath = os.path.join(*parts)
- if isinstance(directory, str):
- directory = [directory]
- for d in directory:
- path = os.path.join(d, fpath)
- if os.path.exists(path):
- break
- try:
- os.stat(path)
- ct = mimetypes.guess_type(path)[0] or "text/plain"
- req.respond(HTTP_OK, ct, length = os.path.getsize(path))
- return open(path, 'rb').read()
- except TypeError:
- raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
- except OSError, err:
- if err.errno == errno.ENOENT:
- raise ErrorResponse(HTTP_NOT_FOUND)
- else:
- raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
-
-def paritygen(stripecount, offset=0):
- """count parity of horizontal stripes for easier reading"""
- if stripecount and offset:
- # account for offset, e.g. due to building the list in reverse
- count = (stripecount + offset) % stripecount
- parity = (stripecount + offset) / stripecount & 1
- else:
- count = 0
- parity = 0
- while True:
- yield parity
- count += 1
- if stripecount and count >= stripecount:
- parity = 1 - parity
- count = 0
-
-def get_contact(config):
- """Return repo contact information or empty string.
-
- web.contact is the primary source, but if that is not set, try
- ui.username or $EMAIL as a fallback to display something useful.
- """
- return (config("web", "contact") or
- config("ui", "username") or
- os.environ.get("EMAIL") or "")
diff --git a/sys/lib/python/mercurial/hgweb/hgweb_mod.py b/sys/lib/python/mercurial/hgweb/hgweb_mod.py
deleted file mode 100644
index 31638106f..000000000
--- a/sys/lib/python/mercurial/hgweb/hgweb_mod.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# hgweb/hgweb_mod.py - Web interface for a repository.
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os
-from mercurial import ui, hg, hook, error, encoding, templater
-from common import get_mtime, ErrorResponse
-from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
-from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
-from request import wsgirequest
-import webcommands, protocol, webutil
-
-perms = {
- 'changegroup': 'pull',
- 'changegroupsubset': 'pull',
- 'unbundle': 'push',
- 'stream_out': 'pull',
-}
-
-class hgweb(object):
- def __init__(self, repo, name=None):
- if isinstance(repo, str):
- u = ui.ui()
- u.setconfig('ui', 'report_untrusted', 'off')
- u.setconfig('ui', 'interactive', 'off')
- self.repo = hg.repository(u, repo)
- else:
- self.repo = repo
-
- hook.redirect(True)
- self.mtime = -1
- self.reponame = name
- self.archives = 'zip', 'gz', 'bz2'
- self.stripecount = 1
- # a repo owner may set web.templates in .hg/hgrc to get any file
- # readable by the user running the CGI script
- self.templatepath = self.config('web', 'templates')
-
- # The CGI scripts are often run by a user different from the repo owner.
- # Trust the settings from the .hg/hgrc files by default.
- def config(self, section, name, default=None, untrusted=True):
- return self.repo.ui.config(section, name, default,
- untrusted=untrusted)
-
- def configbool(self, section, name, default=False, untrusted=True):
- return self.repo.ui.configbool(section, name, default,
- untrusted=untrusted)
-
- def configlist(self, section, name, default=None, untrusted=True):
- return self.repo.ui.configlist(section, name, default,
- untrusted=untrusted)
-
- def refresh(self):
- mtime = get_mtime(self.repo.root)
- if mtime != self.mtime:
- self.mtime = mtime
- self.repo = hg.repository(self.repo.ui, self.repo.root)
- self.maxchanges = int(self.config("web", "maxchanges", 10))
- self.stripecount = int(self.config("web", "stripes", 1))
- self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
- self.maxfiles = int(self.config("web", "maxfiles", 10))
- self.allowpull = self.configbool("web", "allowpull", True)
- encoding.encoding = self.config("web", "encoding",
- encoding.encoding)
-
- def run(self):
- if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
- raise RuntimeError("This function is only intended to be "
- "called while running as a CGI script.")
- import mercurial.hgweb.wsgicgi as wsgicgi
- wsgicgi.launch(self)
-
- def __call__(self, env, respond):
- req = wsgirequest(env, respond)
- return self.run_wsgi(req)
-
- def run_wsgi(self, req):
-
- self.refresh()
-
- # work with CGI variables to create coherent structure
- # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
-
- req.url = req.env['SCRIPT_NAME']
- if not req.url.endswith('/'):
- req.url += '/'
- if 'REPO_NAME' in req.env:
- req.url += req.env['REPO_NAME'] + '/'
-
- if 'PATH_INFO' in req.env:
- parts = req.env['PATH_INFO'].strip('/').split('/')
- repo_parts = req.env.get('REPO_NAME', '').split('/')
- if parts[:len(repo_parts)] == repo_parts:
- parts = parts[len(repo_parts):]
- query = '/'.join(parts)
- else:
- query = req.env['QUERY_STRING'].split('&', 1)[0]
- query = query.split(';', 1)[0]
-
- # process this if it's a protocol request
- # protocol bits don't need to create any URLs
- # and the clients always use the old URL structure
-
- cmd = req.form.get('cmd', [''])[0]
- if cmd and cmd in protocol.__all__:
- if query:
- raise ErrorResponse(HTTP_NOT_FOUND)
- try:
- if cmd in perms:
- try:
- self.check_perm(req, perms[cmd])
- except ErrorResponse, inst:
- if cmd == 'unbundle':
- req.drain()
- raise
- method = getattr(protocol, cmd)
- return method(self.repo, req)
- except ErrorResponse, inst:
- req.respond(inst, protocol.HGTYPE)
- if not inst.message:
- return []
- return '0\n%s\n' % inst.message,
-
- # translate user-visible url structure to internal structure
-
- args = query.split('/', 2)
- if 'cmd' not in req.form and args and args[0]:
-
- cmd = args.pop(0)
- style = cmd.rfind('-')
- if style != -1:
- req.form['style'] = [cmd[:style]]
- cmd = cmd[style+1:]
-
- # avoid accepting e.g. style parameter as command
- if hasattr(webcommands, cmd):
- req.form['cmd'] = [cmd]
- else:
- cmd = ''
-
- if cmd == 'static':
- req.form['file'] = ['/'.join(args)]
- else:
- if args and args[0]:
- node = args.pop(0)
- req.form['node'] = [node]
- if args:
- req.form['file'] = args
-
- if cmd == 'archive':
- fn = req.form['node'][0]
- for type_, spec in self.archive_specs.iteritems():
- ext = spec[2]
- if fn.endswith(ext):
- req.form['node'] = [fn[:-len(ext)]]
- req.form['type'] = [type_]
-
- # process the web interface request
-
- try:
- tmpl = self.templater(req)
- ctype = tmpl('mimetype', encoding=encoding.encoding)
- ctype = templater.stringify(ctype)
-
- # check read permissions non-static content
- if cmd != 'static':
- self.check_perm(req, None)
-
- if cmd == '':
- req.form['cmd'] = [tmpl.cache['default']]
- cmd = req.form['cmd'][0]
-
- if cmd not in webcommands.__all__:
- msg = 'no such method: %s' % cmd
- raise ErrorResponse(HTTP_BAD_REQUEST, msg)
- elif cmd == 'file' and 'raw' in req.form.get('style', []):
- self.ctype = ctype
- content = webcommands.rawfile(self, req, tmpl)
- else:
- content = getattr(webcommands, cmd)(self, req, tmpl)
- req.respond(HTTP_OK, ctype)
-
- return content
-
- except error.LookupError, err:
- req.respond(HTTP_NOT_FOUND, ctype)
- msg = str(err)
- if 'manifest' not in msg:
- msg = 'revision not found: %s' % err.name
- return tmpl('error', error=msg)
- except (error.RepoError, error.RevlogError), inst:
- req.respond(HTTP_SERVER_ERROR, ctype)
- return tmpl('error', error=str(inst))
- except ErrorResponse, inst:
- req.respond(inst, ctype)
- return tmpl('error', error=inst.message)
-
- def templater(self, req):
-
- # determine scheme, port and server name
- # this is needed to create absolute urls
-
- proto = req.env.get('wsgi.url_scheme')
- if proto == 'https':
- proto = 'https'
- default_port = "443"
- else:
- proto = 'http'
- default_port = "80"
-
- port = req.env["SERVER_PORT"]
- port = port != default_port and (":" + port) or ""
- urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
- staticurl = self.config("web", "staticurl") or req.url + 'static/'
- if not staticurl.endswith('/'):
- staticurl += '/'
-
- # some functions for the templater
-
- def header(**map):
- yield tmpl('header', encoding=encoding.encoding, **map)
-
- def footer(**map):
- yield tmpl("footer", **map)
-
- def motd(**map):
- yield self.config("web", "motd", "")
-
- # figure out which style to use
-
- vars = {}
- style = self.config("web", "style", "paper")
- if 'style' in req.form:
- style = req.form['style'][0]
- vars['style'] = style
-
- start = req.url[-1] == '?' and '&' or '?'
- sessionvars = webutil.sessionvars(vars, start)
- mapfile = templater.stylemap(style, self.templatepath)
-
- if not self.reponame:
- self.reponame = (self.config("web", "name")
- or req.env.get('REPO_NAME')
- or req.url.strip('/') or self.repo.root)
-
- # create the templater
-
- tmpl = templater.templater(mapfile,
- defaults={"url": req.url,
- "staticurl": staticurl,
- "urlbase": urlbase,
- "repo": self.reponame,
- "header": header,
- "footer": footer,
- "motd": motd,
- "sessionvars": sessionvars
- })
- return tmpl
-
- def archivelist(self, nodeid):
- allowed = self.configlist("web", "allow_archive")
- for i, spec in self.archive_specs.iteritems():
- if i in allowed or self.configbool("web", "allow" + i):
- yield {"type" : i, "extension" : spec[2], "node" : nodeid}
-
- archive_specs = {
- 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
- 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
- 'zip': ('application/zip', 'zip', '.zip', None),
- }
-
- def check_perm(self, req, op):
- '''Check permission for operation based on request data (including
- authentication info). Return if op allowed, else raise an ErrorResponse
- exception.'''
-
- user = req.env.get('REMOTE_USER')
-
- deny_read = self.configlist('web', 'deny_read')
- if deny_read and (not user or deny_read == ['*'] or user in deny_read):
- raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
-
- allow_read = self.configlist('web', 'allow_read')
- result = (not allow_read) or (allow_read == ['*'])
- if not (result or user in allow_read):
- raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
-
- if op == 'pull' and not self.allowpull:
- raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
- elif op == 'pull' or op is None: # op is None for interface requests
- return
-
- # enforce that you can only push using POST requests
- if req.env['REQUEST_METHOD'] != 'POST':
- msg = 'push requires POST request'
- raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
-
- # require ssl by default for pushing, auth info cannot be sniffed
- # and replayed
- scheme = req.env.get('wsgi.url_scheme')
- if self.configbool('web', 'push_ssl', True) and scheme != 'https':
- raise ErrorResponse(HTTP_OK, 'ssl required')
-
- deny = self.configlist('web', 'deny_push')
- if deny and (not user or deny == ['*'] or user in deny):
- raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
-
- allow = self.configlist('web', 'allow_push')
- result = allow and (allow == ['*'] or user in allow)
- if not result:
- raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
diff --git a/sys/lib/python/mercurial/hgweb/hgwebdir_mod.py b/sys/lib/python/mercurial/hgweb/hgwebdir_mod.py
deleted file mode 100644
index 64cc99899..000000000
--- a/sys/lib/python/mercurial/hgweb/hgwebdir_mod.py
+++ /dev/null
@@ -1,333 +0,0 @@
-# hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os, re, time
-from mercurial.i18n import _
-from mercurial import ui, hg, util, templater
-from mercurial import error, encoding
-from common import ErrorResponse, get_mtime, staticfile, paritygen,\
- get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
-from hgweb_mod import hgweb
-from request import wsgirequest
-import webutil
-
-def cleannames(items):
- return [(util.pconvert(name).strip('/'), path) for name, path in items]
-
-def findrepos(paths):
- repos = {}
- for prefix, root in cleannames(paths):
- roothead, roottail = os.path.split(root)
- # "foo = /bar/*" makes every subrepo of /bar/ to be
- # mounted as foo/subrepo
- # and "foo = /bar/**" also recurses into the subdirectories,
- # remember to use it without working dir.
- try:
- recurse = {'*': False, '**': True}[roottail]
- except KeyError:
- repos[prefix] = root
- continue
- roothead = os.path.normpath(roothead)
- for path in util.walkrepos(roothead, followsym=True, recurse=recurse):
- path = os.path.normpath(path)
- name = util.pconvert(path[len(roothead):]).strip('/')
- if prefix:
- name = prefix + '/' + name
- repos[name] = path
- return repos.items()
-
-class hgwebdir(object):
- refreshinterval = 20
-
- def __init__(self, conf, baseui=None):
- self.conf = conf
- self.baseui = baseui
- self.lastrefresh = 0
- self.refresh()
-
- def refresh(self):
- if self.lastrefresh + self.refreshinterval > time.time():
- return
-
- if self.baseui:
- self.ui = self.baseui.copy()
- else:
- self.ui = ui.ui()
- self.ui.setconfig('ui', 'report_untrusted', 'off')
- self.ui.setconfig('ui', 'interactive', 'off')
-
- if not isinstance(self.conf, (dict, list, tuple)):
- map = {'paths': 'hgweb-paths'}
- self.ui.readconfig(self.conf, remap=map, trust=True)
- paths = self.ui.configitems('hgweb-paths')
- elif isinstance(self.conf, (list, tuple)):
- paths = self.conf
- elif isinstance(self.conf, dict):
- paths = self.conf.items()
-
- encoding.encoding = self.ui.config('web', 'encoding',
- encoding.encoding)
- self.motd = self.ui.config('web', 'motd')
- self.style = self.ui.config('web', 'style', 'paper')
- self.stripecount = self.ui.config('web', 'stripes', 1)
- if self.stripecount:
- self.stripecount = int(self.stripecount)
- self._baseurl = self.ui.config('web', 'baseurl')
-
- self.repos = findrepos(paths)
- for prefix, root in self.ui.configitems('collections'):
- prefix = util.pconvert(prefix)
- for path in util.walkrepos(root, followsym=True):
- repo = os.path.normpath(path)
- name = util.pconvert(repo)
- if name.startswith(prefix):
- name = name[len(prefix):]
- self.repos.append((name.lstrip('/'), repo))
-
- self.repos.sort()
- self.lastrefresh = time.time()
-
- def run(self):
- if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
- raise RuntimeError("This function is only intended to be "
- "called while running as a CGI script.")
- import mercurial.hgweb.wsgicgi as wsgicgi
- wsgicgi.launch(self)
-
- def __call__(self, env, respond):
- req = wsgirequest(env, respond)
- return self.run_wsgi(req)
-
- def read_allowed(self, ui, req):
- """Check allow_read and deny_read config options of a repo's ui object
- to determine user permissions. By default, with neither option set (or
- both empty), allow all users to read the repo. There are two ways a
- user can be denied read access: (1) deny_read is not empty, and the
- user is unauthenticated or deny_read contains user (or *), and (2)
- allow_read is not empty and the user is not in allow_read. Return True
- if user is allowed to read the repo, else return False."""
-
- user = req.env.get('REMOTE_USER')
-
- deny_read = ui.configlist('web', 'deny_read', untrusted=True)
- if deny_read and (not user or deny_read == ['*'] or user in deny_read):
- return False
-
- allow_read = ui.configlist('web', 'allow_read', untrusted=True)
- # by default, allow reading if no allow_read option has been set
- if (not allow_read) or (allow_read == ['*']) or (user in allow_read):
- return True
-
- return False
-
- def run_wsgi(self, req):
- try:
- try:
- self.refresh()
-
- virtual = req.env.get("PATH_INFO", "").strip('/')
- tmpl = self.templater(req)
- ctype = tmpl('mimetype', encoding=encoding.encoding)
- ctype = templater.stringify(ctype)
-
- # a static file
- if virtual.startswith('static/') or 'static' in req.form:
- if virtual.startswith('static/'):
- fname = virtual[7:]
- else:
- fname = req.form['static'][0]
- static = templater.templatepath('static')
- return (staticfile(static, fname, req),)
-
- # top-level index
- elif not virtual:
- req.respond(HTTP_OK, ctype)
- return self.makeindex(req, tmpl)
-
- # nested indexes and hgwebs
-
- repos = dict(self.repos)
- while virtual:
- real = repos.get(virtual)
- if real:
- req.env['REPO_NAME'] = virtual
- try:
- repo = hg.repository(self.ui, real)
- return hgweb(repo).run_wsgi(req)
- except IOError, inst:
- msg = inst.strerror
- raise ErrorResponse(HTTP_SERVER_ERROR, msg)
- except error.RepoError, inst:
- raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
-
- # browse subdirectories
- subdir = virtual + '/'
- if [r for r in repos if r.startswith(subdir)]:
- req.respond(HTTP_OK, ctype)
- return self.makeindex(req, tmpl, subdir)
-
- up = virtual.rfind('/')
- if up < 0:
- break
- virtual = virtual[:up]
-
- # prefixes not found
- req.respond(HTTP_NOT_FOUND, ctype)
- return tmpl("notfound", repo=virtual)
-
- except ErrorResponse, err:
- req.respond(err, ctype)
- return tmpl('error', error=err.message or '')
- finally:
- tmpl = None
-
- def makeindex(self, req, tmpl, subdir=""):
-
- def archivelist(ui, nodeid, url):
- allowed = ui.configlist("web", "allow_archive", untrusted=True)
- for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
- if i[0] in allowed or ui.configbool("web", "allow" + i[0],
- untrusted=True):
- yield {"type" : i[0], "extension": i[1],
- "node": nodeid, "url": url}
-
- sortdefault = 'name', False
- def entries(sortcolumn="", descending=False, subdir="", **map):
- rows = []
- parity = paritygen(self.stripecount)
- for name, path in self.repos:
- if not name.startswith(subdir):
- continue
- name = name[len(subdir):]
-
- u = self.ui.copy()
- try:
- u.readconfig(os.path.join(path, '.hg', 'hgrc'))
- except Exception, e:
- u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
- continue
- def get(section, name, default=None):
- return u.config(section, name, default, untrusted=True)
-
- if u.configbool("web", "hidden", untrusted=True):
- continue
-
- if not self.read_allowed(u, req):
- continue
-
- parts = [name]
- if 'PATH_INFO' in req.env:
- parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
- if req.env['SCRIPT_NAME']:
- parts.insert(0, req.env['SCRIPT_NAME'])
- m = re.match('((?:https?://)?)(.*)', '/'.join(parts))
- # squish repeated slashes out of the path component
- url = m.group(1) + re.sub('/+', '/', m.group(2)) + '/'
-
- # update time with local timezone
- try:
- d = (get_mtime(path), util.makedate()[1])
- except OSError:
- continue
-
- contact = get_contact(get)
- description = get("web", "description", "")
- name = get("web", "name", name)
- row = dict(contact=contact or "unknown",
- contact_sort=contact.upper() or "unknown",
- name=name,
- name_sort=name,
- url=url,
- description=description or "unknown",
- description_sort=description.upper() or "unknown",
- lastchange=d,
- lastchange_sort=d[1]-d[0],
- archives=archivelist(u, "tip", url))
- if (not sortcolumn or (sortcolumn, descending) == sortdefault):
- # fast path for unsorted output
- row['parity'] = parity.next()
- yield row
- else:
- rows.append((row["%s_sort" % sortcolumn], row))
- if rows:
- rows.sort()
- if descending:
- rows.reverse()
- for key, row in rows:
- row['parity'] = parity.next()
- yield row
-
- self.refresh()
- sortable = ["name", "description", "contact", "lastchange"]
- sortcolumn, descending = sortdefault
- if 'sort' in req.form:
- sortcolumn = req.form['sort'][0]
- descending = sortcolumn.startswith('-')
- if descending:
- sortcolumn = sortcolumn[1:]
- if sortcolumn not in sortable:
- sortcolumn = ""
-
- sort = [("sort_%s" % column,
- "%s%s" % ((not descending and column == sortcolumn)
- and "-" or "", column))
- for column in sortable]
-
- self.refresh()
- if self._baseurl is not None:
- req.env['SCRIPT_NAME'] = self._baseurl
-
- return tmpl("index", entries=entries, subdir=subdir,
- sortcolumn=sortcolumn, descending=descending,
- **dict(sort))
-
- def templater(self, req):
-
- def header(**map):
- yield tmpl('header', encoding=encoding.encoding, **map)
-
- def footer(**map):
- yield tmpl("footer", **map)
-
- def motd(**map):
- if self.motd is not None:
- yield self.motd
- else:
- yield config('web', 'motd', '')
-
- def config(section, name, default=None, untrusted=True):
- return self.ui.config(section, name, default, untrusted)
-
- if self._baseurl is not None:
- req.env['SCRIPT_NAME'] = self._baseurl
-
- url = req.env.get('SCRIPT_NAME', '')
- if not url.endswith('/'):
- url += '/'
-
- vars = {}
- style = self.style
- if 'style' in req.form:
- vars['style'] = style = req.form['style'][0]
- start = url[-1] == '?' and '&' or '?'
- sessionvars = webutil.sessionvars(vars, start)
-
- staticurl = config('web', 'staticurl') or url + 'static/'
- if not staticurl.endswith('/'):
- staticurl += '/'
-
- style = 'style' in req.form and req.form['style'][0] or self.style
- mapfile = templater.stylemap(style)
- tmpl = templater.templater(mapfile,
- defaults={"header": header,
- "footer": footer,
- "motd": motd,
- "url": url,
- "staticurl": staticurl,
- "sessionvars": sessionvars})
- return tmpl
diff --git a/sys/lib/python/mercurial/hgweb/protocol.py b/sys/lib/python/mercurial/hgweb/protocol.py
deleted file mode 100644
index a411fdb97..000000000
--- a/sys/lib/python/mercurial/hgweb/protocol.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import cStringIO, zlib, tempfile, errno, os, sys, urllib
-from mercurial import util, streamclone
-from mercurial.node import bin, hex
-from mercurial import changegroup as changegroupmod
-from common import ErrorResponse, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
-
-# __all__ is populated with the allowed commands. Be sure to add to it if
-# you're adding a new command, or the new command won't work.
-
-__all__ = [
- 'lookup', 'heads', 'branches', 'between', 'changegroup',
- 'changegroupsubset', 'capabilities', 'unbundle', 'stream_out',
- 'branchmap',
-]
-
-HGTYPE = 'application/mercurial-0.1'
-
-def lookup(repo, req):
- try:
- r = hex(repo.lookup(req.form['key'][0]))
- success = 1
- except Exception, inst:
- r = str(inst)
- success = 0
- resp = "%s %s\n" % (success, r)
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def heads(repo, req):
- resp = " ".join(map(hex, repo.heads())) + "\n"
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def branchmap(repo, req):
- branches = repo.branchmap()
- heads = []
- for branch, nodes in branches.iteritems():
- branchname = urllib.quote(branch)
- branchnodes = [hex(node) for node in nodes]
- heads.append('%s %s' % (branchname, ' '.join(branchnodes)))
- resp = '\n'.join(heads)
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def branches(repo, req):
- nodes = []
- if 'nodes' in req.form:
- nodes = map(bin, req.form['nodes'][0].split(" "))
- resp = cStringIO.StringIO()
- for b in repo.branches(nodes):
- resp.write(" ".join(map(hex, b)) + "\n")
- resp = resp.getvalue()
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def between(repo, req):
- if 'pairs' in req.form:
- pairs = [map(bin, p.split("-"))
- for p in req.form['pairs'][0].split(" ")]
- resp = cStringIO.StringIO()
- for b in repo.between(pairs):
- resp.write(" ".join(map(hex, b)) + "\n")
- resp = resp.getvalue()
- req.respond(HTTP_OK, HGTYPE, length=len(resp))
- yield resp
-
-def changegroup(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- nodes = []
-
- if 'roots' in req.form:
- nodes = map(bin, req.form['roots'][0].split(" "))
-
- z = zlib.compressobj()
- f = repo.changegroup(nodes, 'serve')
- while 1:
- chunk = f.read(4096)
- if not chunk:
- break
- yield z.compress(chunk)
-
- yield z.flush()
-
-def changegroupsubset(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- bases = []
- heads = []
-
- if 'bases' in req.form:
- bases = [bin(x) for x in req.form['bases'][0].split(' ')]
- if 'heads' in req.form:
- heads = [bin(x) for x in req.form['heads'][0].split(' ')]
-
- z = zlib.compressobj()
- f = repo.changegroupsubset(bases, heads, 'serve')
- while 1:
- chunk = f.read(4096)
- if not chunk:
- break
- yield z.compress(chunk)
-
- yield z.flush()
-
-def capabilities(repo, req):
- caps = ['lookup', 'changegroupsubset', 'branchmap']
- if repo.ui.configbool('server', 'uncompressed', untrusted=True):
- caps.append('stream=%d' % repo.changelog.version)
- if changegroupmod.bundlepriority:
- caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
- rsp = ' '.join(caps)
- req.respond(HTTP_OK, HGTYPE, length=len(rsp))
- yield rsp
-
-def unbundle(repo, req):
-
- proto = req.env.get('wsgi.url_scheme') or 'http'
- their_heads = req.form['heads'][0].split(' ')
-
- def check_heads():
- heads = map(hex, repo.heads())
- return their_heads == [hex('force')] or their_heads == heads
-
- # fail early if possible
- if not check_heads():
- req.drain()
- raise ErrorResponse(HTTP_OK, 'unsynced changes')
-
- # do not lock repo until all changegroup data is
- # streamed. save to temporary file.
-
- fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
- fp = os.fdopen(fd, 'wb+')
- try:
- length = int(req.env['CONTENT_LENGTH'])
- for s in util.filechunkiter(req, limit=length):
- fp.write(s)
-
- try:
- lock = repo.lock()
- try:
- if not check_heads():
- raise ErrorResponse(HTTP_OK, 'unsynced changes')
-
- fp.seek(0)
- header = fp.read(6)
- if header.startswith('HG') and not header.startswith('HG10'):
- raise ValueError('unknown bundle version')
- elif header not in changegroupmod.bundletypes:
- raise ValueError('unknown bundle compression type')
- gen = changegroupmod.unbundle(header, fp)
-
- # send addchangegroup output to client
-
- oldio = sys.stdout, sys.stderr
- sys.stderr = sys.stdout = cStringIO.StringIO()
-
- try:
- url = 'remote:%s:%s:%s' % (
- proto,
- urllib.quote(req.env.get('REMOTE_HOST', '')),
- urllib.quote(req.env.get('REMOTE_USER', '')))
- try:
- ret = repo.addchangegroup(gen, 'serve', url)
- except util.Abort, inst:
- sys.stdout.write("abort: %s\n" % inst)
- ret = 0
- finally:
- val = sys.stdout.getvalue()
- sys.stdout, sys.stderr = oldio
- req.respond(HTTP_OK, HGTYPE)
- return '%d\n%s' % (ret, val),
- finally:
- lock.release()
- except ValueError, inst:
- raise ErrorResponse(HTTP_OK, inst)
- except (OSError, IOError), inst:
- filename = getattr(inst, 'filename', '')
- # Don't send our filesystem layout to the client
- if filename.startswith(repo.root):
- filename = filename[len(repo.root)+1:]
- else:
- filename = ''
- error = getattr(inst, 'strerror', 'Unknown error')
- if inst.errno == errno.ENOENT:
- code = HTTP_NOT_FOUND
- else:
- code = HTTP_SERVER_ERROR
- raise ErrorResponse(code, '%s: %s' % (error, filename))
- finally:
- fp.close()
- os.unlink(tempname)
-
-def stream_out(repo, req):
- req.respond(HTTP_OK, HGTYPE)
- try:
- for chunk in streamclone.stream_out(repo, untrusted=True):
- yield chunk
- except streamclone.StreamException, inst:
- yield str(inst)
diff --git a/sys/lib/python/mercurial/hgweb/request.py b/sys/lib/python/mercurial/hgweb/request.py
deleted file mode 100644
index 4c7c5bf4b..000000000
--- a/sys/lib/python/mercurial/hgweb/request.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# hgweb/request.py - An http request from either CGI or the standalone server.
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import socket, cgi, errno
-from mercurial import util
-from common import ErrorResponse, statusmessage
-
-shortcuts = {
- 'cl': [('cmd', ['changelog']), ('rev', None)],
- 'sl': [('cmd', ['shortlog']), ('rev', None)],
- 'cs': [('cmd', ['changeset']), ('node', None)],
- 'f': [('cmd', ['file']), ('filenode', None)],
- 'fl': [('cmd', ['filelog']), ('filenode', None)],
- 'fd': [('cmd', ['filediff']), ('node', None)],
- 'fa': [('cmd', ['annotate']), ('filenode', None)],
- 'mf': [('cmd', ['manifest']), ('manifest', None)],
- 'ca': [('cmd', ['archive']), ('node', None)],
- 'tags': [('cmd', ['tags'])],
- 'tip': [('cmd', ['changeset']), ('node', ['tip'])],
- 'static': [('cmd', ['static']), ('file', None)]
-}
-
-def expand(form):
- for k in shortcuts.iterkeys():
- if k in form:
- for name, value in shortcuts[k]:
- if value is None:
- value = form[k]
- form[name] = value
- del form[k]
- return form
-
-class wsgirequest(object):
- def __init__(self, wsgienv, start_response):
- version = wsgienv['wsgi.version']
- if (version < (1, 0)) or (version >= (2, 0)):
- raise RuntimeError("Unknown and unsupported WSGI version %d.%d"
- % version)
- self.inp = wsgienv['wsgi.input']
- self.err = wsgienv['wsgi.errors']
- self.threaded = wsgienv['wsgi.multithread']
- self.multiprocess = wsgienv['wsgi.multiprocess']
- self.run_once = wsgienv['wsgi.run_once']
- self.env = wsgienv
- self.form = expand(cgi.parse(self.inp, self.env, keep_blank_values=1))
- self._start_response = start_response
- self.server_write = None
- self.headers = []
-
- def __iter__(self):
- return iter([])
-
- def read(self, count=-1):
- return self.inp.read(count)
-
- def drain(self):
- '''need to read all data from request, httplib is half-duplex'''
- length = int(self.env.get('CONTENT_LENGTH', 0))
- for s in util.filechunkiter(self.inp, limit=length):
- pass
-
- def respond(self, status, type=None, filename=None, length=0):
- if self._start_response is not None:
-
- self.httphdr(type, filename, length)
- if not self.headers:
- raise RuntimeError("request.write called before headers sent")
-
- for k, v in self.headers:
- if not isinstance(v, str):
- raise TypeError('header value must be string: %r' % v)
-
- if isinstance(status, ErrorResponse):
- self.header(status.headers)
- status = statusmessage(status.code)
- elif status == 200:
- status = '200 Script output follows'
- elif isinstance(status, int):
- status = statusmessage(status)
-
- self.server_write = self._start_response(status, self.headers)
- self._start_response = None
- self.headers = []
-
- def write(self, thing):
- if hasattr(thing, "__iter__"):
- for part in thing:
- self.write(part)
- else:
- thing = str(thing)
- try:
- self.server_write(thing)
- except socket.error, inst:
- if inst[0] != errno.ECONNRESET:
- raise
-
- def writelines(self, lines):
- for line in lines:
- self.write(line)
-
- def flush(self):
- return None
-
- def close(self):
- return None
-
- def header(self, headers=[('Content-Type','text/html')]):
- self.headers.extend(headers)
-
- def httphdr(self, type=None, filename=None, length=0, headers={}):
- headers = headers.items()
- if type is not None:
- headers.append(('Content-Type', type))
- if filename:
- filename = (filename.split('/')[-1]
- .replace('\\', '\\\\').replace('"', '\\"'))
- headers.append(('Content-Disposition',
- 'inline; filename="%s"' % filename))
- if length:
- headers.append(('Content-Length', str(length)))
- self.header(headers)
-
-def wsgiapplication(app_maker):
- '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir()
- can and should now be used as a WSGI application.'''
- application = app_maker()
- def run_wsgi(env, respond):
- return application(env, respond)
- return run_wsgi
diff --git a/sys/lib/python/mercurial/hgweb/server.py b/sys/lib/python/mercurial/hgweb/server.py
deleted file mode 100644
index a14bc757f..000000000
--- a/sys/lib/python/mercurial/hgweb/server.py
+++ /dev/null
@@ -1,298 +0,0 @@
-# hgweb/server.py - The standalone hg web server.
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
-from mercurial import hg, util, error
-from hgweb_mod import hgweb
-from hgwebdir_mod import hgwebdir
-from mercurial.i18n import _
-
-def _splitURI(uri):
- """ Return path and query splited from uri
-
- Just like CGI environment, the path is unquoted, the query is
- not.
- """
- if '?' in uri:
- path, query = uri.split('?', 1)
- else:
- path, query = uri, ''
- return urllib.unquote(path), query
-
-class _error_logger(object):
- def __init__(self, handler):
- self.handler = handler
- def flush(self):
- pass
- def write(self, str):
- self.writelines(str.split('\n'))
- def writelines(self, seq):
- for msg in seq:
- self.handler.log_error("HG error: %s", msg)
-
-class _hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
-
- url_scheme = 'http'
-
- def __init__(self, *args, **kargs):
- self.protocol_version = 'HTTP/1.1'
- BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
-
- def _log_any(self, fp, format, *args):
- fp.write("%s - - [%s] %s\n" % (self.client_address[0],
- self.log_date_time_string(),
- format % args))
- fp.flush()
-
- def log_error(self, format, *args):
- self._log_any(self.server.errorlog, format, *args)
-
- def log_message(self, format, *args):
- self._log_any(self.server.accesslog, format, *args)
-
- def do_write(self):
- try:
- self.do_hgweb()
- except socket.error, inst:
- if inst[0] != errno.EPIPE:
- raise
-
- def do_POST(self):
- try:
- self.do_write()
- except StandardError:
- self._start_response("500 Internal Server Error", [])
- self._write("Internal Server Error")
- tb = "".join(traceback.format_exception(*sys.exc_info()))
- self.log_error("Exception happened during processing "
- "request '%s':\n%s", self.path, tb)
-
- def do_GET(self):
- self.do_POST()
-
- def do_hgweb(self):
- path, query = _splitURI(self.path)
-
- env = {}
- env['GATEWAY_INTERFACE'] = 'CGI/1.1'
- env['REQUEST_METHOD'] = self.command
- env['SERVER_NAME'] = self.server.server_name
- env['SERVER_PORT'] = str(self.server.server_port)
- env['REQUEST_URI'] = self.path
- env['SCRIPT_NAME'] = self.server.prefix
- env['PATH_INFO'] = path[len(self.server.prefix):]
- env['REMOTE_HOST'] = self.client_address[0]
- env['REMOTE_ADDR'] = self.client_address[0]
- if query:
- env['QUERY_STRING'] = query
-
- if self.headers.typeheader is None:
- env['CONTENT_TYPE'] = self.headers.type
- else:
- env['CONTENT_TYPE'] = self.headers.typeheader
- length = self.headers.getheader('content-length')
- if length:
- env['CONTENT_LENGTH'] = length
- for header in [h for h in self.headers.keys()
- if h not in ('content-type', 'content-length')]:
- hkey = 'HTTP_' + header.replace('-', '_').upper()
- hval = self.headers.getheader(header)
- hval = hval.replace('\n', '').strip()
- if hval:
- env[hkey] = hval
- env['SERVER_PROTOCOL'] = self.request_version
- env['wsgi.version'] = (1, 0)
- env['wsgi.url_scheme'] = self.url_scheme
- env['wsgi.input'] = self.rfile
- env['wsgi.errors'] = _error_logger(self)
- env['wsgi.multithread'] = isinstance(self.server,
- SocketServer.ThreadingMixIn)
- env['wsgi.multiprocess'] = isinstance(self.server,
- SocketServer.ForkingMixIn)
- env['wsgi.run_once'] = 0
-
- self.close_connection = True
- self.saved_status = None
- self.saved_headers = []
- self.sent_headers = False
- self.length = None
- for chunk in self.server.application(env, self._start_response):
- self._write(chunk)
-
- def send_headers(self):
- if not self.saved_status:
- raise AssertionError("Sending headers before "
- "start_response() called")
- saved_status = self.saved_status.split(None, 1)
- saved_status[0] = int(saved_status[0])
- self.send_response(*saved_status)
- should_close = True
- for h in self.saved_headers:
- self.send_header(*h)
- if h[0].lower() == 'content-length':
- should_close = False
- self.length = int(h[1])
- # The value of the Connection header is a list of case-insensitive
- # tokens separated by commas and optional whitespace.
- if 'close' in [token.strip().lower() for token in
- self.headers.get('connection', '').split(',')]:
- should_close = True
- if should_close:
- self.send_header('Connection', 'close')
- self.close_connection = should_close
- self.end_headers()
- self.sent_headers = True
-
- def _start_response(self, http_status, headers, exc_info=None):
- code, msg = http_status.split(None, 1)
- code = int(code)
- self.saved_status = http_status
- bad_headers = ('connection', 'transfer-encoding')
- self.saved_headers = [h for h in headers
- if h[0].lower() not in bad_headers]
- return self._write
-
- def _write(self, data):
- if not self.saved_status:
- raise AssertionError("data written before start_response() called")
- elif not self.sent_headers:
- self.send_headers()
- if self.length is not None:
- if len(data) > self.length:
- raise AssertionError("Content-length header sent, but more "
- "bytes than specified are being written.")
- self.length = self.length - len(data)
- self.wfile.write(data)
- self.wfile.flush()
-
-class _shgwebhandler(_hgwebhandler):
-
- url_scheme = 'https'
-
- def setup(self):
- self.connection = self.request
- self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
- self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
-
- def do_write(self):
- from OpenSSL.SSL import SysCallError
- try:
- super(_shgwebhandler, self).do_write()
- except SysCallError, inst:
- if inst.args[0] != errno.EPIPE:
- raise
-
- def handle_one_request(self):
- from OpenSSL.SSL import SysCallError, ZeroReturnError
- try:
- super(_shgwebhandler, self).handle_one_request()
- except (SysCallError, ZeroReturnError):
- self.close_connection = True
- pass
-
-def create_server(ui, repo):
- use_threads = True
-
- def openlog(opt, default):
- if opt and opt != '-':
- return open(opt, 'a')
- return default
-
- if repo is None:
- myui = ui
- else:
- myui = repo.ui
- address = myui.config("web", "address", "")
- port = int(myui.config("web", "port", 8000))
- prefix = myui.config("web", "prefix", "")
- if prefix:
- prefix = "/" + prefix.strip("/")
- use_ipv6 = myui.configbool("web", "ipv6")
- webdir_conf = myui.config("web", "webdir_conf")
- ssl_cert = myui.config("web", "certificate")
- accesslog = openlog(myui.config("web", "accesslog", "-"), sys.stdout)
- errorlog = openlog(myui.config("web", "errorlog", "-"), sys.stderr)
-
- if use_threads:
- try:
- from threading import activeCount
- except ImportError:
- use_threads = False
-
- if use_threads:
- _mixin = SocketServer.ThreadingMixIn
- else:
- if hasattr(os, "fork"):
- _mixin = SocketServer.ForkingMixIn
- else:
- class _mixin:
- pass
-
- class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
-
- # SO_REUSEADDR has broken semantics on windows
- if os.name == 'nt':
- allow_reuse_address = 0
-
- def __init__(self, *args, **kargs):
- BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
- self.accesslog = accesslog
- self.errorlog = errorlog
- self.daemon_threads = True
- def make_handler():
- if webdir_conf:
- hgwebobj = hgwebdir(webdir_conf, ui)
- elif repo is not None:
- hgwebobj = hgweb(hg.repository(repo.ui, repo.root))
- else:
- raise error.RepoError(_("There is no Mercurial repository"
- " here (.hg not found)"))
- return hgwebobj
- self.application = make_handler()
-
- if ssl_cert:
- try:
- from OpenSSL import SSL
- ctx = SSL.Context(SSL.SSLv23_METHOD)
- except ImportError:
- raise util.Abort(_("SSL support is unavailable"))
- ctx.use_privatekey_file(ssl_cert)
- ctx.use_certificate_file(ssl_cert)
- sock = socket.socket(self.address_family, self.socket_type)
- self.socket = SSL.Connection(ctx, sock)
- self.server_bind()
- self.server_activate()
-
- self.addr, self.port = self.socket.getsockname()[0:2]
- self.prefix = prefix
- self.fqaddr = socket.getfqdn(address)
-
- class IPv6HTTPServer(MercurialHTTPServer):
- address_family = getattr(socket, 'AF_INET6', None)
-
- def __init__(self, *args, **kwargs):
- if self.address_family is None:
- raise error.RepoError(_('IPv6 is not available on this system'))
- super(IPv6HTTPServer, self).__init__(*args, **kwargs)
-
- if ssl_cert:
- handler = _shgwebhandler
- else:
- handler = _hgwebhandler
-
- # ugly hack due to python issue5853 (for threaded use)
- import mimetypes; mimetypes.init()
-
- try:
- if use_ipv6:
- return IPv6HTTPServer((address, port), handler)
- else:
- return MercurialHTTPServer((address, port), handler)
- except socket.error, inst:
- raise util.Abort(_("cannot start server at '%s:%d': %s")
- % (address, port, inst.args[1]))
diff --git a/sys/lib/python/mercurial/hgweb/webcommands.py b/sys/lib/python/mercurial/hgweb/webcommands.py
deleted file mode 100644
index c12425e02..000000000
--- a/sys/lib/python/mercurial/hgweb/webcommands.py
+++ /dev/null
@@ -1,690 +0,0 @@
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os, mimetypes, re, cgi, copy
-import webutil
-from mercurial import error, archival, templater, templatefilters
-from mercurial.node import short, hex
-from mercurial.util import binary
-from common import paritygen, staticfile, get_contact, ErrorResponse
-from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
-from mercurial import graphmod
-
-# __all__ is populated with the allowed commands. Be sure to add to it if
-# you're adding a new command, or the new command won't work.
-
-__all__ = [
- 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev',
- 'manifest', 'tags', 'branches', 'summary', 'filediff', 'diff', 'annotate',
- 'filelog', 'archive', 'static', 'graph',
-]
-
-def log(web, req, tmpl):
- if 'file' in req.form and req.form['file'][0]:
- return filelog(web, req, tmpl)
- else:
- return changelog(web, req, tmpl)
-
-def rawfile(web, req, tmpl):
- path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
- if not path:
- content = manifest(web, req, tmpl)
- req.respond(HTTP_OK, web.ctype)
- return content
-
- try:
- fctx = webutil.filectx(web.repo, req)
- except error.LookupError, inst:
- try:
- content = manifest(web, req, tmpl)
- req.respond(HTTP_OK, web.ctype)
- return content
- except ErrorResponse:
- raise inst
-
- path = fctx.path()
- text = fctx.data()
- mt = mimetypes.guess_type(path)[0]
- if mt is None:
- mt = binary(text) and 'application/octet-stream' or 'text/plain'
-
- req.respond(HTTP_OK, mt, path, len(text))
- return [text]
-
-def _filerevision(web, tmpl, fctx):
- f = fctx.path()
- text = fctx.data()
- parity = paritygen(web.stripecount)
-
- if binary(text):
- mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
- text = '(binary:%s)' % mt
-
- def lines():
- for lineno, t in enumerate(text.splitlines(True)):
- yield {"line": t,
- "lineid": "l%d" % (lineno + 1),
- "linenumber": "% 6d" % (lineno + 1),
- "parity": parity.next()}
-
- return tmpl("filerevision",
- file=f,
- path=webutil.up(f),
- text=lines(),
- rev=fctx.rev(),
- node=hex(fctx.node()),
- author=fctx.user(),
- date=fctx.date(),
- desc=fctx.description(),
- branch=webutil.nodebranchnodefault(fctx),
- parent=webutil.parents(fctx),
- child=webutil.children(fctx),
- rename=webutil.renamelink(fctx),
- permissions=fctx.manifest().flags(f))
-
-def file(web, req, tmpl):
- path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
- if not path:
- return manifest(web, req, tmpl)
- try:
- return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
- except error.LookupError, inst:
- try:
- return manifest(web, req, tmpl)
- except ErrorResponse:
- raise inst
-
-def _search(web, tmpl, query):
-
- def changelist(**map):
- cl = web.repo.changelog
- count = 0
- qw = query.lower().split()
-
- def revgen():
- for i in xrange(len(cl) - 1, 0, -100):
- l = []
- for j in xrange(max(0, i - 100), i + 1):
- ctx = web.repo[j]
- l.append(ctx)
- l.reverse()
- for e in l:
- yield e
-
- for ctx in revgen():
- miss = 0
- for q in qw:
- if not (q in ctx.user().lower() or
- q in ctx.description().lower() or
- q in " ".join(ctx.files()).lower()):
- miss = 1
- break
- if miss:
- continue
-
- count += 1
- n = ctx.node()
- showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
- files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
-
- yield tmpl('searchentry',
- parity=parity.next(),
- author=ctx.user(),
- parent=webutil.parents(ctx),
- child=webutil.children(ctx),
- changelogtag=showtags,
- desc=ctx.description(),
- date=ctx.date(),
- files=files,
- rev=ctx.rev(),
- node=hex(n),
- tags=webutil.nodetagsdict(web.repo, n),
- inbranch=webutil.nodeinbranch(web.repo, ctx),
- branches=webutil.nodebranchdict(web.repo, ctx))
-
- if count >= web.maxchanges:
- break
-
- cl = web.repo.changelog
- parity = paritygen(web.stripecount)
-
- return tmpl('search',
- query=query,
- node=hex(cl.tip()),
- entries=changelist,
- archives=web.archivelist("tip"))
-
-def changelog(web, req, tmpl, shortlog = False):
- if 'node' in req.form:
- ctx = webutil.changectx(web.repo, req)
- else:
- if 'rev' in req.form:
- hi = req.form['rev'][0]
- else:
- hi = len(web.repo) - 1
- try:
- ctx = web.repo[hi]
- except error.RepoError:
- return _search(web, tmpl, hi) # XXX redirect to 404 page?
-
- def changelist(limit=0, **map):
- l = [] # build a list in forward order for efficiency
- for i in xrange(start, end):
- ctx = web.repo[i]
- n = ctx.node()
- showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
- files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
-
- l.insert(0, {"parity": parity.next(),
- "author": ctx.user(),
- "parent": webutil.parents(ctx, i - 1),
- "child": webutil.children(ctx, i + 1),
- "changelogtag": showtags,
- "desc": ctx.description(),
- "date": ctx.date(),
- "files": files,
- "rev": i,
- "node": hex(n),
- "tags": webutil.nodetagsdict(web.repo, n),
- "inbranch": webutil.nodeinbranch(web.repo, ctx),
- "branches": webutil.nodebranchdict(web.repo, ctx)
- })
-
- if limit > 0:
- l = l[:limit]
-
- for e in l:
- yield e
-
- maxchanges = shortlog and web.maxshortchanges or web.maxchanges
- cl = web.repo.changelog
- count = len(cl)
- pos = ctx.rev()
- start = max(0, pos - maxchanges + 1)
- end = min(count, start + maxchanges)
- pos = end - 1
- parity = paritygen(web.stripecount, offset=start-end)
-
- changenav = webutil.revnavgen(pos, maxchanges, count, web.repo.changectx)
-
- return tmpl(shortlog and 'shortlog' or 'changelog',
- changenav=changenav,
- node=hex(ctx.node()),
- rev=pos, changesets=count,
- entries=lambda **x: changelist(limit=0,**x),
- latestentry=lambda **x: changelist(limit=1,**x),
- archives=web.archivelist("tip"))
-
-def shortlog(web, req, tmpl):
- return changelog(web, req, tmpl, shortlog = True)
-
-def changeset(web, req, tmpl):
- ctx = webutil.changectx(web.repo, req)
- showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
- showbranch = webutil.nodebranchnodefault(ctx)
-
- files = []
- parity = paritygen(web.stripecount)
- for f in ctx.files():
- template = f in ctx and 'filenodelink' or 'filenolink'
- files.append(tmpl(template,
- node=ctx.hex(), file=f,
- parity=parity.next()))
-
- parity = paritygen(web.stripecount)
- diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity)
- return tmpl('changeset',
- diff=diffs,
- rev=ctx.rev(),
- node=ctx.hex(),
- parent=webutil.parents(ctx),
- child=webutil.children(ctx),
- changesettag=showtags,
- changesetbranch=showbranch,
- author=ctx.user(),
- desc=ctx.description(),
- date=ctx.date(),
- files=files,
- archives=web.archivelist(ctx.hex()),
- tags=webutil.nodetagsdict(web.repo, ctx.node()),
- branch=webutil.nodebranchnodefault(ctx),
- inbranch=webutil.nodeinbranch(web.repo, ctx),
- branches=webutil.nodebranchdict(web.repo, ctx))
-
-rev = changeset
-
-def manifest(web, req, tmpl):
- ctx = webutil.changectx(web.repo, req)
- path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
- mf = ctx.manifest()
- node = ctx.node()
-
- files = {}
- dirs = {}
- parity = paritygen(web.stripecount)
-
- if path and path[-1] != "/":
- path += "/"
- l = len(path)
- abspath = "/" + path
-
- for f, n in mf.iteritems():
- if f[:l] != path:
- continue
- remain = f[l:]
- elements = remain.split('/')
- if len(elements) == 1:
- files[remain] = f
- else:
- h = dirs # need to retain ref to dirs (root)
- for elem in elements[0:-1]:
- if elem not in h:
- h[elem] = {}
- h = h[elem]
- if len(h) > 1:
- break
- h[None] = None # denotes files present
-
- if mf and not files and not dirs:
- raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)
-
- def filelist(**map):
- for f in sorted(files):
- full = files[f]
-
- fctx = ctx.filectx(full)
- yield {"file": full,
- "parity": parity.next(),
- "basename": f,
- "date": fctx.date(),
- "size": fctx.size(),
- "permissions": mf.flags(full)}
-
- def dirlist(**map):
- for d in sorted(dirs):
-
- emptydirs = []
- h = dirs[d]
- while isinstance(h, dict) and len(h) == 1:
- k,v = h.items()[0]
- if v:
- emptydirs.append(k)
- h = v
-
- path = "%s%s" % (abspath, d)
- yield {"parity": parity.next(),
- "path": path,
- "emptydirs": "/".join(emptydirs),
- "basename": d}
-
- return tmpl("manifest",
- rev=ctx.rev(),
- node=hex(node),
- path=abspath,
- up=webutil.up(abspath),
- upparity=parity.next(),
- fentries=filelist,
- dentries=dirlist,
- archives=web.archivelist(hex(node)),
- tags=webutil.nodetagsdict(web.repo, node),
- inbranch=webutil.nodeinbranch(web.repo, ctx),
- branches=webutil.nodebranchdict(web.repo, ctx))
-
-def tags(web, req, tmpl):
- i = web.repo.tagslist()
- i.reverse()
- parity = paritygen(web.stripecount)
-
- def entries(notip=False, limit=0, **map):
- count = 0
- for k, n in i:
- if notip and k == "tip":
- continue
- if limit > 0 and count >= limit:
- continue
- count = count + 1
- yield {"parity": parity.next(),
- "tag": k,
- "date": web.repo[n].date(),
- "node": hex(n)}
-
- return tmpl("tags",
- node=hex(web.repo.changelog.tip()),
- entries=lambda **x: entries(False,0, **x),
- entriesnotip=lambda **x: entries(True,0, **x),
- latestentry=lambda **x: entries(True,1, **x))
-
-def branches(web, req, tmpl):
- b = web.repo.branchtags()
- tips = (web.repo[n] for t, n in web.repo.branchtags().iteritems())
- heads = web.repo.heads()
- parity = paritygen(web.stripecount)
- sortkey = lambda ctx: ('close' not in ctx.extra(), ctx.rev())
-
- def entries(limit, **map):
- count = 0
- for ctx in sorted(tips, key=sortkey, reverse=True):
- if limit > 0 and count >= limit:
- return
- count += 1
- if ctx.node() not in heads:
- status = 'inactive'
- elif not web.repo.branchheads(ctx.branch()):
- status = 'closed'
- else:
- status = 'open'
- yield {'parity': parity.next(),
- 'branch': ctx.branch(),
- 'status': status,
- 'node': ctx.hex(),
- 'date': ctx.date()}
-
- return tmpl('branches', node=hex(web.repo.changelog.tip()),
- entries=lambda **x: entries(0, **x),
- latestentry=lambda **x: entries(1, **x))
-
-def summary(web, req, tmpl):
- i = web.repo.tagslist()
- i.reverse()
-
- def tagentries(**map):
- parity = paritygen(web.stripecount)
- count = 0
- for k, n in i:
- if k == "tip": # skip tip
- continue
-
- count += 1
- if count > 10: # limit to 10 tags
- break
-
- yield tmpl("tagentry",
- parity=parity.next(),
- tag=k,
- node=hex(n),
- date=web.repo[n].date())
-
- def branches(**map):
- parity = paritygen(web.stripecount)
-
- b = web.repo.branchtags()
- l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()]
- for r,n,t in sorted(l):
- yield {'parity': parity.next(),
- 'branch': t,
- 'node': hex(n),
- 'date': web.repo[n].date()}
-
- def changelist(**map):
- parity = paritygen(web.stripecount, offset=start-end)
- l = [] # build a list in forward order for efficiency
- for i in xrange(start, end):
- ctx = web.repo[i]
- n = ctx.node()
- hn = hex(n)
-
- l.insert(0, tmpl(
- 'shortlogentry',
- parity=parity.next(),
- author=ctx.user(),
- desc=ctx.description(),
- date=ctx.date(),
- rev=i,
- node=hn,
- tags=webutil.nodetagsdict(web.repo, n),
- inbranch=webutil.nodeinbranch(web.repo, ctx),
- branches=webutil.nodebranchdict(web.repo, ctx)))
-
- yield l
-
- cl = web.repo.changelog
- count = len(cl)
- start = max(0, count - web.maxchanges)
- end = min(count, start + web.maxchanges)
-
- return tmpl("summary",
- desc=web.config("web", "description", "unknown"),
- owner=get_contact(web.config) or "unknown",
- lastchange=cl.read(cl.tip())[2],
- tags=tagentries,
- branches=branches,
- shortlog=changelist,
- node=hex(cl.tip()),
- archives=web.archivelist("tip"))
-
-def filediff(web, req, tmpl):
- fctx, ctx = None, None
- try:
- fctx = webutil.filectx(web.repo, req)
- except LookupError:
- ctx = webutil.changectx(web.repo, req)
- path = webutil.cleanpath(web.repo, req.form['file'][0])
- if path not in ctx.files():
- raise
-
- if fctx is not None:
- n = fctx.node()
- path = fctx.path()
- else:
- n = ctx.node()
- # path already defined in except clause
-
- parity = paritygen(web.stripecount)
- diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity)
- rename = fctx and webutil.renamelink(fctx) or []
- ctx = fctx and fctx or ctx
- return tmpl("filediff",
- file=path,
- node=hex(n),
- rev=ctx.rev(),
- date=ctx.date(),
- desc=ctx.description(),
- author=ctx.user(),
- rename=rename,
- branch=webutil.nodebranchnodefault(ctx),
- parent=webutil.parents(ctx),
- child=webutil.children(ctx),
- diff=diffs)
-
-diff = filediff
-
-def annotate(web, req, tmpl):
- fctx = webutil.filectx(web.repo, req)
- f = fctx.path()
- parity = paritygen(web.stripecount)
-
- def annotate(**map):
- last = None
- if binary(fctx.data()):
- mt = (mimetypes.guess_type(fctx.path())[0]
- or 'application/octet-stream')
- lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
- '(binary:%s)' % mt)])
- else:
- lines = enumerate(fctx.annotate(follow=True, linenumber=True))
- for lineno, ((f, targetline), l) in lines:
- fnode = f.filenode()
-
- if last != fnode:
- last = fnode
-
- yield {"parity": parity.next(),
- "node": hex(f.node()),
- "rev": f.rev(),
- "author": f.user(),
- "desc": f.description(),
- "file": f.path(),
- "targetline": targetline,
- "line": l,
- "lineid": "l%d" % (lineno + 1),
- "linenumber": "% 6d" % (lineno + 1)}
-
- return tmpl("fileannotate",
- file=f,
- annotate=annotate,
- path=webutil.up(f),
- rev=fctx.rev(),
- node=hex(fctx.node()),
- author=fctx.user(),
- date=fctx.date(),
- desc=fctx.description(),
- rename=webutil.renamelink(fctx),
- branch=webutil.nodebranchnodefault(fctx),
- parent=webutil.parents(fctx),
- child=webutil.children(fctx),
- permissions=fctx.manifest().flags(f))
-
-def filelog(web, req, tmpl):
-
- try:
- fctx = webutil.filectx(web.repo, req)
- f = fctx.path()
- fl = fctx.filelog()
- except error.LookupError:
- f = webutil.cleanpath(web.repo, req.form['file'][0])
- fl = web.repo.file(f)
- numrevs = len(fl)
- if not numrevs: # file doesn't exist at all
- raise
- rev = webutil.changectx(web.repo, req).rev()
- first = fl.linkrev(0)
- if rev < first: # current rev is from before file existed
- raise
- frev = numrevs - 1
- while fl.linkrev(frev) > rev:
- frev -= 1
- fctx = web.repo.filectx(f, fl.linkrev(frev))
-
- count = fctx.filerev() + 1
- pagelen = web.maxshortchanges
- start = max(0, fctx.filerev() - pagelen + 1) # first rev on this page
- end = min(count, start + pagelen) # last rev on this page
- parity = paritygen(web.stripecount, offset=start-end)
-
- def entries(limit=0, **map):
- l = []
-
- repo = web.repo
- for i in xrange(start, end):
- iterfctx = fctx.filectx(i)
-
- l.insert(0, {"parity": parity.next(),
- "filerev": i,
- "file": f,
- "node": hex(iterfctx.node()),
- "author": iterfctx.user(),
- "date": iterfctx.date(),
- "rename": webutil.renamelink(iterfctx),
- "parent": webutil.parents(iterfctx),
- "child": webutil.children(iterfctx),
- "desc": iterfctx.description(),
- "tags": webutil.nodetagsdict(repo, iterfctx.node()),
- "branch": webutil.nodebranchnodefault(iterfctx),
- "inbranch": webutil.nodeinbranch(repo, iterfctx),
- "branches": webutil.nodebranchdict(repo, iterfctx)})
-
- if limit > 0:
- l = l[:limit]
-
- for e in l:
- yield e
-
- nodefunc = lambda x: fctx.filectx(fileid=x)
- nav = webutil.revnavgen(end - 1, pagelen, count, nodefunc)
- return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav,
- entries=lambda **x: entries(limit=0, **x),
- latestentry=lambda **x: entries(limit=1, **x))
-
-
-def archive(web, req, tmpl):
- type_ = req.form.get('type', [None])[0]
- allowed = web.configlist("web", "allow_archive")
- key = req.form['node'][0]
-
- if type_ not in web.archives:
- msg = 'Unsupported archive type: %s' % type_
- raise ErrorResponse(HTTP_NOT_FOUND, msg)
-
- if not ((type_ in allowed or
- web.configbool("web", "allow" + type_, False))):
- msg = 'Archive type not allowed: %s' % type_
- raise ErrorResponse(HTTP_FORBIDDEN, msg)
-
- reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
- cnode = web.repo.lookup(key)
- arch_version = key
- if cnode == key or key == 'tip':
- arch_version = short(cnode)
- name = "%s-%s" % (reponame, arch_version)
- mimetype, artype, extension, encoding = web.archive_specs[type_]
- headers = [
- ('Content-Type', mimetype),
- ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
- ]
- if encoding:
- headers.append(('Content-Encoding', encoding))
- req.header(headers)
- req.respond(HTTP_OK)
- archival.archive(web.repo, req, cnode, artype, prefix=name)
- return []
-
-
-def static(web, req, tmpl):
- fname = req.form['file'][0]
- # a repo owner may set web.static in .hg/hgrc to get any file
- # readable by the user running the CGI script
- static = web.config("web", "static", None, untrusted=False)
- if not static:
- tp = web.templatepath or templater.templatepath()
- if isinstance(tp, str):
- tp = [tp]
- static = [os.path.join(p, 'static') for p in tp]
- return [staticfile(static, fname, req)]
-
-def graph(web, req, tmpl):
- rev = webutil.changectx(web.repo, req).rev()
- bg_height = 39
-
- revcount = 25
- if 'revcount' in req.form:
- revcount = int(req.form.get('revcount', [revcount])[0])
- tmpl.defaults['sessionvars']['revcount'] = revcount
-
- lessvars = copy.copy(tmpl.defaults['sessionvars'])
- lessvars['revcount'] = revcount / 2
- morevars = copy.copy(tmpl.defaults['sessionvars'])
- morevars['revcount'] = revcount * 2
-
- max_rev = len(web.repo) - 1
- revcount = min(max_rev, revcount)
- revnode = web.repo.changelog.node(rev)
- revnode_hex = hex(revnode)
- uprev = min(max_rev, rev + revcount)
- downrev = max(0, rev - revcount)
- count = len(web.repo)
- changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx)
-
- dag = graphmod.revisions(web.repo, rev, downrev)
- tree = list(graphmod.colored(dag))
- canvasheight = (len(tree) + 1) * bg_height - 27;
- data = []
- for (id, type, ctx, vtx, edges) in tree:
- if type != graphmod.CHANGESET:
- continue
- node = short(ctx.node())
- age = templatefilters.age(ctx.date())
- desc = templatefilters.firstline(ctx.description())
- desc = cgi.escape(templatefilters.nonempty(desc))
- user = cgi.escape(templatefilters.person(ctx.user()))
- branch = ctx.branch()
- branch = branch, web.repo.branchtags().get(branch) == ctx.node()
- data.append((node, vtx, edges, desc, user, age, branch, ctx.tags()))
-
- return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
- lessvars=lessvars, morevars=morevars, downrev=downrev,
- canvasheight=canvasheight, jsdata=data, bg_height=bg_height,
- node=revnode_hex, changenav=changenav)
diff --git a/sys/lib/python/mercurial/hgweb/webutil.py b/sys/lib/python/mercurial/hgweb/webutil.py
deleted file mode 100644
index bd29528f8..000000000
--- a/sys/lib/python/mercurial/hgweb/webutil.py
+++ /dev/null
@@ -1,218 +0,0 @@
-# hgweb/webutil.py - utility library for the web interface.
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os, copy
-from mercurial import match, patch, util, error
-from mercurial.node import hex, nullid
-
-def up(p):
- if p[0] != "/":
- p = "/" + p
- if p[-1] == "/":
- p = p[:-1]
- up = os.path.dirname(p)
- if up == "/":
- return "/"
- return up + "/"
-
-def revnavgen(pos, pagelen, limit, nodefunc):
- def seq(factor, limit=None):
- if limit:
- yield limit
- if limit >= 20 and limit <= 40:
- yield 50
- else:
- yield 1 * factor
- yield 3 * factor
- for f in seq(factor * 10):
- yield f
-
- def nav(**map):
- l = []
- last = 0
- for f in seq(1, pagelen):
- if f < pagelen or f <= last:
- continue
- if f > limit:
- break
- last = f
- if pos + f < limit:
- l.append(("+%d" % f, hex(nodefunc(pos + f).node())))
- if pos - f >= 0:
- l.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node())))
-
- try:
- yield {"label": "(0)", "node": hex(nodefunc('0').node())}
-
- for label, node in l:
- yield {"label": label, "node": node}
-
- yield {"label": "tip", "node": "tip"}
- except error.RepoError:
- pass
-
- return nav
-
-def _siblings(siblings=[], hiderev=None):
- siblings = [s for s in siblings if s.node() != nullid]
- if len(siblings) == 1 and siblings[0].rev() == hiderev:
- return
- for s in siblings:
- d = {'node': hex(s.node()), 'rev': s.rev()}
- d['user'] = s.user()
- d['date'] = s.date()
- d['description'] = s.description()
- d['branch'] = s.branch()
- if hasattr(s, 'path'):
- d['file'] = s.path()
- yield d
-
-def parents(ctx, hide=None):
- return _siblings(ctx.parents(), hide)
-
-def children(ctx, hide=None):
- return _siblings(ctx.children(), hide)
-
-def renamelink(fctx):
- r = fctx.renamed()
- if r:
- return [dict(file=r[0], node=hex(r[1]))]
- return []
-
-def nodetagsdict(repo, node):
- return [{"name": i} for i in repo.nodetags(node)]
-
-def nodebranchdict(repo, ctx):
- branches = []
- branch = ctx.branch()
- # If this is an empty repo, ctx.node() == nullid,
- # ctx.branch() == 'default', but branchtags() is
- # an empty dict. Using dict.get avoids a traceback.
- if repo.branchtags().get(branch) == ctx.node():
- branches.append({"name": branch})
- return branches
-
-def nodeinbranch(repo, ctx):
- branches = []
- branch = ctx.branch()
- if branch != 'default' and repo.branchtags().get(branch) != ctx.node():
- branches.append({"name": branch})
- return branches
-
-def nodebranchnodefault(ctx):
- branches = []
- branch = ctx.branch()
- if branch != 'default':
- branches.append({"name": branch})
- return branches
-
-def showtag(repo, tmpl, t1, node=nullid, **args):
- for t in repo.nodetags(node):
- yield tmpl(t1, tag=t, **args)
-
-def cleanpath(repo, path):
- path = path.lstrip('/')
- return util.canonpath(repo.root, '', path)
-
-def changectx(repo, req):
- changeid = "tip"
- if 'node' in req.form:
- changeid = req.form['node'][0]
- elif 'manifest' in req.form:
- changeid = req.form['manifest'][0]
-
- try:
- ctx = repo[changeid]
- except error.RepoError:
- man = repo.manifest
- ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
-
- return ctx
-
-def filectx(repo, req):
- path = cleanpath(repo, req.form['file'][0])
- if 'node' in req.form:
- changeid = req.form['node'][0]
- else:
- changeid = req.form['filenode'][0]
- try:
- fctx = repo[changeid][path]
- except error.RepoError:
- fctx = repo.filectx(path, fileid=changeid)
-
- return fctx
-
-def listfilediffs(tmpl, files, node, max):
- for f in files[:max]:
- yield tmpl('filedifflink', node=hex(node), file=f)
- if len(files) > max:
- yield tmpl('fileellipses')
-
-def diffs(repo, tmpl, ctx, files, parity):
-
- def countgen():
- start = 1
- while True:
- yield start
- start += 1
-
- blockcount = countgen()
- def prettyprintlines(diff):
- blockno = blockcount.next()
- for lineno, l in enumerate(diff.splitlines(True)):
- lineno = "%d.%d" % (blockno, lineno + 1)
- if l.startswith('+'):
- ltype = "difflineplus"
- elif l.startswith('-'):
- ltype = "difflineminus"
- elif l.startswith('@'):
- ltype = "difflineat"
- else:
- ltype = "diffline"
- yield tmpl(ltype,
- line=l,
- lineid="l%s" % lineno,
- linenumber="% 8s" % lineno)
-
- if files:
- m = match.exact(repo.root, repo.getcwd(), files)
- else:
- m = match.always(repo.root, repo.getcwd())
-
- diffopts = patch.diffopts(repo.ui, untrusted=True)
- parents = ctx.parents()
- node1 = parents and parents[0].node() or nullid
- node2 = ctx.node()
-
- block = []
- for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
- if chunk.startswith('diff') and block:
- yield tmpl('diffblock', parity=parity.next(),
- lines=prettyprintlines(''.join(block)))
- block = []
- if chunk.startswith('diff'):
- chunk = ''.join(chunk.splitlines(True)[1:])
- block.append(chunk)
- yield tmpl('diffblock', parity=parity.next(),
- lines=prettyprintlines(''.join(block)))
-
-class sessionvars(object):
- def __init__(self, vars, start='?'):
- self.start = start
- self.vars = vars
- def __getitem__(self, key):
- return self.vars[key]
- def __setitem__(self, key, value):
- self.vars[key] = value
- def __copy__(self):
- return sessionvars(copy.copy(self.vars), self.start)
- def __iter__(self):
- separator = self.start
- for key, value in self.vars.iteritems():
- yield {'name': key, 'value': str(value), 'separator': separator}
- separator = '&'
diff --git a/sys/lib/python/mercurial/hgweb/wsgicgi.py b/sys/lib/python/mercurial/hgweb/wsgicgi.py
deleted file mode 100644
index 9dfb76978..000000000
--- a/sys/lib/python/mercurial/hgweb/wsgicgi.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# hgweb/wsgicgi.py - CGI->WSGI translator
-#
-# Copyright 2006 Eric Hopper <hopper@omnifarious.org>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-#
-# This was originally copied from the public domain code at
-# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
-
-import os, sys
-from mercurial import util
-
-def launch(application):
- util.set_binary(sys.stdin)
- util.set_binary(sys.stdout)
-
- environ = dict(os.environ.iteritems())
- environ.setdefault('PATH_INFO', '')
- if '.cgi' in environ['PATH_INFO']:
- environ['PATH_INFO'] = environ['PATH_INFO'].split('.cgi', 1)[1]
-
- environ['wsgi.input'] = sys.stdin
- environ['wsgi.errors'] = sys.stderr
- environ['wsgi.version'] = (1, 0)
- environ['wsgi.multithread'] = False
- environ['wsgi.multiprocess'] = True
- environ['wsgi.run_once'] = True
-
- if environ.get('HTTPS','off').lower() in ('on','1','yes'):
- environ['wsgi.url_scheme'] = 'https'
- else:
- environ['wsgi.url_scheme'] = 'http'
-
- headers_set = []
- headers_sent = []
- out = sys.stdout
-
- def write(data):
- if not headers_set:
- raise AssertionError("write() before start_response()")
-
- elif not headers_sent:
- # Before the first output, send the stored headers
- status, response_headers = headers_sent[:] = headers_set
- out.write('Status: %s\r\n' % status)
- for header in response_headers:
- out.write('%s: %s\r\n' % header)
- out.write('\r\n')
-
- out.write(data)
- out.flush()
-
- def start_response(status, response_headers, exc_info=None):
- if exc_info:
- try:
- if headers_sent:
- # Re-raise original exception if headers sent
- raise exc_info[0](exc_info[1], exc_info[2])
- finally:
- exc_info = None # avoid dangling circular ref
- elif headers_set:
- raise AssertionError("Headers already set!")
-
- headers_set[:] = [status, response_headers]
- return write
-
- content = application(environ, start_response)
- for chunk in content:
- write(chunk)
diff --git a/sys/lib/python/mercurial/hook.py b/sys/lib/python/mercurial/hook.py
deleted file mode 100644
index c5b536df9..000000000
--- a/sys/lib/python/mercurial/hook.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# hook.py - hook support for mercurial
-#
-# Copyright 2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import os, sys
-import extensions, util
-
-def _pythonhook(ui, repo, name, hname, funcname, args, throw):
- '''call python hook. hook is callable object, looked up as
- name in python module. if callable returns "true", hook
- fails, else passes. if hook raises exception, treated as
- hook failure. exception propagates if throw is "true".
-
- reason for "true" meaning "hook failed" is so that
- unmodified commands (e.g. mercurial.commands.update) can
- be run as hooks without wrappers to convert return values.'''
-
- ui.note(_("calling hook %s: %s\n") % (hname, funcname))
- obj = funcname
- if not hasattr(obj, '__call__'):
- d = funcname.rfind('.')
- if d == -1:
- raise util.Abort(_('%s hook is invalid ("%s" not in '
- 'a module)') % (hname, funcname))
- modname = funcname[:d]
- oldpaths = sys.path[:]
- if hasattr(sys, "frozen"):
- # binary installs require sys.path manipulation
- path, name = os.path.split(modname)
- if path and name:
- sys.path.append(path)
- modname = name
- try:
- obj = __import__(modname)
- except ImportError:
- try:
- # extensions are loaded with hgext_ prefix
- obj = __import__("hgext_%s" % modname)
- except ImportError:
- raise util.Abort(_('%s hook is invalid '
- '(import of "%s" failed)') %
- (hname, modname))
- sys.path = oldpaths
- try:
- for p in funcname.split('.')[1:]:
- obj = getattr(obj, p)
- except AttributeError:
- raise util.Abort(_('%s hook is invalid '
- '("%s" is not defined)') %
- (hname, funcname))
- if not hasattr(obj, '__call__'):
- raise util.Abort(_('%s hook is invalid '
- '("%s" is not callable)') %
- (hname, funcname))
- try:
- r = obj(ui=ui, repo=repo, hooktype=name, **args)
- except KeyboardInterrupt:
- raise
- except Exception, exc:
- if isinstance(exc, util.Abort):
- ui.warn(_('error: %s hook failed: %s\n') %
- (hname, exc.args[0]))
- else:
- ui.warn(_('error: %s hook raised an exception: '
- '%s\n') % (hname, exc))
- if throw:
- raise
- ui.traceback()
- return True
- if r:
- if throw:
- raise util.Abort(_('%s hook failed') % hname)
- ui.warn(_('warning: %s hook failed\n') % hname)
- return r
-
-def _exthook(ui, repo, name, cmd, args, throw):
- ui.note(_("running hook %s: %s\n") % (name, cmd))
-
- env = {}
- for k, v in args.iteritems():
- if hasattr(v, '__call__'):
- v = v()
- env['HG_' + k.upper()] = v
-
- if repo:
- cwd = repo.root
- else:
- cwd = os.getcwd()
- r = util.system(cmd, environ=env, cwd=cwd)
- if r:
- desc, r = util.explain_exit(r)
- if throw:
- raise util.Abort(_('%s hook %s') % (name, desc))
- ui.warn(_('warning: %s hook %s\n') % (name, desc))
- return r
-
-_redirect = False
-def redirect(state):
- global _redirect
- _redirect = state
-
-def hook(ui, repo, name, throw=False, **args):
- r = False
-
- if _redirect:
- # temporarily redirect stdout to stderr
- oldstdout = os.dup(sys.__stdout__.fileno())
- os.dup2(sys.__stderr__.fileno(), sys.__stdout__.fileno())
-
- try:
- for hname, cmd in ui.configitems('hooks'):
- if hname.split('.')[0] != name or not cmd:
- continue
- if hasattr(cmd, '__call__'):
- r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
- elif cmd.startswith('python:'):
- if cmd.count(':') >= 2:
- path, cmd = cmd[7:].rsplit(':', 1)
- mod = extensions.loadpath(path, 'hghook.%s' % hname)
- hookfn = getattr(mod, cmd)
- else:
- hookfn = cmd[7:].strip()
- r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r
- else:
- r = _exthook(ui, repo, hname, cmd, args, throw) or r
- finally:
- if _redirect:
- os.dup2(oldstdout, sys.__stdout__.fileno())
- os.close(oldstdout)
-
- return r
diff --git a/sys/lib/python/mercurial/httprepo.py b/sys/lib/python/mercurial/httprepo.py
deleted file mode 100644
index a766bf07a..000000000
--- a/sys/lib/python/mercurial/httprepo.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# httprepo.py - HTTP repository proxy classes for mercurial
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import bin, hex, nullid
-from i18n import _
-import repo, changegroup, statichttprepo, error, url, util
-import os, urllib, urllib2, urlparse, zlib, httplib
-import errno, socket
-
-def zgenerator(f):
- zd = zlib.decompressobj()
- try:
- for chunk in util.filechunkiter(f):
- yield zd.decompress(chunk)
- except httplib.HTTPException:
- raise IOError(None, _('connection ended unexpectedly'))
- yield zd.flush()
-
-class httprepository(repo.repository):
- def __init__(self, ui, path):
- self.path = path
- self.caps = None
- self.handler = None
- scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
- if query or frag:
- raise util.Abort(_('unsupported URL component: "%s"') %
- (query or frag))
-
- # urllib cannot handle URLs with embedded user or passwd
- self._url, authinfo = url.getauthinfo(path)
-
- self.ui = ui
- self.ui.debug(_('using %s\n') % self._url)
-
- self.urlopener = url.opener(ui, authinfo)
-
- def __del__(self):
- for h in self.urlopener.handlers:
- h.close()
- if hasattr(h, "close_all"):
- h.close_all()
-
- def url(self):
- return self.path
-
- # look up capabilities only when needed
-
- def get_caps(self):
- if self.caps is None:
- try:
- self.caps = set(self.do_read('capabilities').split())
- except error.RepoError:
- self.caps = set()
- self.ui.debug(_('capabilities: %s\n') %
- (' '.join(self.caps or ['none'])))
- return self.caps
-
- capabilities = property(get_caps)
-
- def lock(self):
- raise util.Abort(_('operation not supported over http'))
-
- def do_cmd(self, cmd, **args):
- data = args.pop('data', None)
- headers = args.pop('headers', {})
- self.ui.debug(_("sending %s command\n") % cmd)
- q = {"cmd": cmd}
- q.update(args)
- qs = '?%s' % urllib.urlencode(q)
- cu = "%s%s" % (self._url, qs)
- try:
- if data:
- self.ui.debug(_("sending %s bytes\n") % len(data))
- resp = self.urlopener.open(urllib2.Request(cu, data, headers))
- except urllib2.HTTPError, inst:
- if inst.code == 401:
- raise util.Abort(_('authorization failed'))
- raise
- except httplib.HTTPException, inst:
- self.ui.debug(_('http error while sending %s command\n') % cmd)
- self.ui.traceback()
- raise IOError(None, inst)
- except IndexError:
- # this only happens with Python 2.3, later versions raise URLError
- raise util.Abort(_('http error, possibly caused by proxy setting'))
- # record the url we got redirected to
- resp_url = resp.geturl()
- if resp_url.endswith(qs):
- resp_url = resp_url[:-len(qs)]
- if self._url != resp_url:
- self.ui.status(_('real URL is %s\n') % resp_url)
- self._url = resp_url
- try:
- proto = resp.getheader('content-type')
- except AttributeError:
- proto = resp.headers['content-type']
-
- safeurl = url.hidepassword(self._url)
- # accept old "text/plain" and "application/hg-changegroup" for now
- if not (proto.startswith('application/mercurial-') or
- proto.startswith('text/plain') or
- proto.startswith('application/hg-changegroup')):
- self.ui.debug(_("requested URL: '%s'\n") % url.hidepassword(cu))
- raise error.RepoError(_("'%s' does not appear to be an hg repository")
- % safeurl)
-
- if proto.startswith('application/mercurial-'):
- try:
- version = proto.split('-', 1)[1]
- version_info = tuple([int(n) for n in version.split('.')])
- except ValueError:
- raise error.RepoError(_("'%s' sent a broken Content-Type "
- "header (%s)") % (safeurl, proto))
- if version_info > (0, 1):
- raise error.RepoError(_("'%s' uses newer protocol %s") %
- (safeurl, version))
-
- return resp
-
- def do_read(self, cmd, **args):
- fp = self.do_cmd(cmd, **args)
- try:
- return fp.read()
- finally:
- # if using keepalive, allow connection to be reused
- fp.close()
-
- def lookup(self, key):
- self.requirecap('lookup', _('look up remote revision'))
- d = self.do_cmd("lookup", key = key).read()
- success, data = d[:-1].split(' ', 1)
- if int(success):
- return bin(data)
- raise error.RepoError(data)
-
- def heads(self):
- d = self.do_read("heads")
- try:
- return map(bin, d[:-1].split(" "))
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branchmap(self):
- d = self.do_read("branchmap")
- try:
- branchmap = {}
- for branchpart in d.splitlines():
- branchheads = branchpart.split(' ')
- branchname = urllib.unquote(branchheads[0])
- branchheads = [bin(x) for x in branchheads[1:]]
- branchmap[branchname] = branchheads
- return branchmap
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branches(self, nodes):
- n = " ".join(map(hex, nodes))
- d = self.do_read("branches", nodes=n)
- try:
- br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
- return br
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def between(self, pairs):
- batch = 8 # avoid giant requests
- r = []
- for i in xrange(0, len(pairs), batch):
- n = " ".join(["-".join(map(hex, p)) for p in pairs[i:i + batch]])
- d = self.do_read("between", pairs=n)
- try:
- r += [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
- except:
- raise error.ResponseError(_("unexpected response:"), d)
- return r
-
- def changegroup(self, nodes, kind):
- n = " ".join(map(hex, nodes))
- f = self.do_cmd("changegroup", roots=n)
- return util.chunkbuffer(zgenerator(f))
-
- def changegroupsubset(self, bases, heads, source):
- self.requirecap('changegroupsubset', _('look up remote changes'))
- baselst = " ".join([hex(n) for n in bases])
- headlst = " ".join([hex(n) for n in heads])
- f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
- return util.chunkbuffer(zgenerator(f))
-
- def unbundle(self, cg, heads, source):
- # have to stream bundle to a temp file because we do not have
- # http 1.1 chunked transfer.
-
- type = ""
- types = self.capable('unbundle')
- # servers older than d1b16a746db6 will send 'unbundle' as a
- # boolean capability
- try:
- types = types.split(',')
- except AttributeError:
- types = [""]
- if types:
- for x in types:
- if x in changegroup.bundletypes:
- type = x
- break
-
- tempname = changegroup.writebundle(cg, None, type)
- fp = url.httpsendfile(tempname, "rb")
- try:
- try:
- resp = self.do_read(
- 'unbundle', data=fp,
- headers={'Content-Type': 'application/octet-stream'},
- heads=' '.join(map(hex, heads)))
- resp_code, output = resp.split('\n', 1)
- try:
- ret = int(resp_code)
- except ValueError, err:
- raise error.ResponseError(
- _('push failed (unexpected response):'), resp)
- self.ui.write(output)
- return ret
- except socket.error, err:
- if err[0] in (errno.ECONNRESET, errno.EPIPE):
- raise util.Abort(_('push failed: %s') % err[1])
- raise util.Abort(err[1])
- finally:
- fp.close()
- os.unlink(tempname)
-
- def stream_out(self):
- return self.do_cmd('stream_out')
-
-class httpsrepository(httprepository):
- def __init__(self, ui, path):
- if not url.has_https:
- raise util.Abort(_('Python support for SSL and HTTPS '
- 'is not installed'))
- httprepository.__init__(self, ui, path)
-
-def instance(ui, path, create):
- if create:
- raise util.Abort(_('cannot create new http repository'))
- try:
- if path.startswith('https:'):
- inst = httpsrepository(ui, path)
- else:
- inst = httprepository(ui, path)
- inst.between([(nullid, nullid)])
- return inst
- except error.RepoError:
- ui.note('(falling back to static-http)\n')
- return statichttprepo.instance(ui, "static-" + path, create)
diff --git a/sys/lib/python/mercurial/i18n.py b/sys/lib/python/mercurial/i18n.py
deleted file mode 100644
index c8ef2e9a5..000000000
--- a/sys/lib/python/mercurial/i18n.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# i18n.py - internationalization support for mercurial
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import encoding
-import gettext, sys, os
-
-# modelled after templater.templatepath:
-if hasattr(sys, 'frozen'):
- module = sys.executable
-else:
- module = __file__
-
-base = os.path.dirname(module)
-for dir in ('.', '..'):
- localedir = os.path.normpath(os.path.join(base, dir, 'locale'))
- if os.path.isdir(localedir):
- break
-
-t = gettext.translation('hg', localedir, fallback=True)
-
-def gettext(message):
- """Translate message.
-
- The message is looked up in the catalog to get a Unicode string,
- which is encoded in the local encoding before being returned.
-
- Important: message is restricted to characters in the encoding
- given by sys.getdefaultencoding() which is most likely 'ascii'.
- """
- # If message is None, t.ugettext will return u'None' as the
- # translation whereas our callers expect us to return None.
- if message is None:
- return message
-
- u = t.ugettext(message)
- try:
- # encoding.tolocal cannot be used since it will first try to
- # decode the Unicode string. Calling u.decode(enc) really
- # means u.encode(sys.getdefaultencoding()).decode(enc). Since
- # the Python encoding defaults to 'ascii', this fails if the
- # translated string use non-ASCII characters.
- return u.encode(encoding.encoding, "replace")
- except LookupError:
- # An unknown encoding results in a LookupError.
- return message
-
-_ = gettext
-
diff --git a/sys/lib/python/mercurial/ignore.py b/sys/lib/python/mercurial/ignore.py
deleted file mode 100644
index 72532ea5d..000000000
--- a/sys/lib/python/mercurial/ignore.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# ignore.py - ignored file handling for mercurial
-#
-# Copyright 2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import util, match
-import re
-
-_commentre = None
-
-def ignorepats(lines):
- '''parse lines (iterable) of .hgignore text, returning a tuple of
- (patterns, parse errors). These patterns should be given to compile()
- to be validated and converted into a match function.'''
- syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
- syntax = 'relre:'
- patterns = []
- warnings = []
-
- for line in lines:
- if "#" in line:
- global _commentre
- if not _commentre:
- _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
- # remove comments prefixed by an even number of escapes
- line = _commentre.sub(r'\1', line)
- # fixup properly escaped comments that survived the above
- line = line.replace("\\#", "#")
- line = line.rstrip()
- if not line:
- continue
-
- if line.startswith('syntax:'):
- s = line[7:].strip()
- try:
- syntax = syntaxes[s]
- except KeyError:
- warnings.append(_("ignoring invalid syntax '%s'") % s)
- continue
- pat = syntax + line
- for s, rels in syntaxes.iteritems():
- if line.startswith(rels):
- pat = line
- break
- elif line.startswith(s+':'):
- pat = rels + line[len(s)+1:]
- break
- patterns.append(pat)
-
- return patterns, warnings
-
-def ignore(root, files, warn):
- '''return matcher covering patterns in 'files'.
-
- the files parsed for patterns include:
- .hgignore in the repository root
- any additional files specified in the [ui] section of ~/.hgrc
-
- trailing white space is dropped.
- the escape character is backslash.
- comments start with #.
- empty lines are skipped.
-
- lines can be of the following formats:
-
- syntax: regexp # defaults following lines to non-rooted regexps
- syntax: glob # defaults following lines to non-rooted globs
- re:pattern # non-rooted regular expression
- glob:pattern # non-rooted glob
- pattern # pattern of the current default type'''
-
- pats = {}
- for f in files:
- try:
- pats[f] = []
- fp = open(f)
- pats[f], warnings = ignorepats(fp)
- for warning in warnings:
- warn("%s: %s\n" % (f, warning))
- except IOError, inst:
- if f != files[0]:
- warn(_("skipping unreadable ignore file '%s': %s\n") %
- (f, inst.strerror))
-
- allpats = []
- [allpats.extend(patlist) for patlist in pats.values()]
- if not allpats:
- return util.never
-
- try:
- ignorefunc = match.match(root, '', [], allpats)
- except util.Abort:
- # Re-raise an exception where the src is the right file
- for f, patlist in pats.iteritems():
- try:
- match.match(root, '', [], patlist)
- except util.Abort, inst:
- raise util.Abort('%s: %s' % (f, inst[0]))
-
- return ignorefunc
diff --git a/sys/lib/python/mercurial/keepalive.py b/sys/lib/python/mercurial/keepalive.py
deleted file mode 100644
index aa19ffbed..000000000
--- a/sys/lib/python/mercurial/keepalive.py
+++ /dev/null
@@ -1,671 +0,0 @@
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the
-# Free Software Foundation, Inc.,
-# 59 Temple Place, Suite 330,
-# Boston, MA 02111-1307 USA
-
-# This file is part of urlgrabber, a high-level cross-protocol url-grabber
-# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
-
-# Modified by Benoit Boissinot:
-# - fix for digest auth (inspired from urllib2.py @ Python v2.4)
-# Modified by Dirkjan Ochtman:
-# - import md5 function from a local util module
-# Modified by Martin Geisler:
-# - moved md5 function from local util module to this module
-
-"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
-
->>> import urllib2
->>> from keepalive import HTTPHandler
->>> keepalive_handler = HTTPHandler()
->>> opener = urllib2.build_opener(keepalive_handler)
->>> urllib2.install_opener(opener)
->>>
->>> fo = urllib2.urlopen('http://www.python.org')
-
-If a connection to a given host is requested, and all of the existing
-connections are still in use, another connection will be opened. If
-the handler tries to use an existing connection but it fails in some
-way, it will be closed and removed from the pool.
-
-To remove the handler, simply re-run build_opener with no arguments, and
-install that opener.
-
-You can explicitly close connections by using the close_connection()
-method of the returned file-like object (described below) or you can
-use the handler methods:
-
- close_connection(host)
- close_all()
- open_connections()
-
-NOTE: using the close_connection and close_all methods of the handler
-should be done with care when using multiple threads.
- * there is nothing that prevents another thread from creating new
- connections immediately after connections are closed
- * no checks are done to prevent in-use connections from being closed
-
->>> keepalive_handler.close_all()
-
-EXTRA ATTRIBUTES AND METHODS
-
- Upon a status of 200, the object returned has a few additional
- attributes and methods, which should not be used if you want to
- remain consistent with the normal urllib2-returned objects:
-
- close_connection() - close the connection to the host
- readlines() - you know, readlines()
- status - the return status (ie 404)
- reason - english translation of status (ie 'File not found')
-
- If you want the best of both worlds, use this inside an
- AttributeError-catching try:
-
- >>> try: status = fo.status
- >>> except AttributeError: status = None
-
- Unfortunately, these are ONLY there if status == 200, so it's not
- easy to distinguish between non-200 responses. The reason is that
- urllib2 tries to do clever things with error codes 301, 302, 401,
- and 407, and it wraps the object upon return.
-
- For python versions earlier than 2.4, you can avoid this fancy error
- handling by setting the module-level global HANDLE_ERRORS to zero.
- You see, prior to 2.4, it's the HTTP Handler's job to determine what
- to handle specially, and what to just pass up. HANDLE_ERRORS == 0
- means "pass everything up". In python 2.4, however, this job no
- longer belongs to the HTTP Handler and is now done by a NEW handler,
- HTTPErrorProcessor. Here's the bottom line:
-
- python version < 2.4
- HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
- errors
- HANDLE_ERRORS == 0 pass everything up, error processing is
- left to the calling code
- python version >= 2.4
- HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
- HANDLE_ERRORS == 0 (default) pass everything up, let the
- other handlers (specifically,
- HTTPErrorProcessor) decide what to do
-
- In practice, setting the variable either way makes little difference
- in python 2.4, so for the most consistent behavior across versions,
- you probably just want to use the defaults, which will give you
- exceptions on errors.
-
-"""
-
-# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
-
-import urllib2
-import httplib
-import socket
-import thread
-
-DEBUG = None
-
-import sys
-if sys.version_info < (2, 4): HANDLE_ERRORS = 1
-else: HANDLE_ERRORS = 0
-
-class ConnectionManager:
- """
- The connection manager must be able to:
- * keep track of all existing
- """
- def __init__(self):
- self._lock = thread.allocate_lock()
- self._hostmap = {} # map hosts to a list of connections
- self._connmap = {} # map connections to host
- self._readymap = {} # map connection to ready state
-
- def add(self, host, connection, ready):
- self._lock.acquire()
- try:
- if not host in self._hostmap: self._hostmap[host] = []
- self._hostmap[host].append(connection)
- self._connmap[connection] = host
- self._readymap[connection] = ready
- finally:
- self._lock.release()
-
- def remove(self, connection):
- self._lock.acquire()
- try:
- try:
- host = self._connmap[connection]
- except KeyError:
- pass
- else:
- del self._connmap[connection]
- del self._readymap[connection]
- self._hostmap[host].remove(connection)
- if not self._hostmap[host]: del self._hostmap[host]
- finally:
- self._lock.release()
-
- def set_ready(self, connection, ready):
- try: self._readymap[connection] = ready
- except KeyError: pass
-
- def get_ready_conn(self, host):
- conn = None
- self._lock.acquire()
- try:
- if host in self._hostmap:
- for c in self._hostmap[host]:
- if self._readymap[c]:
- self._readymap[c] = 0
- conn = c
- break
- finally:
- self._lock.release()
- return conn
-
- def get_all(self, host=None):
- if host:
- return list(self._hostmap.get(host, []))
- else:
- return dict(self._hostmap)
-
-class KeepAliveHandler:
- def __init__(self):
- self._cm = ConnectionManager()
-
- #### Connection Management
- def open_connections(self):
- """return a list of connected hosts and the number of connections
- to each. [('foo.com:80', 2), ('bar.org', 1)]"""
- return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
-
- def close_connection(self, host):
- """close connection(s) to <host>
- host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
- no error occurs if there is no connection to that host."""
- for h in self._cm.get_all(host):
- self._cm.remove(h)
- h.close()
-
- def close_all(self):
- """close all open connections"""
- for host, conns in self._cm.get_all().iteritems():
- for h in conns:
- self._cm.remove(h)
- h.close()
-
- def _request_closed(self, request, host, connection):
- """tells us that this request is now closed and the the
- connection is ready for another request"""
- self._cm.set_ready(connection, 1)
-
- def _remove_connection(self, host, connection, close=0):
- if close: connection.close()
- self._cm.remove(connection)
-
- #### Transaction Execution
- def http_open(self, req):
- return self.do_open(HTTPConnection, req)
-
- def do_open(self, http_class, req):
- host = req.get_host()
- if not host:
- raise urllib2.URLError('no host given')
-
- try:
- h = self._cm.get_ready_conn(host)
- while h:
- r = self._reuse_connection(h, req, host)
-
- # if this response is non-None, then it worked and we're
- # done. Break out, skipping the else block.
- if r: break
-
- # connection is bad - possibly closed by server
- # discard it and ask for the next free connection
- h.close()
- self._cm.remove(h)
- h = self._cm.get_ready_conn(host)
- else:
- # no (working) free connections were found. Create a new one.
- h = http_class(host)
- if DEBUG: DEBUG.info("creating new connection to %s (%d)",
- host, id(h))
- self._cm.add(host, h, 0)
- self._start_transaction(h, req)
- r = h.getresponse()
- except (socket.error, httplib.HTTPException), err:
- raise urllib2.URLError(err)
-
- # if not a persistent connection, don't try to reuse it
- if r.will_close: self._cm.remove(h)
-
- if DEBUG: DEBUG.info("STATUS: %s, %s", r.status, r.reason)
- r._handler = self
- r._host = host
- r._url = req.get_full_url()
- r._connection = h
- r.code = r.status
- r.headers = r.msg
- r.msg = r.reason
-
- if r.status == 200 or not HANDLE_ERRORS:
- return r
- else:
- return self.parent.error('http', req, r,
- r.status, r.msg, r.headers)
-
- def _reuse_connection(self, h, req, host):
- """start the transaction with a re-used connection
- return a response object (r) upon success or None on failure.
- This DOES not close or remove bad connections in cases where
- it returns. However, if an unexpected exception occurs, it
- will close and remove the connection before re-raising.
- """
- try:
- self._start_transaction(h, req)
- r = h.getresponse()
- # note: just because we got something back doesn't mean it
- # worked. We'll check the version below, too.
- except (socket.error, httplib.HTTPException):
- r = None
- except:
- # adding this block just in case we've missed
- # something we will still raise the exception, but
- # lets try and close the connection and remove it
- # first. We previously got into a nasty loop
- # where an exception was uncaught, and so the
- # connection stayed open. On the next try, the
- # same exception was raised, etc. The tradeoff is
- # that it's now possible this call will raise
- # a DIFFERENT exception
- if DEBUG: DEBUG.error("unexpected exception - closing " + \
- "connection to %s (%d)", host, id(h))
- self._cm.remove(h)
- h.close()
- raise
-
- if r is None or r.version == 9:
- # httplib falls back to assuming HTTP 0.9 if it gets a
- # bad header back. This is most likely to happen if
- # the socket has been closed by the server since we
- # last used the connection.
- if DEBUG: DEBUG.info("failed to re-use connection to %s (%d)",
- host, id(h))
- r = None
- else:
- if DEBUG: DEBUG.info("re-using connection to %s (%d)", host, id(h))
-
- return r
-
- def _start_transaction(self, h, req):
- # What follows mostly reimplements HTTPConnection.request()
- # except it adds self.parent.addheaders in the mix.
- headers = req.headers.copy()
- if sys.version_info >= (2, 4):
- headers.update(req.unredirected_hdrs)
- headers.update(self.parent.addheaders)
- headers = dict((n.lower(), v) for n,v in headers.items())
- skipheaders = {}
- for n in ('host', 'accept-encoding'):
- if n in headers:
- skipheaders['skip_' + n.replace('-', '_')] = 1
- try:
- if req.has_data():
- data = req.get_data()
- h.putrequest('POST', req.get_selector(), **skipheaders)
- if 'content-type' not in headers:
- h.putheader('Content-type',
- 'application/x-www-form-urlencoded')
- if 'content-length' not in headers:
- h.putheader('Content-length', '%d' % len(data))
- else:
- h.putrequest('GET', req.get_selector(), **skipheaders)
- except (socket.error), err:
- raise urllib2.URLError(err)
- for k, v in headers.items():
- h.putheader(k, v)
- h.endheaders()
- if req.has_data():
- h.send(data)
-
-class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
- pass
-
-class HTTPResponse(httplib.HTTPResponse):
- # we need to subclass HTTPResponse in order to
- # 1) add readline() and readlines() methods
- # 2) add close_connection() methods
- # 3) add info() and geturl() methods
-
- # in order to add readline(), read must be modified to deal with a
- # buffer. example: readline must read a buffer and then spit back
- # one line at a time. The only real alternative is to read one
- # BYTE at a time (ick). Once something has been read, it can't be
- # put back (ok, maybe it can, but that's even uglier than this),
- # so if you THEN do a normal read, you must first take stuff from
- # the buffer.
-
- # the read method wraps the original to accomodate buffering,
- # although read() never adds to the buffer.
- # Both readline and readlines have been stolen with almost no
- # modification from socket.py
-
-
- def __init__(self, sock, debuglevel=0, strict=0, method=None):
- if method: # the httplib in python 2.3 uses the method arg
- httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
- else: # 2.2 doesn't
- httplib.HTTPResponse.__init__(self, sock, debuglevel)
- self.fileno = sock.fileno
- self.code = None
- self._rbuf = ''
- self._rbufsize = 8096
- self._handler = None # inserted by the handler later
- self._host = None # (same)
- self._url = None # (same)
- self._connection = None # (same)
-
- _raw_read = httplib.HTTPResponse.read
-
- def close(self):
- if self.fp:
- self.fp.close()
- self.fp = None
- if self._handler:
- self._handler._request_closed(self, self._host,
- self._connection)
-
- def close_connection(self):
- self._handler._remove_connection(self._host, self._connection, close=1)
- self.close()
-
- def info(self):
- return self.headers
-
- def geturl(self):
- return self._url
-
- def read(self, amt=None):
- # the _rbuf test is only in this first if for speed. It's not
- # logically necessary
- if self._rbuf and not amt is None:
- L = len(self._rbuf)
- if amt > L:
- amt -= L
- else:
- s = self._rbuf[:amt]
- self._rbuf = self._rbuf[amt:]
- return s
-
- s = self._rbuf + self._raw_read(amt)
- self._rbuf = ''
- return s
-
- # stolen from Python SVN #68532 to fix issue1088
- def _read_chunked(self, amt):
- chunk_left = self.chunk_left
- value = ''
-
- # XXX This accumulates chunks by repeated string concatenation,
- # which is not efficient as the number or size of chunks gets big.
- while True:
- if chunk_left is None:
- line = self.fp.readline()
- i = line.find(';')
- if i >= 0:
- line = line[:i] # strip chunk-extensions
- try:
- chunk_left = int(line, 16)
- except ValueError:
- # close the connection as protocol synchronisation is
- # probably lost
- self.close()
- raise httplib.IncompleteRead(value)
- if chunk_left == 0:
- break
- if amt is None:
- value += self._safe_read(chunk_left)
- elif amt < chunk_left:
- value += self._safe_read(amt)
- self.chunk_left = chunk_left - amt
- return value
- elif amt == chunk_left:
- value += self._safe_read(amt)
- self._safe_read(2) # toss the CRLF at the end of the chunk
- self.chunk_left = None
- return value
- else:
- value += self._safe_read(chunk_left)
- amt -= chunk_left
-
- # we read the whole chunk, get another
- self._safe_read(2) # toss the CRLF at the end of the chunk
- chunk_left = None
-
- # read and discard trailer up to the CRLF terminator
- ### note: we shouldn't have any trailers!
- while True:
- line = self.fp.readline()
- if not line:
- # a vanishingly small number of sites EOF without
- # sending the trailer
- break
- if line == '\r\n':
- break
-
- # we read everything; close the "file"
- self.close()
-
- return value
-
- def readline(self, limit=-1):
- i = self._rbuf.find('\n')
- while i < 0 and not (0 < limit <= len(self._rbuf)):
- new = self._raw_read(self._rbufsize)
- if not new: break
- i = new.find('\n')
- if i >= 0: i = i + len(self._rbuf)
- self._rbuf = self._rbuf + new
- if i < 0: i = len(self._rbuf)
- else: i = i+1
- if 0 <= limit < len(self._rbuf): i = limit
- data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
- return data
-
- def readlines(self, sizehint = 0):
- total = 0
- list = []
- while 1:
- line = self.readline()
- if not line: break
- list.append(line)
- total += len(line)
- if sizehint and total >= sizehint:
- break
- return list
-
-
-class HTTPConnection(httplib.HTTPConnection):
- # use the modified response class
- response_class = HTTPResponse
-
-#########################################################################
-##### TEST FUNCTIONS
-#########################################################################
-
-def error_handler(url):
- global HANDLE_ERRORS
- orig = HANDLE_ERRORS
- keepalive_handler = HTTPHandler()
- opener = urllib2.build_opener(keepalive_handler)
- urllib2.install_opener(opener)
- pos = {0: 'off', 1: 'on'}
- for i in (0, 1):
- print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
- HANDLE_ERRORS = i
- try:
- fo = urllib2.urlopen(url)
- fo.read()
- fo.close()
- try: status, reason = fo.status, fo.reason
- except AttributeError: status, reason = None, None
- except IOError, e:
- print " EXCEPTION: %s" % e
- raise
- else:
- print " status = %s, reason = %s" % (status, reason)
- HANDLE_ERRORS = orig
- hosts = keepalive_handler.open_connections()
- print "open connections:", hosts
- keepalive_handler.close_all()
-
-def md5(s):
- try:
- from hashlib import md5 as _md5
- except ImportError:
- from md5 import md5 as _md5
- global md5
- md5 = _md5
- return _md5(s)
-
-def continuity(url):
- format = '%25s: %s'
-
- # first fetch the file with the normal http handler
- opener = urllib2.build_opener()
- urllib2.install_opener(opener)
- fo = urllib2.urlopen(url)
- foo = fo.read()
- fo.close()
- m = md5.new(foo)
- print format % ('normal urllib', m.hexdigest())
-
- # now install the keepalive handler and try again
- opener = urllib2.build_opener(HTTPHandler())
- urllib2.install_opener(opener)
-
- fo = urllib2.urlopen(url)
- foo = fo.read()
- fo.close()
- m = md5.new(foo)
- print format % ('keepalive read', m.hexdigest())
-
- fo = urllib2.urlopen(url)
- foo = ''
- while 1:
- f = fo.readline()
- if f: foo = foo + f
- else: break
- fo.close()
- m = md5.new(foo)
- print format % ('keepalive readline', m.hexdigest())
-
-def comp(N, url):
- print ' making %i connections to:\n %s' % (N, url)
-
- sys.stdout.write(' first using the normal urllib handlers')
- # first use normal opener
- opener = urllib2.build_opener()
- urllib2.install_opener(opener)
- t1 = fetch(N, url)
- print ' TIME: %.3f s' % t1
-
- sys.stdout.write(' now using the keepalive handler ')
- # now install the keepalive handler and try again
- opener = urllib2.build_opener(HTTPHandler())
- urllib2.install_opener(opener)
- t2 = fetch(N, url)
- print ' TIME: %.3f s' % t2
- print ' improvement factor: %.2f' % (t1/t2, )
-
-def fetch(N, url, delay=0):
- import time
- lens = []
- starttime = time.time()
- for i in range(N):
- if delay and i > 0: time.sleep(delay)
- fo = urllib2.urlopen(url)
- foo = fo.read()
- fo.close()
- lens.append(len(foo))
- diff = time.time() - starttime
-
- j = 0
- for i in lens[1:]:
- j = j + 1
- if not i == lens[0]:
- print "WARNING: inconsistent length on read %i: %i" % (j, i)
-
- return diff
-
-def test_timeout(url):
- global DEBUG
- dbbackup = DEBUG
- class FakeLogger:
- def debug(self, msg, *args): print msg % args
- info = warning = error = debug
- DEBUG = FakeLogger()
- print " fetching the file to establish a connection"
- fo = urllib2.urlopen(url)
- data1 = fo.read()
- fo.close()
-
- i = 20
- print " waiting %i seconds for the server to close the connection" % i
- while i > 0:
- sys.stdout.write('\r %2i' % i)
- sys.stdout.flush()
- time.sleep(1)
- i -= 1
- sys.stderr.write('\r')
-
- print " fetching the file a second time"
- fo = urllib2.urlopen(url)
- data2 = fo.read()
- fo.close()
-
- if data1 == data2:
- print ' data are identical'
- else:
- print ' ERROR: DATA DIFFER'
-
- DEBUG = dbbackup
-
-
-def test(url, N=10):
- print "checking error hander (do this on a non-200)"
- try: error_handler(url)
- except IOError:
- print "exiting - exception will prevent further tests"
- sys.exit()
- print
- print "performing continuity test (making sure stuff isn't corrupted)"
- continuity(url)
- print
- print "performing speed comparison"
- comp(N, url)
- print
- print "performing dropped-connection check"
- test_timeout(url)
-
-if __name__ == '__main__':
- import time
- import sys
- try:
- N = int(sys.argv[1])
- url = sys.argv[2]
- except:
- print "%s <integer> <url>" % sys.argv[0]
- else:
- test(url, N)
diff --git a/sys/lib/python/mercurial/localrepo.py b/sys/lib/python/mercurial/localrepo.py
deleted file mode 100644
index f6000b502..000000000
--- a/sys/lib/python/mercurial/localrepo.py
+++ /dev/null
@@ -1,2156 +0,0 @@
-# localrepo.py - read/write repository class for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import bin, hex, nullid, nullrev, short
-from i18n import _
-import repo, changegroup, subrepo
-import changelog, dirstate, filelog, manifest, context
-import lock, transaction, store, encoding
-import util, extensions, hook, error
-import match as match_
-import merge as merge_
-import tags as tags_
-from lock import release
-import weakref, stat, errno, os, time, inspect
-propertycache = util.propertycache
-
-class localrepository(repo.repository):
- capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
- supported = set('revlogv1 store fncache shared'.split())
-
- def __init__(self, baseui, path=None, create=0):
- repo.repository.__init__(self)
- self.root = os.path.realpath(path)
- self.path = os.path.join(self.root, ".hg")
- self.origroot = path
- self.opener = util.opener(self.path)
- self.wopener = util.opener(self.root)
- self.baseui = baseui
- self.ui = baseui.copy()
-
- try:
- self.ui.readconfig(self.join("hgrc"), self.root)
- extensions.loadall(self.ui)
- except IOError:
- pass
-
- if not os.path.isdir(self.path):
- if create:
- if not os.path.exists(path):
- os.makedirs(path)
- os.mkdir(self.path)
- requirements = ["revlogv1"]
- if self.ui.configbool('format', 'usestore', True):
- os.mkdir(os.path.join(self.path, "store"))
- requirements.append("store")
- if self.ui.configbool('format', 'usefncache', True):
- requirements.append("fncache")
- # create an invalid changelog
- self.opener("00changelog.i", "a").write(
- '\0\0\0\2' # represents revlogv2
- ' dummy changelog to prevent using the old repo layout'
- )
- reqfile = self.opener("requires", "w")
- for r in requirements:
- reqfile.write("%s\n" % r)
- reqfile.close()
- else:
- raise error.RepoError(_("repository %s not found") % path)
- elif create:
- raise error.RepoError(_("repository %s already exists") % path)
- else:
- # find requirements
- requirements = set()
- try:
- requirements = set(self.opener("requires").read().splitlines())
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
- for r in requirements - self.supported:
- raise error.RepoError(_("requirement '%s' not supported") % r)
-
- self.sharedpath = self.path
- try:
- s = os.path.realpath(self.opener("sharedpath").read())
- if not os.path.exists(s):
- raise error.RepoError(
- _('.hg/sharedpath points to nonexistent directory %s') % s)
- self.sharedpath = s
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
-
- self.store = store.store(requirements, self.sharedpath, util.opener)
- self.spath = self.store.path
- self.sopener = self.store.opener
- self.sjoin = self.store.join
- self.opener.createmode = self.store.createmode
-
- # These two define the set of tags for this repository. _tags
- # maps tag name to node; _tagtypes maps tag name to 'global' or
- # 'local'. (Global tags are defined by .hgtags across all
- # heads, and local tags are defined in .hg/localtags.) They
- # constitute the in-memory cache of tags.
- self._tags = None
- self._tagtypes = None
-
- self.branchcache = None
- self._ubranchcache = None # UTF-8 version of branchcache
- self._branchcachetip = None
- self.nodetagscache = None
- self.filterpats = {}
- self._datafilters = {}
- self._transref = self._lockref = self._wlockref = None
-
- @propertycache
- def changelog(self):
- c = changelog.changelog(self.sopener)
- if 'HG_PENDING' in os.environ:
- p = os.environ['HG_PENDING']
- if p.startswith(self.root):
- c.readpending('00changelog.i.a')
- self.sopener.defversion = c.version
- return c
-
- @propertycache
- def manifest(self):
- return manifest.manifest(self.sopener)
-
- @propertycache
- def dirstate(self):
- return dirstate.dirstate(self.opener, self.ui, self.root)
-
- def __getitem__(self, changeid):
- if changeid is None:
- return context.workingctx(self)
- return context.changectx(self, changeid)
-
- def __nonzero__(self):
- return True
-
- def __len__(self):
- return len(self.changelog)
-
- def __iter__(self):
- for i in xrange(len(self)):
- yield i
-
- def url(self):
- return 'file:' + self.root
-
- def hook(self, name, throw=False, **args):
- return hook.hook(self.ui, self, name, throw, **args)
-
- tag_disallowed = ':\r\n'
-
- def _tag(self, names, node, message, local, user, date, extra={}):
- if isinstance(names, str):
- allchars = names
- names = (names,)
- else:
- allchars = ''.join(names)
- for c in self.tag_disallowed:
- if c in allchars:
- raise util.Abort(_('%r cannot be used in a tag name') % c)
-
- for name in names:
- self.hook('pretag', throw=True, node=hex(node), tag=name,
- local=local)
-
- def writetags(fp, names, munge, prevtags):
- fp.seek(0, 2)
- if prevtags and prevtags[-1] != '\n':
- fp.write('\n')
- for name in names:
- m = munge and munge(name) or name
- if self._tagtypes and name in self._tagtypes:
- old = self._tags.get(name, nullid)
- fp.write('%s %s\n' % (hex(old), m))
- fp.write('%s %s\n' % (hex(node), m))
- fp.close()
-
- prevtags = ''
- if local:
- try:
- fp = self.opener('localtags', 'r+')
- except IOError:
- fp = self.opener('localtags', 'a')
- else:
- prevtags = fp.read()
-
- # local tags are stored in the current charset
- writetags(fp, names, None, prevtags)
- for name in names:
- self.hook('tag', node=hex(node), tag=name, local=local)
- return
-
- try:
- fp = self.wfile('.hgtags', 'rb+')
- except IOError:
- fp = self.wfile('.hgtags', 'ab')
- else:
- prevtags = fp.read()
-
- # committed tags are stored in UTF-8
- writetags(fp, names, encoding.fromlocal, prevtags)
-
- if '.hgtags' not in self.dirstate:
- self.add(['.hgtags'])
-
- m = match_.exact(self.root, '', ['.hgtags'])
- tagnode = self.commit(message, user, date, extra=extra, match=m)
-
- for name in names:
- self.hook('tag', node=hex(node), tag=name, local=local)
-
- return tagnode
-
- def tag(self, names, node, message, local, user, date):
- '''tag a revision with one or more symbolic names.
-
- names is a list of strings or, when adding a single tag, names may be a
- string.
-
- if local is True, the tags are stored in a per-repository file.
- otherwise, they are stored in the .hgtags file, and a new
- changeset is committed with the change.
-
- keyword arguments:
-
- local: whether to store tags in non-version-controlled file
- (default False)
-
- message: commit message to use if committing
-
- user: name of user to use if committing
-
- date: date tuple to use if committing'''
-
- for x in self.status()[:5]:
- if '.hgtags' in x:
- raise util.Abort(_('working copy of .hgtags is changed '
- '(please commit .hgtags manually)'))
-
- self.tags() # instantiate the cache
- self._tag(names, node, message, local, user, date)
-
- def tags(self):
- '''return a mapping of tag to node'''
- if self._tags is None:
- (self._tags, self._tagtypes) = self._findtags()
-
- return self._tags
-
- def _findtags(self):
- '''Do the hard work of finding tags. Return a pair of dicts
- (tags, tagtypes) where tags maps tag name to node, and tagtypes
- maps tag name to a string like \'global\' or \'local\'.
- Subclasses or extensions are free to add their own tags, but
- should be aware that the returned dicts will be retained for the
- duration of the localrepo object.'''
-
- # XXX what tagtype should subclasses/extensions use? Currently
- # mq and bookmarks add tags, but do not set the tagtype at all.
- # Should each extension invent its own tag type? Should there
- # be one tagtype for all such "virtual" tags? Or is the status
- # quo fine?
-
- alltags = {} # map tag name to (node, hist)
- tagtypes = {}
-
- tags_.findglobaltags(self.ui, self, alltags, tagtypes)
- tags_.readlocaltags(self.ui, self, alltags, tagtypes)
-
- # Build the return dicts. Have to re-encode tag names because
- # the tags module always uses UTF-8 (in order not to lose info
- # writing to the cache), but the rest of Mercurial wants them in
- # local encoding.
- tags = {}
- for (name, (node, hist)) in alltags.iteritems():
- if node != nullid:
- tags[encoding.tolocal(name)] = node
- tags['tip'] = self.changelog.tip()
- tagtypes = dict([(encoding.tolocal(name), value)
- for (name, value) in tagtypes.iteritems()])
- return (tags, tagtypes)
-
- def tagtype(self, tagname):
- '''
- return the type of the given tag. result can be:
-
- 'local' : a local tag
- 'global' : a global tag
- None : tag does not exist
- '''
-
- self.tags()
-
- return self._tagtypes.get(tagname)
-
- def tagslist(self):
- '''return a list of tags ordered by revision'''
- l = []
- for t, n in self.tags().iteritems():
- try:
- r = self.changelog.rev(n)
- except:
- r = -2 # sort to the beginning of the list if unknown
- l.append((r, t, n))
- return [(t, n) for r, t, n in sorted(l)]
-
- def nodetags(self, node):
- '''return the tags associated with a node'''
- if not self.nodetagscache:
- self.nodetagscache = {}
- for t, n in self.tags().iteritems():
- self.nodetagscache.setdefault(n, []).append(t)
- return self.nodetagscache.get(node, [])
-
- def _branchtags(self, partial, lrev):
- # TODO: rename this function?
- tiprev = len(self) - 1
- if lrev != tiprev:
- self._updatebranchcache(partial, lrev+1, tiprev+1)
- self._writebranchcache(partial, self.changelog.tip(), tiprev)
-
- return partial
-
- def branchmap(self):
- tip = self.changelog.tip()
- if self.branchcache is not None and self._branchcachetip == tip:
- return self.branchcache
-
- oldtip = self._branchcachetip
- self._branchcachetip = tip
- if self.branchcache is None:
- self.branchcache = {} # avoid recursion in changectx
- else:
- self.branchcache.clear() # keep using the same dict
- if oldtip is None or oldtip not in self.changelog.nodemap:
- partial, last, lrev = self._readbranchcache()
- else:
- lrev = self.changelog.rev(oldtip)
- partial = self._ubranchcache
-
- self._branchtags(partial, lrev)
- # this private cache holds all heads (not just tips)
- self._ubranchcache = partial
-
- # the branch cache is stored on disk as UTF-8, but in the local
- # charset internally
- for k, v in partial.iteritems():
- self.branchcache[encoding.tolocal(k)] = v
- return self.branchcache
-
-
- def branchtags(self):
- '''return a dict where branch names map to the tipmost head of
- the branch, open heads come before closed'''
- bt = {}
- for bn, heads in self.branchmap().iteritems():
- head = None
- for i in range(len(heads)-1, -1, -1):
- h = heads[i]
- if 'close' not in self.changelog.read(h)[5]:
- head = h
- break
- # no open heads were found
- if head is None:
- head = heads[-1]
- bt[bn] = head
- return bt
-
-
- def _readbranchcache(self):
- partial = {}
- try:
- f = self.opener("branchheads.cache")
- lines = f.read().split('\n')
- f.close()
- except (IOError, OSError):
- return {}, nullid, nullrev
-
- try:
- last, lrev = lines.pop(0).split(" ", 1)
- last, lrev = bin(last), int(lrev)
- if lrev >= len(self) or self[lrev].node() != last:
- # invalidate the cache
- raise ValueError('invalidating branch cache (tip differs)')
- for l in lines:
- if not l: continue
- node, label = l.split(" ", 1)
- partial.setdefault(label.strip(), []).append(bin(node))
- except KeyboardInterrupt:
- raise
- except Exception, inst:
- if self.ui.debugflag:
- self.ui.warn(str(inst), '\n')
- partial, last, lrev = {}, nullid, nullrev
- return partial, last, lrev
-
- def _writebranchcache(self, branches, tip, tiprev):
- try:
- f = self.opener("branchheads.cache", "w", atomictemp=True)
- f.write("%s %s\n" % (hex(tip), tiprev))
- for label, nodes in branches.iteritems():
- for node in nodes:
- f.write("%s %s\n" % (hex(node), label))
- f.rename()
- except (IOError, OSError):
- pass
-
- def _updatebranchcache(self, partial, start, end):
- # collect new branch entries
- newbranches = {}
- for r in xrange(start, end):
- c = self[r]
- newbranches.setdefault(c.branch(), []).append(c.node())
- # if older branchheads are reachable from new ones, they aren't
- # really branchheads. Note checking parents is insufficient:
- # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
- for branch, newnodes in newbranches.iteritems():
- bheads = partial.setdefault(branch, [])
- bheads.extend(newnodes)
- if len(bheads) < 2:
- continue
- newbheads = []
- # starting from tip means fewer passes over reachable
- while newnodes:
- latest = newnodes.pop()
- if latest not in bheads:
- continue
- minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
- reachable = self.changelog.reachable(latest, minbhrev)
- bheads = [b for b in bheads if b not in reachable]
- newbheads.insert(0, latest)
- bheads.extend(newbheads)
- partial[branch] = bheads
-
- def lookup(self, key):
- if isinstance(key, int):
- return self.changelog.node(key)
- elif key == '.':
- return self.dirstate.parents()[0]
- elif key == 'null':
- return nullid
- elif key == 'tip':
- return self.changelog.tip()
- n = self.changelog._match(key)
- if n:
- return n
- if key in self.tags():
- return self.tags()[key]
- if key in self.branchtags():
- return self.branchtags()[key]
- n = self.changelog._partialmatch(key)
- if n:
- return n
-
- # can't find key, check if it might have come from damaged dirstate
- if key in self.dirstate.parents():
- raise error.Abort(_("working directory has unknown parent '%s'!")
- % short(key))
- try:
- if len(key) == 20:
- key = hex(key)
- except:
- pass
- raise error.RepoError(_("unknown revision '%s'") % key)
-
- def local(self):
- return True
-
- def join(self, f):
- return os.path.join(self.path, f)
-
- def wjoin(self, f):
- return os.path.join(self.root, f)
-
- def rjoin(self, f):
- return os.path.join(self.root, util.pconvert(f))
-
- def file(self, f):
- if f[0] == '/':
- f = f[1:]
- return filelog.filelog(self.sopener, f)
-
- def changectx(self, changeid):
- return self[changeid]
-
- def parents(self, changeid=None):
- '''get list of changectxs for parents of changeid'''
- return self[changeid].parents()
-
- def filectx(self, path, changeid=None, fileid=None):
- """changeid can be a changeset revision, node, or tag.
- fileid can be a file revision or node."""
- return context.filectx(self, path, changeid, fileid)
-
- def getcwd(self):
- return self.dirstate.getcwd()
-
- def pathto(self, f, cwd=None):
- return self.dirstate.pathto(f, cwd)
-
- def wfile(self, f, mode='r'):
- return self.wopener(f, mode)
-
- def _link(self, f):
- return os.path.islink(self.wjoin(f))
-
- def _filter(self, filter, filename, data):
- if filter not in self.filterpats:
- l = []
- for pat, cmd in self.ui.configitems(filter):
- if cmd == '!':
- continue
- mf = match_.match(self.root, '', [pat])
- fn = None
- params = cmd
- for name, filterfn in self._datafilters.iteritems():
- if cmd.startswith(name):
- fn = filterfn
- params = cmd[len(name):].lstrip()
- break
- if not fn:
- fn = lambda s, c, **kwargs: util.filter(s, c)
- # Wrap old filters not supporting keyword arguments
- if not inspect.getargspec(fn)[2]:
- oldfn = fn
- fn = lambda s, c, **kwargs: oldfn(s, c)
- l.append((mf, fn, params))
- self.filterpats[filter] = l
-
- for mf, fn, cmd in self.filterpats[filter]:
- if mf(filename):
- self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
- data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
- break
-
- return data
-
- def adddatafilter(self, name, filter):
- self._datafilters[name] = filter
-
- def wread(self, filename):
- if self._link(filename):
- data = os.readlink(self.wjoin(filename))
- else:
- data = self.wopener(filename, 'r').read()
- return self._filter("encode", filename, data)
-
- def wwrite(self, filename, data, flags):
- data = self._filter("decode", filename, data)
- try:
- os.unlink(self.wjoin(filename))
- except OSError:
- pass
- if 'l' in flags:
- self.wopener.symlink(data, filename)
- else:
- self.wopener(filename, 'w').write(data)
- if 'x' in flags:
- util.set_flags(self.wjoin(filename), False, True)
-
- def wwritedata(self, filename, data):
- return self._filter("decode", filename, data)
-
- def transaction(self):
- tr = self._transref and self._transref() or None
- if tr and tr.running():
- return tr.nest()
-
- # abort here if the journal already exists
- if os.path.exists(self.sjoin("journal")):
- raise error.RepoError(_("journal already exists - run hg recover"))
-
- # save dirstate for rollback
- try:
- ds = self.opener("dirstate").read()
- except IOError:
- ds = ""
- self.opener("journal.dirstate", "w").write(ds)
- self.opener("journal.branch", "w").write(self.dirstate.branch())
-
- renames = [(self.sjoin("journal"), self.sjoin("undo")),
- (self.join("journal.dirstate"), self.join("undo.dirstate")),
- (self.join("journal.branch"), self.join("undo.branch"))]
- tr = transaction.transaction(self.ui.warn, self.sopener,
- self.sjoin("journal"),
- aftertrans(renames),
- self.store.createmode)
- self._transref = weakref.ref(tr)
- return tr
-
- def recover(self):
- lock = self.lock()
- try:
- if os.path.exists(self.sjoin("journal")):
- self.ui.status(_("rolling back interrupted transaction\n"))
- transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
- self.invalidate()
- return True
- else:
- self.ui.warn(_("no interrupted transaction available\n"))
- return False
- finally:
- lock.release()
-
- def rollback(self):
- wlock = lock = None
- try:
- wlock = self.wlock()
- lock = self.lock()
- if os.path.exists(self.sjoin("undo")):
- self.ui.status(_("rolling back last transaction\n"))
- transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
- util.rename(self.join("undo.dirstate"), self.join("dirstate"))
- try:
- branch = self.opener("undo.branch").read()
- self.dirstate.setbranch(branch)
- except IOError:
- self.ui.warn(_("Named branch could not be reset, "
- "current branch still is: %s\n")
- % encoding.tolocal(self.dirstate.branch()))
- self.invalidate()
- self.dirstate.invalidate()
- self.destroyed()
- else:
- self.ui.warn(_("no rollback information available\n"))
- finally:
- release(lock, wlock)
-
- def invalidate(self):
- for a in "changelog manifest".split():
- if a in self.__dict__:
- delattr(self, a)
- self._tags = None
- self._tagtypes = None
- self.nodetagscache = None
- self.branchcache = None
- self._ubranchcache = None
- self._branchcachetip = None
-
- def _lock(self, lockname, wait, releasefn, acquirefn, desc):
- try:
- l = lock.lock(lockname, 0, releasefn, desc=desc)
- except error.LockHeld, inst:
- if not wait:
- raise
- self.ui.warn(_("waiting for lock on %s held by %r\n") %
- (desc, inst.locker))
- # default to 600 seconds timeout
- l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
- releasefn, desc=desc)
- if acquirefn:
- acquirefn()
- return l
-
- def lock(self, wait=True):
- '''Lock the repository store (.hg/store) and return a weak reference
- to the lock. Use this before modifying the store (e.g. committing or
- stripping). If you are opening a transaction, get a lock as well.)'''
- l = self._lockref and self._lockref()
- if l is not None and l.held:
- l.lock()
- return l
-
- l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
- _('repository %s') % self.origroot)
- self._lockref = weakref.ref(l)
- return l
-
- def wlock(self, wait=True):
- '''Lock the non-store parts of the repository (everything under
- .hg except .hg/store) and return a weak reference to the lock.
- Use this before modifying files in .hg.'''
- l = self._wlockref and self._wlockref()
- if l is not None and l.held:
- l.lock()
- return l
-
- l = self._lock(self.join("wlock"), wait, self.dirstate.write,
- self.dirstate.invalidate, _('working directory of %s') %
- self.origroot)
- self._wlockref = weakref.ref(l)
- return l
-
- def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
- """
- commit an individual file as part of a larger transaction
- """
-
- fname = fctx.path()
- text = fctx.data()
- flog = self.file(fname)
- fparent1 = manifest1.get(fname, nullid)
- fparent2 = fparent2o = manifest2.get(fname, nullid)
-
- meta = {}
- copy = fctx.renamed()
- if copy and copy[0] != fname:
- # Mark the new revision of this file as a copy of another
- # file. This copy data will effectively act as a parent
- # of this new revision. If this is a merge, the first
- # parent will be the nullid (meaning "look up the copy data")
- # and the second one will be the other parent. For example:
- #
- # 0 --- 1 --- 3 rev1 changes file foo
- # \ / rev2 renames foo to bar and changes it
- # \- 2 -/ rev3 should have bar with all changes and
- # should record that bar descends from
- # bar in rev2 and foo in rev1
- #
- # this allows this merge to succeed:
- #
- # 0 --- 1 --- 3 rev4 reverts the content change from rev2
- # \ / merging rev3 and rev4 should use bar@rev2
- # \- 2 --- 4 as the merge base
- #
-
- cfname = copy[0]
- crev = manifest1.get(cfname)
- newfparent = fparent2
-
- if manifest2: # branch merge
- if fparent2 == nullid or crev is None: # copied on remote side
- if cfname in manifest2:
- crev = manifest2[cfname]
- newfparent = fparent1
-
- # find source in nearest ancestor if we've lost track
- if not crev:
- self.ui.debug(_(" %s: searching for copy revision for %s\n") %
- (fname, cfname))
- for ancestor in self['.'].ancestors():
- if cfname in ancestor:
- crev = ancestor[cfname].filenode()
- break
-
- self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
- meta["copy"] = cfname
- meta["copyrev"] = hex(crev)
- fparent1, fparent2 = nullid, newfparent
- elif fparent2 != nullid:
- # is one parent an ancestor of the other?
- fparentancestor = flog.ancestor(fparent1, fparent2)
- if fparentancestor == fparent1:
- fparent1, fparent2 = fparent2, nullid
- elif fparentancestor == fparent2:
- fparent2 = nullid
-
- # is the file changed?
- if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
- changelist.append(fname)
- return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
-
- # are just the flags changed during merge?
- if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
- changelist.append(fname)
-
- return fparent1
-
- def commit(self, text="", user=None, date=None, match=None, force=False,
- editor=False, extra={}):
- """Add a new revision to current repository.
-
- Revision information is gathered from the working directory,
- match can be used to filter the committed files. If editor is
- supplied, it is called to get a commit message.
- """
-
- def fail(f, msg):
- raise util.Abort('%s: %s' % (f, msg))
-
- if not match:
- match = match_.always(self.root, '')
-
- if not force:
- vdirs = []
- match.dir = vdirs.append
- match.bad = fail
-
- wlock = self.wlock()
- try:
- p1, p2 = self.dirstate.parents()
- wctx = self[None]
-
- if (not force and p2 != nullid and match and
- (match.files() or match.anypats())):
- raise util.Abort(_('cannot partially commit a merge '
- '(do not specify files or patterns)'))
-
- changes = self.status(match=match, clean=force)
- if force:
- changes[0].extend(changes[6]) # mq may commit unchanged files
-
- # check subrepos
- subs = []
- for s in wctx.substate:
- if match(s) and wctx.sub(s).dirty():
- subs.append(s)
- if subs and '.hgsubstate' not in changes[0]:
- changes[0].insert(0, '.hgsubstate')
-
- # make sure all explicit patterns are matched
- if not force and match.files():
- matched = set(changes[0] + changes[1] + changes[2])
-
- for f in match.files():
- if f == '.' or f in matched or f in wctx.substate:
- continue
- if f in changes[3]: # missing
- fail(f, _('file not found!'))
- if f in vdirs: # visited directory
- d = f + '/'
- for mf in matched:
- if mf.startswith(d):
- break
- else:
- fail(f, _("no match under directory!"))
- elif f not in self.dirstate:
- fail(f, _("file not tracked!"))
-
- if (not force and not extra.get("close") and p2 == nullid
- and not (changes[0] or changes[1] or changes[2])
- and self[None].branch() == self['.'].branch()):
- return None
-
- ms = merge_.mergestate(self)
- for f in changes[0]:
- if f in ms and ms[f] == 'u':
- raise util.Abort(_("unresolved merge conflicts "
- "(see hg resolve)"))
-
- cctx = context.workingctx(self, (p1, p2), text, user, date,
- extra, changes)
- if editor:
- cctx._text = editor(self, cctx, subs)
-
- # commit subs
- if subs:
- state = wctx.substate.copy()
- for s in subs:
- self.ui.status(_('committing subrepository %s\n') % s)
- sr = wctx.sub(s).commit(cctx._text, user, date)
- state[s] = (state[s][0], sr)
- subrepo.writestate(self, state)
-
- ret = self.commitctx(cctx, True)
-
- # update dirstate and mergestate
- for f in changes[0] + changes[1]:
- self.dirstate.normal(f)
- for f in changes[2]:
- self.dirstate.forget(f)
- self.dirstate.setparents(ret)
- ms.reset()
-
- return ret
-
- finally:
- wlock.release()
-
- def commitctx(self, ctx, error=False):
- """Add a new revision to current repository.
-
- Revision information is passed via the context argument.
- """
-
- tr = lock = None
- removed = ctx.removed()
- p1, p2 = ctx.p1(), ctx.p2()
- m1 = p1.manifest().copy()
- m2 = p2.manifest()
- user = ctx.user()
-
- xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
- self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
-
- lock = self.lock()
- try:
- tr = self.transaction()
- trp = weakref.proxy(tr)
-
- # check in files
- new = {}
- changed = []
- linkrev = len(self)
- for f in sorted(ctx.modified() + ctx.added()):
- self.ui.note(f + "\n")
- try:
- fctx = ctx[f]
- new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
- changed)
- m1.set(f, fctx.flags())
- except (OSError, IOError):
- if error:
- self.ui.warn(_("trouble committing %s!\n") % f)
- raise
- else:
- removed.append(f)
-
- # update manifest
- m1.update(new)
- removed = [f for f in sorted(removed) if f in m1 or f in m2]
- drop = [f for f in removed if f in m1]
- for f in drop:
- del m1[f]
- mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
- p2.manifestnode(), (new, drop))
-
- # update changelog
- self.changelog.delayupdate()
- n = self.changelog.add(mn, changed + removed, ctx.description(),
- trp, p1.node(), p2.node(),
- user, ctx.date(), ctx.extra().copy())
- p = lambda: self.changelog.writepending() and self.root or ""
- self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
- parent2=xp2, pending=p)
- self.changelog.finalize(trp)
- tr.close()
-
- if self.branchcache:
- self.branchtags()
-
- self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
- return n
- finally:
- del tr
- lock.release()
-
- def destroyed(self):
- '''Inform the repository that nodes have been destroyed.
- Intended for use by strip and rollback, so there's a common
- place for anything that has to be done after destroying history.'''
- # XXX it might be nice if we could take the list of destroyed
- # nodes, but I don't see an easy way for rollback() to do that
-
- # Ensure the persistent tag cache is updated. Doing it now
- # means that the tag cache only has to worry about destroyed
- # heads immediately after a strip/rollback. That in turn
- # guarantees that "cachetip == currenttip" (comparing both rev
- # and node) always means no nodes have been added or destroyed.
-
- # XXX this is suboptimal when qrefresh'ing: we strip the current
- # head, refresh the tag cache, then immediately add a new head.
- # But I think doing it this way is necessary for the "instant
- # tag cache retrieval" case to work.
- tags_.findglobaltags(self.ui, self, {}, {})
-
- def walk(self, match, node=None):
- '''
- walk recursively through the directory tree or a given
- changeset, finding all files matched by the match
- function
- '''
- return self[node].walk(match)
-
- def status(self, node1='.', node2=None, match=None,
- ignored=False, clean=False, unknown=False):
- """return status of files between two nodes or node and working directory
-
- If node1 is None, use the first dirstate parent instead.
- If node2 is None, compare node1 with working directory.
- """
-
- def mfmatches(ctx):
- mf = ctx.manifest().copy()
- for fn in mf.keys():
- if not match(fn):
- del mf[fn]
- return mf
-
- if isinstance(node1, context.changectx):
- ctx1 = node1
- else:
- ctx1 = self[node1]
- if isinstance(node2, context.changectx):
- ctx2 = node2
- else:
- ctx2 = self[node2]
-
- working = ctx2.rev() is None
- parentworking = working and ctx1 == self['.']
- match = match or match_.always(self.root, self.getcwd())
- listignored, listclean, listunknown = ignored, clean, unknown
-
- # load earliest manifest first for caching reasons
- if not working and ctx2.rev() < ctx1.rev():
- ctx2.manifest()
-
- if not parentworking:
- def bad(f, msg):
- if f not in ctx1:
- self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
- match.bad = bad
-
- if working: # we need to scan the working dir
- s = self.dirstate.status(match, listignored, listclean, listunknown)
- cmp, modified, added, removed, deleted, unknown, ignored, clean = s
-
- # check for any possibly clean files
- if parentworking and cmp:
- fixup = []
- # do a full compare of any files that might have changed
- for f in sorted(cmp):
- if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
- or ctx1[f].cmp(ctx2[f].data())):
- modified.append(f)
- else:
- fixup.append(f)
-
- if listclean:
- clean += fixup
-
- # update dirstate for files that are actually clean
- if fixup:
- try:
- # updating the dirstate is optional
- # so we don't wait on the lock
- wlock = self.wlock(False)
- try:
- for f in fixup:
- self.dirstate.normal(f)
- finally:
- wlock.release()
- except error.LockError:
- pass
-
- if not parentworking:
- mf1 = mfmatches(ctx1)
- if working:
- # we are comparing working dir against non-parent
- # generate a pseudo-manifest for the working dir
- mf2 = mfmatches(self['.'])
- for f in cmp + modified + added:
- mf2[f] = None
- mf2.set(f, ctx2.flags(f))
- for f in removed:
- if f in mf2:
- del mf2[f]
- else:
- # we are comparing two revisions
- deleted, unknown, ignored = [], [], []
- mf2 = mfmatches(ctx2)
-
- modified, added, clean = [], [], []
- for fn in mf2:
- if fn in mf1:
- if (mf1.flags(fn) != mf2.flags(fn) or
- (mf1[fn] != mf2[fn] and
- (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
- modified.append(fn)
- elif listclean:
- clean.append(fn)
- del mf1[fn]
- else:
- added.append(fn)
- removed = mf1.keys()
-
- r = modified, added, removed, deleted, unknown, ignored, clean
- [l.sort() for l in r]
- return r
-
- def add(self, list):
- wlock = self.wlock()
- try:
- rejected = []
- for f in list:
- p = self.wjoin(f)
- try:
- st = os.lstat(p)
- except:
- self.ui.warn(_("%s does not exist!\n") % f)
- rejected.append(f)
- continue
- if st.st_size > 10000000:
- self.ui.warn(_("%s: files over 10MB may cause memory and"
- " performance problems\n"
- "(use 'hg revert %s' to unadd the file)\n")
- % (f, f))
- if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
- self.ui.warn(_("%s not added: only files and symlinks "
- "supported currently\n") % f)
- rejected.append(p)
- elif self.dirstate[f] in 'amn':
- self.ui.warn(_("%s already tracked!\n") % f)
- elif self.dirstate[f] == 'r':
- self.dirstate.normallookup(f)
- else:
- self.dirstate.add(f)
- return rejected
- finally:
- wlock.release()
-
- def forget(self, list):
- wlock = self.wlock()
- try:
- for f in list:
- if self.dirstate[f] != 'a':
- self.ui.warn(_("%s not added!\n") % f)
- else:
- self.dirstate.forget(f)
- finally:
- wlock.release()
-
- def remove(self, list, unlink=False):
- if unlink:
- for f in list:
- try:
- util.unlink(self.wjoin(f))
- except OSError, inst:
- if inst.errno != errno.ENOENT:
- raise
- wlock = self.wlock()
- try:
- for f in list:
- if unlink and os.path.exists(self.wjoin(f)):
- self.ui.warn(_("%s still exists!\n") % f)
- elif self.dirstate[f] == 'a':
- self.dirstate.forget(f)
- elif f not in self.dirstate:
- self.ui.warn(_("%s not tracked!\n") % f)
- else:
- self.dirstate.remove(f)
- finally:
- wlock.release()
-
- def undelete(self, list):
- manifests = [self.manifest.read(self.changelog.read(p)[0])
- for p in self.dirstate.parents() if p != nullid]
- wlock = self.wlock()
- try:
- for f in list:
- if self.dirstate[f] != 'r':
- self.ui.warn(_("%s not removed!\n") % f)
- else:
- m = f in manifests[0] and manifests[0] or manifests[1]
- t = self.file(f).read(m[f])
- self.wwrite(f, t, m.flags(f))
- self.dirstate.normal(f)
- finally:
- wlock.release()
-
- def copy(self, source, dest):
- p = self.wjoin(dest)
- if not (os.path.exists(p) or os.path.islink(p)):
- self.ui.warn(_("%s does not exist!\n") % dest)
- elif not (os.path.isfile(p) or os.path.islink(p)):
- self.ui.warn(_("copy failed: %s is not a file or a "
- "symbolic link\n") % dest)
- else:
- wlock = self.wlock()
- try:
- if self.dirstate[dest] in '?r':
- self.dirstate.add(dest)
- self.dirstate.copy(source, dest)
- finally:
- wlock.release()
-
- def heads(self, start=None):
- heads = self.changelog.heads(start)
- # sort the output in rev descending order
- heads = [(-self.changelog.rev(h), h) for h in heads]
- return [n for (r, n) in sorted(heads)]
-
- def branchheads(self, branch=None, start=None, closed=False):
- if branch is None:
- branch = self[None].branch()
- branches = self.branchmap()
- if branch not in branches:
- return []
- bheads = branches[branch]
- # the cache returns heads ordered lowest to highest
- bheads.reverse()
- if start is not None:
- # filter out the heads that cannot be reached from startrev
- bheads = self.changelog.nodesbetween([start], bheads)[2]
- if not closed:
- bheads = [h for h in bheads if
- ('close' not in self.changelog.read(h)[5])]
- return bheads
-
- def branches(self, nodes):
- if not nodes:
- nodes = [self.changelog.tip()]
- b = []
- for n in nodes:
- t = n
- while 1:
- p = self.changelog.parents(n)
- if p[1] != nullid or p[0] == nullid:
- b.append((t, n, p[0], p[1]))
- break
- n = p[0]
- return b
-
- def between(self, pairs):
- r = []
-
- for top, bottom in pairs:
- n, l, i = top, [], 0
- f = 1
-
- while n != bottom and n != nullid:
- p = self.changelog.parents(n)[0]
- if i == f:
- l.append(n)
- f = f * 2
- n = p
- i += 1
-
- r.append(l)
-
- return r
-
- def findincoming(self, remote, base=None, heads=None, force=False):
- """Return list of roots of the subsets of missing nodes from remote
-
- If base dict is specified, assume that these nodes and their parents
- exist on the remote side and that no child of a node of base exists
- in both remote and self.
- Furthermore base will be updated to include the nodes that exists
- in self and remote but no children exists in self and remote.
- If a list of heads is specified, return only nodes which are heads
- or ancestors of these heads.
-
- All the ancestors of base are in self and in remote.
- All the descendants of the list returned are missing in self.
- (and so we know that the rest of the nodes are missing in remote, see
- outgoing)
- """
- return self.findcommonincoming(remote, base, heads, force)[1]
-
- def findcommonincoming(self, remote, base=None, heads=None, force=False):
- """Return a tuple (common, missing roots, heads) used to identify
- missing nodes from remote.
-
- If base dict is specified, assume that these nodes and their parents
- exist on the remote side and that no child of a node of base exists
- in both remote and self.
- Furthermore base will be updated to include the nodes that exists
- in self and remote but no children exists in self and remote.
- If a list of heads is specified, return only nodes which are heads
- or ancestors of these heads.
-
- All the ancestors of base are in self and in remote.
- """
- m = self.changelog.nodemap
- search = []
- fetch = set()
- seen = set()
- seenbranch = set()
- if base is None:
- base = {}
-
- if not heads:
- heads = remote.heads()
-
- if self.changelog.tip() == nullid:
- base[nullid] = 1
- if heads != [nullid]:
- return [nullid], [nullid], list(heads)
- return [nullid], [], []
-
- # assume we're closer to the tip than the root
- # and start by examining the heads
- self.ui.status(_("searching for changes\n"))
-
- unknown = []
- for h in heads:
- if h not in m:
- unknown.append(h)
- else:
- base[h] = 1
-
- heads = unknown
- if not unknown:
- return base.keys(), [], []
-
- req = set(unknown)
- reqcnt = 0
-
- # search through remote branches
- # a 'branch' here is a linear segment of history, with four parts:
- # head, root, first parent, second parent
- # (a branch always has two parents (or none) by definition)
- unknown = remote.branches(unknown)
- while unknown:
- r = []
- while unknown:
- n = unknown.pop(0)
- if n[0] in seen:
- continue
-
- self.ui.debug(_("examining %s:%s\n")
- % (short(n[0]), short(n[1])))
- if n[0] == nullid: # found the end of the branch
- pass
- elif n in seenbranch:
- self.ui.debug(_("branch already found\n"))
- continue
- elif n[1] and n[1] in m: # do we know the base?
- self.ui.debug(_("found incomplete branch %s:%s\n")
- % (short(n[0]), short(n[1])))
- search.append(n[0:2]) # schedule branch range for scanning
- seenbranch.add(n)
- else:
- if n[1] not in seen and n[1] not in fetch:
- if n[2] in m and n[3] in m:
- self.ui.debug(_("found new changeset %s\n") %
- short(n[1]))
- fetch.add(n[1]) # earliest unknown
- for p in n[2:4]:
- if p in m:
- base[p] = 1 # latest known
-
- for p in n[2:4]:
- if p not in req and p not in m:
- r.append(p)
- req.add(p)
- seen.add(n[0])
-
- if r:
- reqcnt += 1
- self.ui.debug(_("request %d: %s\n") %
- (reqcnt, " ".join(map(short, r))))
- for p in xrange(0, len(r), 10):
- for b in remote.branches(r[p:p+10]):
- self.ui.debug(_("received %s:%s\n") %
- (short(b[0]), short(b[1])))
- unknown.append(b)
-
- # do binary search on the branches we found
- while search:
- newsearch = []
- reqcnt += 1
- for n, l in zip(search, remote.between(search)):
- l.append(n[1])
- p = n[0]
- f = 1
- for i in l:
- self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
- if i in m:
- if f <= 2:
- self.ui.debug(_("found new branch changeset %s\n") %
- short(p))
- fetch.add(p)
- base[i] = 1
- else:
- self.ui.debug(_("narrowed branch search to %s:%s\n")
- % (short(p), short(i)))
- newsearch.append((p, i))
- break
- p, f = i, f * 2
- search = newsearch
-
- # sanity check our fetch list
- for f in fetch:
- if f in m:
- raise error.RepoError(_("already have changeset ")
- + short(f[:4]))
-
- if base.keys() == [nullid]:
- if force:
- self.ui.warn(_("warning: repository is unrelated\n"))
- else:
- raise util.Abort(_("repository is unrelated"))
-
- self.ui.debug(_("found new changesets starting at ") +
- " ".join([short(f) for f in fetch]) + "\n")
-
- self.ui.debug(_("%d total queries\n") % reqcnt)
-
- return base.keys(), list(fetch), heads
-
- def findoutgoing(self, remote, base=None, heads=None, force=False):
- """Return list of nodes that are roots of subsets not in remote
-
- If base dict is specified, assume that these nodes and their parents
- exist on the remote side.
- If a list of heads is specified, return only nodes which are heads
- or ancestors of these heads, and return a second element which
- contains all remote heads which get new children.
- """
- if base is None:
- base = {}
- self.findincoming(remote, base, heads, force=force)
-
- self.ui.debug(_("common changesets up to ")
- + " ".join(map(short, base.keys())) + "\n")
-
- remain = set(self.changelog.nodemap)
-
- # prune everything remote has from the tree
- remain.remove(nullid)
- remove = base.keys()
- while remove:
- n = remove.pop(0)
- if n in remain:
- remain.remove(n)
- for p in self.changelog.parents(n):
- remove.append(p)
-
- # find every node whose parents have been pruned
- subset = []
- # find every remote head that will get new children
- updated_heads = set()
- for n in remain:
- p1, p2 = self.changelog.parents(n)
- if p1 not in remain and p2 not in remain:
- subset.append(n)
- if heads:
- if p1 in heads:
- updated_heads.add(p1)
- if p2 in heads:
- updated_heads.add(p2)
-
- # this is the set of all roots we have to push
- if heads:
- return subset, list(updated_heads)
- else:
- return subset
-
- def pull(self, remote, heads=None, force=False):
- lock = self.lock()
- try:
- common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
- force=force)
- if fetch == [nullid]:
- self.ui.status(_("requesting all changes\n"))
-
- if not fetch:
- self.ui.status(_("no changes found\n"))
- return 0
-
- if heads is None and remote.capable('changegroupsubset'):
- heads = rheads
-
- if heads is None:
- cg = remote.changegroup(fetch, 'pull')
- else:
- if not remote.capable('changegroupsubset'):
- raise util.Abort(_("Partial pull cannot be done because "
- "other repository doesn't support "
- "changegroupsubset."))
- cg = remote.changegroupsubset(fetch, heads, 'pull')
- return self.addchangegroup(cg, 'pull', remote.url())
- finally:
- lock.release()
-
- def push(self, remote, force=False, revs=None):
- # there are two ways to push to remote repo:
- #
- # addchangegroup assumes local user can lock remote
- # repo (local filesystem, old ssh servers).
- #
- # unbundle assumes local user cannot lock remote repo (new ssh
- # servers, http servers).
-
- if remote.capable('unbundle'):
- return self.push_unbundle(remote, force, revs)
- return self.push_addchangegroup(remote, force, revs)
-
- def prepush(self, remote, force, revs):
- common = {}
- remote_heads = remote.heads()
- inc = self.findincoming(remote, common, remote_heads, force=force)
-
- update, updated_heads = self.findoutgoing(remote, common, remote_heads)
- if revs is not None:
- msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
- else:
- bases, heads = update, self.changelog.heads()
-
- def checkbranch(lheads, rheads, updatelh):
- '''
- check whether there are more local heads than remote heads on
- a specific branch.
-
- lheads: local branch heads
- rheads: remote branch heads
- updatelh: outgoing local branch heads
- '''
-
- warn = 0
-
- if not revs and len(lheads) > len(rheads):
- warn = 1
- else:
- updatelheads = [self.changelog.heads(x, lheads)
- for x in updatelh]
- newheads = set(sum(updatelheads, [])) & set(lheads)
-
- if not newheads:
- return True
-
- for r in rheads:
- if r in self.changelog.nodemap:
- desc = self.changelog.heads(r, heads)
- l = [h for h in heads if h in desc]
- if not l:
- newheads.add(r)
- else:
- newheads.add(r)
- if len(newheads) > len(rheads):
- warn = 1
-
- if warn:
- if not rheads: # new branch requires --force
- self.ui.warn(_("abort: push creates new"
- " remote branch '%s'!\n") %
- self[updatelh[0]].branch())
- else:
- self.ui.warn(_("abort: push creates new remote heads!\n"))
-
- self.ui.status(_("(did you forget to merge?"
- " use push -f to force)\n"))
- return False
- return True
-
- if not bases:
- self.ui.status(_("no changes found\n"))
- return None, 1
- elif not force:
- # Check for each named branch if we're creating new remote heads.
- # To be a remote head after push, node must be either:
- # - unknown locally
- # - a local outgoing head descended from update
- # - a remote head that's known locally and not
- # ancestral to an outgoing head
- #
- # New named branches cannot be created without --force.
-
- if remote_heads != [nullid]:
- if remote.capable('branchmap'):
- localhds = {}
- if not revs:
- localhds = self.branchmap()
- else:
- for n in heads:
- branch = self[n].branch()
- if branch in localhds:
- localhds[branch].append(n)
- else:
- localhds[branch] = [n]
-
- remotehds = remote.branchmap()
-
- for lh in localhds:
- if lh in remotehds:
- rheads = remotehds[lh]
- else:
- rheads = []
- lheads = localhds[lh]
- updatelh = [upd for upd in update
- if self[upd].branch() == lh]
- if not updatelh:
- continue
- if not checkbranch(lheads, rheads, updatelh):
- return None, 0
- else:
- if not checkbranch(heads, remote_heads, update):
- return None, 0
-
- if inc:
- self.ui.warn(_("note: unsynced remote changes!\n"))
-
-
- if revs is None:
- # use the fast path, no race possible on push
- cg = self._changegroup(common.keys(), 'push')
- else:
- cg = self.changegroupsubset(update, revs, 'push')
- return cg, remote_heads
-
- def push_addchangegroup(self, remote, force, revs):
- lock = remote.lock()
- try:
- ret = self.prepush(remote, force, revs)
- if ret[0] is not None:
- cg, remote_heads = ret
- return remote.addchangegroup(cg, 'push', self.url())
- return ret[1]
- finally:
- lock.release()
-
- def push_unbundle(self, remote, force, revs):
- # local repo finds heads on server, finds out what revs it
- # must push. once revs transferred, if server finds it has
- # different heads (someone else won commit/push race), server
- # aborts.
-
- ret = self.prepush(remote, force, revs)
- if ret[0] is not None:
- cg, remote_heads = ret
- if force: remote_heads = ['force']
- return remote.unbundle(cg, remote_heads, 'push')
- return ret[1]
-
- def changegroupinfo(self, nodes, source):
- if self.ui.verbose or source == 'bundle':
- self.ui.status(_("%d changesets found\n") % len(nodes))
- if self.ui.debugflag:
- self.ui.debug(_("list of changesets:\n"))
- for node in nodes:
- self.ui.debug("%s\n" % hex(node))
-
- def changegroupsubset(self, bases, heads, source, extranodes=None):
- """This function generates a changegroup consisting of all the nodes
- that are descendents of any of the bases, and ancestors of any of
- the heads.
-
- It is fairly complex as determining which filenodes and which
- manifest nodes need to be included for the changeset to be complete
- is non-trivial.
-
- Another wrinkle is doing the reverse, figuring out which changeset in
- the changegroup a particular filenode or manifestnode belongs to.
-
- The caller can specify some nodes that must be included in the
- changegroup using the extranodes argument. It should be a dict
- where the keys are the filenames (or 1 for the manifest), and the
- values are lists of (node, linknode) tuples, where node is a wanted
- node and linknode is the changelog node that should be transmitted as
- the linkrev.
- """
-
- if extranodes is None:
- # can we go through the fast path ?
- heads.sort()
- allheads = self.heads()
- allheads.sort()
- if heads == allheads:
- common = []
- # parents of bases are known from both sides
- for n in bases:
- for p in self.changelog.parents(n):
- if p != nullid:
- common.append(p)
- return self._changegroup(common, source)
-
- self.hook('preoutgoing', throw=True, source=source)
-
- # Set up some initial variables
- # Make it easy to refer to self.changelog
- cl = self.changelog
- # msng is short for missing - compute the list of changesets in this
- # changegroup.
- msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
- self.changegroupinfo(msng_cl_lst, source)
- # Some bases may turn out to be superfluous, and some heads may be
- # too. nodesbetween will return the minimal set of bases and heads
- # necessary to re-create the changegroup.
-
- # Known heads are the list of heads that it is assumed the recipient
- # of this changegroup will know about.
- knownheads = set()
- # We assume that all parents of bases are known heads.
- for n in bases:
- knownheads.update(cl.parents(n))
- knownheads.discard(nullid)
- knownheads = list(knownheads)
- if knownheads:
- # Now that we know what heads are known, we can compute which
- # changesets are known. The recipient must know about all
- # changesets required to reach the known heads from the null
- # changeset.
- has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
- junk = None
- # Transform the list into a set.
- has_cl_set = set(has_cl_set)
- else:
- # If there were no known heads, the recipient cannot be assumed to
- # know about any changesets.
- has_cl_set = set()
-
- # Make it easy to refer to self.manifest
- mnfst = self.manifest
- # We don't know which manifests are missing yet
- msng_mnfst_set = {}
- # Nor do we know which filenodes are missing.
- msng_filenode_set = {}
-
- junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
- junk = None
-
- # A changeset always belongs to itself, so the changenode lookup
- # function for a changenode is identity.
- def identity(x):
- return x
-
- # If we determine that a particular file or manifest node must be a
- # node that the recipient of the changegroup will already have, we can
- # also assume the recipient will have all the parents. This function
- # prunes them from the set of missing nodes.
- def prune_parents(revlog, hasset, msngset):
- haslst = list(hasset)
- haslst.sort(key=revlog.rev)
- for node in haslst:
- parentlst = [p for p in revlog.parents(node) if p != nullid]
- while parentlst:
- n = parentlst.pop()
- if n not in hasset:
- hasset.add(n)
- p = [p for p in revlog.parents(n) if p != nullid]
- parentlst.extend(p)
- for n in hasset:
- msngset.pop(n, None)
-
- # This is a function generating function used to set up an environment
- # for the inner function to execute in.
- def manifest_and_file_collector(changedfileset):
- # This is an information gathering function that gathers
- # information from each changeset node that goes out as part of
- # the changegroup. The information gathered is a list of which
- # manifest nodes are potentially required (the recipient may
- # already have them) and total list of all files which were
- # changed in any changeset in the changegroup.
- #
- # We also remember the first changenode we saw any manifest
- # referenced by so we can later determine which changenode 'owns'
- # the manifest.
- def collect_manifests_and_files(clnode):
- c = cl.read(clnode)
- for f in c[3]:
- # This is to make sure we only have one instance of each
- # filename string for each filename.
- changedfileset.setdefault(f, f)
- msng_mnfst_set.setdefault(c[0], clnode)
- return collect_manifests_and_files
-
- # Figure out which manifest nodes (of the ones we think might be part
- # of the changegroup) the recipient must know about and remove them
- # from the changegroup.
- def prune_manifests():
- has_mnfst_set = set()
- for n in msng_mnfst_set:
- # If a 'missing' manifest thinks it belongs to a changenode
- # the recipient is assumed to have, obviously the recipient
- # must have that manifest.
- linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
- if linknode in has_cl_set:
- has_mnfst_set.add(n)
- prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
-
- # Use the information collected in collect_manifests_and_files to say
- # which changenode any manifestnode belongs to.
- def lookup_manifest_link(mnfstnode):
- return msng_mnfst_set[mnfstnode]
-
- # A function generating function that sets up the initial environment
- # the inner function.
- def filenode_collector(changedfiles):
- next_rev = [0]
- # This gathers information from each manifestnode included in the
- # changegroup about which filenodes the manifest node references
- # so we can include those in the changegroup too.
- #
- # It also remembers which changenode each filenode belongs to. It
- # does this by assuming the a filenode belongs to the changenode
- # the first manifest that references it belongs to.
- def collect_msng_filenodes(mnfstnode):
- r = mnfst.rev(mnfstnode)
- if r == next_rev[0]:
- # If the last rev we looked at was the one just previous,
- # we only need to see a diff.
- deltamf = mnfst.readdelta(mnfstnode)
- # For each line in the delta
- for f, fnode in deltamf.iteritems():
- f = changedfiles.get(f, None)
- # And if the file is in the list of files we care
- # about.
- if f is not None:
- # Get the changenode this manifest belongs to
- clnode = msng_mnfst_set[mnfstnode]
- # Create the set of filenodes for the file if
- # there isn't one already.
- ndset = msng_filenode_set.setdefault(f, {})
- # And set the filenode's changelog node to the
- # manifest's if it hasn't been set already.
- ndset.setdefault(fnode, clnode)
- else:
- # Otherwise we need a full manifest.
- m = mnfst.read(mnfstnode)
- # For every file in we care about.
- for f in changedfiles:
- fnode = m.get(f, None)
- # If it's in the manifest
- if fnode is not None:
- # See comments above.
- clnode = msng_mnfst_set[mnfstnode]
- ndset = msng_filenode_set.setdefault(f, {})
- ndset.setdefault(fnode, clnode)
- # Remember the revision we hope to see next.
- next_rev[0] = r + 1
- return collect_msng_filenodes
-
- # We have a list of filenodes we think we need for a file, lets remove
- # all those we know the recipient must have.
- def prune_filenodes(f, filerevlog):
- msngset = msng_filenode_set[f]
- hasset = set()
- # If a 'missing' filenode thinks it belongs to a changenode we
- # assume the recipient must have, then the recipient must have
- # that filenode.
- for n in msngset:
- clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
- if clnode in has_cl_set:
- hasset.add(n)
- prune_parents(filerevlog, hasset, msngset)
-
- # A function generator function that sets up the a context for the
- # inner function.
- def lookup_filenode_link_func(fname):
- msngset = msng_filenode_set[fname]
- # Lookup the changenode the filenode belongs to.
- def lookup_filenode_link(fnode):
- return msngset[fnode]
- return lookup_filenode_link
-
- # Add the nodes that were explicitly requested.
- def add_extra_nodes(name, nodes):
- if not extranodes or name not in extranodes:
- return
-
- for node, linknode in extranodes[name]:
- if node not in nodes:
- nodes[node] = linknode
-
- # Now that we have all theses utility functions to help out and
- # logically divide up the task, generate the group.
- def gengroup():
- # The set of changed files starts empty.
- changedfiles = {}
- # Create a changenode group generator that will call our functions
- # back to lookup the owning changenode and collect information.
- group = cl.group(msng_cl_lst, identity,
- manifest_and_file_collector(changedfiles))
- for chnk in group:
- yield chnk
-
- # The list of manifests has been collected by the generator
- # calling our functions back.
- prune_manifests()
- add_extra_nodes(1, msng_mnfst_set)
- msng_mnfst_lst = msng_mnfst_set.keys()
- # Sort the manifestnodes by revision number.
- msng_mnfst_lst.sort(key=mnfst.rev)
- # Create a generator for the manifestnodes that calls our lookup
- # and data collection functions back.
- group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
- filenode_collector(changedfiles))
- for chnk in group:
- yield chnk
-
- # These are no longer needed, dereference and toss the memory for
- # them.
- msng_mnfst_lst = None
- msng_mnfst_set.clear()
-
- if extranodes:
- for fname in extranodes:
- if isinstance(fname, int):
- continue
- msng_filenode_set.setdefault(fname, {})
- changedfiles[fname] = 1
- # Go through all our files in order sorted by name.
- for fname in sorted(changedfiles):
- filerevlog = self.file(fname)
- if not len(filerevlog):
- raise util.Abort(_("empty or missing revlog for %s") % fname)
- # Toss out the filenodes that the recipient isn't really
- # missing.
- if fname in msng_filenode_set:
- prune_filenodes(fname, filerevlog)
- add_extra_nodes(fname, msng_filenode_set[fname])
- msng_filenode_lst = msng_filenode_set[fname].keys()
- else:
- msng_filenode_lst = []
- # If any filenodes are left, generate the group for them,
- # otherwise don't bother.
- if len(msng_filenode_lst) > 0:
- yield changegroup.chunkheader(len(fname))
- yield fname
- # Sort the filenodes by their revision #
- msng_filenode_lst.sort(key=filerevlog.rev)
- # Create a group generator and only pass in a changenode
- # lookup function as we need to collect no information
- # from filenodes.
- group = filerevlog.group(msng_filenode_lst,
- lookup_filenode_link_func(fname))
- for chnk in group:
- yield chnk
- if fname in msng_filenode_set:
- # Don't need this anymore, toss it to free memory.
- del msng_filenode_set[fname]
- # Signal that no more groups are left.
- yield changegroup.closechunk()
-
- if msng_cl_lst:
- self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
-
- return util.chunkbuffer(gengroup())
-
- def changegroup(self, basenodes, source):
- # to avoid a race we use changegroupsubset() (issue1320)
- return self.changegroupsubset(basenodes, self.heads(), source)
-
- def _changegroup(self, common, source):
- """Generate a changegroup of all nodes that we have that a recipient
- doesn't.
-
- This is much easier than the previous function as we can assume that
- the recipient has any changenode we aren't sending them.
-
- common is the set of common nodes between remote and self"""
-
- self.hook('preoutgoing', throw=True, source=source)
-
- cl = self.changelog
- nodes = cl.findmissing(common)
- revset = set([cl.rev(n) for n in nodes])
- self.changegroupinfo(nodes, source)
-
- def identity(x):
- return x
-
- def gennodelst(log):
- for r in log:
- if log.linkrev(r) in revset:
- yield log.node(r)
-
- def changed_file_collector(changedfileset):
- def collect_changed_files(clnode):
- c = cl.read(clnode)
- changedfileset.update(c[3])
- return collect_changed_files
-
- def lookuprevlink_func(revlog):
- def lookuprevlink(n):
- return cl.node(revlog.linkrev(revlog.rev(n)))
- return lookuprevlink
-
- def gengroup():
- # construct a list of all changed files
- changedfiles = set()
-
- for chnk in cl.group(nodes, identity,
- changed_file_collector(changedfiles)):
- yield chnk
-
- mnfst = self.manifest
- nodeiter = gennodelst(mnfst)
- for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
- yield chnk
-
- for fname in sorted(changedfiles):
- filerevlog = self.file(fname)
- if not len(filerevlog):
- raise util.Abort(_("empty or missing revlog for %s") % fname)
- nodeiter = gennodelst(filerevlog)
- nodeiter = list(nodeiter)
- if nodeiter:
- yield changegroup.chunkheader(len(fname))
- yield fname
- lookup = lookuprevlink_func(filerevlog)
- for chnk in filerevlog.group(nodeiter, lookup):
- yield chnk
-
- yield changegroup.closechunk()
-
- if nodes:
- self.hook('outgoing', node=hex(nodes[0]), source=source)
-
- return util.chunkbuffer(gengroup())
-
- def addchangegroup(self, source, srctype, url, emptyok=False):
- """add changegroup to repo.
-
- return values:
- - nothing changed or no source: 0
- - more heads than before: 1+added heads (2..n)
- - less heads than before: -1-removed heads (-2..-n)
- - number of heads stays the same: 1
- """
- def csmap(x):
- self.ui.debug(_("add changeset %s\n") % short(x))
- return len(cl)
-
- def revmap(x):
- return cl.rev(x)
-
- if not source:
- return 0
-
- self.hook('prechangegroup', throw=True, source=srctype, url=url)
-
- changesets = files = revisions = 0
-
- # write changelog data to temp files so concurrent readers will not see
- # inconsistent view
- cl = self.changelog
- cl.delayupdate()
- oldheads = len(cl.heads())
-
- tr = self.transaction()
- try:
- trp = weakref.proxy(tr)
- # pull off the changeset group
- self.ui.status(_("adding changesets\n"))
- clstart = len(cl)
- chunkiter = changegroup.chunkiter(source)
- if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
- raise util.Abort(_("received changelog group is empty"))
- clend = len(cl)
- changesets = clend - clstart
-
- # pull off the manifest group
- self.ui.status(_("adding manifests\n"))
- chunkiter = changegroup.chunkiter(source)
- # no need to check for empty manifest group here:
- # if the result of the merge of 1 and 2 is the same in 3 and 4,
- # no new manifest will be created and the manifest group will
- # be empty during the pull
- self.manifest.addgroup(chunkiter, revmap, trp)
-
- # process the files
- self.ui.status(_("adding file changes\n"))
- while 1:
- f = changegroup.getchunk(source)
- if not f:
- break
- self.ui.debug(_("adding %s revisions\n") % f)
- fl = self.file(f)
- o = len(fl)
- chunkiter = changegroup.chunkiter(source)
- if fl.addgroup(chunkiter, revmap, trp) is None:
- raise util.Abort(_("received file revlog group is empty"))
- revisions += len(fl) - o
- files += 1
-
- newheads = len(cl.heads())
- heads = ""
- if oldheads and newheads != oldheads:
- heads = _(" (%+d heads)") % (newheads - oldheads)
-
- self.ui.status(_("added %d changesets"
- " with %d changes to %d files%s\n")
- % (changesets, revisions, files, heads))
-
- if changesets > 0:
- p = lambda: cl.writepending() and self.root or ""
- self.hook('pretxnchangegroup', throw=True,
- node=hex(cl.node(clstart)), source=srctype,
- url=url, pending=p)
-
- # make changelog see real files again
- cl.finalize(trp)
-
- tr.close()
- finally:
- del tr
-
- if changesets > 0:
- # forcefully update the on-disk branch cache
- self.ui.debug(_("updating the branch cache\n"))
- self.branchtags()
- self.hook("changegroup", node=hex(cl.node(clstart)),
- source=srctype, url=url)
-
- for i in xrange(clstart, clend):
- self.hook("incoming", node=hex(cl.node(i)),
- source=srctype, url=url)
-
- # never return 0 here:
- if newheads < oldheads:
- return newheads - oldheads - 1
- else:
- return newheads - oldheads + 1
-
-
- def stream_in(self, remote):
- fp = remote.stream_out()
- l = fp.readline()
- try:
- resp = int(l)
- except ValueError:
- raise error.ResponseError(
- _('Unexpected response from remote server:'), l)
- if resp == 1:
- raise util.Abort(_('operation forbidden by server'))
- elif resp == 2:
- raise util.Abort(_('locking the remote repository failed'))
- elif resp != 0:
- raise util.Abort(_('the server sent an unknown error code'))
- self.ui.status(_('streaming all changes\n'))
- l = fp.readline()
- try:
- total_files, total_bytes = map(int, l.split(' ', 1))
- except (ValueError, TypeError):
- raise error.ResponseError(
- _('Unexpected response from remote server:'), l)
- self.ui.status(_('%d files to transfer, %s of data\n') %
- (total_files, util.bytecount(total_bytes)))
- start = time.time()
- for i in xrange(total_files):
- # XXX doesn't support '\n' or '\r' in filenames
- l = fp.readline()
- try:
- name, size = l.split('\0', 1)
- size = int(size)
- except (ValueError, TypeError):
- raise error.ResponseError(
- _('Unexpected response from remote server:'), l)
- self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
- # for backwards compat, name was partially encoded
- ofp = self.sopener(store.decodedir(name), 'w')
- for chunk in util.filechunkiter(fp, limit=size):
- ofp.write(chunk)
- ofp.close()
- elapsed = time.time() - start
- if elapsed <= 0:
- elapsed = 0.001
- self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
- (util.bytecount(total_bytes), elapsed,
- util.bytecount(total_bytes / elapsed)))
- self.invalidate()
- return len(self.heads()) + 1
-
- def clone(self, remote, heads=[], stream=False):
- '''clone remote repository.
-
- keyword arguments:
- heads: list of revs to clone (forces use of pull)
- stream: use streaming clone if possible'''
-
- # now, all clients that can request uncompressed clones can
- # read repo formats supported by all servers that can serve
- # them.
-
- # if revlog format changes, client will have to check version
- # and format flags on "stream" capability, and use
- # uncompressed only if compatible.
-
- if stream and not heads and remote.capable('stream'):
- return self.stream_in(remote)
- return self.pull(remote, heads)
-
-# used to avoid circular references so destructors work
-def aftertrans(files):
- renamefiles = [tuple(t) for t in files]
- def a():
- for src, dest in renamefiles:
- util.rename(src, dest)
- return a
-
-def instance(ui, path, create):
- return localrepository(ui, util.drop_scheme('file', path), create)
-
-def islocal(path):
- return True
diff --git a/sys/lib/python/mercurial/lock.py b/sys/lib/python/mercurial/lock.py
deleted file mode 100644
index a3d116e6a..000000000
--- a/sys/lib/python/mercurial/lock.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# lock.py - simple advisory locking scheme for mercurial
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import util, error
-import errno, os, socket, time
-import warnings
-
-class lock(object):
- '''An advisory lock held by one process to control access to a set
- of files. Non-cooperating processes or incorrectly written scripts
- can ignore Mercurial's locking scheme and stomp all over the
- repository, so don't do that.
-
- Typically used via localrepository.lock() to lock the repository
- store (.hg/store/) or localrepository.wlock() to lock everything
- else under .hg/.'''
-
- # lock is symlink on platforms that support it, file on others.
-
- # symlink is used because create of directory entry and contents
- # are atomic even over nfs.
-
- # old-style lock: symlink to pid
- # new-style lock: symlink to hostname:pid
-
- _host = None
-
- def __init__(self, file, timeout=-1, releasefn=None, desc=None):
- self.f = file
- self.held = 0
- self.timeout = timeout
- self.releasefn = releasefn
- self.desc = desc
- self.lock()
-
- def __del__(self):
- if self.held:
- warnings.warn("use lock.release instead of del lock",
- category=DeprecationWarning,
- stacklevel=2)
-
- # ensure the lock will be removed
- # even if recursive locking did occur
- self.held = 1
-
- self.release()
-
- def lock(self):
- timeout = self.timeout
- while 1:
- try:
- self.trylock()
- return 1
- except error.LockHeld, inst:
- if timeout != 0:
- time.sleep(1)
- if timeout > 0:
- timeout -= 1
- continue
- raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
- inst.locker)
-
- def trylock(self):
- if self.held:
- self.held += 1
- return
- if lock._host is None:
- lock._host = socket.gethostname()
- lockname = '%s:%s' % (lock._host, os.getpid())
- while not self.held:
- try:
- util.makelock(lockname, self.f)
- self.held = 1
- except (OSError, IOError), why:
- if why.errno == errno.EEXIST:
- locker = self.testlock()
- if locker is not None:
- raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
- locker)
- else:
- raise error.LockUnavailable(why.errno, why.strerror,
- why.filename, self.desc)
-
- def testlock(self):
- """return id of locker if lock is valid, else None.
-
- If old-style lock, we cannot tell what machine locker is on.
- with new-style lock, if locker is on this machine, we can
- see if locker is alive. If locker is on this machine but
- not alive, we can safely break lock.
-
- The lock file is only deleted when None is returned.
-
- """
- locker = util.readlock(self.f)
- try:
- host, pid = locker.split(":", 1)
- except ValueError:
- return locker
- if host != lock._host:
- return locker
- try:
- pid = int(pid)
- except:
- return locker
- if util.testpid(pid):
- return locker
- # if locker dead, break lock. must do this with another lock
- # held, or can race and break valid lock.
- try:
- l = lock(self.f + '.break')
- l.trylock()
- os.unlink(self.f)
- l.release()
- except error.LockError:
- return locker
-
- def release(self):
- if self.held > 1:
- self.held -= 1
- elif self.held is 1:
- self.held = 0
- if self.releasefn:
- self.releasefn()
- try:
- os.unlink(self.f)
- except: pass
-
-def release(*locks):
- for lock in locks:
- if lock is not None:
- lock.release()
-
diff --git a/sys/lib/python/mercurial/lsprof.py b/sys/lib/python/mercurial/lsprof.py
deleted file mode 100644
index 07f9425ff..000000000
--- a/sys/lib/python/mercurial/lsprof.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#! /usr/bin/env python
-
-import sys
-from _lsprof import Profiler, profiler_entry
-
-__all__ = ['profile', 'Stats']
-
-def profile(f, *args, **kwds):
- """XXX docstring"""
- p = Profiler()
- p.enable(subcalls=True, builtins=True)
- try:
- f(*args, **kwds)
- finally:
- p.disable()
- return Stats(p.getstats())
-
-
-class Stats(object):
- """XXX docstring"""
-
- def __init__(self, data):
- self.data = data
-
- def sort(self, crit="inlinetime"):
- """XXX docstring"""
- if crit not in profiler_entry.__dict__:
- raise ValueError("Can't sort by %s" % crit)
- self.data.sort(key=lambda x: getattr(x, crit), reverse=True)
- for e in self.data:
- if e.calls:
- e.calls.sort(key=lambda x: getattr(x, crit), reverse=True)
-
- def pprint(self, top=None, file=None, limit=None, climit=None):
- """XXX docstring"""
- if file is None:
- file = sys.stdout
- d = self.data
- if top is not None:
- d = d[:top]
- cols = "% 12s %12s %11.4f %11.4f %s\n"
- hcols = "% 12s %12s %12s %12s %s\n"
- file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
- "Inline(ms)", "module:lineno(function)"))
- count = 0
- for e in d:
- file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
- e.inlinetime, label(e.code)))
- count += 1
- if limit is not None and count == limit:
- return
- ccount = 0
- if e.calls:
- for se in e.calls:
- file.write(cols % ("+%s" % se.callcount, se.reccallcount,
- se.totaltime, se.inlinetime,
- "+%s" % label(se.code)))
- count += 1
- ccount += 1
- if limit is not None and count == limit:
- return
- if climit is not None and ccount == climit:
- break
-
- def freeze(self):
- """Replace all references to code objects with string
- descriptions; this makes it possible to pickle the instance."""
-
- # this code is probably rather ickier than it needs to be!
- for i in range(len(self.data)):
- e = self.data[i]
- if not isinstance(e.code, str):
- self.data[i] = type(e)((label(e.code),) + e[1:])
- if e.calls:
- for j in range(len(e.calls)):
- se = e.calls[j]
- if not isinstance(se.code, str):
- e.calls[j] = type(se)((label(se.code),) + se[1:])
-
-_fn2mod = {}
-
-def label(code):
- if isinstance(code, str):
- return code
- try:
- mname = _fn2mod[code.co_filename]
- except KeyError:
- for k, v in list(sys.modules.iteritems()):
- if v is None:
- continue
- if not hasattr(v, '__file__'):
- continue
- if not isinstance(v.__file__, str):
- continue
- if v.__file__.startswith(code.co_filename):
- mname = _fn2mod[code.co_filename] = k
- break
- else:
- mname = _fn2mod[code.co_filename] = '<%s>'%code.co_filename
-
- return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
-
-
-if __name__ == '__main__':
- import os
- sys.argv = sys.argv[1:]
- if not sys.argv:
- print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
- sys.exit(2)
- sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
- stats = profile(execfile, sys.argv[0], globals(), locals())
- stats.sort()
- stats.pprint()
diff --git a/sys/lib/python/mercurial/lsprofcalltree.py b/sys/lib/python/mercurial/lsprofcalltree.py
deleted file mode 100644
index 358b951d1..000000000
--- a/sys/lib/python/mercurial/lsprofcalltree.py
+++ /dev/null
@@ -1,86 +0,0 @@
-"""
-lsprofcalltree.py - lsprof output which is readable by kcachegrind
-
-Authors:
- * David Allouche <david <at> allouche.net>
- * Jp Calderone & Itamar Shtull-Trauring
- * Johan Dahlin
-
-This software may be used and distributed according to the terms
-of the GNU General Public License, incorporated herein by reference.
-"""
-
-def label(code):
- if isinstance(code, str):
- return '~' + code # built-in functions ('~' sorts at the end)
- else:
- return '%s %s:%d' % (code.co_name,
- code.co_filename,
- code.co_firstlineno)
-
-class KCacheGrind(object):
- def __init__(self, profiler):
- self.data = profiler.getstats()
- self.out_file = None
-
- def output(self, out_file):
- self.out_file = out_file
- print >> out_file, 'events: Ticks'
- self._print_summary()
- for entry in self.data:
- self._entry(entry)
-
- def _print_summary(self):
- max_cost = 0
- for entry in self.data:
- totaltime = int(entry.totaltime * 1000)
- max_cost = max(max_cost, totaltime)
- print >> self.out_file, 'summary: %d' % (max_cost,)
-
- def _entry(self, entry):
- out_file = self.out_file
-
- code = entry.code
- #print >> out_file, 'ob=%s' % (code.co_filename,)
- if isinstance(code, str):
- print >> out_file, 'fi=~'
- else:
- print >> out_file, 'fi=%s' % (code.co_filename,)
- print >> out_file, 'fn=%s' % (label(code),)
-
- inlinetime = int(entry.inlinetime * 1000)
- if isinstance(code, str):
- print >> out_file, '0 ', inlinetime
- else:
- print >> out_file, '%d %d' % (code.co_firstlineno, inlinetime)
-
- # recursive calls are counted in entry.calls
- if entry.calls:
- calls = entry.calls
- else:
- calls = []
-
- if isinstance(code, str):
- lineno = 0
- else:
- lineno = code.co_firstlineno
-
- for subentry in calls:
- self._subentry(lineno, subentry)
- print >> out_file
-
- def _subentry(self, lineno, subentry):
- out_file = self.out_file
- code = subentry.code
- #print >> out_file, 'cob=%s' % (code.co_filename,)
- print >> out_file, 'cfn=%s' % (label(code),)
- if isinstance(code, str):
- print >> out_file, 'cfi=~'
- print >> out_file, 'calls=%d 0' % (subentry.callcount,)
- else:
- print >> out_file, 'cfi=%s' % (code.co_filename,)
- print >> out_file, 'calls=%d %d' % (
- subentry.callcount, code.co_firstlineno)
-
- totaltime = int(subentry.totaltime * 1000)
- print >> out_file, '%d %d' % (lineno, totaltime)
diff --git a/sys/lib/python/mercurial/mail.py b/sys/lib/python/mercurial/mail.py
deleted file mode 100644
index 3d8222c4f..000000000
--- a/sys/lib/python/mercurial/mail.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# mail.py - mail sending bits for mercurial
-#
-# Copyright 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import util, encoding
-import os, smtplib, socket, quopri
-import email.Header, email.MIMEText, email.Utils
-
-def _smtp(ui):
- '''build an smtp connection and return a function to send mail'''
- local_hostname = ui.config('smtp', 'local_hostname')
- s = smtplib.SMTP(local_hostname=local_hostname)
- mailhost = ui.config('smtp', 'host')
- if not mailhost:
- raise util.Abort(_('no [smtp]host in hgrc - cannot send mail'))
- mailport = int(ui.config('smtp', 'port', 25))
- ui.note(_('sending mail: smtp host %s, port %s\n') %
- (mailhost, mailport))
- s.connect(host=mailhost, port=mailport)
- if ui.configbool('smtp', 'tls'):
- if not hasattr(socket, 'ssl'):
- raise util.Abort(_("can't use TLS: Python SSL support "
- "not installed"))
- ui.note(_('(using tls)\n'))
- s.ehlo()
- s.starttls()
- s.ehlo()
- username = ui.config('smtp', 'username')
- password = ui.config('smtp', 'password')
- if username and not password:
- password = ui.getpass()
- if username and password:
- ui.note(_('(authenticating to mail server as %s)\n') %
- (username))
- try:
- s.login(username, password)
- except smtplib.SMTPException, inst:
- raise util.Abort(inst)
-
- def send(sender, recipients, msg):
- try:
- return s.sendmail(sender, recipients, msg)
- except smtplib.SMTPRecipientsRefused, inst:
- recipients = [r[1] for r in inst.recipients.values()]
- raise util.Abort('\n' + '\n'.join(recipients))
- except smtplib.SMTPException, inst:
- raise util.Abort(inst)
-
- return send
-
-def _sendmail(ui, sender, recipients, msg):
- '''send mail using sendmail.'''
- program = ui.config('email', 'method')
- cmdline = '%s -f %s %s' % (program, util.email(sender),
- ' '.join(map(util.email, recipients)))
- ui.note(_('sending mail: %s\n') % cmdline)
- fp = util.popen(cmdline, 'w')
- fp.write(msg)
- ret = fp.close()
- if ret:
- raise util.Abort('%s %s' % (
- os.path.basename(program.split(None, 1)[0]),
- util.explain_exit(ret)[0]))
-
-def connect(ui):
- '''make a mail connection. return a function to send mail.
- call as sendmail(sender, list-of-recipients, msg).'''
- if ui.config('email', 'method', 'smtp') == 'smtp':
- return _smtp(ui)
- return lambda s, r, m: _sendmail(ui, s, r, m)
-
-def sendmail(ui, sender, recipients, msg):
- send = connect(ui)
- return send(sender, recipients, msg)
-
-def validateconfig(ui):
- '''determine if we have enough config data to try sending email.'''
- method = ui.config('email', 'method', 'smtp')
- if method == 'smtp':
- if not ui.config('smtp', 'host'):
- raise util.Abort(_('smtp specified as email transport, '
- 'but no smtp host configured'))
- else:
- if not util.find_exe(method):
- raise util.Abort(_('%r specified as email transport, '
- 'but not in PATH') % method)
-
-def mimetextpatch(s, subtype='plain', display=False):
- '''If patch in utf-8 transfer-encode it.'''
-
- enc = None
- for line in s.splitlines():
- if len(line) > 950:
- s = quopri.encodestring(s)
- enc = "quoted-printable"
- break
-
- cs = 'us-ascii'
- if not display:
- try:
- s.decode('us-ascii')
- except UnicodeDecodeError:
- try:
- s.decode('utf-8')
- cs = 'utf-8'
- except UnicodeDecodeError:
- # We'll go with us-ascii as a fallback.
- pass
-
- msg = email.MIMEText.MIMEText(s, subtype, cs)
- if enc:
- del msg['Content-Transfer-Encoding']
- msg['Content-Transfer-Encoding'] = enc
- return msg
-
-def _charsets(ui):
- '''Obtains charsets to send mail parts not containing patches.'''
- charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
- fallbacks = [encoding.fallbackencoding.lower(),
- encoding.encoding.lower(), 'utf-8']
- for cs in fallbacks: # find unique charsets while keeping order
- if cs not in charsets:
- charsets.append(cs)
- return [cs for cs in charsets if not cs.endswith('ascii')]
-
-def _encode(ui, s, charsets):
- '''Returns (converted) string, charset tuple.
- Finds out best charset by cycling through sendcharsets in descending
- order. Tries both encoding and fallbackencoding for input. Only as
- last resort send as is in fake ascii.
- Caveat: Do not use for mail parts containing patches!'''
- try:
- s.decode('ascii')
- except UnicodeDecodeError:
- sendcharsets = charsets or _charsets(ui)
- for ics in (encoding.encoding, encoding.fallbackencoding):
- try:
- u = s.decode(ics)
- except UnicodeDecodeError:
- continue
- for ocs in sendcharsets:
- try:
- return u.encode(ocs), ocs
- except UnicodeEncodeError:
- pass
- except LookupError:
- ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
- # if ascii, or all conversion attempts fail, send (broken) ascii
- return s, 'us-ascii'
-
-def headencode(ui, s, charsets=None, display=False):
- '''Returns RFC-2047 compliant header from given string.'''
- if not display:
- # split into words?
- s, cs = _encode(ui, s, charsets)
- return str(email.Header.Header(s, cs))
- return s
-
-def addressencode(ui, address, charsets=None, display=False):
- '''Turns address into RFC-2047 compliant header.'''
- if display or not address:
- return address or ''
- name, addr = email.Utils.parseaddr(address)
- name = headencode(ui, name, charsets)
- try:
- acc, dom = addr.split('@')
- acc = acc.encode('ascii')
- dom = dom.encode('idna')
- addr = '%s@%s' % (acc, dom)
- except UnicodeDecodeError:
- raise util.Abort(_('invalid email address: %s') % addr)
- except ValueError:
- try:
- # too strict?
- addr = addr.encode('ascii')
- except UnicodeDecodeError:
- raise util.Abort(_('invalid local address: %s') % addr)
- return email.Utils.formataddr((name, addr))
-
-def mimeencode(ui, s, charsets=None, display=False):
- '''creates mime text object, encodes it if needed, and sets
- charset and transfer-encoding accordingly.'''
- cs = 'us-ascii'
- if not display:
- s, cs = _encode(ui, s, charsets)
- return email.MIMEText.MIMEText(s, 'plain', cs)
diff --git a/sys/lib/python/mercurial/manifest.py b/sys/lib/python/mercurial/manifest.py
deleted file mode 100644
index 7f7558403..000000000
--- a/sys/lib/python/mercurial/manifest.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# manifest.py - manifest revision class for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import mdiff, parsers, error, revlog
-import array, struct
-
-class manifestdict(dict):
- def __init__(self, mapping=None, flags=None):
- if mapping is None: mapping = {}
- if flags is None: flags = {}
- dict.__init__(self, mapping)
- self._flags = flags
- def flags(self, f):
- return self._flags.get(f, "")
- def set(self, f, flags):
- self._flags[f] = flags
- def copy(self):
- return manifestdict(dict.copy(self), dict.copy(self._flags))
-
-class manifest(revlog.revlog):
- def __init__(self, opener):
- self.mapcache = None
- self.listcache = None
- revlog.revlog.__init__(self, opener, "00manifest.i")
-
- def parse(self, lines):
- mfdict = manifestdict()
- parsers.parse_manifest(mfdict, mfdict._flags, lines)
- return mfdict
-
- def readdelta(self, node):
- r = self.rev(node)
- return self.parse(mdiff.patchtext(self.revdiff(r - 1, r)))
-
- def read(self, node):
- if node == revlog.nullid:
- return manifestdict() # don't upset local cache
- if self.mapcache and self.mapcache[0] == node:
- return self.mapcache[1]
- text = self.revision(node)
- self.listcache = array.array('c', text)
- mapping = self.parse(text)
- self.mapcache = (node, mapping)
- return mapping
-
- def _search(self, m, s, lo=0, hi=None):
- '''return a tuple (start, end) that says where to find s within m.
-
- If the string is found m[start:end] are the line containing
- that string. If start == end the string was not found and
- they indicate the proper sorted insertion point. This was
- taken from bisect_left, and modified to find line start/end as
- it goes along.
-
- m should be a buffer or a string
- s is a string'''
- def advance(i, c):
- while i < lenm and m[i] != c:
- i += 1
- return i
- if not s:
- return (lo, lo)
- lenm = len(m)
- if not hi:
- hi = lenm
- while lo < hi:
- mid = (lo + hi) // 2
- start = mid
- while start > 0 and m[start-1] != '\n':
- start -= 1
- end = advance(start, '\0')
- if m[start:end] < s:
- # we know that after the null there are 40 bytes of sha1
- # this translates to the bisect lo = mid + 1
- lo = advance(end + 40, '\n') + 1
- else:
- # this translates to the bisect hi = mid
- hi = start
- end = advance(lo, '\0')
- found = m[lo:end]
- if cmp(s, found) == 0:
- # we know that after the null there are 40 bytes of sha1
- end = advance(end + 40, '\n')
- return (lo, end+1)
- else:
- return (lo, lo)
-
- def find(self, node, f):
- '''look up entry for a single file efficiently.
- return (node, flags) pair if found, (None, None) if not.'''
- if self.mapcache and node == self.mapcache[0]:
- return self.mapcache[1].get(f), self.mapcache[1].flags(f)
- text = self.revision(node)
- start, end = self._search(text, f)
- if start == end:
- return None, None
- l = text[start:end]
- f, n = l.split('\0')
- return revlog.bin(n[:40]), n[40:-1]
-
- def add(self, map, transaction, link, p1=None, p2=None,
- changed=None):
- # apply the changes collected during the bisect loop to our addlist
- # return a delta suitable for addrevision
- def addlistdelta(addlist, x):
- # start from the bottom up
- # so changes to the offsets don't mess things up.
- i = len(x)
- while i > 0:
- i -= 1
- start = x[i][0]
- end = x[i][1]
- if x[i][2]:
- addlist[start:end] = array.array('c', x[i][2])
- else:
- del addlist[start:end]
- return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2]
- for d in x ])
-
- def checkforbidden(l):
- for f in l:
- if '\n' in f or '\r' in f:
- raise error.RevlogError(
- _("'\\n' and '\\r' disallowed in filenames: %r") % f)
-
- # if we're using the listcache, make sure it is valid and
- # parented by the same node we're diffing against
- if not (changed and self.listcache and p1 and self.mapcache[0] == p1):
- files = sorted(map)
- checkforbidden(files)
-
- # if this is changed to support newlines in filenames,
- # be sure to check the templates/ dir again (especially *-raw.tmpl)
- hex, flags = revlog.hex, map.flags
- text = ["%s\000%s%s\n" % (f, hex(map[f]), flags(f))
- for f in files]
- self.listcache = array.array('c', "".join(text))
- cachedelta = None
- else:
- addlist = self.listcache
-
- checkforbidden(changed[0])
- # combine the changed lists into one list for sorting
- work = [[x, 0] for x in changed[0]]
- work[len(work):] = [[x, 1] for x in changed[1]]
- work.sort()
-
- delta = []
- dstart = None
- dend = None
- dline = [""]
- start = 0
- # zero copy representation of addlist as a buffer
- addbuf = buffer(addlist)
-
- # start with a readonly loop that finds the offset of
- # each line and creates the deltas
- for w in work:
- f = w[0]
- # bs will either be the index of the item or the insert point
- start, end = self._search(addbuf, f, start)
- if w[1] == 0:
- l = "%s\000%s%s\n" % (f, revlog.hex(map[f]), map.flags(f))
- else:
- l = ""
- if start == end and w[1] == 1:
- # item we want to delete was not found, error out
- raise AssertionError(
- _("failed to remove %s from manifest") % f)
- if dstart != None and dstart <= start and dend >= start:
- if dend < end:
- dend = end
- if l:
- dline.append(l)
- else:
- if dstart != None:
- delta.append([dstart, dend, "".join(dline)])
- dstart = start
- dend = end
- dline = [l]
-
- if dstart != None:
- delta.append([dstart, dend, "".join(dline)])
- # apply the delta to the addlist, and get a delta for addrevision
- cachedelta = addlistdelta(addlist, delta)
-
- # the delta is only valid if we've been processing the tip revision
- if self.mapcache[0] != self.tip():
- cachedelta = None
- self.listcache = addlist
-
- n = self.addrevision(buffer(self.listcache), transaction, link,
- p1, p2, cachedelta)
- self.mapcache = (n, map)
-
- return n
diff --git a/sys/lib/python/mercurial/match.py b/sys/lib/python/mercurial/match.py
deleted file mode 100644
index 34b6fc1e8..000000000
--- a/sys/lib/python/mercurial/match.py
+++ /dev/null
@@ -1,249 +0,0 @@
-# match.py - filename matching
-#
-# Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import re
-import util
-
-class match(object):
- def __init__(self, root, cwd, patterns, include=[], exclude=[],
- default='glob', exact=False):
- """build an object to match a set of file patterns
-
- arguments:
- root - the canonical root of the tree you're matching against
- cwd - the current working directory, if relevant
- patterns - patterns to find
- include - patterns to include
- exclude - patterns to exclude
- default - if a pattern in names has no explicit type, assume this one
- exact - patterns are actually literals
-
- a pattern is one of:
- 'glob:<glob>' - a glob relative to cwd
- 're:<regexp>' - a regular expression
- 'path:<path>' - a path relative to canonroot
- 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
- 'relpath:<path>' - a path relative to cwd
- 'relre:<regexp>' - a regexp that needn't match the start of a name
- '<something>' - a pattern of the specified default type
- """
-
- self._root = root
- self._cwd = cwd
- self._files = []
- self._anypats = bool(include or exclude)
-
- if include:
- im = _buildmatch(_normalize(include, 'glob', root, cwd), '(?:/|$)')
- if exclude:
- em = _buildmatch(_normalize(exclude, 'glob', root, cwd), '(?:/|$)')
- if exact:
- self._files = patterns
- pm = self.exact
- elif patterns:
- pats = _normalize(patterns, default, root, cwd)
- self._files = _roots(pats)
- self._anypats = self._anypats or _anypats(pats)
- pm = _buildmatch(pats, '$')
-
- if patterns or exact:
- if include:
- if exclude:
- m = lambda f: im(f) and not em(f) and pm(f)
- else:
- m = lambda f: im(f) and pm(f)
- else:
- if exclude:
- m = lambda f: not em(f) and pm(f)
- else:
- m = pm
- else:
- if include:
- if exclude:
- m = lambda f: im(f) and not em(f)
- else:
- m = im
- else:
- if exclude:
- m = lambda f: not em(f)
- else:
- m = lambda f: True
-
- self.matchfn = m
- self._fmap = set(self._files)
-
- def __call__(self, fn):
- return self.matchfn(fn)
- def __iter__(self):
- for f in self._files:
- yield f
- def bad(self, f, msg):
- '''callback for each explicit file that can't be
- found/accessed, with an error message
- '''
- pass
- def dir(self, f):
- pass
- def missing(self, f):
- pass
- def exact(self, f):
- return f in self._fmap
- def rel(self, f):
- return util.pathto(self._root, self._cwd, f)
- def files(self):
- return self._files
- def anypats(self):
- return self._anypats
-
-class exact(match):
- def __init__(self, root, cwd, files):
- match.__init__(self, root, cwd, files, exact = True)
-
-class always(match):
- def __init__(self, root, cwd):
- match.__init__(self, root, cwd, [])
-
-def patkind(pat):
- return _patsplit(pat, None)[0]
-
-def _patsplit(pat, default):
- """Split a string into an optional pattern kind prefix and the
- actual pattern."""
- if ':' in pat:
- kind, val = pat.split(':', 1)
- if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'):
- return kind, val
- return default, pat
-
-def _globre(pat):
- "convert a glob pattern into a regexp"
- i, n = 0, len(pat)
- res = ''
- group = 0
- escape = re.escape
- def peek(): return i < n and pat[i]
- while i < n:
- c = pat[i]
- i = i+1
- if c not in '*?[{},\\':
- res += escape(c)
- elif c == '*':
- if peek() == '*':
- i += 1
- res += '.*'
- else:
- res += '[^/]*'
- elif c == '?':
- res += '.'
- elif c == '[':
- j = i
- if j < n and pat[j] in '!]':
- j += 1
- while j < n and pat[j] != ']':
- j += 1
- if j >= n:
- res += '\\['
- else:
- stuff = pat[i:j].replace('\\','\\\\')
- i = j + 1
- if stuff[0] == '!':
- stuff = '^' + stuff[1:]
- elif stuff[0] == '^':
- stuff = '\\' + stuff
- res = '%s[%s]' % (res, stuff)
- elif c == '{':
- group += 1
- res += '(?:'
- elif c == '}' and group:
- res += ')'
- group -= 1
- elif c == ',' and group:
- res += '|'
- elif c == '\\':
- p = peek()
- if p:
- i += 1
- res += escape(p)
- else:
- res += escape(c)
- else:
- res += escape(c)
- return res
-
-def _regex(kind, name, tail):
- '''convert a pattern into a regular expression'''
- if not name:
- return ''
- if kind == 're':
- return name
- elif kind == 'path':
- return '^' + re.escape(name) + '(?:/|$)'
- elif kind == 'relglob':
- return '(?:|.*/)' + _globre(name) + tail
- elif kind == 'relpath':
- return re.escape(name) + '(?:/|$)'
- elif kind == 'relre':
- if name.startswith('^'):
- return name
- return '.*' + name
- return _globre(name) + tail
-
-def _buildmatch(pats, tail):
- """build a matching function from a set of patterns"""
- try:
- pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
- if len(pat) > 20000:
- raise OverflowError()
- return re.compile(pat).match
- except OverflowError:
- # We're using a Python with a tiny regex engine and we
- # made it explode, so we'll divide the pattern list in two
- # until it works
- l = len(pats)
- if l < 2:
- raise
- a, b = _buildmatch(pats[:l//2], tail), _buildmatch(pats[l//2:], tail)
- return lambda s: a(s) or b(s)
- except re.error:
- for k, p in pats:
- try:
- re.compile('(?:%s)' % _regex(k, p, tail))
- except re.error:
- raise util.Abort("invalid pattern (%s): %s" % (k, p))
- raise util.Abort("invalid pattern")
-
-def _normalize(names, default, root, cwd):
- pats = []
- for kind, name in [_patsplit(p, default) for p in names]:
- if kind in ('glob', 'relpath'):
- name = util.canonpath(root, cwd, name)
- elif kind in ('relglob', 'path'):
- name = util.normpath(name)
-
- pats.append((kind, name))
- return pats
-
-def _roots(patterns):
- r = []
- for kind, name in patterns:
- if kind == 'glob': # find the non-glob prefix
- root = []
- for p in name.split('/'):
- if '[' in p or '{' in p or '*' in p or '?' in p:
- break
- root.append(p)
- r.append('/'.join(root) or '.')
- elif kind in ('relpath', 'path'):
- r.append(name or '.')
- elif kind == 'relglob':
- r.append('.')
- return r
-
-def _anypats(patterns):
- for kind, name in patterns:
- if kind in ('glob', 're', 'relglob', 'relre'):
- return True
diff --git a/sys/lib/python/mercurial/mdiff.py b/sys/lib/python/mercurial/mdiff.py
deleted file mode 100644
index 44d4d4560..000000000
--- a/sys/lib/python/mercurial/mdiff.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# mdiff.py - diff and patch routines for mercurial
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import bdiff, mpatch, util
-import re, struct
-
-def splitnewlines(text):
- '''like str.splitlines, but only split on newlines.'''
- lines = [l + '\n' for l in text.split('\n')]
- if lines:
- if lines[-1] == '\n':
- lines.pop()
- else:
- lines[-1] = lines[-1][:-1]
- return lines
-
-class diffopts(object):
- '''context is the number of context lines
- text treats all files as text
- showfunc enables diff -p output
- git enables the git extended patch format
- nodates removes dates from diff headers
- ignorews ignores all whitespace changes in the diff
- ignorewsamount ignores changes in the amount of whitespace
- ignoreblanklines ignores changes whose lines are all blank'''
-
- defaults = {
- 'context': 3,
- 'text': False,
- 'showfunc': False,
- 'git': False,
- 'nodates': False,
- 'ignorews': False,
- 'ignorewsamount': False,
- 'ignoreblanklines': False,
- }
-
- __slots__ = defaults.keys()
-
- def __init__(self, **opts):
- for k in self.__slots__:
- v = opts.get(k)
- if v is None:
- v = self.defaults[k]
- setattr(self, k, v)
-
- try:
- self.context = int(self.context)
- except ValueError:
- raise util.Abort(_('diff context lines count must be '
- 'an integer, not %r') % self.context)
-
-defaultopts = diffopts()
-
-def wsclean(opts, text):
- if opts.ignorews:
- text = re.sub('[ \t]+', '', text)
- elif opts.ignorewsamount:
- text = re.sub('[ \t]+', ' ', text)
- text = re.sub('[ \t]+\n', '\n', text)
- if opts.ignoreblanklines:
- text = re.sub('\n+', '', text)
- return text
-
-def diffline(revs, a, b, opts):
- parts = ['diff']
- if opts.git:
- parts.append('--git')
- if revs and not opts.git:
- parts.append(' '.join(["-r %s" % rev for rev in revs]))
- if opts.git:
- parts.append('a/%s' % a)
- parts.append('b/%s' % b)
- else:
- parts.append(a)
- return ' '.join(parts) + '\n'
-
-def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts):
- def datetag(date, addtab=True):
- if not opts.git and not opts.nodates:
- return '\t%s\n' % date
- if addtab and ' ' in fn1:
- return '\t\n'
- return '\n'
-
- if not a and not b: return ""
- epoch = util.datestr((0, 0))
-
- if not opts.text and (util.binary(a) or util.binary(b)):
- if a and b and len(a) == len(b) and a == b:
- return ""
- l = ['Binary file %s has changed\n' % fn1]
- elif not a:
- b = splitnewlines(b)
- if a is None:
- l1 = '--- /dev/null%s' % datetag(epoch, False)
- else:
- l1 = "--- %s%s" % ("a/" + fn1, datetag(ad))
- l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd))
- l3 = "@@ -0,0 +1,%d @@\n" % len(b)
- l = [l1, l2, l3] + ["+" + e for e in b]
- elif not b:
- a = splitnewlines(a)
- l1 = "--- %s%s" % ("a/" + fn1, datetag(ad))
- if b is None:
- l2 = '+++ /dev/null%s' % datetag(epoch, False)
- else:
- l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd))
- l3 = "@@ -1,%d +0,0 @@\n" % len(a)
- l = [l1, l2, l3] + ["-" + e for e in a]
- else:
- al = splitnewlines(a)
- bl = splitnewlines(b)
- l = list(bunidiff(a, b, al, bl, "a/" + fn1, "b/" + fn2, opts=opts))
- if not l: return ""
- # difflib uses a space, rather than a tab
- l[0] = "%s%s" % (l[0][:-2], datetag(ad))
- l[1] = "%s%s" % (l[1][:-2], datetag(bd))
-
- for ln in xrange(len(l)):
- if l[ln][-1] != '\n':
- l[ln] += "\n\ No newline at end of file\n"
-
- if r:
- l.insert(0, diffline(r, fn1, fn2, opts))
-
- return "".join(l)
-
-# somewhat self contained replacement for difflib.unified_diff
-# t1 and t2 are the text to be diffed
-# l1 and l2 are the text broken up into lines
-# header1 and header2 are the filenames for the diff output
-def bunidiff(t1, t2, l1, l2, header1, header2, opts=defaultopts):
- def contextend(l, len):
- ret = l + opts.context
- if ret > len:
- ret = len
- return ret
-
- def contextstart(l):
- ret = l - opts.context
- if ret < 0:
- return 0
- return ret
-
- def yieldhunk(hunk, header):
- if header:
- for x in header:
- yield x
- (astart, a2, bstart, b2, delta) = hunk
- aend = contextend(a2, len(l1))
- alen = aend - astart
- blen = b2 - bstart + aend - a2
-
- func = ""
- if opts.showfunc:
- # walk backwards from the start of the context
- # to find a line starting with an alphanumeric char.
- for x in xrange(astart - 1, -1, -1):
- t = l1[x].rstrip()
- if funcre.match(t):
- func = ' ' + t[:40]
- break
-
- yield "@@ -%d,%d +%d,%d @@%s\n" % (astart + 1, alen,
- bstart + 1, blen, func)
- for x in delta:
- yield x
- for x in xrange(a2, aend):
- yield ' ' + l1[x]
-
- header = [ "--- %s\t\n" % header1, "+++ %s\t\n" % header2 ]
-
- if opts.showfunc:
- funcre = re.compile('\w')
-
- # bdiff.blocks gives us the matching sequences in the files. The loop
- # below finds the spaces between those matching sequences and translates
- # them into diff output.
- #
- diff = bdiff.blocks(t1, t2)
- hunk = None
- for i, s1 in enumerate(diff):
- # The first match is special.
- # we've either found a match starting at line 0 or a match later
- # in the file. If it starts later, old and new below will both be
- # empty and we'll continue to the next match.
- if i > 0:
- s = diff[i-1]
- else:
- s = [0, 0, 0, 0]
- delta = []
- a1 = s[1]
- a2 = s1[0]
- b1 = s[3]
- b2 = s1[2]
-
- old = l1[a1:a2]
- new = l2[b1:b2]
-
- # bdiff sometimes gives huge matches past eof, this check eats them,
- # and deals with the special first match case described above
- if not old and not new:
- continue
-
- if opts.ignorews or opts.ignorewsamount or opts.ignoreblanklines:
- if wsclean(opts, "".join(old)) == wsclean(opts, "".join(new)):
- continue
-
- astart = contextstart(a1)
- bstart = contextstart(b1)
- prev = None
- if hunk:
- # join with the previous hunk if it falls inside the context
- if astart < hunk[1] + opts.context + 1:
- prev = hunk
- astart = hunk[1]
- bstart = hunk[3]
- else:
- for x in yieldhunk(hunk, header):
- yield x
- # we only want to yield the header if the files differ, and
- # we only want to yield it once.
- header = None
- if prev:
- # we've joined the previous hunk, record the new ending points.
- hunk[1] = a2
- hunk[3] = b2
- delta = hunk[4]
- else:
- # create a new hunk
- hunk = [ astart, a2, bstart, b2, delta ]
-
- delta[len(delta):] = [ ' ' + x for x in l1[astart:a1] ]
- delta[len(delta):] = [ '-' + x for x in old ]
- delta[len(delta):] = [ '+' + x for x in new ]
-
- if hunk:
- for x in yieldhunk(hunk, header):
- yield x
-
-def patchtext(bin):
- pos = 0
- t = []
- while pos < len(bin):
- p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
- pos += 12
- t.append(bin[pos:pos + l])
- pos += l
- return "".join(t)
-
-def patch(a, bin):
- return mpatch.patches(a, [bin])
-
-# similar to difflib.SequenceMatcher.get_matching_blocks
-def get_matching_blocks(a, b):
- return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
-
-def trivialdiffheader(length):
- return struct.pack(">lll", 0, 0, length)
-
-patches = mpatch.patches
-patchedsize = mpatch.patchedsize
-textdiff = bdiff.bdiff
diff --git a/sys/lib/python/mercurial/merge.py b/sys/lib/python/mercurial/merge.py
deleted file mode 100644
index fc04d040d..000000000
--- a/sys/lib/python/mercurial/merge.py
+++ /dev/null
@@ -1,481 +0,0 @@
-# merge.py - directory-level update/merge handling for Mercurial
-#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import nullid, nullrev, hex, bin
-from i18n import _
-import util, filemerge, copies, subrepo
-import errno, os, shutil
-
-class mergestate(object):
- '''track 3-way merge state of individual files'''
- def __init__(self, repo):
- self._repo = repo
- self._read()
- def reset(self, node=None):
- self._state = {}
- if node:
- self._local = node
- shutil.rmtree(self._repo.join("merge"), True)
- def _read(self):
- self._state = {}
- try:
- localnode = None
- f = self._repo.opener("merge/state")
- for i, l in enumerate(f):
- if i == 0:
- localnode = l[:-1]
- else:
- bits = l[:-1].split("\0")
- self._state[bits[0]] = bits[1:]
- self._local = bin(localnode)
- except IOError, err:
- if err.errno != errno.ENOENT:
- raise
- def _write(self):
- f = self._repo.opener("merge/state", "w")
- f.write(hex(self._local) + "\n")
- for d, v in self._state.iteritems():
- f.write("\0".join([d] + v) + "\n")
- def add(self, fcl, fco, fca, fd, flags):
- hash = util.sha1(fcl.path()).hexdigest()
- self._repo.opener("merge/" + hash, "w").write(fcl.data())
- self._state[fd] = ['u', hash, fcl.path(), fca.path(),
- hex(fca.filenode()), fco.path(), flags]
- self._write()
- def __contains__(self, dfile):
- return dfile in self._state
- def __getitem__(self, dfile):
- return self._state[dfile][0]
- def __iter__(self):
- l = self._state.keys()
- l.sort()
- for f in l:
- yield f
- def mark(self, dfile, state):
- self._state[dfile][0] = state
- self._write()
- def resolve(self, dfile, wctx, octx):
- if self[dfile] == 'r':
- return 0
- state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
- f = self._repo.opener("merge/" + hash)
- self._repo.wwrite(dfile, f.read(), flags)
- fcd = wctx[dfile]
- fco = octx[ofile]
- fca = self._repo.filectx(afile, fileid=anode)
- r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
- if not r:
- self.mark(dfile, 'r')
- return r
-
-def _checkunknown(wctx, mctx):
- "check for collisions between unknown files and files in mctx"
- for f in wctx.unknown():
- if f in mctx and mctx[f].cmp(wctx[f].data()):
- raise util.Abort(_("untracked file in working directory differs"
- " from file in requested revision: '%s'") % f)
-
-def _checkcollision(mctx):
- "check for case folding collisions in the destination context"
- folded = {}
- for fn in mctx:
- fold = fn.lower()
- if fold in folded:
- raise util.Abort(_("case-folding collision between %s and %s")
- % (fn, folded[fold]))
- folded[fold] = fn
-
-def _forgetremoved(wctx, mctx, branchmerge):
- """
- Forget removed files
-
- If we're jumping between revisions (as opposed to merging), and if
- neither the working directory nor the target rev has the file,
- then we need to remove it from the dirstate, to prevent the
- dirstate from listing the file when it is no longer in the
- manifest.
-
- If we're merging, and the other revision has removed a file
- that is not present in the working directory, we need to mark it
- as removed.
- """
-
- action = []
- state = branchmerge and 'r' or 'f'
- for f in wctx.deleted():
- if f not in mctx:
- action.append((f, state))
-
- if not branchmerge:
- for f in wctx.removed():
- if f not in mctx:
- action.append((f, "f"))
-
- return action
-
-def manifestmerge(repo, p1, p2, pa, overwrite, partial):
- """
- Merge p1 and p2 with ancestor ma and generate merge action list
-
- overwrite = whether we clobber working files
- partial = function to filter file lists
- """
-
- def fmerge(f, f2, fa):
- """merge flags"""
- a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
- if m == n: # flags agree
- return m # unchanged
- if m and n and not a: # flags set, don't agree, differ from parent
- r = repo.ui.promptchoice(
- _(" conflicting flags for %s\n"
- "(n)one, e(x)ec or sym(l)ink?") % f,
- (_("&None"), _("E&xec"), _("Sym&link")), 0)
- if r == 1: return "x" # Exec
- if r == 2: return "l" # Symlink
- return ""
- if m and m != a: # changed from a to m
- return m
- if n and n != a: # changed from a to n
- return n
- return '' # flag was cleared
-
- def act(msg, m, f, *args):
- repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
- action.append((f, m) + args)
-
- action, copy = [], {}
-
- if overwrite:
- pa = p1
- elif pa == p2: # backwards
- pa = p1.p1()
- elif pa and repo.ui.configbool("merge", "followcopies", True):
- dirs = repo.ui.configbool("merge", "followdirs", True)
- copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
- for of, fl in diverge.iteritems():
- act("divergent renames", "dr", of, fl)
-
- repo.ui.note(_("resolving manifests\n"))
- repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
- repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
-
- m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
- copied = set(copy.values())
-
- # Compare manifests
- for f, n in m1.iteritems():
- if partial and not partial(f):
- continue
- if f in m2:
- rflags = fmerge(f, f, f)
- a = ma.get(f, nullid)
- if n == m2[f] or m2[f] == a: # same or local newer
- if m1.flags(f) != rflags:
- act("update permissions", "e", f, rflags)
- elif n == a: # remote newer
- act("remote is newer", "g", f, rflags)
- else: # both changed
- act("versions differ", "m", f, f, f, rflags, False)
- elif f in copied: # files we'll deal with on m2 side
- pass
- elif f in copy:
- f2 = copy[f]
- if f2 not in m2: # directory rename
- act("remote renamed directory to " + f2, "d",
- f, None, f2, m1.flags(f))
- else: # case 2 A,B/B/B or case 4,21 A/B/B
- act("local copied/moved to " + f2, "m",
- f, f2, f, fmerge(f, f2, f2), False)
- elif f in ma: # clean, a different, no remote
- if n != ma[f]:
- if repo.ui.promptchoice(
- _(" local changed %s which remote deleted\n"
- "use (c)hanged version or (d)elete?") % f,
- (_("&Changed"), _("&Delete")), 0):
- act("prompt delete", "r", f)
- else:
- act("prompt keep", "a", f)
- elif n[20:] == "a": # added, no remote
- act("remote deleted", "f", f)
- elif n[20:] != "u":
- act("other deleted", "r", f)
-
- for f, n in m2.iteritems():
- if partial and not partial(f):
- continue
- if f in m1 or f in copied: # files already visited
- continue
- if f in copy:
- f2 = copy[f]
- if f2 not in m1: # directory rename
- act("local renamed directory to " + f2, "d",
- None, f, f2, m2.flags(f))
- elif f2 in m2: # rename case 1, A/A,B/A
- act("remote copied to " + f, "m",
- f2, f, f, fmerge(f2, f, f2), False)
- else: # case 3,20 A/B/A
- act("remote moved to " + f, "m",
- f2, f, f, fmerge(f2, f, f2), True)
- elif f not in ma:
- act("remote created", "g", f, m2.flags(f))
- elif n != ma[f]:
- if repo.ui.promptchoice(
- _("remote changed %s which local deleted\n"
- "use (c)hanged version or leave (d)eleted?") % f,
- (_("&Changed"), _("&Deleted")), 0) == 0:
- act("prompt recreating", "g", f, m2.flags(f))
-
- return action
-
-def actionkey(a):
- return a[1] == 'r' and -1 or 0, a
-
-def applyupdates(repo, action, wctx, mctx):
- "apply the merge action list to the working directory"
-
- updated, merged, removed, unresolved = 0, 0, 0, 0
- ms = mergestate(repo)
- ms.reset(wctx.parents()[0].node())
- moves = []
- action.sort(key=actionkey)
- substate = wctx.substate # prime
-
- # prescan for merges
- for a in action:
- f, m = a[:2]
- if m == 'm': # merge
- f2, fd, flags, move = a[2:]
- if f == '.hgsubstate': # merged internally
- continue
- repo.ui.debug(_("preserving %s for resolve of %s\n") % (f, fd))
- fcl = wctx[f]
- fco = mctx[f2]
- fca = fcl.ancestor(fco) or repo.filectx(f, fileid=nullrev)
- ms.add(fcl, fco, fca, fd, flags)
- if f != fd and move:
- moves.append(f)
-
- # remove renamed files after safely stored
- for f in moves:
- if util.lexists(repo.wjoin(f)):
- repo.ui.debug(_("removing %s\n") % f)
- os.unlink(repo.wjoin(f))
-
- audit_path = util.path_auditor(repo.root)
-
- for a in action:
- f, m = a[:2]
- if f and f[0] == "/":
- continue
- if m == "r": # remove
- repo.ui.note(_("removing %s\n") % f)
- audit_path(f)
- if f == '.hgsubstate': # subrepo states need updating
- subrepo.submerge(repo, wctx, mctx, wctx)
- try:
- util.unlink(repo.wjoin(f))
- except OSError, inst:
- if inst.errno != errno.ENOENT:
- repo.ui.warn(_("update failed to remove %s: %s!\n") %
- (f, inst.strerror))
- removed += 1
- elif m == "m": # merge
- if f == '.hgsubstate': # subrepo states need updating
- subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx))
- continue
- f2, fd, flags, move = a[2:]
- r = ms.resolve(fd, wctx, mctx)
- if r is not None and r > 0:
- unresolved += 1
- else:
- if r is None:
- updated += 1
- else:
- merged += 1
- util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
- if f != fd and move and util.lexists(repo.wjoin(f)):
- repo.ui.debug(_("removing %s\n") % f)
- os.unlink(repo.wjoin(f))
- elif m == "g": # get
- flags = a[2]
- repo.ui.note(_("getting %s\n") % f)
- t = mctx.filectx(f).data()
- repo.wwrite(f, t, flags)
- updated += 1
- if f == '.hgsubstate': # subrepo states need updating
- subrepo.submerge(repo, wctx, mctx, wctx)
- elif m == "d": # directory rename
- f2, fd, flags = a[2:]
- if f:
- repo.ui.note(_("moving %s to %s\n") % (f, fd))
- t = wctx.filectx(f).data()
- repo.wwrite(fd, t, flags)
- util.unlink(repo.wjoin(f))
- if f2:
- repo.ui.note(_("getting %s to %s\n") % (f2, fd))
- t = mctx.filectx(f2).data()
- repo.wwrite(fd, t, flags)
- updated += 1
- elif m == "dr": # divergent renames
- fl = a[2]
- repo.ui.warn(_("warning: detected divergent renames of %s to:\n") % f)
- for nf in fl:
- repo.ui.warn(" %s\n" % nf)
- elif m == "e": # exec
- flags = a[2]
- util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
-
- return updated, merged, removed, unresolved
-
-def recordupdates(repo, action, branchmerge):
- "record merge actions to the dirstate"
-
- for a in action:
- f, m = a[:2]
- if m == "r": # remove
- if branchmerge:
- repo.dirstate.remove(f)
- else:
- repo.dirstate.forget(f)
- elif m == "a": # re-add
- if not branchmerge:
- repo.dirstate.add(f)
- elif m == "f": # forget
- repo.dirstate.forget(f)
- elif m == "e": # exec change
- repo.dirstate.normallookup(f)
- elif m == "g": # get
- if branchmerge:
- repo.dirstate.normaldirty(f)
- else:
- repo.dirstate.normal(f)
- elif m == "m": # merge
- f2, fd, flag, move = a[2:]
- if branchmerge:
- # We've done a branch merge, mark this file as merged
- # so that we properly record the merger later
- repo.dirstate.merge(fd)
- if f != f2: # copy/rename
- if move:
- repo.dirstate.remove(f)
- if f != fd:
- repo.dirstate.copy(f, fd)
- else:
- repo.dirstate.copy(f2, fd)
- else:
- # We've update-merged a locally modified file, so
- # we set the dirstate to emulate a normal checkout
- # of that file some time in the past. Thus our
- # merge will appear as a normal local file
- # modification.
- repo.dirstate.normallookup(fd)
- if move:
- repo.dirstate.forget(f)
- elif m == "d": # directory rename
- f2, fd, flag = a[2:]
- if not f2 and f not in repo.dirstate:
- # untracked file moved
- continue
- if branchmerge:
- repo.dirstate.add(fd)
- if f:
- repo.dirstate.remove(f)
- repo.dirstate.copy(f, fd)
- if f2:
- repo.dirstate.copy(f2, fd)
- else:
- repo.dirstate.normal(fd)
- if f:
- repo.dirstate.forget(f)
-
-def update(repo, node, branchmerge, force, partial):
- """
- Perform a merge between the working directory and the given node
-
- branchmerge = whether to merge between branches
- force = whether to force branch merging or file overwriting
- partial = a function to filter file lists (dirstate not updated)
- """
-
- wlock = repo.wlock()
- try:
- wc = repo[None]
- if node is None:
- # tip of current branch
- try:
- node = repo.branchtags()[wc.branch()]
- except KeyError:
- if wc.branch() == "default": # no default branch!
- node = repo.lookup("tip") # update to tip
- else:
- raise util.Abort(_("branch %s not found") % wc.branch())
- overwrite = force and not branchmerge
- pl = wc.parents()
- p1, p2 = pl[0], repo[node]
- pa = p1.ancestor(p2)
- fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
- fastforward = False
-
- ### check phase
- if not overwrite and len(pl) > 1:
- raise util.Abort(_("outstanding uncommitted merges"))
- if branchmerge:
- if pa == p2:
- raise util.Abort(_("can't merge with ancestor"))
- elif pa == p1:
- if p1.branch() != p2.branch():
- fastforward = True
- else:
- raise util.Abort(_("nothing to merge (use 'hg update'"
- " or check 'hg heads')"))
- if not force and (wc.files() or wc.deleted()):
- raise util.Abort(_("outstanding uncommitted changes "
- "(use 'hg status' to list changes)"))
- elif not overwrite:
- if pa == p1 or pa == p2: # linear
- pass # all good
- elif p1.branch() == p2.branch():
- if wc.files() or wc.deleted():
- raise util.Abort(_("crosses branches (use 'hg merge' or "
- "'hg update -C' to discard changes)"))
- raise util.Abort(_("crosses branches (use 'hg merge' "
- "or 'hg update -C')"))
- elif wc.files() or wc.deleted():
- raise util.Abort(_("crosses named branches (use "
- "'hg update -C' to discard changes)"))
- else:
- # Allow jumping branches if there are no changes
- overwrite = True
-
- ### calculate phase
- action = []
- if not force:
- _checkunknown(wc, p2)
- if not util.checkcase(repo.path):
- _checkcollision(p2)
- action += _forgetremoved(wc, p2, branchmerge)
- action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
-
- ### apply phase
- if not branchmerge: # just jump to the new rev
- fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
- if not partial:
- repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
-
- stats = applyupdates(repo, action, wc, p2)
-
- if not partial:
- recordupdates(repo, action, branchmerge)
- repo.dirstate.setparents(fp1, fp2)
- if not branchmerge and not fastforward:
- repo.dirstate.setbranch(p2.branch())
- repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
-
- return stats
- finally:
- wlock.release()
diff --git a/sys/lib/python/mercurial/minirst.py b/sys/lib/python/mercurial/minirst.py
deleted file mode 100644
index 201c21d99..000000000
--- a/sys/lib/python/mercurial/minirst.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# minirst.py - minimal reStructuredText parser
-#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-"""simplified reStructuredText parser.
-
-This parser knows just enough about reStructuredText to parse the
-Mercurial docstrings.
-
-It cheats in a major way: nested blocks are not really nested. They
-are just indented blocks that look like they are nested. This relies
-on the user to keep the right indentation for the blocks.
-
-It only supports a small subset of reStructuredText:
-
-- paragraphs
-
-- definition lists (must use ' ' to indent definitions)
-
-- lists (items must start with '-')
-
-- field lists (colons cannot be escaped)
-
-- literal blocks
-
-- option lists (supports only long options without arguments)
-
-- inline markup is not recognized at all.
-"""
-
-import re, sys, textwrap
-
-
-def findblocks(text):
- """Find continuous blocks of lines in text.
-
- Returns a list of dictionaries representing the blocks. Each block
- has an 'indent' field and a 'lines' field.
- """
- blocks = [[]]
- lines = text.splitlines()
- for line in lines:
- if line.strip():
- blocks[-1].append(line)
- elif blocks[-1]:
- blocks.append([])
- if not blocks[-1]:
- del blocks[-1]
-
- for i, block in enumerate(blocks):
- indent = min((len(l) - len(l.lstrip())) for l in block)
- blocks[i] = dict(indent=indent, lines=[l[indent:] for l in block])
- return blocks
-
-
-def findliteralblocks(blocks):
- """Finds literal blocks and adds a 'type' field to the blocks.
-
- Literal blocks are given the type 'literal', all other blocks are
- given type the 'paragraph'.
- """
- i = 0
- while i < len(blocks):
- # Searching for a block that looks like this:
- #
- # +------------------------------+
- # | paragraph |
- # | (ends with "::") |
- # +------------------------------+
- # +---------------------------+
- # | indented literal block |
- # +---------------------------+
- blocks[i]['type'] = 'paragraph'
- if blocks[i]['lines'][-1].endswith('::') and i+1 < len(blocks):
- indent = blocks[i]['indent']
- adjustment = blocks[i+1]['indent'] - indent
-
- if blocks[i]['lines'] == ['::']:
- # Expanded form: remove block
- del blocks[i]
- i -= 1
- elif blocks[i]['lines'][-1].endswith(' ::'):
- # Partially minimized form: remove space and both
- # colons.
- blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-3]
- else:
- # Fully minimized form: remove just one colon.
- blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-1]
-
- # List items are formatted with a hanging indent. We must
- # correct for this here while we still have the original
- # information on the indentation of the subsequent literal
- # blocks available.
- if blocks[i]['lines'][0].startswith('- '):
- indent += 2
- adjustment -= 2
-
- # Mark the following indented blocks.
- while i+1 < len(blocks) and blocks[i+1]['indent'] > indent:
- blocks[i+1]['type'] = 'literal'
- blocks[i+1]['indent'] -= adjustment
- i += 1
- i += 1
- return blocks
-
-
-def findsections(blocks):
- """Finds sections.
-
- The blocks must have a 'type' field, i.e., they should have been
- run through findliteralblocks first.
- """
- for block in blocks:
- # Searching for a block that looks like this:
- #
- # +------------------------------+
- # | Section title |
- # | ------------- |
- # +------------------------------+
- if (block['type'] == 'paragraph' and
- len(block['lines']) == 2 and
- block['lines'][1] == '-' * len(block['lines'][0])):
- block['type'] = 'section'
- return blocks
-
-
-def findbulletlists(blocks):
- """Finds bullet lists.
-
- The blocks must have a 'type' field, i.e., they should have been
- run through findliteralblocks first.
- """
- i = 0
- while i < len(blocks):
- # Searching for a paragraph that looks like this:
- #
- # +------+-----------------------+
- # | "- " | list item |
- # +------| (body elements)+ |
- # +-----------------------+
- if (blocks[i]['type'] == 'paragraph' and
- blocks[i]['lines'][0].startswith('- ')):
- items = []
- for line in blocks[i]['lines']:
- if line.startswith('- '):
- items.append(dict(type='bullet', lines=[],
- indent=blocks[i]['indent']))
- line = line[2:]
- items[-1]['lines'].append(line)
- blocks[i:i+1] = items
- i += len(items) - 1
- i += 1
- return blocks
-
-
-_optionre = re.compile(r'^(--[a-z-]+)((?:[ =][a-zA-Z][\w-]*)? +)(.*)$')
-def findoptionlists(blocks):
- """Finds option lists.
-
- The blocks must have a 'type' field, i.e., they should have been
- run through findliteralblocks first.
- """
- i = 0
- while i < len(blocks):
- # Searching for a paragraph that looks like this:
- #
- # +----------------------------+-------------+
- # | "--" option " " | description |
- # +-------+--------------------+ |
- # | (body elements)+ |
- # +----------------------------------+
- if (blocks[i]['type'] == 'paragraph' and
- _optionre.match(blocks[i]['lines'][0])):
- options = []
- for line in blocks[i]['lines']:
- m = _optionre.match(line)
- if m:
- option, arg, rest = m.groups()
- width = len(option) + len(arg)
- options.append(dict(type='option', lines=[],
- indent=blocks[i]['indent'],
- width=width))
- options[-1]['lines'].append(line)
- blocks[i:i+1] = options
- i += len(options) - 1
- i += 1
- return blocks
-
-
-_fieldre = re.compile(r':(?![: ])([^:]*)(?<! ):( +)(.*)')
-def findfieldlists(blocks):
- """Finds fields lists.
-
- The blocks must have a 'type' field, i.e., they should have been
- run through findliteralblocks first.
- """
- i = 0
- while i < len(blocks):
- # Searching for a paragraph that looks like this:
- #
- #
- # +--------------------+----------------------+
- # | ":" field name ":" | field body |
- # +-------+------------+ |
- # | (body elements)+ |
- # +-----------------------------------+
- if (blocks[i]['type'] == 'paragraph' and
- _fieldre.match(blocks[i]['lines'][0])):
- indent = blocks[i]['indent']
- fields = []
- for line in blocks[i]['lines']:
- m = _fieldre.match(line)
- if m:
- key, spaces, rest = m.groups()
- width = 2 + len(key) + len(spaces)
- fields.append(dict(type='field', lines=[],
- indent=indent, width=width))
- # Turn ":foo: bar" into "foo bar".
- line = '%s %s%s' % (key, spaces, rest)
- fields[-1]['lines'].append(line)
- blocks[i:i+1] = fields
- i += len(fields) - 1
- i += 1
- return blocks
-
-
-def finddefinitionlists(blocks):
- """Finds definition lists.
-
- The blocks must have a 'type' field, i.e., they should have been
- run through findliteralblocks first.
- """
- i = 0
- while i < len(blocks):
- # Searching for a paragraph that looks like this:
- #
- # +----------------------------+
- # | term |
- # +--+-------------------------+--+
- # | definition |
- # | (body elements)+ |
- # +----------------------------+
- if (blocks[i]['type'] == 'paragraph' and
- len(blocks[i]['lines']) > 1 and
- not blocks[i]['lines'][0].startswith(' ') and
- blocks[i]['lines'][1].startswith(' ')):
- definitions = []
- for line in blocks[i]['lines']:
- if not line.startswith(' '):
- definitions.append(dict(type='definition', lines=[],
- indent=blocks[i]['indent']))
- definitions[-1]['lines'].append(line)
- definitions[-1]['hang'] = len(line) - len(line.lstrip())
- blocks[i:i+1] = definitions
- i += len(definitions) - 1
- i += 1
- return blocks
-
-
-def addmargins(blocks):
- """Adds empty blocks for vertical spacing.
-
- This groups bullets, options, and definitions together with no vertical
- space between them, and adds an empty block between all other blocks.
- """
- i = 1
- while i < len(blocks):
- if (blocks[i]['type'] == blocks[i-1]['type'] and
- blocks[i]['type'] in ('bullet', 'option', 'field', 'definition')):
- i += 1
- else:
- blocks.insert(i, dict(lines=[''], indent=0, type='margin'))
- i += 2
- return blocks
-
-
-def formatblock(block, width):
- """Format a block according to width."""
- indent = ' ' * block['indent']
- if block['type'] == 'margin':
- return ''
- elif block['type'] == 'literal':
- indent += ' '
- return indent + ('\n' + indent).join(block['lines'])
- elif block['type'] == 'section':
- return indent + ('\n' + indent).join(block['lines'])
- elif block['type'] == 'definition':
- term = indent + block['lines'][0]
- defindent = indent + block['hang'] * ' '
- text = ' '.join(map(str.strip, block['lines'][1:]))
- return "%s\n%s" % (term, textwrap.fill(text, width=width,
- initial_indent=defindent,
- subsequent_indent=defindent))
- else:
- initindent = subindent = indent
- text = ' '.join(map(str.strip, block['lines']))
- if block['type'] == 'bullet':
- initindent = indent + '- '
- subindent = indent + ' '
- elif block['type'] in ('option', 'field'):
- subindent = indent + block['width'] * ' '
-
- return textwrap.fill(text, width=width,
- initial_indent=initindent,
- subsequent_indent=subindent)
-
-
-def format(text, width):
- """Parse and format the text according to width."""
- blocks = findblocks(text)
- blocks = findliteralblocks(blocks)
- blocks = findsections(blocks)
- blocks = findbulletlists(blocks)
- blocks = findoptionlists(blocks)
- blocks = findfieldlists(blocks)
- blocks = finddefinitionlists(blocks)
- blocks = addmargins(blocks)
- return '\n'.join(formatblock(b, width) for b in blocks)
-
-
-if __name__ == "__main__":
- from pprint import pprint
-
- def debug(func, blocks):
- blocks = func(blocks)
- print "*** after %s:" % func.__name__
- pprint(blocks)
- print
- return blocks
-
- text = open(sys.argv[1]).read()
- blocks = debug(findblocks, text)
- blocks = debug(findliteralblocks, blocks)
- blocks = debug(findsections, blocks)
- blocks = debug(findbulletlists, blocks)
- blocks = debug(findoptionlists, blocks)
- blocks = debug(findfieldlists, blocks)
- blocks = debug(finddefinitionlists, blocks)
- blocks = debug(addmargins, blocks)
- print '\n'.join(formatblock(b, 30) for b in blocks)
diff --git a/sys/lib/python/mercurial/mpatch.c b/sys/lib/python/mercurial/mpatch.c
deleted file mode 100644
index 86400d1a2..000000000
--- a/sys/lib/python/mercurial/mpatch.c
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- mpatch.c - efficient binary patching for Mercurial
-
- This implements a patch algorithm that's O(m + nlog n) where m is the
- size of the output and n is the number of patches.
-
- Given a list of binary patches, it unpacks each into a hunk list,
- then combines the hunk lists with a treewise recursion to form a
- single hunk list. This hunk list is then applied to the original
- text.
-
- The text (or binary) fragments are copied directly from their source
- Python objects into a preallocated output string to avoid the
- allocation of intermediate Python objects. Working memory is about 2x
- the total number of hunks.
-
- Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-*/
-
-#include <Python.h>
-#include <stdlib.h>
-#include <string.h>
-
-/* Definitions to get compatibility with python 2.4 and earlier which
- does not have Py_ssize_t. See also PEP 353.
- Note: msvc (8 or earlier) does not have ssize_t, so we use Py_ssize_t.
-*/
-#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
-typedef int Py_ssize_t;
-#define PY_SSIZE_T_MAX INT_MAX
-#define PY_SSIZE_T_MIN INT_MIN
-#endif
-
-#ifdef _WIN32
-# ifdef _MSC_VER
-/* msvc 6.0 has problems */
-# define inline __inline
-typedef unsigned long uint32_t;
-# else
-# include <stdint.h>
-# endif
-static uint32_t ntohl(uint32_t x)
-{
- return ((x & 0x000000ffUL) << 24) |
- ((x & 0x0000ff00UL) << 8) |
- ((x & 0x00ff0000UL) >> 8) |
- ((x & 0xff000000UL) >> 24);
-}
-#else
-/* not windows */
-# include <sys/types.h>
-# if defined __BEOS__ && !defined __HAIKU__
-# include <ByteOrder.h>
-# else
-# include <arpa/inet.h>
-# endif
-# include <inttypes.h>
-#endif
-
-static char mpatch_doc[] = "Efficient binary patching.";
-static PyObject *mpatch_Error;
-
-struct frag {
- int start, end, len;
- const char *data;
-};
-
-struct flist {
- struct frag *base, *head, *tail;
-};
-
-static struct flist *lalloc(int size)
-{
- struct flist *a = NULL;
-
- if (size < 1)
- size = 1;
-
- a = (struct flist *)malloc(sizeof(struct flist));
- if (a) {
- a->base = (struct frag *)malloc(sizeof(struct frag) * size);
- if (a->base) {
- a->head = a->tail = a->base;
- return a;
- }
- free(a);
- a = NULL;
- }
- if (!PyErr_Occurred())
- PyErr_NoMemory();
- return NULL;
-}
-
-static void lfree(struct flist *a)
-{
- if (a) {
- free(a->base);
- free(a);
- }
-}
-
-static int lsize(struct flist *a)
-{
- return a->tail - a->head;
-}
-
-/* move hunks in source that are less cut to dest, compensating
- for changes in offset. the last hunk may be split if necessary.
-*/
-static int gather(struct flist *dest, struct flist *src, int cut, int offset)
-{
- struct frag *d = dest->tail, *s = src->head;
- int postend, c, l;
-
- while (s != src->tail) {
- if (s->start + offset >= cut)
- break; /* we've gone far enough */
-
- postend = offset + s->start + s->len;
- if (postend <= cut) {
- /* save this hunk */
- offset += s->start + s->len - s->end;
- *d++ = *s++;
- }
- else {
- /* break up this hunk */
- c = cut - offset;
- if (s->end < c)
- c = s->end;
- l = cut - offset - s->start;
- if (s->len < l)
- l = s->len;
-
- offset += s->start + l - c;
-
- d->start = s->start;
- d->end = c;
- d->len = l;
- d->data = s->data;
- d++;
- s->start = c;
- s->len = s->len - l;
- s->data = s->data + l;
-
- break;
- }
- }
-
- dest->tail = d;
- src->head = s;
- return offset;
-}
-
-/* like gather, but with no output list */
-static int discard(struct flist *src, int cut, int offset)
-{
- struct frag *s = src->head;
- int postend, c, l;
-
- while (s != src->tail) {
- if (s->start + offset >= cut)
- break;
-
- postend = offset + s->start + s->len;
- if (postend <= cut) {
- offset += s->start + s->len - s->end;
- s++;
- }
- else {
- c = cut - offset;
- if (s->end < c)
- c = s->end;
- l = cut - offset - s->start;
- if (s->len < l)
- l = s->len;
-
- offset += s->start + l - c;
- s->start = c;
- s->len = s->len - l;
- s->data = s->data + l;
-
- break;
- }
- }
-
- src->head = s;
- return offset;
-}
-
-/* combine hunk lists a and b, while adjusting b for offset changes in a/
- this deletes a and b and returns the resultant list. */
-static struct flist *combine(struct flist *a, struct flist *b)
-{
- struct flist *c = NULL;
- struct frag *bh, *ct;
- int offset = 0, post;
-
- if (a && b)
- c = lalloc((lsize(a) + lsize(b)) * 2);
-
- if (c) {
-
- for (bh = b->head; bh != b->tail; bh++) {
- /* save old hunks */
- offset = gather(c, a, bh->start, offset);
-
- /* discard replaced hunks */
- post = discard(a, bh->end, offset);
-
- /* insert new hunk */
- ct = c->tail;
- ct->start = bh->start - offset;
- ct->end = bh->end - post;
- ct->len = bh->len;
- ct->data = bh->data;
- c->tail++;
- offset = post;
- }
-
- /* hold on to tail from a */
- memcpy(c->tail, a->head, sizeof(struct frag) * lsize(a));
- c->tail += lsize(a);
- }
-
- lfree(a);
- lfree(b);
- return c;
-}
-
-/* decode a binary patch into a hunk list */
-static struct flist *decode(const char *bin, int len)
-{
- struct flist *l;
- struct frag *lt;
- const char *data = bin + 12, *end = bin + len;
- char decode[12]; /* for dealing with alignment issues */
-
- /* assume worst case size, we won't have many of these lists */
- l = lalloc(len / 12 + 1);
- if (!l)
- return NULL;
-
- lt = l->tail;
-
- while (data <= end) {
- memcpy(decode, bin, 12);
- lt->start = ntohl(*(uint32_t *)decode);
- lt->end = ntohl(*(uint32_t *)(decode + 4));
- lt->len = ntohl(*(uint32_t *)(decode + 8));
- if (lt->start > lt->end || lt->len < 0)
- break; /* sanity check */
- bin = data + lt->len;
- if (bin < data)
- break; /* big data + big (bogus) len can wrap around */
- lt->data = data;
- data = bin + 12;
- lt++;
- }
-
- if (bin != end) {
- if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error, "patch cannot be decoded");
- lfree(l);
- return NULL;
- }
-
- l->tail = lt;
- return l;
-}
-
-/* calculate the size of resultant text */
-static int calcsize(int len, struct flist *l)
-{
- int outlen = 0, last = 0;
- struct frag *f = l->head;
-
- while (f != l->tail) {
- if (f->start < last || f->end > len) {
- if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error,
- "invalid patch");
- return -1;
- }
- outlen += f->start - last;
- last = f->end;
- outlen += f->len;
- f++;
- }
-
- outlen += len - last;
- return outlen;
-}
-
-static int apply(char *buf, const char *orig, int len, struct flist *l)
-{
- struct frag *f = l->head;
- int last = 0;
- char *p = buf;
-
- while (f != l->tail) {
- if (f->start < last || f->end > len) {
- if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error,
- "invalid patch");
- return 0;
- }
- memcpy(p, orig + last, f->start - last);
- p += f->start - last;
- memcpy(p, f->data, f->len);
- last = f->end;
- p += f->len;
- f++;
- }
- memcpy(p, orig + last, len - last);
- return 1;
-}
-
-/* recursively generate a patch of all bins between start and end */
-static struct flist *fold(PyObject *bins, int start, int end)
-{
- int len;
- Py_ssize_t blen;
- const char *buffer;
-
- if (start + 1 == end) {
- /* trivial case, output a decoded list */
- PyObject *tmp = PyList_GetItem(bins, start);
- if (!tmp)
- return NULL;
- if (PyObject_AsCharBuffer(tmp, &buffer, &blen))
- return NULL;
- return decode(buffer, blen);
- }
-
- /* divide and conquer, memory management is elsewhere */
- len = (end - start) / 2;
- return combine(fold(bins, start, start + len),
- fold(bins, start + len, end));
-}
-
-static PyObject *
-patches(PyObject *self, PyObject *args)
-{
- PyObject *text, *bins, *result;
- struct flist *patch;
- const char *in;
- char *out;
- int len, outlen;
- Py_ssize_t inlen;
-
- if (!PyArg_ParseTuple(args, "OO:mpatch", &text, &bins))
- return NULL;
-
- len = PyList_Size(bins);
- if (!len) {
- /* nothing to do */
- Py_INCREF(text);
- return text;
- }
-
- if (PyObject_AsCharBuffer(text, &in, &inlen))
- return NULL;
-
- patch = fold(bins, 0, len);
- if (!patch)
- return NULL;
-
- outlen = calcsize(inlen, patch);
- if (outlen < 0) {
- result = NULL;
- goto cleanup;
- }
- result = PyString_FromStringAndSize(NULL, outlen);
- if (!result) {
- result = NULL;
- goto cleanup;
- }
- out = PyString_AsString(result);
- if (!apply(out, in, inlen, patch)) {
- Py_DECREF(result);
- result = NULL;
- }
-cleanup:
- lfree(patch);
- return result;
-}
-
-/* calculate size of a patched file directly */
-static PyObject *
-patchedsize(PyObject *self, PyObject *args)
-{
- long orig, start, end, len, outlen = 0, last = 0;
- int patchlen;
- char *bin, *binend, *data;
- char decode[12]; /* for dealing with alignment issues */
-
- if (!PyArg_ParseTuple(args, "ls#", &orig, &bin, &patchlen))
- return NULL;
-
- binend = bin + patchlen;
- data = bin + 12;
-
- while (data <= binend) {
- memcpy(decode, bin, 12);
- start = ntohl(*(uint32_t *)decode);
- end = ntohl(*(uint32_t *)(decode + 4));
- len = ntohl(*(uint32_t *)(decode + 8));
- if (start > end)
- break; /* sanity check */
- bin = data + len;
- if (bin < data)
- break; /* big data + big (bogus) len can wrap around */
- data = bin + 12;
- outlen += start - last;
- last = end;
- outlen += len;
- }
-
- if (bin != binend) {
- if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error, "patch cannot be decoded");
- return NULL;
- }
-
- outlen += orig - last;
- return Py_BuildValue("l", outlen);
-}
-
-static PyMethodDef methods[] = {
- {"patches", patches, METH_VARARGS, "apply a series of patches\n"},
- {"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"},
- {NULL, NULL}
-};
-
-PyMODINIT_FUNC
-initmpatch(void)
-{
- Py_InitModule3("mpatch", methods, mpatch_doc);
- mpatch_Error = PyErr_NewException("mpatch.mpatchError", NULL, NULL);
-}
-
diff --git a/sys/lib/python/mercurial/node.py b/sys/lib/python/mercurial/node.py
deleted file mode 100644
index 2a3be39c1..000000000
--- a/sys/lib/python/mercurial/node.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# node.py - basic nodeid manipulation for mercurial
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import binascii
-
-nullrev = -1
-nullid = "\0" * 20
-
-# This ugly style has a noticeable effect in manifest parsing
-hex = binascii.hexlify
-bin = binascii.unhexlify
-
-def short(node):
- return hex(node[:6])
diff --git a/sys/lib/python/mercurial/osutil.c b/sys/lib/python/mercurial/osutil.c
deleted file mode 100644
index a9874d0c9..000000000
--- a/sys/lib/python/mercurial/osutil.c
+++ /dev/null
@@ -1,534 +0,0 @@
-/*
- osutil.c - native operating system services
-
- Copyright 2007 Matt Mackall and others
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-*/
-
-#define _ATFILE_SOURCE
-#include <Python.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <string.h>
-
-#ifdef _WIN32
-# include <windows.h>
-# include <io.h>
-#else
-# include <dirent.h>
-# include <sys/stat.h>
-# include <sys/types.h>
-# include <unistd.h>
-#endif
-
-// some platforms lack the PATH_MAX definition (eg. GNU/Hurd)
-#ifndef PATH_MAX
-#define PATH_MAX 4096
-#endif
-
-#ifdef _WIN32
-/*
-stat struct compatible with hg expectations
-Mercurial only uses st_mode, st_size and st_mtime
-the rest is kept to minimize changes between implementations
-*/
-struct hg_stat {
- int st_dev;
- int st_mode;
- int st_nlink;
- __int64 st_size;
- int st_mtime;
- int st_ctime;
-};
-struct listdir_stat {
- PyObject_HEAD
- struct hg_stat st;
-};
-#else
-struct listdir_stat {
- PyObject_HEAD
- struct stat st;
-};
-#endif
-
-#define listdir_slot(name) \
- static PyObject *listdir_stat_##name(PyObject *self, void *x) \
- { \
- return PyInt_FromLong(((struct listdir_stat *)self)->st.name); \
- }
-
-listdir_slot(st_dev)
-listdir_slot(st_mode)
-listdir_slot(st_nlink)
-#ifdef _WIN32
-static PyObject *listdir_stat_st_size(PyObject *self, void *x)
-{
- return PyLong_FromLongLong(
- (PY_LONG_LONG)((struct listdir_stat *)self)->st.st_size);
-}
-#else
-listdir_slot(st_size)
-#endif
-listdir_slot(st_mtime)
-listdir_slot(st_ctime)
-
-static struct PyGetSetDef listdir_stat_getsets[] = {
- {"st_dev", listdir_stat_st_dev, 0, 0, 0},
- {"st_mode", listdir_stat_st_mode, 0, 0, 0},
- {"st_nlink", listdir_stat_st_nlink, 0, 0, 0},
- {"st_size", listdir_stat_st_size, 0, 0, 0},
- {"st_mtime", listdir_stat_st_mtime, 0, 0, 0},
- {"st_ctime", listdir_stat_st_ctime, 0, 0, 0},
- {0, 0, 0, 0, 0}
-};
-
-static PyObject *listdir_stat_new(PyTypeObject *t, PyObject *a, PyObject *k)
-{
- return t->tp_alloc(t, 0);
-}
-
-static void listdir_stat_dealloc(PyObject *o)
-{
- o->ob_type->tp_free(o);
-}
-
-static PyTypeObject listdir_stat_type = {
- PyObject_HEAD_INIT(NULL)
- 0, /*ob_size*/
- "osutil.stat", /*tp_name*/
- sizeof(struct listdir_stat), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)listdir_stat_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
- "stat objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- listdir_stat_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- listdir_stat_new, /* tp_new */
-};
-
-#ifdef _WIN32
-
-static int to_python_time(const FILETIME *tm)
-{
- /* number of seconds between epoch and January 1 1601 */
- const __int64 a0 = (__int64)134774L * (__int64)24L * (__int64)3600L;
- /* conversion factor from 100ns to 1s */
- const __int64 a1 = 10000000;
- /* explicit (int) cast to suspend compiler warnings */
- return (int)((((__int64)tm->dwHighDateTime << 32)
- + tm->dwLowDateTime) / a1 - a0);
-}
-
-static PyObject *make_item(const WIN32_FIND_DATAA *fd, int wantstat)
-{
- PyObject *py_st;
- struct hg_stat *stp;
-
- int kind = (fd->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)
- ? _S_IFDIR : _S_IFREG;
-
- if (!wantstat)
- return Py_BuildValue("si", fd->cFileName, kind);
-
- py_st = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
- if (!py_st)
- return NULL;
-
- stp = &((struct listdir_stat *)py_st)->st;
- /*
- use kind as st_mode
- rwx bits on Win32 are meaningless
- and Hg does not use them anyway
- */
- stp->st_mode = kind;
- stp->st_mtime = to_python_time(&fd->ftLastWriteTime);
- stp->st_ctime = to_python_time(&fd->ftCreationTime);
- if (kind == _S_IFREG)
- stp->st_size = ((__int64)fd->nFileSizeHigh << 32)
- + fd->nFileSizeLow;
- return Py_BuildValue("siN", fd->cFileName,
- kind, py_st);
-}
-
-static PyObject *_listdir(char *path, int plen, int wantstat, char *skip)
-{
- PyObject *rval = NULL; /* initialize - return value */
- PyObject *list;
- HANDLE fh;
- WIN32_FIND_DATAA fd;
- char *pattern;
-
- /* build the path + \* pattern string */
- pattern = malloc(plen+3); /* path + \* + \0 */
- if (!pattern) {
- PyErr_NoMemory();
- goto error_nomem;
- }
- strcpy(pattern, path);
-
- if (plen > 0) {
- char c = path[plen-1];
- if (c != ':' && c != '/' && c != '\\')
- pattern[plen++] = '\\';
- }
- strcpy(pattern + plen, "*");
-
- fh = FindFirstFileA(pattern, &fd);
- if (fh == INVALID_HANDLE_VALUE) {
- PyErr_SetFromWindowsErrWithFilename(GetLastError(), path);
- goto error_file;
- }
-
- list = PyList_New(0);
- if (!list)
- goto error_list;
-
- do {
- PyObject *item;
-
- if (fd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
- if (!strcmp(fd.cFileName, ".")
- || !strcmp(fd.cFileName, ".."))
- continue;
-
- if (skip && !strcmp(fd.cFileName, skip)) {
- rval = PyList_New(0);
- goto error;
- }
- }
-
- item = make_item(&fd, wantstat);
- if (!item)
- goto error;
-
- if (PyList_Append(list, item)) {
- Py_XDECREF(item);
- goto error;
- }
-
- Py_XDECREF(item);
- } while (FindNextFileA(fh, &fd));
-
- if (GetLastError() != ERROR_NO_MORE_FILES) {
- PyErr_SetFromWindowsErrWithFilename(GetLastError(), path);
- goto error;
- }
-
- rval = list;
- Py_XINCREF(rval);
-error:
- Py_XDECREF(list);
-error_list:
- FindClose(fh);
-error_file:
- free(pattern);
-error_nomem:
- return rval;
-}
-
-#else
-
-int entkind(struct dirent *ent)
-{
-#ifdef DT_REG
- switch (ent->d_type) {
- case DT_REG: return S_IFREG;
- case DT_DIR: return S_IFDIR;
- case DT_LNK: return S_IFLNK;
- case DT_BLK: return S_IFBLK;
- case DT_CHR: return S_IFCHR;
- case DT_FIFO: return S_IFIFO;
- case DT_SOCK: return S_IFSOCK;
- }
-#endif
- return -1;
-}
-
-static PyObject *_listdir(char *path, int pathlen, int keepstat, char *skip)
-{
- PyObject *list, *elem, *stat, *ret = NULL;
- char fullpath[PATH_MAX + 10];
- int kind, err;
- struct stat st;
- struct dirent *ent;
- DIR *dir;
-#ifdef AT_SYMLINK_NOFOLLOW
- int dfd = -1;
-#endif
-
- if (pathlen >= PATH_MAX) {
- PyErr_SetString(PyExc_ValueError, "path too long");
- goto error_value;
- }
- strncpy(fullpath, path, PATH_MAX);
- fullpath[pathlen] = '/';
-
-#ifdef AT_SYMLINK_NOFOLLOW
- dfd = open(path, O_RDONLY);
- if (dfd == -1) {
- PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
- goto error_value;
- }
- dir = fdopendir(dfd);
-#else
- dir = opendir(path);
-#endif
- if (!dir) {
- PyErr_SetFromErrnoWithFilename(PyExc_OSError, path);
- goto error_dir;
- }
-
- list = PyList_New(0);
- if (!list)
- goto error_list;
-
- while ((ent = readdir(dir))) {
- if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
- continue;
-
- kind = entkind(ent);
- if (kind == -1 || keepstat) {
-#ifdef AT_SYMLINK_NOFOLLOW
- err = fstatat(dfd, ent->d_name, &st,
- AT_SYMLINK_NOFOLLOW);
-#else
- strncpy(fullpath + pathlen + 1, ent->d_name,
- PATH_MAX - pathlen);
- fullpath[PATH_MAX] = 0;
- err = lstat(fullpath, &st);
-#endif
- if (err == -1) {
- strncpy(fullpath + pathlen + 1, ent->d_name,
- PATH_MAX - pathlen);
- fullpath[PATH_MAX] = 0;
- PyErr_SetFromErrnoWithFilename(PyExc_OSError,
- fullpath);
- goto error;
- }
- kind = st.st_mode & S_IFMT;
- }
-
- /* quit early? */
- if (skip && kind == S_IFDIR && !strcmp(ent->d_name, skip)) {
- ret = PyList_New(0);
- goto error;
- }
-
- if (keepstat) {
- stat = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL);
- if (!stat)
- goto error;
- memcpy(&((struct listdir_stat *)stat)->st, &st, sizeof(st));
- elem = Py_BuildValue("siN", ent->d_name, kind, stat);
- } else
- elem = Py_BuildValue("si", ent->d_name, kind);
- if (!elem)
- goto error;
-
- PyList_Append(list, elem);
- Py_DECREF(elem);
- }
-
- ret = list;
- Py_INCREF(ret);
-
-error:
- Py_DECREF(list);
-error_list:
- closedir(dir);
-error_dir:
-#ifdef AT_SYMLINK_NOFOLLOW
- close(dfd);
-#endif
-error_value:
- return ret;
-}
-
-#endif /* ndef _WIN32 */
-
-static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
-{
- PyObject *statobj = NULL; /* initialize - optional arg */
- PyObject *skipobj = NULL; /* initialize - optional arg */
- char *path, *skip = NULL;
- int wantstat, plen;
-
- static char *kwlist[] = {"path", "stat", "skip", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|OO:listdir",
- kwlist, &path, &plen, &statobj, &skipobj))
- return NULL;
-
- wantstat = statobj && PyObject_IsTrue(statobj);
-
- if (skipobj && skipobj != Py_None) {
- skip = PyString_AsString(skipobj);
- if (!skip)
- return NULL;
- }
-
- return _listdir(path, plen, wantstat, skip);
-}
-
-#ifdef _WIN32
-static PyObject *posixfile(PyObject *self, PyObject *args, PyObject *kwds)
-{
- static char *kwlist[] = {"name", "mode", "buffering", NULL};
- PyObject *file_obj = NULL;
- char *name = NULL;
- char *mode = "rb";
- DWORD access = 0;
- DWORD creation;
- HANDLE handle;
- int fd, flags = 0;
- int bufsize = -1;
- char m0, m1, m2;
- char fpmode[4];
- int fppos = 0;
- int plus;
- FILE *fp;
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|si:posixfile", kwlist,
- Py_FileSystemDefaultEncoding,
- &name, &mode, &bufsize))
- return NULL;
-
- m0 = mode[0];
- m1 = m0 ? mode[1] : '\0';
- m2 = m1 ? mode[2] : '\0';
- plus = m1 == '+' || m2 == '+';
-
- fpmode[fppos++] = m0;
- if (m1 == 'b' || m2 == 'b') {
- flags = _O_BINARY;
- fpmode[fppos++] = 'b';
- }
- else
- flags = _O_TEXT;
- if (plus) {
- flags |= _O_RDWR;
- access = GENERIC_READ | GENERIC_WRITE;
- fpmode[fppos++] = '+';
- }
- fpmode[fppos++] = '\0';
-
- switch (m0) {
- case 'r':
- creation = OPEN_EXISTING;
- if (!plus) {
- flags |= _O_RDONLY;
- access = GENERIC_READ;
- }
- break;
- case 'w':
- creation = CREATE_ALWAYS;
- if (!plus) {
- access = GENERIC_WRITE;
- flags |= _O_WRONLY;
- }
- break;
- case 'a':
- creation = OPEN_ALWAYS;
- flags |= _O_APPEND;
- if (!plus) {
- flags |= _O_WRONLY;
- access = GENERIC_WRITE;
- }
- break;
- default:
- PyErr_Format(PyExc_ValueError,
- "mode string must begin with one of 'r', 'w', "
- "or 'a', not '%c'", m0);
- goto bail;
- }
-
- handle = CreateFile(name, access,
- FILE_SHARE_READ | FILE_SHARE_WRITE |
- FILE_SHARE_DELETE,
- NULL,
- creation,
- FILE_ATTRIBUTE_NORMAL,
- 0);
-
- if (handle == INVALID_HANDLE_VALUE) {
- PyErr_SetFromWindowsErrWithFilename(GetLastError(), name);
- goto bail;
- }
-
- fd = _open_osfhandle((intptr_t) handle, flags);
- if (fd == -1) {
- CloseHandle(handle);
- PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
- goto bail;
- }
-
- fp = _fdopen(fd, fpmode);
- if (fp == NULL) {
- _close(fd);
- PyErr_SetFromErrnoWithFilename(PyExc_IOError, name);
- goto bail;
- }
-
- file_obj = PyFile_FromFile(fp, name, mode, fclose);
- if (file_obj == NULL) {
- fclose(fp);
- goto bail;
- }
-
- PyFile_SetBufSize(file_obj, bufsize);
-bail:
- PyMem_Free(name);
- return file_obj;
-}
-#endif
-
-static char osutil_doc[] = "Native operating system services.";
-
-static PyMethodDef methods[] = {
- {"listdir", (PyCFunction)listdir, METH_VARARGS | METH_KEYWORDS,
- "list a directory\n"},
-#ifdef _WIN32
- {"posixfile", (PyCFunction)posixfile, METH_VARARGS | METH_KEYWORDS,
- "Open a file with POSIX-like semantics.\n"
-"On error, this function may raise either a WindowsError or an IOError."},
-#endif
- {NULL, NULL}
-};
-
-PyMODINIT_FUNC initosutil(void)
-{
- if (PyType_Ready(&listdir_stat_type) == -1)
- return;
-
- Py_InitModule3("osutil", methods, osutil_doc);
-}
diff --git a/sys/lib/python/mercurial/parsers.c b/sys/lib/python/mercurial/parsers.c
deleted file mode 100644
index 93c10c05d..000000000
--- a/sys/lib/python/mercurial/parsers.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- parsers.c - efficient content parsing
-
- Copyright 2008 Matt Mackall <mpm@selenic.com> and others
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-*/
-
-#include <Python.h>
-#include <ctype.h>
-#include <string.h>
-
-static int hexdigit(char c)
-{
- if (c >= '0' && c <= '9')
- return c - '0';
- if (c >= 'a' && c <= 'f')
- return c - 'a' + 10;
- if (c >= 'A' && c <= 'F')
- return c - 'A' + 10;
-
- PyErr_SetString(PyExc_ValueError, "input contains non-hex character");
- return 0;
-}
-
-/*
- * Turn a hex-encoded string into binary.
- */
-static PyObject *unhexlify(const char *str, int len)
-{
- PyObject *ret;
- const char *c;
- char *d;
-
- ret = PyString_FromStringAndSize(NULL, len / 2);
- if (!ret)
- return NULL;
-
- d = PyString_AS_STRING(ret);
- for (c = str; c < str + len;) {
- int hi = hexdigit(*c++);
- int lo = hexdigit(*c++);
- *d++ = (hi << 4) | lo;
- }
-
- return ret;
-}
-
-/*
- * This code assumes that a manifest is stitched together with newline
- * ('\n') characters.
- */
-static PyObject *parse_manifest(PyObject *self, PyObject *args)
-{
- PyObject *mfdict, *fdict;
- char *str, *cur, *start, *zero;
- int len;
-
- if (!PyArg_ParseTuple(args, "O!O!s#:parse_manifest",
- &PyDict_Type, &mfdict,
- &PyDict_Type, &fdict,
- &str, &len))
- goto quit;
-
- for (start = cur = str, zero = NULL; cur < str + len; cur++) {
- PyObject *file = NULL, *node = NULL;
- PyObject *flags = NULL;
- int nlen;
-
- if (!*cur) {
- zero = cur;
- continue;
- }
- else if (*cur != '\n')
- continue;
-
- if (!zero) {
- PyErr_SetString(PyExc_ValueError,
- "manifest entry has no separator");
- goto quit;
- }
-
- file = PyString_FromStringAndSize(start, zero - start);
- if (!file)
- goto bail;
-
- nlen = cur - zero - 1;
-
- node = unhexlify(zero + 1, nlen > 40 ? 40 : nlen);
- if (!node)
- goto bail;
-
- if (nlen > 40) {
- PyObject *flags;
-
- flags = PyString_FromStringAndSize(zero + 41,
- nlen - 40);
- if (!flags)
- goto bail;
-
- if (PyDict_SetItem(fdict, file, flags) == -1)
- goto bail;
- }
-
- if (PyDict_SetItem(mfdict, file, node) == -1)
- goto bail;
-
- start = cur + 1;
- zero = NULL;
-
- Py_XDECREF(flags);
- Py_XDECREF(node);
- Py_XDECREF(file);
- continue;
- bail:
- Py_XDECREF(flags);
- Py_XDECREF(node);
- Py_XDECREF(file);
- goto quit;
- }
-
- if (len > 0 && *(cur - 1) != '\n') {
- PyErr_SetString(PyExc_ValueError,
- "manifest contains trailing garbage");
- goto quit;
- }
-
- Py_INCREF(Py_None);
- return Py_None;
-quit:
- return NULL;
-}
-
-#ifdef _WIN32
-# ifdef _MSC_VER
-/* msvc 6.0 has problems */
-# define inline __inline
-typedef unsigned long uint32_t;
-typedef unsigned __int64 uint64_t;
-# else
-# include <stdint.h>
-# endif
-static uint32_t ntohl(uint32_t x)
-{
- return ((x & 0x000000ffUL) << 24) |
- ((x & 0x0000ff00UL) << 8) |
- ((x & 0x00ff0000UL) >> 8) |
- ((x & 0xff000000UL) >> 24);
-}
-#else
-/* not windows */
-# include <sys/types.h>
-# if defined __BEOS__ && !defined __HAIKU__
-# include <ByteOrder.h>
-# else
-# include <arpa/inet.h>
-# endif
-# include <inttypes.h>
-#endif
-
-static PyObject *parse_dirstate(PyObject *self, PyObject *args)
-{
- PyObject *dmap, *cmap, *parents = NULL, *ret = NULL;
- PyObject *fname = NULL, *cname = NULL, *entry = NULL;
- char *str, *cur, *end, *cpos;
- int state, mode, size, mtime;
- unsigned int flen;
- int len;
- char decode[16]; /* for alignment */
-
- if (!PyArg_ParseTuple(args, "O!O!s#:parse_dirstate",
- &PyDict_Type, &dmap,
- &PyDict_Type, &cmap,
- &str, &len))
- goto quit;
-
- /* read parents */
- if (len < 40)
- goto quit;
-
- parents = Py_BuildValue("s#s#", str, 20, str + 20, 20);
- if (!parents)
- goto quit;
-
- /* read filenames */
- cur = str + 40;
- end = str + len;
-
- while (cur < end - 17) {
- /* unpack header */
- state = *cur;
- memcpy(decode, cur + 1, 16);
- mode = ntohl(*(uint32_t *)(decode));
- size = ntohl(*(uint32_t *)(decode + 4));
- mtime = ntohl(*(uint32_t *)(decode + 8));
- flen = ntohl(*(uint32_t *)(decode + 12));
- cur += 17;
- if (flen > end - cur) {
- PyErr_SetString(PyExc_ValueError, "overflow in dirstate");
- goto quit;
- }
-
- entry = Py_BuildValue("ciii", state, mode, size, mtime);
- if (!entry)
- goto quit;
- PyObject_GC_UnTrack(entry); /* don't waste time with this */
-
- cpos = memchr(cur, 0, flen);
- if (cpos) {
- fname = PyString_FromStringAndSize(cur, cpos - cur);
- cname = PyString_FromStringAndSize(cpos + 1,
- flen - (cpos - cur) - 1);
- if (!fname || !cname ||
- PyDict_SetItem(cmap, fname, cname) == -1 ||
- PyDict_SetItem(dmap, fname, entry) == -1)
- goto quit;
- Py_DECREF(cname);
- } else {
- fname = PyString_FromStringAndSize(cur, flen);
- if (!fname ||
- PyDict_SetItem(dmap, fname, entry) == -1)
- goto quit;
- }
- cur += flen;
- Py_DECREF(fname);
- Py_DECREF(entry);
- fname = cname = entry = NULL;
- }
-
- ret = parents;
- Py_INCREF(ret);
-quit:
- Py_XDECREF(fname);
- Py_XDECREF(cname);
- Py_XDECREF(entry);
- Py_XDECREF(parents);
- return ret;
-}
-
-const char nullid[20];
-const int nullrev = -1;
-
-/* create an index tuple, insert into the nodemap */
-static PyObject * _build_idx_entry(PyObject *nodemap, int n, uint64_t offset_flags,
- int comp_len, int uncomp_len, int base_rev,
- int link_rev, int parent_1, int parent_2,
- const char *c_node_id)
-{
- int err;
- PyObject *entry, *node_id, *n_obj;
-
- node_id = PyString_FromStringAndSize(c_node_id, 20);
- n_obj = PyInt_FromLong(n);
- if (!node_id || !n_obj)
- err = -1;
- else
- err = PyDict_SetItem(nodemap, node_id, n_obj);
-
- Py_XDECREF(n_obj);
- if (err)
- goto error_dealloc;
-
- entry = Py_BuildValue("LiiiiiiN", offset_flags, comp_len,
- uncomp_len, base_rev, link_rev,
- parent_1, parent_2, node_id);
- if (!entry)
- goto error_dealloc;
- PyObject_GC_UnTrack(entry); /* don't waste time with this */
-
- return entry;
-
-error_dealloc:
- Py_XDECREF(node_id);
- return NULL;
-}
-
-/* RevlogNG format (all in big endian, data may be inlined):
- * 6 bytes: offset
- * 2 bytes: flags
- * 4 bytes: compressed length
- * 4 bytes: uncompressed length
- * 4 bytes: base revision
- * 4 bytes: link revision
- * 4 bytes: parent 1 revision
- * 4 bytes: parent 2 revision
- * 32 bytes: nodeid (only 20 bytes used)
- */
-static int _parse_index_ng (const char *data, int size, int inlined,
- PyObject *index, PyObject *nodemap)
-{
- PyObject *entry;
- int n = 0, err;
- uint64_t offset_flags;
- int comp_len, uncomp_len, base_rev, link_rev, parent_1, parent_2;
- const char *c_node_id;
- const char *end = data + size;
- char decode[64]; /* to enforce alignment with inline data */
-
- while (data < end) {
- unsigned int step;
-
- memcpy(decode, data, 64);
- offset_flags = ntohl(*((uint32_t *) (decode + 4)));
- if (n == 0) /* mask out version number for the first entry */
- offset_flags &= 0xFFFF;
- else {
- uint32_t offset_high = ntohl(*((uint32_t *) decode));
- offset_flags |= ((uint64_t) offset_high) << 32;
- }
-
- comp_len = ntohl(*((uint32_t *) (decode + 8)));
- uncomp_len = ntohl(*((uint32_t *) (decode + 12)));
- base_rev = ntohl(*((uint32_t *) (decode + 16)));
- link_rev = ntohl(*((uint32_t *) (decode + 20)));
- parent_1 = ntohl(*((uint32_t *) (decode + 24)));
- parent_2 = ntohl(*((uint32_t *) (decode + 28)));
- c_node_id = decode + 32;
-
- entry = _build_idx_entry(nodemap, n, offset_flags,
- comp_len, uncomp_len, base_rev,
- link_rev, parent_1, parent_2,
- c_node_id);
- if (!entry)
- return 0;
-
- if (inlined) {
- err = PyList_Append(index, entry);
- Py_DECREF(entry);
- if (err)
- return 0;
- } else
- PyList_SET_ITEM(index, n, entry); /* steals reference */
-
- n++;
- step = 64 + (inlined ? comp_len : 0);
- if (end - data < step)
- break;
- data += step;
- }
- if (data != end) {
- if (!PyErr_Occurred())
- PyErr_SetString(PyExc_ValueError, "corrupt index file");
- return 0;
- }
-
- /* create the nullid/nullrev entry in the nodemap and the
- * magic nullid entry in the index at [-1] */
- entry = _build_idx_entry(nodemap,
- nullrev, 0, 0, 0, -1, -1, -1, -1, nullid);
- if (!entry)
- return 0;
- if (inlined) {
- err = PyList_Append(index, entry);
- Py_DECREF(entry);
- if (err)
- return 0;
- } else
- PyList_SET_ITEM(index, n, entry); /* steals reference */
-
- return 1;
-}
-
-/* This function parses a index file and returns a Python tuple of the
- * following format: (index, nodemap, cache)
- *
- * index: a list of tuples containing the RevlogNG records
- * nodemap: a dict mapping node ids to indices in the index list
- * cache: if data is inlined, a tuple (index_file_content, 0) else None
- */
-static PyObject *parse_index(PyObject *self, PyObject *args)
-{
- const char *data;
- int size, inlined;
- PyObject *rval = NULL, *index = NULL, *nodemap = NULL, *cache = NULL;
- PyObject *data_obj = NULL, *inlined_obj;
-
- if (!PyArg_ParseTuple(args, "s#O", &data, &size, &inlined_obj))
- return NULL;
- inlined = inlined_obj && PyObject_IsTrue(inlined_obj);
-
- /* If no data is inlined, we know the size of the index list in
- * advance: size divided by size of one one revlog record (64 bytes)
- * plus one for the nullid */
- index = inlined ? PyList_New(0) : PyList_New(size / 64 + 1);
- if (!index)
- goto quit;
-
- nodemap = PyDict_New();
- if (!nodemap)
- goto quit;
-
- /* set up the cache return value */
- if (inlined) {
- /* Note that the reference to data_obj is only borrowed */
- data_obj = PyTuple_GET_ITEM(args, 0);
- cache = Py_BuildValue("iO", 0, data_obj);
- if (!cache)
- goto quit;
- } else {
- cache = Py_None;
- Py_INCREF(Py_None);
- }
-
- /* actually populate the index and the nodemap with data */
- if (!_parse_index_ng (data, size, inlined, index, nodemap))
- goto quit;
-
- rval = Py_BuildValue("NNN", index, nodemap, cache);
- if (!rval)
- goto quit;
- return rval;
-
-quit:
- Py_XDECREF(index);
- Py_XDECREF(nodemap);
- Py_XDECREF(cache);
- Py_XDECREF(rval);
- return NULL;
-}
-
-
-static char parsers_doc[] = "Efficient content parsing.";
-
-static PyMethodDef methods[] = {
- {"parse_manifest", parse_manifest, METH_VARARGS, "parse a manifest\n"},
- {"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
- {"parse_index", parse_index, METH_VARARGS, "parse a revlog index\n"},
- {NULL, NULL}
-};
-
-PyMODINIT_FUNC initparsers(void)
-{
- Py_InitModule3("parsers", methods, parsers_doc);
-}
diff --git a/sys/lib/python/mercurial/patch.py b/sys/lib/python/mercurial/patch.py
deleted file mode 100644
index d04a76aaf..000000000
--- a/sys/lib/python/mercurial/patch.py
+++ /dev/null
@@ -1,1454 +0,0 @@
-# patch.py - patch file parsing routines
-#
-# Copyright 2006 Brendan Cully <brendan@kublai.com>
-# Copyright 2007 Chris Mason <chris.mason@oracle.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-from node import hex, nullid, short
-import base85, cmdutil, mdiff, util, diffhelpers, copies
-import cStringIO, email.Parser, os, re
-import sys, tempfile, zlib
-
-gitre = re.compile('diff --git a/(.*) b/(.*)')
-
-class PatchError(Exception):
- pass
-
-class NoHunks(PatchError):
- pass
-
-# helper functions
-
-def copyfile(src, dst, basedir):
- abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
- if os.path.exists(absdst):
- raise util.Abort(_("cannot create %s: destination already exists") %
- dst)
-
- dstdir = os.path.dirname(absdst)
- if dstdir and not os.path.isdir(dstdir):
- try:
- os.makedirs(dstdir)
- except IOError:
- raise util.Abort(
- _("cannot create %s: unable to create destination directory")
- % dst)
-
- util.copyfile(abssrc, absdst)
-
-# public functions
-
-def extract(ui, fileobj):
- '''extract patch from data read from fileobj.
-
- patch can be a normal patch or contained in an email message.
-
- return tuple (filename, message, user, date, node, p1, p2).
- Any item in the returned tuple can be None. If filename is None,
- fileobj did not contain a patch. Caller must unlink filename when done.'''
-
- # attempt to detect the start of a patch
- # (this heuristic is borrowed from quilt)
- diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
- r'retrieving revision [0-9]+(\.[0-9]+)*$|'
- r'(---|\*\*\*)[ \t])', re.MULTILINE)
-
- fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
- tmpfp = os.fdopen(fd, 'w')
- try:
- msg = email.Parser.Parser().parse(fileobj)
-
- subject = msg['Subject']
- user = msg['From']
- gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
- # should try to parse msg['Date']
- date = None
- nodeid = None
- branch = None
- parents = []
-
- if subject:
- if subject.startswith('[PATCH'):
- pend = subject.find(']')
- if pend >= 0:
- subject = subject[pend+1:].lstrip()
- subject = subject.replace('\n\t', ' ')
- ui.debug('Subject: %s\n' % subject)
- if user:
- ui.debug('From: %s\n' % user)
- diffs_seen = 0
- ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
- message = ''
- for part in msg.walk():
- content_type = part.get_content_type()
- ui.debug('Content-Type: %s\n' % content_type)
- if content_type not in ok_types:
- continue
- payload = part.get_payload(decode=True)
- m = diffre.search(payload)
- if m:
- hgpatch = False
- ignoretext = False
-
- ui.debug(_('found patch at byte %d\n') % m.start(0))
- diffs_seen += 1
- cfp = cStringIO.StringIO()
- for line in payload[:m.start(0)].splitlines():
- if line.startswith('# HG changeset patch'):
- ui.debug(_('patch generated by hg export\n'))
- hgpatch = True
- # drop earlier commit message content
- cfp.seek(0)
- cfp.truncate()
- subject = None
- elif hgpatch:
- if line.startswith('# User '):
- user = line[7:]
- ui.debug('From: %s\n' % user)
- elif line.startswith("# Date "):
- date = line[7:]
- elif line.startswith("# Branch "):
- branch = line[9:]
- elif line.startswith("# Node ID "):
- nodeid = line[10:]
- elif line.startswith("# Parent "):
- parents.append(line[10:])
- elif line == '---' and gitsendmail:
- ignoretext = True
- if not line.startswith('# ') and not ignoretext:
- cfp.write(line)
- cfp.write('\n')
- message = cfp.getvalue()
- if tmpfp:
- tmpfp.write(payload)
- if not payload.endswith('\n'):
- tmpfp.write('\n')
- elif not diffs_seen and message and content_type == 'text/plain':
- message += '\n' + payload
- except:
- tmpfp.close()
- os.unlink(tmpname)
- raise
-
- if subject and not message.startswith(subject):
- message = '%s\n%s' % (subject, message)
- tmpfp.close()
- if not diffs_seen:
- os.unlink(tmpname)
- return None, message, user, date, branch, None, None, None
- p1 = parents and parents.pop(0) or None
- p2 = parents and parents.pop(0) or None
- return tmpname, message, user, date, branch, nodeid, p1, p2
-
-GP_PATCH = 1 << 0 # we have to run patch
-GP_FILTER = 1 << 1 # there's some copy/rename operation
-GP_BINARY = 1 << 2 # there's a binary patch
-
-class patchmeta(object):
- """Patched file metadata
-
- 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
- or COPY. 'path' is patched file path. 'oldpath' is set to the
- origin file when 'op' is either COPY or RENAME, None otherwise. If
- file mode is changed, 'mode' is a tuple (islink, isexec) where
- 'islink' is True if the file is a symlink and 'isexec' is True if
- the file is executable. Otherwise, 'mode' is None.
- """
- def __init__(self, path):
- self.path = path
- self.oldpath = None
- self.mode = None
- self.op = 'MODIFY'
- self.lineno = 0
- self.binary = False
-
- def setmode(self, mode):
- islink = mode & 020000
- isexec = mode & 0100
- self.mode = (islink, isexec)
-
-def readgitpatch(lr):
- """extract git-style metadata about patches from <patchname>"""
-
- # Filter patch for git information
- gp = None
- gitpatches = []
- # Can have a git patch with only metadata, causing patch to complain
- dopatch = 0
-
- lineno = 0
- for line in lr:
- lineno += 1
- line = line.rstrip(' \r\n')
- if line.startswith('diff --git'):
- m = gitre.match(line)
- if m:
- if gp:
- gitpatches.append(gp)
- src, dst = m.group(1, 2)
- gp = patchmeta(dst)
- gp.lineno = lineno
- elif gp:
- if line.startswith('--- '):
- if gp.op in ('COPY', 'RENAME'):
- dopatch |= GP_FILTER
- gitpatches.append(gp)
- gp = None
- dopatch |= GP_PATCH
- continue
- if line.startswith('rename from '):
- gp.op = 'RENAME'
- gp.oldpath = line[12:]
- elif line.startswith('rename to '):
- gp.path = line[10:]
- elif line.startswith('copy from '):
- gp.op = 'COPY'
- gp.oldpath = line[10:]
- elif line.startswith('copy to '):
- gp.path = line[8:]
- elif line.startswith('deleted file'):
- gp.op = 'DELETE'
- # is the deleted file a symlink?
- gp.setmode(int(line[-6:], 8))
- elif line.startswith('new file mode '):
- gp.op = 'ADD'
- gp.setmode(int(line[-6:], 8))
- elif line.startswith('new mode '):
- gp.setmode(int(line[-6:], 8))
- elif line.startswith('GIT binary patch'):
- dopatch |= GP_BINARY
- gp.binary = True
- if gp:
- gitpatches.append(gp)
-
- if not gitpatches:
- dopatch = GP_PATCH
-
- return (dopatch, gitpatches)
-
-class linereader(object):
- # simple class to allow pushing lines back into the input stream
- def __init__(self, fp, textmode=False):
- self.fp = fp
- self.buf = []
- self.textmode = textmode
-
- def push(self, line):
- if line is not None:
- self.buf.append(line)
-
- def readline(self):
- if self.buf:
- l = self.buf[0]
- del self.buf[0]
- return l
- l = self.fp.readline()
- if self.textmode and l.endswith('\r\n'):
- l = l[:-2] + '\n'
- return l
-
- def __iter__(self):
- while 1:
- l = self.readline()
- if not l:
- break
- yield l
-
-# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
-unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
-contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
-
-class patchfile(object):
- def __init__(self, ui, fname, opener, missing=False, eol=None):
- self.fname = fname
- self.eol = eol
- self.opener = opener
- self.ui = ui
- self.lines = []
- self.exists = False
- self.missing = missing
- if not missing:
- try:
- self.lines = self.readlines(fname)
- self.exists = True
- except IOError:
- pass
- else:
- self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
-
- self.hash = {}
- self.dirty = 0
- self.offset = 0
- self.rej = []
- self.fileprinted = False
- self.printfile(False)
- self.hunks = 0
-
- def readlines(self, fname):
- fp = self.opener(fname, 'r')
- try:
- return list(linereader(fp, self.eol is not None))
- finally:
- fp.close()
-
- def writelines(self, fname, lines):
- fp = self.opener(fname, 'w')
- try:
- if self.eol and self.eol != '\n':
- for l in lines:
- if l and l[-1] == '\n':
- l = l[:-1] + self.eol
- fp.write(l)
- else:
- fp.writelines(lines)
- finally:
- fp.close()
-
- def unlink(self, fname):
- os.unlink(fname)
-
- def printfile(self, warn):
- if self.fileprinted:
- return
- if warn or self.ui.verbose:
- self.fileprinted = True
- s = _("patching file %s\n") % self.fname
- if warn:
- self.ui.warn(s)
- else:
- self.ui.note(s)
-
-
- def findlines(self, l, linenum):
- # looks through the hash and finds candidate lines. The
- # result is a list of line numbers sorted based on distance
- # from linenum
-
- try:
- cand = self.hash[l]
- except:
- return []
-
- if len(cand) > 1:
- # resort our list of potentials forward then back.
- cand.sort(key=lambda x: abs(x - linenum))
- return cand
-
- def hashlines(self):
- self.hash = {}
- for x, s in enumerate(self.lines):
- self.hash.setdefault(s, []).append(x)
-
- def write_rej(self):
- # our rejects are a little different from patch(1). This always
- # creates rejects in the same form as the original patch. A file
- # header is inserted so that you can run the reject through patch again
- # without having to type the filename.
-
- if not self.rej:
- return
-
- fname = self.fname + ".rej"
- self.ui.warn(
- _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
- (len(self.rej), self.hunks, fname))
-
- def rejlines():
- base = os.path.basename(self.fname)
- yield "--- %s\n+++ %s\n" % (base, base)
- for x in self.rej:
- for l in x.hunk:
- yield l
- if l[-1] != '\n':
- yield "\n\ No newline at end of file\n"
-
- self.writelines(fname, rejlines())
-
- def write(self, dest=None):
- if not self.dirty:
- return
- if not dest:
- dest = self.fname
- self.writelines(dest, self.lines)
-
- def close(self):
- self.write()
- self.write_rej()
-
- def apply(self, h, reverse):
- if not h.complete():
- raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
- (h.number, h.desc, len(h.a), h.lena, len(h.b),
- h.lenb))
-
- self.hunks += 1
- if reverse:
- h.reverse()
-
- if self.missing:
- self.rej.append(h)
- return -1
-
- if self.exists and h.createfile():
- self.ui.warn(_("file %s already exists\n") % self.fname)
- self.rej.append(h)
- return -1
-
- if isinstance(h, githunk):
- if h.rmfile():
- self.unlink(self.fname)
- else:
- self.lines[:] = h.new()
- self.offset += len(h.new())
- self.dirty = 1
- return 0
-
- # fast case first, no offsets, no fuzz
- old = h.old()
- # patch starts counting at 1 unless we are adding the file
- if h.starta == 0:
- start = 0
- else:
- start = h.starta + self.offset - 1
- orig_start = start
- if diffhelpers.testhunk(old, self.lines, start) == 0:
- if h.rmfile():
- self.unlink(self.fname)
- else:
- self.lines[start : start + h.lena] = h.new()
- self.offset += h.lenb - h.lena
- self.dirty = 1
- return 0
-
- # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
- self.hashlines()
- if h.hunk[-1][0] != ' ':
- # if the hunk tried to put something at the bottom of the file
- # override the start line and use eof here
- search_start = len(self.lines)
- else:
- search_start = orig_start
-
- for fuzzlen in xrange(3):
- for toponly in [ True, False ]:
- old = h.old(fuzzlen, toponly)
-
- cand = self.findlines(old[0][1:], search_start)
- for l in cand:
- if diffhelpers.testhunk(old, self.lines, l) == 0:
- newlines = h.new(fuzzlen, toponly)
- self.lines[l : l + len(old)] = newlines
- self.offset += len(newlines) - len(old)
- self.dirty = 1
- if fuzzlen:
- fuzzstr = "with fuzz %d " % fuzzlen
- f = self.ui.warn
- self.printfile(True)
- else:
- fuzzstr = ""
- f = self.ui.note
- offset = l - orig_start - fuzzlen
- if offset == 1:
- msg = _("Hunk #%d succeeded at %d %s"
- "(offset %d line).\n")
- else:
- msg = _("Hunk #%d succeeded at %d %s"
- "(offset %d lines).\n")
- f(msg % (h.number, l+1, fuzzstr, offset))
- return fuzzlen
- self.printfile(True)
- self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
- self.rej.append(h)
- return -1
-
-class hunk(object):
- def __init__(self, desc, num, lr, context, create=False, remove=False):
- self.number = num
- self.desc = desc
- self.hunk = [ desc ]
- self.a = []
- self.b = []
- if context:
- self.read_context_hunk(lr)
- else:
- self.read_unified_hunk(lr)
- self.create = create
- self.remove = remove and not create
-
- def read_unified_hunk(self, lr):
- m = unidesc.match(self.desc)
- if not m:
- raise PatchError(_("bad hunk #%d") % self.number)
- self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
- if self.lena is None:
- self.lena = 1
- else:
- self.lena = int(self.lena)
- if self.lenb is None:
- self.lenb = 1
- else:
- self.lenb = int(self.lenb)
- self.starta = int(self.starta)
- self.startb = int(self.startb)
- diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
- # if we hit eof before finishing out the hunk, the last line will
- # be zero length. Lets try to fix it up.
- while len(self.hunk[-1]) == 0:
- del self.hunk[-1]
- del self.a[-1]
- del self.b[-1]
- self.lena -= 1
- self.lenb -= 1
-
- def read_context_hunk(self, lr):
- self.desc = lr.readline()
- m = contextdesc.match(self.desc)
- if not m:
- raise PatchError(_("bad hunk #%d") % self.number)
- foo, self.starta, foo2, aend, foo3 = m.groups()
- self.starta = int(self.starta)
- if aend is None:
- aend = self.starta
- self.lena = int(aend) - self.starta
- if self.starta:
- self.lena += 1
- for x in xrange(self.lena):
- l = lr.readline()
- if l.startswith('---'):
- lr.push(l)
- break
- s = l[2:]
- if l.startswith('- ') or l.startswith('! '):
- u = '-' + s
- elif l.startswith(' '):
- u = ' ' + s
- else:
- raise PatchError(_("bad hunk #%d old text line %d") %
- (self.number, x))
- self.a.append(u)
- self.hunk.append(u)
-
- l = lr.readline()
- if l.startswith('\ '):
- s = self.a[-1][:-1]
- self.a[-1] = s
- self.hunk[-1] = s
- l = lr.readline()
- m = contextdesc.match(l)
- if not m:
- raise PatchError(_("bad hunk #%d") % self.number)
- foo, self.startb, foo2, bend, foo3 = m.groups()
- self.startb = int(self.startb)
- if bend is None:
- bend = self.startb
- self.lenb = int(bend) - self.startb
- if self.startb:
- self.lenb += 1
- hunki = 1
- for x in xrange(self.lenb):
- l = lr.readline()
- if l.startswith('\ '):
- s = self.b[-1][:-1]
- self.b[-1] = s
- self.hunk[hunki-1] = s
- continue
- if not l:
- lr.push(l)
- break
- s = l[2:]
- if l.startswith('+ ') or l.startswith('! '):
- u = '+' + s
- elif l.startswith(' '):
- u = ' ' + s
- elif len(self.b) == 0:
- # this can happen when the hunk does not add any lines
- lr.push(l)
- break
- else:
- raise PatchError(_("bad hunk #%d old text line %d") %
- (self.number, x))
- self.b.append(s)
- while True:
- if hunki >= len(self.hunk):
- h = ""
- else:
- h = self.hunk[hunki]
- hunki += 1
- if h == u:
- break
- elif h.startswith('-'):
- continue
- else:
- self.hunk.insert(hunki-1, u)
- break
-
- if not self.a:
- # this happens when lines were only added to the hunk
- for x in self.hunk:
- if x.startswith('-') or x.startswith(' '):
- self.a.append(x)
- if not self.b:
- # this happens when lines were only deleted from the hunk
- for x in self.hunk:
- if x.startswith('+') or x.startswith(' '):
- self.b.append(x[1:])
- # @@ -start,len +start,len @@
- self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
- self.startb, self.lenb)
- self.hunk[0] = self.desc
-
- def reverse(self):
- self.create, self.remove = self.remove, self.create
- origlena = self.lena
- origstarta = self.starta
- self.lena = self.lenb
- self.starta = self.startb
- self.lenb = origlena
- self.startb = origstarta
- self.a = []
- self.b = []
- # self.hunk[0] is the @@ description
- for x in xrange(1, len(self.hunk)):
- o = self.hunk[x]
- if o.startswith('-'):
- n = '+' + o[1:]
- self.b.append(o[1:])
- elif o.startswith('+'):
- n = '-' + o[1:]
- self.a.append(n)
- else:
- n = o
- self.b.append(o[1:])
- self.a.append(o)
- self.hunk[x] = o
-
- def fix_newline(self):
- diffhelpers.fix_newline(self.hunk, self.a, self.b)
-
- def complete(self):
- return len(self.a) == self.lena and len(self.b) == self.lenb
-
- def createfile(self):
- return self.starta == 0 and self.lena == 0 and self.create
-
- def rmfile(self):
- return self.startb == 0 and self.lenb == 0 and self.remove
-
- def fuzzit(self, l, fuzz, toponly):
- # this removes context lines from the top and bottom of list 'l'. It
- # checks the hunk to make sure only context lines are removed, and then
- # returns a new shortened list of lines.
- fuzz = min(fuzz, len(l)-1)
- if fuzz:
- top = 0
- bot = 0
- hlen = len(self.hunk)
- for x in xrange(hlen-1):
- # the hunk starts with the @@ line, so use x+1
- if self.hunk[x+1][0] == ' ':
- top += 1
- else:
- break
- if not toponly:
- for x in xrange(hlen-1):
- if self.hunk[hlen-bot-1][0] == ' ':
- bot += 1
- else:
- break
-
- # top and bot now count context in the hunk
- # adjust them if either one is short
- context = max(top, bot, 3)
- if bot < context:
- bot = max(0, fuzz - (context - bot))
- else:
- bot = min(fuzz, bot)
- if top < context:
- top = max(0, fuzz - (context - top))
- else:
- top = min(fuzz, top)
-
- return l[top:len(l)-bot]
- return l
-
- def old(self, fuzz=0, toponly=False):
- return self.fuzzit(self.a, fuzz, toponly)
-
- def newctrl(self):
- res = []
- for x in self.hunk:
- c = x[0]
- if c == ' ' or c == '+':
- res.append(x)
- return res
-
- def new(self, fuzz=0, toponly=False):
- return self.fuzzit(self.b, fuzz, toponly)
-
-class githunk(object):
- """A git hunk"""
- def __init__(self, gitpatch):
- self.gitpatch = gitpatch
- self.text = None
- self.hunk = []
-
- def createfile(self):
- return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
-
- def rmfile(self):
- return self.gitpatch.op == 'DELETE'
-
- def complete(self):
- return self.text is not None
-
- def new(self):
- return [self.text]
-
-class binhunk(githunk):
- 'A binary patch file. Only understands literals so far.'
- def __init__(self, gitpatch):
- super(binhunk, self).__init__(gitpatch)
- self.hunk = ['GIT binary patch\n']
-
- def extract(self, lr):
- line = lr.readline()
- self.hunk.append(line)
- while line and not line.startswith('literal '):
- line = lr.readline()
- self.hunk.append(line)
- if not line:
- raise PatchError(_('could not extract binary patch'))
- size = int(line[8:].rstrip())
- dec = []
- line = lr.readline()
- self.hunk.append(line)
- while len(line) > 1:
- l = line[0]
- if l <= 'Z' and l >= 'A':
- l = ord(l) - ord('A') + 1
- else:
- l = ord(l) - ord('a') + 27
- dec.append(base85.b85decode(line[1:-1])[:l])
- line = lr.readline()
- self.hunk.append(line)
- text = zlib.decompress(''.join(dec))
- if len(text) != size:
- raise PatchError(_('binary patch is %d bytes, not %d') %
- len(text), size)
- self.text = text
-
-class symlinkhunk(githunk):
- """A git symlink hunk"""
- def __init__(self, gitpatch, hunk):
- super(symlinkhunk, self).__init__(gitpatch)
- self.hunk = hunk
-
- def complete(self):
- return True
-
- def fix_newline(self):
- return
-
-def parsefilename(str):
- # --- filename \t|space stuff
- s = str[4:].rstrip('\r\n')
- i = s.find('\t')
- if i < 0:
- i = s.find(' ')
- if i < 0:
- return s
- return s[:i]
-
-def selectfile(afile_orig, bfile_orig, hunk, strip, reverse):
- def pathstrip(path, count=1):
- pathlen = len(path)
- i = 0
- if count == 0:
- return '', path.rstrip()
- while count > 0:
- i = path.find('/', i)
- if i == -1:
- raise PatchError(_("unable to strip away %d dirs from %s") %
- (count, path))
- i += 1
- # consume '//' in the path
- while i < pathlen - 1 and path[i] == '/':
- i += 1
- count -= 1
- return path[:i].lstrip(), path[i:].rstrip()
-
- nulla = afile_orig == "/dev/null"
- nullb = bfile_orig == "/dev/null"
- abase, afile = pathstrip(afile_orig, strip)
- gooda = not nulla and util.lexists(afile)
- bbase, bfile = pathstrip(bfile_orig, strip)
- if afile == bfile:
- goodb = gooda
- else:
- goodb = not nullb and os.path.exists(bfile)
- createfunc = hunk.createfile
- if reverse:
- createfunc = hunk.rmfile
- missing = not goodb and not gooda and not createfunc()
-
- # some diff programs apparently produce create patches where the
- # afile is not /dev/null, but rather the same name as the bfile
- if missing and afile == bfile:
- # this isn't very pretty
- hunk.create = True
- if createfunc():
- missing = False
- else:
- hunk.create = False
-
- # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
- # diff is between a file and its backup. In this case, the original
- # file should be patched (see original mpatch code).
- isbackup = (abase == bbase and bfile.startswith(afile))
- fname = None
- if not missing:
- if gooda and goodb:
- fname = isbackup and afile or bfile
- elif gooda:
- fname = afile
-
- if not fname:
- if not nullb:
- fname = isbackup and afile or bfile
- elif not nulla:
- fname = afile
- else:
- raise PatchError(_("undefined source and destination files"))
-
- return fname, missing
-
-def scangitpatch(lr, firstline):
- """
- Git patches can emit:
- - rename a to b
- - change b
- - copy a to c
- - change c
-
- We cannot apply this sequence as-is, the renamed 'a' could not be
- found for it would have been renamed already. And we cannot copy
- from 'b' instead because 'b' would have been changed already. So
- we scan the git patch for copy and rename commands so we can
- perform the copies ahead of time.
- """
- pos = 0
- try:
- pos = lr.fp.tell()
- fp = lr.fp
- except IOError:
- fp = cStringIO.StringIO(lr.fp.read())
- gitlr = linereader(fp, lr.textmode)
- gitlr.push(firstline)
- (dopatch, gitpatches) = readgitpatch(gitlr)
- fp.seek(pos)
- return dopatch, gitpatches
-
-def iterhunks(ui, fp, sourcefile=None, textmode=False):
- """Read a patch and yield the following events:
- - ("file", afile, bfile, firsthunk): select a new target file.
- - ("hunk", hunk): a new hunk is ready to be applied, follows a
- "file" event.
- - ("git", gitchanges): current diff is in git format, gitchanges
- maps filenames to gitpatch records. Unique event.
-
- If textmode is True, input line-endings are normalized to LF.
- """
- changed = {}
- current_hunk = None
- afile = ""
- bfile = ""
- state = None
- hunknum = 0
- emitfile = False
- git = False
-
- # our states
- BFILE = 1
- context = None
- lr = linereader(fp, textmode)
- dopatch = True
- # gitworkdone is True if a git operation (copy, rename, ...) was
- # performed already for the current file. Useful when the file
- # section may have no hunk.
- gitworkdone = False
-
- while True:
- newfile = False
- x = lr.readline()
- if not x:
- break
- if current_hunk:
- if x.startswith('\ '):
- current_hunk.fix_newline()
- yield 'hunk', current_hunk
- current_hunk = None
- gitworkdone = False
- if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or
- ((context is not False) and x.startswith('***************')))):
- try:
- if context is None and x.startswith('***************'):
- context = True
- gpatch = changed.get(bfile)
- create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
- remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
- current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
- if remove:
- gpatch = changed.get(afile[2:])
- if gpatch and gpatch.mode[0]:
- current_hunk = symlinkhunk(gpatch, current_hunk)
- except PatchError, err:
- ui.debug(err)
- current_hunk = None
- continue
- hunknum += 1
- if emitfile:
- emitfile = False
- yield 'file', (afile, bfile, current_hunk)
- elif state == BFILE and x.startswith('GIT binary patch'):
- current_hunk = binhunk(changed[bfile])
- hunknum += 1
- if emitfile:
- emitfile = False
- yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
- current_hunk.extract(lr)
- elif x.startswith('diff --git'):
- # check for git diff, scanning the whole patch file if needed
- m = gitre.match(x)
- if m:
- afile, bfile = m.group(1, 2)
- if not git:
- git = True
- dopatch, gitpatches = scangitpatch(lr, x)
- yield 'git', gitpatches
- for gp in gitpatches:
- changed[gp.path] = gp
- # else error?
- # copy/rename + modify should modify target, not source
- gp = changed.get(bfile)
- if gp and gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD'):
- afile = bfile
- gitworkdone = True
- newfile = True
- elif x.startswith('---'):
- # check for a unified diff
- l2 = lr.readline()
- if not l2.startswith('+++'):
- lr.push(l2)
- continue
- newfile = True
- context = False
- afile = parsefilename(x)
- bfile = parsefilename(l2)
- elif x.startswith('***'):
- # check for a context diff
- l2 = lr.readline()
- if not l2.startswith('---'):
- lr.push(l2)
- continue
- l3 = lr.readline()
- lr.push(l3)
- if not l3.startswith("***************"):
- lr.push(l2)
- continue
- newfile = True
- context = True
- afile = parsefilename(x)
- bfile = parsefilename(l2)
-
- if newfile:
- emitfile = True
- state = BFILE
- hunknum = 0
- if current_hunk:
- if current_hunk.complete():
- yield 'hunk', current_hunk
- else:
- raise PatchError(_("malformed patch %s %s") % (afile,
- current_hunk.desc))
-
- if hunknum == 0 and dopatch and not gitworkdone:
- raise NoHunks
-
-def applydiff(ui, fp, changed, strip=1, sourcefile=None, reverse=False,
- eol=None):
- """
- Reads a patch from fp and tries to apply it.
-
- The dict 'changed' is filled in with all of the filenames changed
- by the patch. Returns 0 for a clean patch, -1 if any rejects were
- found and 1 if there was any fuzz.
-
- If 'eol' is None, the patch content and patched file are read in
- binary mode. Otherwise, line endings are ignored when patching then
- normalized to 'eol' (usually '\n' or \r\n').
- """
- rejects = 0
- err = 0
- current_file = None
- gitpatches = None
- opener = util.opener(os.getcwd())
- textmode = eol is not None
-
- def closefile():
- if not current_file:
- return 0
- current_file.close()
- return len(current_file.rej)
-
- for state, values in iterhunks(ui, fp, sourcefile, textmode):
- if state == 'hunk':
- if not current_file:
- continue
- current_hunk = values
- ret = current_file.apply(current_hunk, reverse)
- if ret >= 0:
- changed.setdefault(current_file.fname, None)
- if ret > 0:
- err = 1
- elif state == 'file':
- rejects += closefile()
- afile, bfile, first_hunk = values
- try:
- if sourcefile:
- current_file = patchfile(ui, sourcefile, opener, eol=eol)
- else:
- current_file, missing = selectfile(afile, bfile, first_hunk,
- strip, reverse)
- current_file = patchfile(ui, current_file, opener, missing, eol)
- except PatchError, err:
- ui.warn(str(err) + '\n')
- current_file, current_hunk = None, None
- rejects += 1
- continue
- elif state == 'git':
- gitpatches = values
- cwd = os.getcwd()
- for gp in gitpatches:
- if gp.op in ('COPY', 'RENAME'):
- copyfile(gp.oldpath, gp.path, cwd)
- changed[gp.path] = gp
- else:
- raise util.Abort(_('unsupported parser state: %s') % state)
-
- rejects += closefile()
-
- if rejects:
- return -1
- return err
-
-def diffopts(ui, opts={}, untrusted=False):
- def get(key, name=None, getter=ui.configbool):
- return (opts.get(key) or
- getter('diff', name or key, None, untrusted=untrusted))
- return mdiff.diffopts(
- text=opts.get('text'),
- git=get('git'),
- nodates=get('nodates'),
- showfunc=get('show_function', 'showfunc'),
- ignorews=get('ignore_all_space', 'ignorews'),
- ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
- ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
- context=get('unified', getter=ui.config))
-
-def updatedir(ui, repo, patches, similarity=0):
- '''Update dirstate after patch application according to metadata'''
- if not patches:
- return
- copies = []
- removes = set()
- cfiles = patches.keys()
- cwd = repo.getcwd()
- if cwd:
- cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
- for f in patches:
- gp = patches[f]
- if not gp:
- continue
- if gp.op == 'RENAME':
- copies.append((gp.oldpath, gp.path))
- removes.add(gp.oldpath)
- elif gp.op == 'COPY':
- copies.append((gp.oldpath, gp.path))
- elif gp.op == 'DELETE':
- removes.add(gp.path)
- for src, dst in copies:
- repo.copy(src, dst)
- if (not similarity) and removes:
- repo.remove(sorted(removes), True)
- for f in patches:
- gp = patches[f]
- if gp and gp.mode:
- islink, isexec = gp.mode
- dst = repo.wjoin(gp.path)
- # patch won't create empty files
- if gp.op == 'ADD' and not os.path.exists(dst):
- flags = (isexec and 'x' or '') + (islink and 'l' or '')
- repo.wwrite(gp.path, '', flags)
- elif gp.op != 'DELETE':
- util.set_flags(dst, islink, isexec)
- cmdutil.addremove(repo, cfiles, similarity=similarity)
- files = patches.keys()
- files.extend([r for r in removes if r not in files])
- return sorted(files)
-
-def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
- """use <patcher> to apply <patchname> to the working directory.
- returns whether patch was applied with fuzz factor."""
-
- fuzz = False
- if cwd:
- args.append('-d %s' % util.shellquote(cwd))
- fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
- util.shellquote(patchname)))
-
- for line in fp:
- line = line.rstrip()
- ui.note(line + '\n')
- if line.startswith('patching file '):
- pf = util.parse_patch_output(line)
- printed_file = False
- files.setdefault(pf, None)
- elif line.find('with fuzz') >= 0:
- fuzz = True
- if not printed_file:
- ui.warn(pf + '\n')
- printed_file = True
- ui.warn(line + '\n')
- elif line.find('saving rejects to file') >= 0:
- ui.warn(line + '\n')
- elif line.find('FAILED') >= 0:
- if not printed_file:
- ui.warn(pf + '\n')
- printed_file = True
- ui.warn(line + '\n')
- code = fp.close()
- if code:
- raise PatchError(_("patch command failed: %s") %
- util.explain_exit(code)[0])
- return fuzz
-
-def internalpatch(patchobj, ui, strip, cwd, files={}, eolmode='strict'):
- """use builtin patch to apply <patchobj> to the working directory.
- returns whether patch was applied with fuzz factor."""
-
- if eolmode is None:
- eolmode = ui.config('patch', 'eol', 'strict')
- try:
- eol = {'strict': None, 'crlf': '\r\n', 'lf': '\n'}[eolmode.lower()]
- except KeyError:
- raise util.Abort(_('Unsupported line endings type: %s') % eolmode)
-
- try:
- fp = open(patchobj, 'rb')
- except TypeError:
- fp = patchobj
- if cwd:
- curdir = os.getcwd()
- os.chdir(cwd)
- try:
- ret = applydiff(ui, fp, files, strip=strip, eol=eol)
- finally:
- if cwd:
- os.chdir(curdir)
- if ret < 0:
- raise PatchError
- return ret > 0
-
-def patch(patchname, ui, strip=1, cwd=None, files={}, eolmode='strict'):
- """Apply <patchname> to the working directory.
-
- 'eolmode' specifies how end of lines should be handled. It can be:
- - 'strict': inputs are read in binary mode, EOLs are preserved
- - 'crlf': EOLs are ignored when patching and reset to CRLF
- - 'lf': EOLs are ignored when patching and reset to LF
- - None: get it from user settings, default to 'strict'
- 'eolmode' is ignored when using an external patcher program.
-
- Returns whether patch was applied with fuzz factor.
- """
- patcher = ui.config('ui', 'patch')
- args = []
- try:
- if patcher:
- return externalpatch(patcher, args, patchname, ui, strip, cwd,
- files)
- else:
- try:
- return internalpatch(patchname, ui, strip, cwd, files, eolmode)
- except NoHunks:
- patcher = util.find_exe('gpatch') or util.find_exe('patch') or 'patch'
- ui.debug(_('no valid hunks found; trying with %r instead\n') %
- patcher)
- if util.needbinarypatch():
- args.append('--binary')
- return externalpatch(patcher, args, patchname, ui, strip, cwd,
- files)
- except PatchError, err:
- s = str(err)
- if s:
- raise util.Abort(s)
- else:
- raise util.Abort(_('patch failed to apply'))
-
-def b85diff(to, tn):
- '''print base85-encoded binary diff'''
- def gitindex(text):
- if not text:
- return '0' * 40
- l = len(text)
- s = util.sha1('blob %d\0' % l)
- s.update(text)
- return s.hexdigest()
-
- def fmtline(line):
- l = len(line)
- if l <= 26:
- l = chr(ord('A') + l - 1)
- else:
- l = chr(l - 26 + ord('a') - 1)
- return '%c%s\n' % (l, base85.b85encode(line, True))
-
- def chunk(text, csize=52):
- l = len(text)
- i = 0
- while i < l:
- yield text[i:i+csize]
- i += csize
-
- tohash = gitindex(to)
- tnhash = gitindex(tn)
- if tohash == tnhash:
- return ""
-
- # TODO: deltas
- ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
- (tohash, tnhash, len(tn))]
- for l in chunk(zlib.compress(tn)):
- ret.append(fmtline(l))
- ret.append('\n')
- return ''.join(ret)
-
-def _addmodehdr(header, omode, nmode):
- if omode != nmode:
- header.append('old mode %s\n' % omode)
- header.append('new mode %s\n' % nmode)
-
-def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None):
- '''yields diff of changes to files between two nodes, or node and
- working directory.
-
- if node1 is None, use first dirstate parent instead.
- if node2 is None, compare node1 with working directory.'''
-
- if opts is None:
- opts = mdiff.defaultopts
-
- if not node1:
- node1 = repo.dirstate.parents()[0]
-
- def lrugetfilectx():
- cache = {}
- order = []
- def getfilectx(f, ctx):
- fctx = ctx.filectx(f, filelog=cache.get(f))
- if f not in cache:
- if len(cache) > 20:
- del cache[order.pop(0)]
- cache[f] = fctx._filelog
- else:
- order.remove(f)
- order.append(f)
- return fctx
- return getfilectx
- getfilectx = lrugetfilectx()
-
- ctx1 = repo[node1]
- ctx2 = repo[node2]
-
- if not changes:
- changes = repo.status(ctx1, ctx2, match=match)
- modified, added, removed = changes[:3]
-
- if not modified and not added and not removed:
- return
-
- date1 = util.datestr(ctx1.date())
- man1 = ctx1.manifest()
-
- if repo.ui.quiet:
- r = None
- else:
- hexfunc = repo.ui.debugflag and hex or short
- r = [hexfunc(node) for node in [node1, node2] if node]
-
- if opts.git:
- copy, diverge = copies.copies(repo, ctx1, ctx2, repo[nullid])
- copy = copy.copy()
- for k, v in copy.items():
- copy[v] = k
-
- gone = set()
- gitmode = {'l': '120000', 'x': '100755', '': '100644'}
-
- for f in sorted(modified + added + removed):
- to = None
- tn = None
- dodiff = True
- header = []
- if f in man1:
- to = getfilectx(f, ctx1).data()
- if f not in removed:
- tn = getfilectx(f, ctx2).data()
- a, b = f, f
- if opts.git:
- if f in added:
- mode = gitmode[ctx2.flags(f)]
- if f in copy:
- a = copy[f]
- omode = gitmode[man1.flags(a)]
- _addmodehdr(header, omode, mode)
- if a in removed and a not in gone:
- op = 'rename'
- gone.add(a)
- else:
- op = 'copy'
- header.append('%s from %s\n' % (op, a))
- header.append('%s to %s\n' % (op, f))
- to = getfilectx(a, ctx1).data()
- else:
- header.append('new file mode %s\n' % mode)
- if util.binary(tn):
- dodiff = 'binary'
- elif f in removed:
- # have we already reported a copy above?
- if f in copy and copy[f] in added and copy[copy[f]] == f:
- dodiff = False
- else:
- header.append('deleted file mode %s\n' %
- gitmode[man1.flags(f)])
- else:
- omode = gitmode[man1.flags(f)]
- nmode = gitmode[ctx2.flags(f)]
- _addmodehdr(header, omode, nmode)
- if util.binary(to) or util.binary(tn):
- dodiff = 'binary'
- r = None
- header.insert(0, mdiff.diffline(r, a, b, opts))
- if dodiff:
- if dodiff == 'binary':
- text = b85diff(to, tn)
- else:
- text = mdiff.unidiff(to, date1,
- # ctx2 date may be dynamic
- tn, util.datestr(ctx2.date()),
- a, b, r, opts=opts)
- if header and (text or len(header) > 1):
- yield ''.join(header)
- if text:
- yield text
-
-def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
- opts=None):
- '''export changesets as hg patches.'''
-
- total = len(revs)
- revwidth = max([len(str(rev)) for rev in revs])
-
- def single(rev, seqno, fp):
- ctx = repo[rev]
- node = ctx.node()
- parents = [p.node() for p in ctx.parents() if p]
- branch = ctx.branch()
- if switch_parent:
- parents.reverse()
- prev = (parents and parents[0]) or nullid
-
- if not fp:
- fp = cmdutil.make_file(repo, template, node, total=total,
- seqno=seqno, revwidth=revwidth,
- mode='ab')
- if fp != sys.stdout and hasattr(fp, 'name'):
- repo.ui.note("%s\n" % fp.name)
-
- fp.write("# HG changeset patch\n")
- fp.write("# User %s\n" % ctx.user())
- fp.write("# Date %d %d\n" % ctx.date())
- if branch and (branch != 'default'):
- fp.write("# Branch %s\n" % branch)
- fp.write("# Node ID %s\n" % hex(node))
- fp.write("# Parent %s\n" % hex(prev))
- if len(parents) > 1:
- fp.write("# Parent %s\n" % hex(parents[1]))
- fp.write(ctx.description().rstrip())
- fp.write("\n\n")
-
- for chunk in diff(repo, prev, node, opts=opts):
- fp.write(chunk)
-
- for seqno, rev in enumerate(revs):
- single(rev, seqno+1, fp)
-
-def diffstatdata(lines):
- filename, adds, removes = None, 0, 0
- for line in lines:
- if line.startswith('diff'):
- if filename:
- yield (filename, adds, removes)
- # set numbers to 0 anyway when starting new file
- adds, removes = 0, 0
- if line.startswith('diff --git'):
- filename = gitre.search(line).group(1)
- else:
- # format: "diff -r ... -r ... filename"
- filename = line.split(None, 5)[-1]
- elif line.startswith('+') and not line.startswith('+++'):
- adds += 1
- elif line.startswith('-') and not line.startswith('---'):
- removes += 1
- if filename:
- yield (filename, adds, removes)
-
-def diffstat(lines, width=80):
- output = []
- stats = list(diffstatdata(lines))
-
- maxtotal, maxname = 0, 0
- totaladds, totalremoves = 0, 0
- for filename, adds, removes in stats:
- totaladds += adds
- totalremoves += removes
- maxname = max(maxname, len(filename))
- maxtotal = max(maxtotal, adds+removes)
-
- countwidth = len(str(maxtotal))
- graphwidth = width - countwidth - maxname - 6
- if graphwidth < 10:
- graphwidth = 10
-
- def scale(i):
- if maxtotal <= graphwidth:
- return i
- # If diffstat runs out of room it doesn't print anything,
- # which isn't very useful, so always print at least one + or -
- # if there were at least some changes.
- return max(i * graphwidth // maxtotal, int(bool(i)))
-
- for filename, adds, removes in stats:
- pluses = '+' * scale(adds)
- minuses = '-' * scale(removes)
- output.append(' %-*s | %*.d %s%s\n' % (maxname, filename, countwidth,
- adds+removes, pluses, minuses))
-
- if stats:
- output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
- % (len(stats), totaladds, totalremoves))
-
- return ''.join(output)
diff --git a/sys/lib/python/mercurial/posix.py b/sys/lib/python/mercurial/posix.py
deleted file mode 100644
index 09fd7cdc5..000000000
--- a/sys/lib/python/mercurial/posix.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# posix.py - Posix utility function implementations for Mercurial
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import osutil
-import os, sys, errno, stat, getpass, pwd, grp, fcntl
-
-posixfile = open
-nulldev = '/dev/null'
-normpath = os.path.normpath
-samestat = os.path.samestat
-expandglobs = False
-
-umask = os.umask(0)
-os.umask(umask)
-
-def openhardlinks():
- '''return true if it is safe to hold open file handles to hardlinks'''
- return True
-
-def rcfiles(path):
- rcs = [os.path.join(path, 'hgrc')]
- rcdir = os.path.join(path, 'hgrc.d')
- try:
- rcs.extend([os.path.join(rcdir, f)
- for f, kind in osutil.listdir(rcdir)
- if f.endswith(".rc")])
- except OSError:
- pass
- return rcs
-
-def system_rcpath():
- path = []
- # old mod_python does not set sys.argv
- if len(getattr(sys, 'argv', [])) > 0:
- path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
- '/../etc/mercurial'))
- path.extend(rcfiles('/etc/mercurial'))
- return path
-
-def user_rcpath():
- return [os.path.expanduser('~/.hgrc')]
-
-def parse_patch_output(output_line):
- """parses the output produced by patch and returns the filename"""
- pf = output_line[14:]
- if os.sys.platform == 'OpenVMS':
- if pf[0] == '`':
- pf = pf[1:-1] # Remove the quotes
- else:
- if pf.startswith("'") and pf.endswith("'") and " " in pf:
- pf = pf[1:-1] # Remove the quotes
- return pf
-
-def sshargs(sshcmd, host, user, port):
- '''Build argument list for ssh'''
- args = user and ("%s@%s" % (user, host)) or host
- return port and ("%s -p %s" % (args, port)) or args
-
-def is_exec(f):
- """check whether a file is executable"""
- return (os.lstat(f).st_mode & 0100 != 0)
-
-def set_flags(f, l, x):
- s = os.lstat(f).st_mode
- if l:
- if not stat.S_ISLNK(s):
- # switch file to link
- data = open(f).read()
- os.unlink(f)
- try:
- os.symlink(data, f)
- except:
- # failed to make a link, rewrite file
- open(f, "w").write(data)
- # no chmod needed at this point
- return
- if stat.S_ISLNK(s):
- # switch link to file
- data = os.readlink(f)
- os.unlink(f)
- open(f, "w").write(data)
- s = 0666 & ~umask # avoid restatting for chmod
-
- sx = s & 0100
- if x and not sx:
- # Turn on +x for every +r bit when making a file executable
- # and obey umask.
- os.chmod(f, s | (s & 0444) >> 2 & ~umask)
- elif not x and sx:
- # Turn off all +x bits
- os.chmod(f, s & 0666)
-
-def set_binary(fd):
- pass
-
-def pconvert(path):
- return path
-
-def localpath(path):
- return path
-
-if sys.platform == 'darwin':
- def realpath(path):
- '''
- Returns the true, canonical file system path equivalent to the given
- path.
-
- Equivalent means, in this case, resulting in the same, unique
- file system link to the path. Every file system entry, whether a file,
- directory, hard link or symbolic link or special, will have a single
- path preferred by the system, but may allow multiple, differing path
- lookups to point to it.
-
- Most regular UNIX file systems only allow a file system entry to be
- looked up by its distinct path. Obviously, this does not apply to case
- insensitive file systems, whether case preserving or not. The most
- complex issue to deal with is file systems transparently reencoding the
- path, such as the non-standard Unicode normalisation required for HFS+
- and HFSX.
- '''
- # Constants copied from /usr/include/sys/fcntl.h
- F_GETPATH = 50
- O_SYMLINK = 0x200000
-
- try:
- fd = os.open(path, O_SYMLINK)
- except OSError, err:
- if err.errno is errno.ENOENT:
- return path
- raise
-
- try:
- return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
- finally:
- os.close(fd)
-else:
- # Fallback to the likely inadequate Python builtin function.
- realpath = os.path.realpath
-
-def shellquote(s):
- if os.sys.platform == 'OpenVMS':
- return '"%s"' % s
- else:
- return "'%s'" % s.replace("'", "'\\''")
-
-def quotecommand(cmd):
- return cmd
-
-def popen(command, mode='r'):
- return os.popen(command, mode)
-
-def testpid(pid):
- '''return False if pid dead, True if running or not sure'''
- if os.sys.platform == 'OpenVMS':
- return True
- try:
- os.kill(pid, 0)
- return True
- except OSError, inst:
- return inst.errno != errno.ESRCH
-
-def explain_exit(code):
- """return a 2-tuple (desc, code) describing a process's status"""
- if os.WIFEXITED(code):
- val = os.WEXITSTATUS(code)
- return _("exited with status %d") % val, val
- elif os.WIFSIGNALED(code):
- val = os.WTERMSIG(code)
- return _("killed by signal %d") % val, val
- elif os.WIFSTOPPED(code):
- val = os.WSTOPSIG(code)
- return _("stopped by signal %d") % val, val
- raise ValueError(_("invalid exit code"))
-
-def isowner(st):
- """Return True if the stat object st is from the current user."""
- return st.st_uid == os.getuid()
-
-def find_exe(command):
- '''Find executable for command searching like which does.
- If command is a basename then PATH is searched for command.
- PATH isn't searched if command is an absolute or relative path.
- If command isn't found None is returned.'''
- if sys.platform == 'OpenVMS':
- return command
-
- def findexisting(executable):
- 'Will return executable if existing file'
- if os.path.exists(executable):
- return executable
- return None
-
- if os.sep in command:
- return findexisting(command)
-
- for path in os.environ.get('PATH', '').split(os.pathsep):
- executable = findexisting(os.path.join(path, command))
- if executable is not None:
- return executable
- return None
-
-def set_signal_handler():
- pass
-
-def statfiles(files):
- 'Stat each file in files and yield stat or None if file does not exist.'
- lstat = os.lstat
- for nf in files:
- try:
- st = lstat(nf)
- except OSError, err:
- if err.errno not in (errno.ENOENT, errno.ENOTDIR):
- raise
- st = None
- yield st
-
-def getuser():
- '''return name of current user'''
- return getpass.getuser()
-
-def expand_glob(pats):
- '''On Windows, expand the implicit globs in a list of patterns'''
- return list(pats)
-
-def username(uid=None):
- """Return the name of the user with the given uid.
-
- If uid is None, return the name of the current user."""
-
- if uid is None:
- uid = os.getuid()
- try:
- return pwd.getpwuid(uid)[0]
- except KeyError:
- return str(uid)
-
-def groupname(gid=None):
- """Return the name of the group with the given gid.
-
- If gid is None, return the name of the current group."""
-
- if gid is None:
- gid = os.getgid()
- try:
- return grp.getgrgid(gid)[0]
- except KeyError:
- return str(gid)
diff --git a/sys/lib/python/mercurial/pure/base85.py b/sys/lib/python/mercurial/pure/base85.py
deleted file mode 100644
index 88cb1ce8c..000000000
--- a/sys/lib/python/mercurial/pure/base85.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# base85.py: pure python base85 codec
-#
-# Copyright (C) 2009 Brendan Cully <brendan@kublai.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import struct
-
-_b85chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
- "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"
-_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
-_b85dec = {}
-
-def _mkb85dec():
- for i, c in enumerate(_b85chars):
- _b85dec[c] = i
-
-def b85encode(text, pad=False):
- """encode text in base85 format"""
- l = len(text)
- r = l % 4
- if r:
- text += '\0' * (4 - r)
- longs = len(text) >> 2
- words = struct.unpack('>%dL' % (longs), text)
-
- out = ''.join(_b85chars[(word // 52200625) % 85] +
- _b85chars2[(word // 7225) % 7225] +
- _b85chars2[word % 7225]
- for word in words)
-
- if pad:
- return out
-
- # Trim padding
- olen = l % 4
- if olen:
- olen += 1
- olen += l // 4 * 5
- return out[:olen]
-
-def b85decode(text):
- """decode base85-encoded text"""
- if not _b85dec:
- _mkb85dec()
-
- l = len(text)
- out = []
- for i in range(0, len(text), 5):
- chunk = text[i:i+5]
- acc = 0
- for j, c in enumerate(chunk):
- try:
- acc = acc * 85 + _b85dec[c]
- except KeyError:
- raise TypeError('Bad base85 character at byte %d' % (i + j))
- if acc > 4294967295:
- raise OverflowError('Base85 overflow in hunk starting at byte %d' % i)
- out.append(acc)
-
- # Pad final chunk if necessary
- cl = l % 5
- if cl:
- acc *= 85 ** (5 - cl)
- if cl > 1:
- acc += 0xffffff >> (cl - 2) * 8
- out[-1] = acc
-
- out = struct.pack('>%dL' % (len(out)), *out)
- if cl:
- out = out[:-(5 - cl)]
-
- return out
diff --git a/sys/lib/python/mercurial/pure/bdiff.py b/sys/lib/python/mercurial/pure/bdiff.py
deleted file mode 100644
index 70fa54478..000000000
--- a/sys/lib/python/mercurial/pure/bdiff.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# bdiff.py - Python implementation of bdiff.c
-#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import struct, difflib
-
-def splitnewlines(text):
- '''like str.splitlines, but only split on newlines.'''
- lines = [l + '\n' for l in text.split('\n')]
- if lines:
- if lines[-1] == '\n':
- lines.pop()
- else:
- lines[-1] = lines[-1][:-1]
- return lines
-
-def _normalizeblocks(a, b, blocks):
- prev = None
- for curr in blocks:
- if prev is None:
- prev = curr
- continue
- shift = 0
-
- a1, b1, l1 = prev
- a1end = a1 + l1
- b1end = b1 + l1
-
- a2, b2, l2 = curr
- a2end = a2 + l2
- b2end = b2 + l2
- if a1end == a2:
- while a1end+shift < a2end and a[a1end+shift] == b[b1end+shift]:
- shift += 1
- elif b1end == b2:
- while b1end+shift < b2end and a[a1end+shift] == b[b1end+shift]:
- shift += 1
- yield a1, b1, l1+shift
- prev = a2+shift, b2+shift, l2-shift
- yield prev
-
-def bdiff(a, b):
- a = str(a).splitlines(True)
- b = str(b).splitlines(True)
-
- if not a:
- s = "".join(b)
- return s and (struct.pack(">lll", 0, 0, len(s)) + s)
-
- bin = []
- p = [0]
- for i in a: p.append(p[-1] + len(i))
-
- d = difflib.SequenceMatcher(None, a, b).get_matching_blocks()
- d = _normalizeblocks(a, b, d)
- la = 0
- lb = 0
- for am, bm, size in d:
- s = "".join(b[lb:bm])
- if am > la or s:
- bin.append(struct.pack(">lll", p[la], p[am], len(s)) + s)
- la = am + size
- lb = bm + size
-
- return "".join(bin)
-
-def blocks(a, b):
- an = splitnewlines(a)
- bn = splitnewlines(b)
- d = difflib.SequenceMatcher(None, an, bn).get_matching_blocks()
- d = _normalizeblocks(an, bn, d)
- return [(i, i + n, j, j + n) for (i, j, n) in d]
-
diff --git a/sys/lib/python/mercurial/pure/diffhelpers.py b/sys/lib/python/mercurial/pure/diffhelpers.py
deleted file mode 100644
index e7f2e915e..000000000
--- a/sys/lib/python/mercurial/pure/diffhelpers.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# diffhelpers.py - pure Python implementation of diffhelpers.c
-#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-def addlines(fp, hunk, lena, lenb, a, b):
- while True:
- todoa = lena - len(a)
- todob = lenb - len(b)
- num = max(todoa, todob)
- if num == 0:
- break
- for i in xrange(num):
- s = fp.readline()
- c = s[0]
- if s == "\\ No newline at end of file\n":
- fix_newline(hunk, a, b)
- continue
- if c == "\n":
- # Some patches may be missing the control char
- # on empty lines. Supply a leading space.
- s = " \n"
- hunk.append(s)
- if c == "+":
- b.append(s[1:])
- elif c == "-":
- a.append(s)
- else:
- b.append(s[1:])
- a.append(s)
- return 0
-
-def fix_newline(hunk, a, b):
- l = hunk[-1]
- c = l[0]
- hline = l[:-1]
-
- if c == " " or c == "+":
- b[-1] = l[1:-1]
- if c == " " or c == "-":
- a[-1] = hline
- hunk[-1] = hline
- return 0
-
-
-def testhunk(a, b, bstart):
- alen = len(a)
- blen = len(b)
- if alen > blen - bstart:
- return -1
- for i in xrange(alen):
- if a[i][1:] != b[i + bstart]:
- return -1
- return 0
diff --git a/sys/lib/python/mercurial/pure/mpatch.py b/sys/lib/python/mercurial/pure/mpatch.py
deleted file mode 100644
index dd2d84098..000000000
--- a/sys/lib/python/mercurial/pure/mpatch.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# mpatch.py - Python implementation of mpatch.c
-#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import struct
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-# This attempts to apply a series of patches in time proportional to
-# the total size of the patches, rather than patches * len(text). This
-# means rather than shuffling strings around, we shuffle around
-# pointers to fragments with fragment lists.
-#
-# When the fragment lists get too long, we collapse them. To do this
-# efficiently, we do all our operations inside a buffer created by
-# mmap and simply use memmove. This avoids creating a bunch of large
-# temporary string buffers.
-
-def patches(a, bins):
- if not bins: return a
-
- plens = [len(x) for x in bins]
- pl = sum(plens)
- bl = len(a) + pl
- tl = bl + bl + pl # enough for the patches and two working texts
- b1, b2 = 0, bl
-
- if not tl: return a
-
- m = StringIO()
- def move(dest, src, count):
- """move count bytes from src to dest
-
- The file pointer is left at the end of dest.
- """
- m.seek(src)
- buf = m.read(count)
- m.seek(dest)
- m.write(buf)
-
- # load our original text
- m.write(a)
- frags = [(len(a), b1)]
-
- # copy all the patches into our segment so we can memmove from them
- pos = b2 + bl
- m.seek(pos)
- for p in bins: m.write(p)
-
- def pull(dst, src, l): # pull l bytes from src
- while l:
- f = src.pop(0)
- if f[0] > l: # do we need to split?
- src.insert(0, (f[0] - l, f[1] + l))
- dst.append((l, f[1]))
- return
- dst.append(f)
- l -= f[0]
-
- def collect(buf, list):
- start = buf
- for l, p in list:
- move(buf, p, l)
- buf += l
- return (buf - start, start)
-
- for plen in plens:
- # if our list gets too long, execute it
- if len(frags) > 128:
- b2, b1 = b1, b2
- frags = [collect(b1, frags)]
-
- new = []
- end = pos + plen
- last = 0
- while pos < end:
- m.seek(pos)
- p1, p2, l = struct.unpack(">lll", m.read(12))
- pull(new, frags, p1 - last) # what didn't change
- pull([], frags, p2 - p1) # what got deleted
- new.append((l, pos + 12)) # what got added
- pos += l + 12
- last = p2
- frags = new + frags # what was left at the end
-
- t = collect(b2, frags)
-
- m.seek(t[1])
- return m.read(t[0])
-
-def patchedsize(orig, delta):
- outlen, last, bin = 0, 0, 0
- binend = len(delta)
- data = 12
-
- while data <= binend:
- decode = delta[bin:bin + 12]
- start, end, length = struct.unpack(">lll", decode)
- if start > end:
- break
- bin = data + length
- data = bin + 12
- outlen += start - last
- last = end
- outlen += length
-
- if bin != binend:
- raise Exception("patch cannot be decoded")
-
- outlen += orig - last
- return outlen
diff --git a/sys/lib/python/mercurial/pure/osutil.py b/sys/lib/python/mercurial/pure/osutil.py
deleted file mode 100644
index 86e8291f6..000000000
--- a/sys/lib/python/mercurial/pure/osutil.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# osutil.py - pure Python version of osutil.c
-#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import os
-import stat as _stat
-
-posixfile = open
-
-def _mode_to_kind(mode):
- if _stat.S_ISREG(mode): return _stat.S_IFREG
- if _stat.S_ISDIR(mode): return _stat.S_IFDIR
- if _stat.S_ISLNK(mode): return _stat.S_IFLNK
- if _stat.S_ISBLK(mode): return _stat.S_IFBLK
- if _stat.S_ISCHR(mode): return _stat.S_IFCHR
- if _stat.S_ISFIFO(mode): return _stat.S_IFIFO
- if _stat.S_ISSOCK(mode): return _stat.S_IFSOCK
- return mode
-
-def listdir(path, stat=False, skip=None):
- '''listdir(path, stat=False) -> list_of_tuples
-
- Return a sorted list containing information about the entries
- in the directory.
-
- If stat is True, each element is a 3-tuple:
-
- (name, type, stat object)
-
- Otherwise, each element is a 2-tuple:
-
- (name, type)
- '''
- result = []
- prefix = path
- if not prefix.endswith(os.sep):
- prefix += os.sep
- names = os.listdir(path)
- names.sort()
- for fn in names:
- st = os.lstat(prefix + fn)
- if fn == skip and _stat.S_ISDIR(st.st_mode):
- return []
- if stat:
- result.append((fn, _mode_to_kind(st.st_mode), st))
- else:
- result.append((fn, _mode_to_kind(st.st_mode)))
- return result
-
diff --git a/sys/lib/python/mercurial/pure/parsers.py b/sys/lib/python/mercurial/pure/parsers.py
deleted file mode 100644
index feebb6a18..000000000
--- a/sys/lib/python/mercurial/pure/parsers.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# parsers.py - Python implementation of parsers.c
-#
-# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from mercurial.node import bin, nullid, nullrev
-from mercurial import util
-import struct, zlib
-
-_pack = struct.pack
-_unpack = struct.unpack
-_compress = zlib.compress
-_decompress = zlib.decompress
-_sha = util.sha1
-
-def parse_manifest(mfdict, fdict, lines):
- for l in lines.splitlines():
- f, n = l.split('\0')
- if len(n) > 40:
- fdict[f] = n[40:]
- mfdict[f] = bin(n[:40])
- else:
- mfdict[f] = bin(n)
-
-def parse_index(data, inline):
- def gettype(q):
- return int(q & 0xFFFF)
-
- def offset_type(offset, type):
- return long(long(offset) << 16 | type)
-
- indexformatng = ">Qiiiiii20s12x"
-
- s = struct.calcsize(indexformatng)
- index = []
- cache = None
- nodemap = {nullid: nullrev}
- n = off = 0
- # if we're not using lazymap, always read the whole index
- l = len(data) - s
- append = index.append
- if inline:
- cache = (0, data)
- while off <= l:
- e = _unpack(indexformatng, data[off:off + s])
- nodemap[e[7]] = n
- append(e)
- n += 1
- if e[1] < 0:
- break
- off += e[1] + s
- else:
- while off <= l:
- e = _unpack(indexformatng, data[off:off + s])
- nodemap[e[7]] = n
- append(e)
- n += 1
- off += s
-
- e = list(index[0])
- type = gettype(e[0])
- e[0] = offset_type(0, type)
- index[0] = tuple(e)
-
- # add the magic null revision at -1
- index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
- return index, nodemap, cache
-
-def parse_dirstate(dmap, copymap, st):
- parents = [st[:20], st[20: 40]]
- # deref fields so they will be local in loop
- format = ">cllll"
- e_size = struct.calcsize(format)
- pos1 = 40
- l = len(st)
-
- # the inner loop
- while pos1 < l:
- pos2 = pos1 + e_size
- e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster
- pos1 = pos2 + e[4]
- f = st[pos2:pos1]
- if '\0' in f:
- f, c = f.split('\0')
- copymap[f] = c
- dmap[f] = e[:4]
- return parents
diff --git a/sys/lib/python/mercurial/repair.py b/sys/lib/python/mercurial/repair.py
deleted file mode 100644
index f4d8d2641..000000000
--- a/sys/lib/python/mercurial/repair.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# repair.py - functions for repository repair for mercurial
-#
-# Copyright 2005, 2006 Chris Mason <mason@suse.com>
-# Copyright 2007 Matt Mackall
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import changegroup
-from node import nullrev, short
-from i18n import _
-import os
-
-def _bundle(repo, bases, heads, node, suffix, extranodes=None):
- """create a bundle with the specified revisions as a backup"""
- cg = repo.changegroupsubset(bases, heads, 'strip', extranodes)
- backupdir = repo.join("strip-backup")
- if not os.path.isdir(backupdir):
- os.mkdir(backupdir)
- name = os.path.join(backupdir, "%s-%s" % (short(node), suffix))
- repo.ui.warn(_("saving bundle to %s\n") % name)
- return changegroup.writebundle(cg, name, "HG10BZ")
-
-def _collectfiles(repo, striprev):
- """find out the filelogs affected by the strip"""
- files = set()
-
- for x in xrange(striprev, len(repo)):
- files.update(repo[x].files())
-
- return sorted(files)
-
-def _collectextranodes(repo, files, link):
- """return the nodes that have to be saved before the strip"""
- def collectone(revlog):
- extra = []
- startrev = count = len(revlog)
- # find the truncation point of the revlog
- for i in xrange(count):
- lrev = revlog.linkrev(i)
- if lrev >= link:
- startrev = i + 1
- break
-
- # see if any revision after that point has a linkrev less than link
- # (we have to manually save these guys)
- for i in xrange(startrev, count):
- node = revlog.node(i)
- lrev = revlog.linkrev(i)
- if lrev < link:
- extra.append((node, cl.node(lrev)))
-
- return extra
-
- extranodes = {}
- cl = repo.changelog
- extra = collectone(repo.manifest)
- if extra:
- extranodes[1] = extra
- for fname in files:
- f = repo.file(fname)
- extra = collectone(f)
- if extra:
- extranodes[fname] = extra
-
- return extranodes
-
-def strip(ui, repo, node, backup="all"):
- cl = repo.changelog
- # TODO delete the undo files, and handle undo of merge sets
- striprev = cl.rev(node)
-
- # Some revisions with rev > striprev may not be descendants of striprev.
- # We have to find these revisions and put them in a bundle, so that
- # we can restore them after the truncations.
- # To create the bundle we use repo.changegroupsubset which requires
- # the list of heads and bases of the set of interesting revisions.
- # (head = revision in the set that has no descendant in the set;
- # base = revision in the set that has no ancestor in the set)
- tostrip = set((striprev,))
- saveheads = set()
- savebases = []
- for r in xrange(striprev + 1, len(cl)):
- parents = cl.parentrevs(r)
- if parents[0] in tostrip or parents[1] in tostrip:
- # r is a descendant of striprev
- tostrip.add(r)
- # if this is a merge and one of the parents does not descend
- # from striprev, mark that parent as a savehead.
- if parents[1] != nullrev:
- for p in parents:
- if p not in tostrip and p > striprev:
- saveheads.add(p)
- else:
- # if no parents of this revision will be stripped, mark it as
- # a savebase
- if parents[0] < striprev and parents[1] < striprev:
- savebases.append(cl.node(r))
-
- saveheads.difference_update(parents)
- saveheads.add(r)
-
- saveheads = [cl.node(r) for r in saveheads]
- files = _collectfiles(repo, striprev)
-
- extranodes = _collectextranodes(repo, files, striprev)
-
- # create a changegroup for all the branches we need to keep
- if backup == "all":
- _bundle(repo, [node], cl.heads(), node, 'backup')
- if saveheads or extranodes:
- chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
- extranodes)
-
- mfst = repo.manifest
-
- tr = repo.transaction()
- offset = len(tr.entries)
-
- tr.startgroup()
- cl.strip(striprev, tr)
- mfst.strip(striprev, tr)
- for fn in files:
- repo.file(fn).strip(striprev, tr)
- tr.endgroup()
-
- try:
- for i in xrange(offset, len(tr.entries)):
- file, troffset, ignore = tr.entries[i]
- repo.sopener(file, 'a').truncate(troffset)
- tr.close()
- except:
- tr.abort()
- raise
-
- if saveheads or extranodes:
- ui.status(_("adding branch\n"))
- f = open(chgrpfile, "rb")
- gen = changegroup.readbundle(f, chgrpfile)
- repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
- f.close()
- if backup != "strip":
- os.unlink(chgrpfile)
-
- repo.destroyed()
diff --git a/sys/lib/python/mercurial/repo.py b/sys/lib/python/mercurial/repo.py
deleted file mode 100644
index 00cc1cf84..000000000
--- a/sys/lib/python/mercurial/repo.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# repo.py - repository base classes for mercurial
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import error
-
-class repository(object):
- def capable(self, name):
- '''tell whether repo supports named capability.
- return False if not supported.
- if boolean capability, return True.
- if string capability, return string.'''
- if name in self.capabilities:
- return True
- name_eq = name + '='
- for cap in self.capabilities:
- if cap.startswith(name_eq):
- return cap[len(name_eq):]
- return False
-
- def requirecap(self, name, purpose):
- '''raise an exception if the given capability is not present'''
- if not self.capable(name):
- raise error.CapabilityError(
- _('cannot %s; remote repository does not '
- 'support the %r capability') % (purpose, name))
-
- def local(self):
- return False
-
- def cancopy(self):
- return self.local()
-
- def rjoin(self, path):
- url = self.url()
- if url.endswith('/'):
- return url + path
- return url + '/' + path
diff --git a/sys/lib/python/mercurial/revlog.py b/sys/lib/python/mercurial/revlog.py
deleted file mode 100644
index 2b2d4acb9..000000000
--- a/sys/lib/python/mercurial/revlog.py
+++ /dev/null
@@ -1,1376 +0,0 @@
-# revlog.py - storage back-end for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-"""Storage back-end for Mercurial.
-
-This provides efficient delta storage with O(1) retrieve and append
-and O(changes) merge between branches.
-"""
-
-# import stuff from node for others to import from revlog
-from node import bin, hex, nullid, nullrev, short #@UnusedImport
-from i18n import _
-import changegroup, ancestor, mdiff, parsers, error, util
-import struct, zlib, errno
-
-_pack = struct.pack
-_unpack = struct.unpack
-_compress = zlib.compress
-_decompress = zlib.decompress
-_sha = util.sha1
-
-# revlog flags
-REVLOGV0 = 0
-REVLOGNG = 1
-REVLOGNGINLINEDATA = (1 << 16)
-REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
-REVLOG_DEFAULT_FORMAT = REVLOGNG
-REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
-
-_prereadsize = 1048576
-
-RevlogError = error.RevlogError
-LookupError = error.LookupError
-
-def getoffset(q):
- return int(q >> 16)
-
-def gettype(q):
- return int(q & 0xFFFF)
-
-def offset_type(offset, type):
- return long(long(offset) << 16 | type)
-
-nullhash = _sha(nullid)
-
-def hash(text, p1, p2):
- """generate a hash from the given text and its parent hashes
-
- This hash combines both the current file contents and its history
- in a manner that makes it easy to distinguish nodes with the same
- content in the revision graph.
- """
- # As of now, if one of the parent node is null, p2 is null
- if p2 == nullid:
- # deep copy of a hash is faster than creating one
- s = nullhash.copy()
- s.update(p1)
- else:
- # none of the parent nodes are nullid
- l = [p1, p2]
- l.sort()
- s = _sha(l[0])
- s.update(l[1])
- s.update(text)
- return s.digest()
-
-def compress(text):
- """ generate a possibly-compressed representation of text """
- if not text:
- return ("", text)
- l = len(text)
- bin = None
- if l < 44:
- pass
- elif l > 1000000:
- # zlib makes an internal copy, thus doubling memory usage for
- # large files, so lets do this in pieces
- z = zlib.compressobj()
- p = []
- pos = 0
- while pos < l:
- pos2 = pos + 2**20
- p.append(z.compress(text[pos:pos2]))
- pos = pos2
- p.append(z.flush())
- if sum(map(len, p)) < l:
- bin = "".join(p)
- else:
- bin = _compress(text)
- if bin is None or len(bin) > l:
- if text[0] == '\0':
- return ("", text)
- return ('u', text)
- return ("", bin)
-
-def decompress(bin):
- """ decompress the given input """
- if not bin:
- return bin
- t = bin[0]
- if t == '\0':
- return bin
- if t == 'x':
- return _decompress(bin)
- if t == 'u':
- return bin[1:]
- raise RevlogError(_("unknown compression type %r") % t)
-
-class lazyparser(object):
- """
- this class avoids the need to parse the entirety of large indices
- """
-
- # lazyparser is not safe to use on windows if win32 extensions not
- # available. it keeps file handle open, which make it not possible
- # to break hardlinks on local cloned repos.
-
- def __init__(self, dataf):
- try:
- size = util.fstat(dataf).st_size
- except AttributeError:
- size = 0
- self.dataf = dataf
- self.s = struct.calcsize(indexformatng)
- self.datasize = size
- self.l = size/self.s
- self.index = [None] * self.l
- self.map = {nullid: nullrev}
- self.allmap = 0
- self.all = 0
- self.mapfind_count = 0
-
- def loadmap(self):
- """
- during a commit, we need to make sure the rev being added is
- not a duplicate. This requires loading the entire index,
- which is fairly slow. loadmap can load up just the node map,
- which takes much less time.
- """
- if self.allmap:
- return
- end = self.datasize
- self.allmap = 1
- cur = 0
- count = 0
- blocksize = self.s * 256
- self.dataf.seek(0)
- while cur < end:
- data = self.dataf.read(blocksize)
- off = 0
- for x in xrange(256):
- n = data[off + ngshaoffset:off + ngshaoffset + 20]
- self.map[n] = count
- count += 1
- if count >= self.l:
- break
- off += self.s
- cur += blocksize
-
- def loadblock(self, blockstart, blocksize, data=None):
- if self.all:
- return
- if data is None:
- self.dataf.seek(blockstart)
- if blockstart + blocksize > self.datasize:
- # the revlog may have grown since we've started running,
- # but we don't have space in self.index for more entries.
- # limit blocksize so that we don't get too much data.
- blocksize = max(self.datasize - blockstart, 0)
- data = self.dataf.read(blocksize)
- lend = len(data) / self.s
- i = blockstart / self.s
- off = 0
- # lazyindex supports __delitem__
- if lend > len(self.index) - i:
- lend = len(self.index) - i
- for x in xrange(lend):
- if self.index[i + x] is None:
- b = data[off : off + self.s]
- self.index[i + x] = b
- n = b[ngshaoffset:ngshaoffset + 20]
- self.map[n] = i + x
- off += self.s
-
- def findnode(self, node):
- """search backwards through the index file for a specific node"""
- if self.allmap:
- return None
-
- # hg log will cause many many searches for the manifest
- # nodes. After we get called a few times, just load the whole
- # thing.
- if self.mapfind_count > 8:
- self.loadmap()
- if node in self.map:
- return node
- return None
- self.mapfind_count += 1
- last = self.l - 1
- while self.index[last] != None:
- if last == 0:
- self.all = 1
- self.allmap = 1
- return None
- last -= 1
- end = (last + 1) * self.s
- blocksize = self.s * 256
- while end >= 0:
- start = max(end - blocksize, 0)
- self.dataf.seek(start)
- data = self.dataf.read(end - start)
- findend = end - start
- while True:
- # we're searching backwards, so we have to make sure
- # we don't find a changeset where this node is a parent
- off = data.find(node, 0, findend)
- findend = off
- if off >= 0:
- i = off / self.s
- off = i * self.s
- n = data[off + ngshaoffset:off + ngshaoffset + 20]
- if n == node:
- self.map[n] = i + start / self.s
- return node
- else:
- break
- end -= blocksize
- return None
-
- def loadindex(self, i=None, end=None):
- if self.all:
- return
- all = False
- if i is None:
- blockstart = 0
- blocksize = (65536 / self.s) * self.s
- end = self.datasize
- all = True
- else:
- if end:
- blockstart = i * self.s
- end = end * self.s
- blocksize = end - blockstart
- else:
- blockstart = (i & ~1023) * self.s
- blocksize = self.s * 1024
- end = blockstart + blocksize
- while blockstart < end:
- self.loadblock(blockstart, blocksize)
- blockstart += blocksize
- if all:
- self.all = True
-
-class lazyindex(object):
- """a lazy version of the index array"""
- def __init__(self, parser):
- self.p = parser
- def __len__(self):
- return len(self.p.index)
- def load(self, pos):
- if pos < 0:
- pos += len(self.p.index)
- self.p.loadindex(pos)
- return self.p.index[pos]
- def __getitem__(self, pos):
- return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
- def __setitem__(self, pos, item):
- self.p.index[pos] = _pack(indexformatng, *item)
- def __delitem__(self, pos):
- del self.p.index[pos]
- def insert(self, pos, e):
- self.p.index.insert(pos, _pack(indexformatng, *e))
- def append(self, e):
- self.p.index.append(_pack(indexformatng, *e))
-
-class lazymap(object):
- """a lazy version of the node map"""
- def __init__(self, parser):
- self.p = parser
- def load(self, key):
- n = self.p.findnode(key)
- if n is None:
- raise KeyError(key)
- def __contains__(self, key):
- if key in self.p.map:
- return True
- self.p.loadmap()
- return key in self.p.map
- def __iter__(self):
- yield nullid
- for i in xrange(self.p.l):
- ret = self.p.index[i]
- if not ret:
- self.p.loadindex(i)
- ret = self.p.index[i]
- if isinstance(ret, str):
- ret = _unpack(indexformatng, ret)
- yield ret[7]
- def __getitem__(self, key):
- try:
- return self.p.map[key]
- except KeyError:
- try:
- self.load(key)
- return self.p.map[key]
- except KeyError:
- raise KeyError("node " + hex(key))
- def __setitem__(self, key, val):
- self.p.map[key] = val
- def __delitem__(self, key):
- del self.p.map[key]
-
-indexformatv0 = ">4l20s20s20s"
-v0shaoffset = 56
-
-class revlogoldio(object):
- def __init__(self):
- self.size = struct.calcsize(indexformatv0)
-
- def parseindex(self, fp, data, inline):
- s = self.size
- index = []
- nodemap = {nullid: nullrev}
- n = off = 0
- if len(data) == _prereadsize:
- data += fp.read() # read the rest
- l = len(data)
- while off + s <= l:
- cur = data[off:off + s]
- off += s
- e = _unpack(indexformatv0, cur)
- # transform to revlogv1 format
- e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
- nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
- index.append(e2)
- nodemap[e[6]] = n
- n += 1
-
- return index, nodemap, None
-
- def packentry(self, entry, node, version, rev):
- e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
- node(entry[5]), node(entry[6]), entry[7])
- return _pack(indexformatv0, *e2)
-
-# index ng:
-# 6 bytes offset
-# 2 bytes flags
-# 4 bytes compressed length
-# 4 bytes uncompressed length
-# 4 bytes: base rev
-# 4 bytes link rev
-# 4 bytes parent 1 rev
-# 4 bytes parent 2 rev
-# 32 bytes: nodeid
-indexformatng = ">Qiiiiii20s12x"
-ngshaoffset = 32
-versionformat = ">I"
-
-class revlogio(object):
- def __init__(self):
- self.size = struct.calcsize(indexformatng)
-
- def parseindex(self, fp, data, inline):
- if len(data) == _prereadsize:
- if util.openhardlinks() and not inline:
- # big index, let's parse it on demand
- parser = lazyparser(fp)
- index = lazyindex(parser)
- nodemap = lazymap(parser)
- e = list(index[0])
- type = gettype(e[0])
- e[0] = offset_type(0, type)
- index[0] = e
- return index, nodemap, None
- else:
- data += fp.read()
-
- # call the C implementation to parse the index data
- index, nodemap, cache = parsers.parse_index(data, inline)
- return index, nodemap, cache
-
- def packentry(self, entry, node, version, rev):
- p = _pack(indexformatng, *entry)
- if rev == 0:
- p = _pack(versionformat, version) + p[4:]
- return p
-
-class revlog(object):
- """
- the underlying revision storage object
-
- A revlog consists of two parts, an index and the revision data.
-
- The index is a file with a fixed record size containing
- information on each revision, including its nodeid (hash), the
- nodeids of its parents, the position and offset of its data within
- the data file, and the revision it's based on. Finally, each entry
- contains a linkrev entry that can serve as a pointer to external
- data.
-
- The revision data itself is a linear collection of data chunks.
- Each chunk represents a revision and is usually represented as a
- delta against the previous chunk. To bound lookup time, runs of
- deltas are limited to about 2 times the length of the original
- version data. This makes retrieval of a version proportional to
- its size, or O(1) relative to the number of revisions.
-
- Both pieces of the revlog are written to in an append-only
- fashion, which means we never need to rewrite a file to insert or
- remove data, and can use some simple techniques to avoid the need
- for locking while reading.
- """
- def __init__(self, opener, indexfile):
- """
- create a revlog object
-
- opener is a function that abstracts the file opening operation
- and can be used to implement COW semantics or the like.
- """
- self.indexfile = indexfile
- self.datafile = indexfile[:-2] + ".d"
- self.opener = opener
- self._cache = None
- self._chunkcache = (0, '')
- self.nodemap = {nullid: nullrev}
- self.index = []
-
- v = REVLOG_DEFAULT_VERSION
- if hasattr(opener, "defversion"):
- v = opener.defversion
- if v & REVLOGNG:
- v |= REVLOGNGINLINEDATA
-
- i = ''
- try:
- f = self.opener(self.indexfile)
- i = f.read(_prereadsize)
- if len(i) > 0:
- v = struct.unpack(versionformat, i[:4])[0]
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
-
- self.version = v
- self._inline = v & REVLOGNGINLINEDATA
- flags = v & ~0xFFFF
- fmt = v & 0xFFFF
- if fmt == REVLOGV0 and flags:
- raise RevlogError(_("index %s unknown flags %#04x for format v0")
- % (self.indexfile, flags >> 16))
- elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
- raise RevlogError(_("index %s unknown flags %#04x for revlogng")
- % (self.indexfile, flags >> 16))
- elif fmt > REVLOGNG:
- raise RevlogError(_("index %s unknown format %d")
- % (self.indexfile, fmt))
-
- self._io = revlogio()
- if self.version == REVLOGV0:
- self._io = revlogoldio()
- if i:
- try:
- d = self._io.parseindex(f, i, self._inline)
- except (ValueError, IndexError), e:
- raise RevlogError(_("index %s is corrupted") % (self.indexfile))
- self.index, self.nodemap, self._chunkcache = d
- if not self._chunkcache:
- self._chunkclear()
-
- # add the magic null revision at -1 (if it hasn't been done already)
- if (self.index == [] or isinstance(self.index, lazyindex) or
- self.index[-1][7] != nullid) :
- self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
- def _loadindex(self, start, end):
- """load a block of indexes all at once from the lazy parser"""
- if isinstance(self.index, lazyindex):
- self.index.p.loadindex(start, end)
-
- def _loadindexmap(self):
- """loads both the map and the index from the lazy parser"""
- if isinstance(self.index, lazyindex):
- p = self.index.p
- p.loadindex()
- self.nodemap = p.map
-
- def _loadmap(self):
- """loads the map from the lazy parser"""
- if isinstance(self.nodemap, lazymap):
- self.nodemap.p.loadmap()
- self.nodemap = self.nodemap.p.map
-
- def tip(self):
- return self.node(len(self.index) - 2)
- def __len__(self):
- return len(self.index) - 1
- def __iter__(self):
- for i in xrange(len(self)):
- yield i
- def rev(self, node):
- try:
- return self.nodemap[node]
- except KeyError:
- raise LookupError(node, self.indexfile, _('no node'))
- def node(self, rev):
- return self.index[rev][7]
- def linkrev(self, rev):
- return self.index[rev][4]
- def parents(self, node):
- i = self.index
- d = i[self.rev(node)]
- return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
- def parentrevs(self, rev):
- return self.index[rev][5:7]
- def start(self, rev):
- return int(self.index[rev][0] >> 16)
- def end(self, rev):
- return self.start(rev) + self.length(rev)
- def length(self, rev):
- return self.index[rev][1]
- def base(self, rev):
- return self.index[rev][3]
-
- def size(self, rev):
- """return the length of the uncompressed text for a given revision"""
- l = self.index[rev][2]
- if l >= 0:
- return l
-
- t = self.revision(self.node(rev))
- return len(t)
-
- # Alternate implementation. The advantage to this code is it
- # will be faster for a single revision. However, the results
- # are not cached, so finding the size of every revision will
- # be slower.
- #
- # if self.cache and self.cache[1] == rev:
- # return len(self.cache[2])
- #
- # base = self.base(rev)
- # if self.cache and self.cache[1] >= base and self.cache[1] < rev:
- # base = self.cache[1]
- # text = self.cache[2]
- # else:
- # text = self.revision(self.node(base))
- #
- # l = len(text)
- # for x in xrange(base + 1, rev + 1):
- # l = mdiff.patchedsize(l, self._chunk(x))
- # return l
-
- def reachable(self, node, stop=None):
- """return the set of all nodes ancestral to a given node, including
- the node itself, stopping when stop is matched"""
- reachable = set((node,))
- visit = [node]
- if stop:
- stopn = self.rev(stop)
- else:
- stopn = 0
- while visit:
- n = visit.pop(0)
- if n == stop:
- continue
- if n == nullid:
- continue
- for p in self.parents(n):
- if self.rev(p) < stopn:
- continue
- if p not in reachable:
- reachable.add(p)
- visit.append(p)
- return reachable
-
- def ancestors(self, *revs):
- 'Generate the ancestors of revs using a breadth-first visit'
- visit = list(revs)
- seen = set([nullrev])
- while visit:
- for parent in self.parentrevs(visit.pop(0)):
- if parent not in seen:
- visit.append(parent)
- seen.add(parent)
- yield parent
-
- def descendants(self, *revs):
- 'Generate the descendants of revs in topological order'
- seen = set(revs)
- for i in xrange(min(revs) + 1, len(self)):
- for x in self.parentrevs(i):
- if x != nullrev and x in seen:
- seen.add(i)
- yield i
- break
-
- def findmissing(self, common=None, heads=None):
- '''
- returns the topologically sorted list of nodes from the set:
- missing = (ancestors(heads) \ ancestors(common))
-
- where ancestors() is the set of ancestors from heads, heads included
-
- if heads is None, the heads of the revlog are used
- if common is None, nullid is assumed to be a common node
- '''
- if common is None:
- common = [nullid]
- if heads is None:
- heads = self.heads()
-
- common = [self.rev(n) for n in common]
- heads = [self.rev(n) for n in heads]
-
- # we want the ancestors, but inclusive
- has = set(self.ancestors(*common))
- has.add(nullrev)
- has.update(common)
-
- # take all ancestors from heads that aren't in has
- missing = set()
- visit = [r for r in heads if r not in has]
- while visit:
- r = visit.pop(0)
- if r in missing:
- continue
- else:
- missing.add(r)
- for p in self.parentrevs(r):
- if p not in has:
- visit.append(p)
- missing = list(missing)
- missing.sort()
- return [self.node(r) for r in missing]
-
- def nodesbetween(self, roots=None, heads=None):
- """Return a tuple containing three elements. Elements 1 and 2 contain
- a final list bases and heads after all the unreachable ones have been
- pruned. Element 0 contains a topologically sorted list of all
-
- nodes that satisfy these constraints:
- 1. All nodes must be descended from a node in roots (the nodes on
- roots are considered descended from themselves).
- 2. All nodes must also be ancestors of a node in heads (the nodes in
- heads are considered to be their own ancestors).
-
- If roots is unspecified, nullid is assumed as the only root.
- If heads is unspecified, it is taken to be the output of the
- heads method (i.e. a list of all nodes in the repository that
- have no children)."""
- nonodes = ([], [], [])
- if roots is not None:
- roots = list(roots)
- if not roots:
- return nonodes
- lowestrev = min([self.rev(n) for n in roots])
- else:
- roots = [nullid] # Everybody's a descendent of nullid
- lowestrev = nullrev
- if (lowestrev == nullrev) and (heads is None):
- # We want _all_ the nodes!
- return ([self.node(r) for r in self], [nullid], list(self.heads()))
- if heads is None:
- # All nodes are ancestors, so the latest ancestor is the last
- # node.
- highestrev = len(self) - 1
- # Set ancestors to None to signal that every node is an ancestor.
- ancestors = None
- # Set heads to an empty dictionary for later discovery of heads
- heads = {}
- else:
- heads = list(heads)
- if not heads:
- return nonodes
- ancestors = set()
- # Turn heads into a dictionary so we can remove 'fake' heads.
- # Also, later we will be using it to filter out the heads we can't
- # find from roots.
- heads = dict.fromkeys(heads, 0)
- # Start at the top and keep marking parents until we're done.
- nodestotag = set(heads)
- # Remember where the top was so we can use it as a limit later.
- highestrev = max([self.rev(n) for n in nodestotag])
- while nodestotag:
- # grab a node to tag
- n = nodestotag.pop()
- # Never tag nullid
- if n == nullid:
- continue
- # A node's revision number represents its place in a
- # topologically sorted list of nodes.
- r = self.rev(n)
- if r >= lowestrev:
- if n not in ancestors:
- # If we are possibly a descendent of one of the roots
- # and we haven't already been marked as an ancestor
- ancestors.add(n) # Mark as ancestor
- # Add non-nullid parents to list of nodes to tag.
- nodestotag.update([p for p in self.parents(n) if
- p != nullid])
- elif n in heads: # We've seen it before, is it a fake head?
- # So it is, real heads should not be the ancestors of
- # any other heads.
- heads.pop(n)
- if not ancestors:
- return nonodes
- # Now that we have our set of ancestors, we want to remove any
- # roots that are not ancestors.
-
- # If one of the roots was nullid, everything is included anyway.
- if lowestrev > nullrev:
- # But, since we weren't, let's recompute the lowest rev to not
- # include roots that aren't ancestors.
-
- # Filter out roots that aren't ancestors of heads
- roots = [n for n in roots if n in ancestors]
- # Recompute the lowest revision
- if roots:
- lowestrev = min([self.rev(n) for n in roots])
- else:
- # No more roots? Return empty list
- return nonodes
- else:
- # We are descending from nullid, and don't need to care about
- # any other roots.
- lowestrev = nullrev
- roots = [nullid]
- # Transform our roots list into a set.
- descendents = set(roots)
- # Also, keep the original roots so we can filter out roots that aren't
- # 'real' roots (i.e. are descended from other roots).
- roots = descendents.copy()
- # Our topologically sorted list of output nodes.
- orderedout = []
- # Don't start at nullid since we don't want nullid in our output list,
- # and if nullid shows up in descedents, empty parents will look like
- # they're descendents.
- for r in xrange(max(lowestrev, 0), highestrev + 1):
- n = self.node(r)
- isdescendent = False
- if lowestrev == nullrev: # Everybody is a descendent of nullid
- isdescendent = True
- elif n in descendents:
- # n is already a descendent
- isdescendent = True
- # This check only needs to be done here because all the roots
- # will start being marked is descendents before the loop.
- if n in roots:
- # If n was a root, check if it's a 'real' root.
- p = tuple(self.parents(n))
- # If any of its parents are descendents, it's not a root.
- if (p[0] in descendents) or (p[1] in descendents):
- roots.remove(n)
- else:
- p = tuple(self.parents(n))
- # A node is a descendent if either of its parents are
- # descendents. (We seeded the dependents list with the roots
- # up there, remember?)
- if (p[0] in descendents) or (p[1] in descendents):
- descendents.add(n)
- isdescendent = True
- if isdescendent and ((ancestors is None) or (n in ancestors)):
- # Only include nodes that are both descendents and ancestors.
- orderedout.append(n)
- if (ancestors is not None) and (n in heads):
- # We're trying to figure out which heads are reachable
- # from roots.
- # Mark this head as having been reached
- heads[n] = 1
- elif ancestors is None:
- # Otherwise, we're trying to discover the heads.
- # Assume this is a head because if it isn't, the next step
- # will eventually remove it.
- heads[n] = 1
- # But, obviously its parents aren't.
- for p in self.parents(n):
- heads.pop(p, None)
- heads = [n for n in heads.iterkeys() if heads[n] != 0]
- roots = list(roots)
- assert orderedout
- assert roots
- assert heads
- return (orderedout, roots, heads)
-
- def heads(self, start=None, stop=None):
- """return the list of all nodes that have no children
-
- if start is specified, only heads that are descendants of
- start will be returned
- if stop is specified, it will consider all the revs from stop
- as if they had no children
- """
- if start is None and stop is None:
- count = len(self)
- if not count:
- return [nullid]
- ishead = [1] * (count + 1)
- index = self.index
- for r in xrange(count):
- e = index[r]
- ishead[e[5]] = ishead[e[6]] = 0
- return [self.node(r) for r in xrange(count) if ishead[r]]
-
- if start is None:
- start = nullid
- if stop is None:
- stop = []
- stoprevs = set([self.rev(n) for n in stop])
- startrev = self.rev(start)
- reachable = set((startrev,))
- heads = set((startrev,))
-
- parentrevs = self.parentrevs
- for r in xrange(startrev + 1, len(self)):
- for p in parentrevs(r):
- if p in reachable:
- if r not in stoprevs:
- reachable.add(r)
- heads.add(r)
- if p in heads and p not in stoprevs:
- heads.remove(p)
-
- return [self.node(r) for r in heads]
-
- def children(self, node):
- """find the children of a given node"""
- c = []
- p = self.rev(node)
- for r in range(p + 1, len(self)):
- prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
- if prevs:
- for pr in prevs:
- if pr == p:
- c.append(self.node(r))
- elif p == nullrev:
- c.append(self.node(r))
- return c
-
- def _match(self, id):
- if isinstance(id, (long, int)):
- # rev
- return self.node(id)
- if len(id) == 20:
- # possibly a binary node
- # odds of a binary node being all hex in ASCII are 1 in 10**25
- try:
- node = id
- self.rev(node) # quick search the index
- return node
- except LookupError:
- pass # may be partial hex id
- try:
- # str(rev)
- rev = int(id)
- if str(rev) != id:
- raise ValueError
- if rev < 0:
- rev = len(self) + rev
- if rev < 0 or rev >= len(self):
- raise ValueError
- return self.node(rev)
- except (ValueError, OverflowError):
- pass
- if len(id) == 40:
- try:
- # a full hex nodeid?
- node = bin(id)
- self.rev(node)
- return node
- except (TypeError, LookupError):
- pass
-
- def _partialmatch(self, id):
- if len(id) < 40:
- try:
- # hex(node)[:...]
- l = len(id) // 2 # grab an even number of digits
- bin_id = bin(id[:l*2])
- nl = [n for n in self.nodemap if n[:l] == bin_id]
- nl = [n for n in nl if hex(n).startswith(id)]
- if len(nl) > 0:
- if len(nl) == 1:
- return nl[0]
- raise LookupError(id, self.indexfile,
- _('ambiguous identifier'))
- return None
- except TypeError:
- pass
-
- def lookup(self, id):
- """locate a node based on:
- - revision number or str(revision number)
- - nodeid or subset of hex nodeid
- """
- n = self._match(id)
- if n is not None:
- return n
- n = self._partialmatch(id)
- if n:
- return n
-
- raise LookupError(id, self.indexfile, _('no match found'))
-
- def cmp(self, node, text):
- """compare text with a given file revision"""
- p1, p2 = self.parents(node)
- return hash(text, p1, p2) != node
-
- def _addchunk(self, offset, data):
- o, d = self._chunkcache
- # try to add to existing cache
- if o + len(d) == offset and len(d) + len(data) < _prereadsize:
- self._chunkcache = o, d + data
- else:
- self._chunkcache = offset, data
-
- def _loadchunk(self, offset, length):
- if self._inline:
- df = self.opener(self.indexfile)
- else:
- df = self.opener(self.datafile)
-
- readahead = max(65536, length)
- df.seek(offset)
- d = df.read(readahead)
- self._addchunk(offset, d)
- if readahead > length:
- return d[:length]
- return d
-
- def _getchunk(self, offset, length):
- o, d = self._chunkcache
- l = len(d)
-
- # is it in the cache?
- cachestart = offset - o
- cacheend = cachestart + length
- if cachestart >= 0 and cacheend <= l:
- if cachestart == 0 and cacheend == l:
- return d # avoid a copy
- return d[cachestart:cacheend]
-
- return self._loadchunk(offset, length)
-
- def _chunkraw(self, startrev, endrev):
- start = self.start(startrev)
- length = self.end(endrev) - start
- if self._inline:
- start += (startrev + 1) * self._io.size
- return self._getchunk(start, length)
-
- def _chunk(self, rev):
- return decompress(self._chunkraw(rev, rev))
-
- def _chunkclear(self):
- self._chunkcache = (0, '')
-
- def revdiff(self, rev1, rev2):
- """return or calculate a delta between two revisions"""
- if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
- return self._chunk(rev2)
-
- return mdiff.textdiff(self.revision(self.node(rev1)),
- self.revision(self.node(rev2)))
-
- def revision(self, node):
- """return an uncompressed revision of a given node"""
- if node == nullid:
- return ""
- if self._cache and self._cache[0] == node:
- return str(self._cache[2])
-
- # look up what we need to read
- text = None
- rev = self.rev(node)
- base = self.base(rev)
-
- # check rev flags
- if self.index[rev][0] & 0xFFFF:
- raise RevlogError(_('incompatible revision flag %x') %
- (self.index[rev][0] & 0xFFFF))
-
- # do we have useful data cached?
- if self._cache and self._cache[1] >= base and self._cache[1] < rev:
- base = self._cache[1]
- text = str(self._cache[2])
-
- self._loadindex(base, rev + 1)
- self._chunkraw(base, rev)
- if text is None:
- text = self._chunk(base)
-
- bins = [self._chunk(r) for r in xrange(base + 1, rev + 1)]
- text = mdiff.patches(text, bins)
- p1, p2 = self.parents(node)
- if node != hash(text, p1, p2):
- raise RevlogError(_("integrity check failed on %s:%d")
- % (self.indexfile, rev))
-
- self._cache = (node, rev, text)
- return text
-
- def checkinlinesize(self, tr, fp=None):
- if not self._inline or (self.start(-2) + self.length(-2)) < 131072:
- return
-
- trinfo = tr.find(self.indexfile)
- if trinfo is None:
- raise RevlogError(_("%s not found in the transaction")
- % self.indexfile)
-
- trindex = trinfo[2]
- dataoff = self.start(trindex)
-
- tr.add(self.datafile, dataoff)
-
- if fp:
- fp.flush()
- fp.close()
-
- df = self.opener(self.datafile, 'w')
- try:
- for r in self:
- df.write(self._chunkraw(r, r))
- finally:
- df.close()
-
- fp = self.opener(self.indexfile, 'w', atomictemp=True)
- self.version &= ~(REVLOGNGINLINEDATA)
- self._inline = False
- for i in self:
- e = self._io.packentry(self.index[i], self.node, self.version, i)
- fp.write(e)
-
- # if we don't call rename, the temp file will never replace the
- # real index
- fp.rename()
-
- tr.replace(self.indexfile, trindex * self._io.size)
- self._chunkclear()
-
- def addrevision(self, text, transaction, link, p1, p2, d=None):
- """add a revision to the log
-
- text - the revision data to add
- transaction - the transaction object used for rollback
- link - the linkrev data to add
- p1, p2 - the parent nodeids of the revision
- d - an optional precomputed delta
- """
- dfh = None
- if not self._inline:
- dfh = self.opener(self.datafile, "a")
- ifh = self.opener(self.indexfile, "a+")
- try:
- return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
- finally:
- if dfh:
- dfh.close()
- ifh.close()
-
- def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
- node = hash(text, p1, p2)
- if node in self.nodemap:
- return node
-
- curr = len(self)
- prev = curr - 1
- base = self.base(prev)
- offset = self.end(prev)
-
- if curr:
- if not d:
- ptext = self.revision(self.node(prev))
- d = mdiff.textdiff(ptext, text)
- data = compress(d)
- l = len(data[1]) + len(data[0])
- dist = l + offset - self.start(base)
-
- # full versions are inserted when the needed deltas
- # become comparable to the uncompressed text
- if not curr or dist > len(text) * 2:
- data = compress(text)
- l = len(data[1]) + len(data[0])
- base = curr
-
- e = (offset_type(offset, 0), l, len(text),
- base, link, self.rev(p1), self.rev(p2), node)
- self.index.insert(-1, e)
- self.nodemap[node] = curr
-
- entry = self._io.packentry(e, self.node, self.version, curr)
- if not self._inline:
- transaction.add(self.datafile, offset)
- transaction.add(self.indexfile, curr * len(entry))
- if data[0]:
- dfh.write(data[0])
- dfh.write(data[1])
- dfh.flush()
- ifh.write(entry)
- else:
- offset += curr * self._io.size
- transaction.add(self.indexfile, offset, curr)
- ifh.write(entry)
- ifh.write(data[0])
- ifh.write(data[1])
- self.checkinlinesize(transaction, ifh)
-
- self._cache = (node, curr, text)
- return node
-
- def ancestor(self, a, b):
- """calculate the least common ancestor of nodes a and b"""
-
- def parents(rev):
- return [p for p in self.parentrevs(rev) if p != nullrev]
-
- c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
- if c is None:
- return nullid
-
- return self.node(c)
-
- def group(self, nodelist, lookup, infocollect=None):
- """calculate a delta group
-
- Given a list of changeset revs, return a set of deltas and
- metadata corresponding to nodes. the first delta is
- parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
- have this parent as it has all history before these
- changesets. parent is parent[0]
- """
-
- revs = [self.rev(n) for n in nodelist]
-
- # if we don't have any revisions touched by these changesets, bail
- if not revs:
- yield changegroup.closechunk()
- return
-
- # add the parent of the first rev
- p = self.parentrevs(revs[0])[0]
- revs.insert(0, p)
-
- # build deltas
- for d in xrange(len(revs) - 1):
- a, b = revs[d], revs[d + 1]
- nb = self.node(b)
-
- if infocollect is not None:
- infocollect(nb)
-
- p = self.parents(nb)
- meta = nb + p[0] + p[1] + lookup(nb)
- if a == -1:
- d = self.revision(nb)
- meta += mdiff.trivialdiffheader(len(d))
- else:
- d = self.revdiff(a, b)
- yield changegroup.chunkheader(len(meta) + len(d))
- yield meta
- if len(d) > 2**20:
- pos = 0
- while pos < len(d):
- pos2 = pos + 2 ** 18
- yield d[pos:pos2]
- pos = pos2
- else:
- yield d
-
- yield changegroup.closechunk()
-
- def addgroup(self, revs, linkmapper, transaction):
- """
- add a delta group
-
- given a set of deltas, add them to the revision log. the
- first delta is against its parent, which should be in our
- log, the rest are against the previous delta.
- """
-
- #track the base of the current delta log
- r = len(self)
- t = r - 1
- node = None
-
- base = prev = nullrev
- start = end = textlen = 0
- if r:
- end = self.end(t)
-
- ifh = self.opener(self.indexfile, "a+")
- isize = r * self._io.size
- if self._inline:
- transaction.add(self.indexfile, end + isize, r)
- dfh = None
- else:
- transaction.add(self.indexfile, isize, r)
- transaction.add(self.datafile, end)
- dfh = self.opener(self.datafile, "a")
-
- try:
- # loop through our set of deltas
- chain = None
- for chunk in revs:
- node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
- link = linkmapper(cs)
- if node in self.nodemap:
- # this can happen if two branches make the same change
- chain = node
- continue
- delta = buffer(chunk, 80)
- del chunk
-
- for p in (p1, p2):
- if not p in self.nodemap:
- raise LookupError(p, self.indexfile, _('unknown parent'))
-
- if not chain:
- # retrieve the parent revision of the delta chain
- chain = p1
- if not chain in self.nodemap:
- raise LookupError(chain, self.indexfile, _('unknown base'))
-
- # full versions are inserted when the needed deltas become
- # comparable to the uncompressed text or when the previous
- # version is not the one we have a delta against. We use
- # the size of the previous full rev as a proxy for the
- # current size.
-
- if chain == prev:
- cdelta = compress(delta)
- cdeltalen = len(cdelta[0]) + len(cdelta[1])
- textlen = mdiff.patchedsize(textlen, delta)
-
- if chain != prev or (end - start + cdeltalen) > textlen * 2:
- # flush our writes here so we can read it in revision
- if dfh:
- dfh.flush()
- ifh.flush()
- text = self.revision(chain)
- if len(text) == 0:
- # skip over trivial delta header
- text = buffer(delta, 12)
- else:
- text = mdiff.patches(text, [delta])
- del delta
- chk = self._addrevision(text, transaction, link, p1, p2, None,
- ifh, dfh)
- if not dfh and not self._inline:
- # addrevision switched from inline to conventional
- # reopen the index
- dfh = self.opener(self.datafile, "a")
- ifh = self.opener(self.indexfile, "a")
- if chk != node:
- raise RevlogError(_("consistency error adding group"))
- textlen = len(text)
- else:
- e = (offset_type(end, 0), cdeltalen, textlen, base,
- link, self.rev(p1), self.rev(p2), node)
- self.index.insert(-1, e)
- self.nodemap[node] = r
- entry = self._io.packentry(e, self.node, self.version, r)
- if self._inline:
- ifh.write(entry)
- ifh.write(cdelta[0])
- ifh.write(cdelta[1])
- self.checkinlinesize(transaction, ifh)
- if not self._inline:
- dfh = self.opener(self.datafile, "a")
- ifh = self.opener(self.indexfile, "a")
- else:
- dfh.write(cdelta[0])
- dfh.write(cdelta[1])
- ifh.write(entry)
-
- t, r, chain, prev = r, r + 1, node, node
- base = self.base(t)
- start = self.start(base)
- end = self.end(t)
- finally:
- if dfh:
- dfh.close()
- ifh.close()
-
- return node
-
- def strip(self, minlink, transaction):
- """truncate the revlog on the first revision with a linkrev >= minlink
-
- This function is called when we're stripping revision minlink and
- its descendants from the repository.
-
- We have to remove all revisions with linkrev >= minlink, because
- the equivalent changelog revisions will be renumbered after the
- strip.
-
- So we truncate the revlog on the first of these revisions, and
- trust that the caller has saved the revisions that shouldn't be
- removed and that it'll readd them after this truncation.
- """
- if len(self) == 0:
- return
-
- if isinstance(self.index, lazyindex):
- self._loadindexmap()
-
- for rev in self:
- if self.index[rev][4] >= minlink:
- break
- else:
- return
-
- # first truncate the files on disk
- end = self.start(rev)
- if not self._inline:
- transaction.add(self.datafile, end)
- end = rev * self._io.size
- else:
- end += rev * self._io.size
-
- transaction.add(self.indexfile, end)
-
- # then reset internal state in memory to forget those revisions
- self._cache = None
- self._chunkclear()
- for x in xrange(rev, len(self)):
- del self.nodemap[self.node(x)]
-
- del self.index[rev:-1]
-
- def checksize(self):
- expected = 0
- if len(self):
- expected = max(0, self.end(len(self) - 1))
-
- try:
- f = self.opener(self.datafile)
- f.seek(0, 2)
- actual = f.tell()
- dd = actual - expected
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
- dd = 0
-
- try:
- f = self.opener(self.indexfile)
- f.seek(0, 2)
- actual = f.tell()
- s = self._io.size
- i = max(0, actual // s)
- di = actual - (i * s)
- if self._inline:
- databytes = 0
- for r in self:
- databytes += max(0, self.length(r))
- dd = 0
- di = actual - len(self) * s - databytes
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
- di = 0
-
- return (dd, di)
-
- def files(self):
- res = [ self.indexfile ]
- if not self._inline:
- res.append(self.datafile)
- return res
diff --git a/sys/lib/python/mercurial/simplemerge.py b/sys/lib/python/mercurial/simplemerge.py
deleted file mode 100644
index d876b47b9..000000000
--- a/sys/lib/python/mercurial/simplemerge.py
+++ /dev/null
@@ -1,451 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2004, 2005 Canonical Ltd
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-# mbp: "you know that thing where cvs gives you conflict markers?"
-# s: "i hate that."
-
-from i18n import _
-import util, mdiff
-import sys, os
-
-class CantReprocessAndShowBase(Exception):
- pass
-
-def intersect(ra, rb):
- """Given two ranges return the range where they intersect or None.
-
- >>> intersect((0, 10), (0, 6))
- (0, 6)
- >>> intersect((0, 10), (5, 15))
- (5, 10)
- >>> intersect((0, 10), (10, 15))
- >>> intersect((0, 9), (10, 15))
- >>> intersect((0, 9), (7, 15))
- (7, 9)
- """
- assert ra[0] <= ra[1]
- assert rb[0] <= rb[1]
-
- sa = max(ra[0], rb[0])
- sb = min(ra[1], rb[1])
- if sa < sb:
- return sa, sb
- else:
- return None
-
-def compare_range(a, astart, aend, b, bstart, bend):
- """Compare a[astart:aend] == b[bstart:bend], without slicing.
- """
- if (aend-astart) != (bend-bstart):
- return False
- for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
- if a[ia] != b[ib]:
- return False
- else:
- return True
-
-class Merge3Text(object):
- """3-way merge of texts.
-
- Given strings BASE, OTHER, THIS, tries to produce a combined text
- incorporating the changes from both BASE->OTHER and BASE->THIS."""
- def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
- self.basetext = basetext
- self.atext = atext
- self.btext = btext
- if base is None:
- base = mdiff.splitnewlines(basetext)
- if a is None:
- a = mdiff.splitnewlines(atext)
- if b is None:
- b = mdiff.splitnewlines(btext)
- self.base = base
- self.a = a
- self.b = b
-
- def merge_lines(self,
- name_a=None,
- name_b=None,
- name_base=None,
- start_marker='<<<<<<<',
- mid_marker='=======',
- end_marker='>>>>>>>',
- base_marker=None,
- reprocess=False):
- """Return merge in cvs-like form.
- """
- self.conflicts = False
- newline = '\n'
- if len(self.a) > 0:
- if self.a[0].endswith('\r\n'):
- newline = '\r\n'
- elif self.a[0].endswith('\r'):
- newline = '\r'
- if base_marker and reprocess:
- raise CantReprocessAndShowBase()
- if name_a:
- start_marker = start_marker + ' ' + name_a
- if name_b:
- end_marker = end_marker + ' ' + name_b
- if name_base and base_marker:
- base_marker = base_marker + ' ' + name_base
- merge_regions = self.merge_regions()
- if reprocess is True:
- merge_regions = self.reprocess_merge_regions(merge_regions)
- for t in merge_regions:
- what = t[0]
- if what == 'unchanged':
- for i in range(t[1], t[2]):
- yield self.base[i]
- elif what == 'a' or what == 'same':
- for i in range(t[1], t[2]):
- yield self.a[i]
- elif what == 'b':
- for i in range(t[1], t[2]):
- yield self.b[i]
- elif what == 'conflict':
- self.conflicts = True
- yield start_marker + newline
- for i in range(t[3], t[4]):
- yield self.a[i]
- if base_marker is not None:
- yield base_marker + newline
- for i in range(t[1], t[2]):
- yield self.base[i]
- yield mid_marker + newline
- for i in range(t[5], t[6]):
- yield self.b[i]
- yield end_marker + newline
- else:
- raise ValueError(what)
-
- def merge_annotated(self):
- """Return merge with conflicts, showing origin of lines.
-
- Most useful for debugging merge.
- """
- for t in self.merge_regions():
- what = t[0]
- if what == 'unchanged':
- for i in range(t[1], t[2]):
- yield 'u | ' + self.base[i]
- elif what == 'a' or what == 'same':
- for i in range(t[1], t[2]):
- yield what[0] + ' | ' + self.a[i]
- elif what == 'b':
- for i in range(t[1], t[2]):
- yield 'b | ' + self.b[i]
- elif what == 'conflict':
- yield '<<<<\n'
- for i in range(t[3], t[4]):
- yield 'A | ' + self.a[i]
- yield '----\n'
- for i in range(t[5], t[6]):
- yield 'B | ' + self.b[i]
- yield '>>>>\n'
- else:
- raise ValueError(what)
-
- def merge_groups(self):
- """Yield sequence of line groups. Each one is a tuple:
-
- 'unchanged', lines
- Lines unchanged from base
-
- 'a', lines
- Lines taken from a
-
- 'same', lines
- Lines taken from a (and equal to b)
-
- 'b', lines
- Lines taken from b
-
- 'conflict', base_lines, a_lines, b_lines
- Lines from base were changed to either a or b and conflict.
- """
- for t in self.merge_regions():
- what = t[0]
- if what == 'unchanged':
- yield what, self.base[t[1]:t[2]]
- elif what == 'a' or what == 'same':
- yield what, self.a[t[1]:t[2]]
- elif what == 'b':
- yield what, self.b[t[1]:t[2]]
- elif what == 'conflict':
- yield (what,
- self.base[t[1]:t[2]],
- self.a[t[3]:t[4]],
- self.b[t[5]:t[6]])
- else:
- raise ValueError(what)
-
- def merge_regions(self):
- """Return sequences of matching and conflicting regions.
-
- This returns tuples, where the first value says what kind we
- have:
-
- 'unchanged', start, end
- Take a region of base[start:end]
-
- 'same', astart, aend
- b and a are different from base but give the same result
-
- 'a', start, end
- Non-clashing insertion from a[start:end]
-
- Method is as follows:
-
- The two sequences align only on regions which match the base
- and both descendents. These are found by doing a two-way diff
- of each one against the base, and then finding the
- intersections between those regions. These "sync regions"
- are by definition unchanged in both and easily dealt with.
-
- The regions in between can be in any of three cases:
- conflicted, or changed on only one side.
- """
-
- # section a[0:ia] has been disposed of, etc
- iz = ia = ib = 0
-
- for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
- #print 'match base [%d:%d]' % (zmatch, zend)
-
- matchlen = zend - zmatch
- assert matchlen >= 0
- assert matchlen == (aend - amatch)
- assert matchlen == (bend - bmatch)
-
- len_a = amatch - ia
- len_b = bmatch - ib
- len_base = zmatch - iz
- assert len_a >= 0
- assert len_b >= 0
- assert len_base >= 0
-
- #print 'unmatched a=%d, b=%d' % (len_a, len_b)
-
- if len_a or len_b:
- # try to avoid actually slicing the lists
- equal_a = compare_range(self.a, ia, amatch,
- self.base, iz, zmatch)
- equal_b = compare_range(self.b, ib, bmatch,
- self.base, iz, zmatch)
- same = compare_range(self.a, ia, amatch,
- self.b, ib, bmatch)
-
- if same:
- yield 'same', ia, amatch
- elif equal_a and not equal_b:
- yield 'b', ib, bmatch
- elif equal_b and not equal_a:
- yield 'a', ia, amatch
- elif not equal_a and not equal_b:
- yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
- else:
- raise AssertionError("can't handle a=b=base but unmatched")
-
- ia = amatch
- ib = bmatch
- iz = zmatch
-
- # if the same part of the base was deleted on both sides
- # that's OK, we can just skip it.
-
-
- if matchlen > 0:
- assert ia == amatch
- assert ib == bmatch
- assert iz == zmatch
-
- yield 'unchanged', zmatch, zend
- iz = zend
- ia = aend
- ib = bend
-
- def reprocess_merge_regions(self, merge_regions):
- """Where there are conflict regions, remove the agreed lines.
-
- Lines where both A and B have made the same changes are
- eliminated.
- """
- for region in merge_regions:
- if region[0] != "conflict":
- yield region
- continue
- type, iz, zmatch, ia, amatch, ib, bmatch = region
- a_region = self.a[ia:amatch]
- b_region = self.b[ib:bmatch]
- matches = mdiff.get_matching_blocks(''.join(a_region),
- ''.join(b_region))
- next_a = ia
- next_b = ib
- for region_ia, region_ib, region_len in matches[:-1]:
- region_ia += ia
- region_ib += ib
- reg = self.mismatch_region(next_a, region_ia, next_b,
- region_ib)
- if reg is not None:
- yield reg
- yield 'same', region_ia, region_len+region_ia
- next_a = region_ia + region_len
- next_b = region_ib + region_len
- reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
- if reg is not None:
- yield reg
-
- def mismatch_region(next_a, region_ia, next_b, region_ib):
- if next_a < region_ia or next_b < region_ib:
- return 'conflict', None, None, next_a, region_ia, next_b, region_ib
- mismatch_region = staticmethod(mismatch_region)
-
- def find_sync_regions(self):
- """Return a list of sync regions, where both descendents match the base.
-
- Generates a list of (base1, base2, a1, a2, b1, b2). There is
- always a zero-length sync region at the end of all the files.
- """
-
- ia = ib = 0
- amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
- bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
- len_a = len(amatches)
- len_b = len(bmatches)
-
- sl = []
-
- while ia < len_a and ib < len_b:
- abase, amatch, alen = amatches[ia]
- bbase, bmatch, blen = bmatches[ib]
-
- # there is an unconflicted block at i; how long does it
- # extend? until whichever one ends earlier.
- i = intersect((abase, abase+alen), (bbase, bbase+blen))
- if i:
- intbase = i[0]
- intend = i[1]
- intlen = intend - intbase
-
- # found a match of base[i[0], i[1]]; this may be less than
- # the region that matches in either one
- assert intlen <= alen
- assert intlen <= blen
- assert abase <= intbase
- assert bbase <= intbase
-
- asub = amatch + (intbase - abase)
- bsub = bmatch + (intbase - bbase)
- aend = asub + intlen
- bend = bsub + intlen
-
- assert self.base[intbase:intend] == self.a[asub:aend], \
- (self.base[intbase:intend], self.a[asub:aend])
-
- assert self.base[intbase:intend] == self.b[bsub:bend]
-
- sl.append((intbase, intend,
- asub, aend,
- bsub, bend))
-
- # advance whichever one ends first in the base text
- if (abase + alen) < (bbase + blen):
- ia += 1
- else:
- ib += 1
-
- intbase = len(self.base)
- abase = len(self.a)
- bbase = len(self.b)
- sl.append((intbase, intbase, abase, abase, bbase, bbase))
-
- return sl
-
- def find_unconflicted(self):
- """Return a list of ranges in base that are not conflicted."""
- am = mdiff.get_matching_blocks(self.basetext, self.atext)
- bm = mdiff.get_matching_blocks(self.basetext, self.btext)
-
- unc = []
-
- while am and bm:
- # there is an unconflicted block at i; how long does it
- # extend? until whichever one ends earlier.
- a1 = am[0][0]
- a2 = a1 + am[0][2]
- b1 = bm[0][0]
- b2 = b1 + bm[0][2]
- i = intersect((a1, a2), (b1, b2))
- if i:
- unc.append(i)
-
- if a2 < b2:
- del am[0]
- else:
- del bm[0]
-
- return unc
-
-def simplemerge(ui, local, base, other, **opts):
- def readfile(filename):
- f = open(filename, "rb")
- text = f.read()
- f.close()
- if util.binary(text):
- msg = _("%s looks like a binary file.") % filename
- if not opts.get('text'):
- raise util.Abort(msg)
- elif not opts.get('quiet'):
- ui.warn(_('warning: %s\n') % msg)
- return text
-
- name_a = local
- name_b = other
- labels = opts.get('label', [])
- if labels:
- name_a = labels.pop(0)
- if labels:
- name_b = labels.pop(0)
- if labels:
- raise util.Abort(_("can only specify two labels."))
-
- localtext = readfile(local)
- basetext = readfile(base)
- othertext = readfile(other)
-
- local = os.path.realpath(local)
- if not opts.get('print'):
- opener = util.opener(os.path.dirname(local))
- out = opener(os.path.basename(local), "w", atomictemp=True)
- else:
- out = sys.stdout
-
- reprocess = not opts.get('no_minimal')
-
- m3 = Merge3Text(basetext, localtext, othertext)
- for line in m3.merge_lines(name_a=name_a, name_b=name_b,
- reprocess=reprocess):
- out.write(line)
-
- if not opts.get('print'):
- out.rename()
-
- if m3.conflicts:
- if not opts.get('quiet'):
- ui.warn(_("warning: conflicts during merge.\n"))
- return 1
diff --git a/sys/lib/python/mercurial/sshrepo.py b/sys/lib/python/mercurial/sshrepo.py
deleted file mode 100644
index c6915bf65..000000000
--- a/sys/lib/python/mercurial/sshrepo.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# sshrepo.py - ssh repository proxy class for mercurial
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import bin, hex
-from i18n import _
-import repo, util, error
-import re, urllib
-
-class remotelock(object):
- def __init__(self, repo):
- self.repo = repo
- def release(self):
- self.repo.unlock()
- self.repo = None
- def __del__(self):
- if self.repo:
- self.release()
-
-class sshrepository(repo.repository):
- def __init__(self, ui, path, create=0):
- self._url = path
- self.ui = ui
-
- m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
- if not m:
- self.abort(error.RepoError(_("couldn't parse location %s") % path))
-
- self.user = m.group(2)
- self.host = m.group(3)
- self.port = m.group(5)
- self.path = m.group(7) or "."
-
- sshcmd = self.ui.config("ui", "ssh", "ssh")
- remotecmd = self.ui.config("ui", "remotecmd", "hg")
-
- args = util.sshargs(sshcmd, self.host, self.user, self.port)
-
- if create:
- cmd = '%s %s "%s init %s"'
- cmd = cmd % (sshcmd, args, remotecmd, self.path)
-
- ui.note(_('running %s\n') % cmd)
- res = util.system(cmd)
- if res != 0:
- self.abort(error.RepoError(_("could not create remote repo")))
-
- self.validate_repo(ui, sshcmd, args, remotecmd)
-
- def url(self):
- return self._url
-
- def validate_repo(self, ui, sshcmd, args, remotecmd):
- # cleanup up previous run
- self.cleanup()
-
- cmd = '%s %s "%s -R %s serve --stdio"'
- cmd = cmd % (sshcmd, args, remotecmd, self.path)
-
- cmd = util.quotecommand(cmd)
- ui.note(_('running %s\n') % cmd)
- self.pipeo, self.pipei, self.pipee = util.popen3(cmd)
-
- # skip any noise generated by remote shell
- self.do_cmd("hello")
- r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
- lines = ["", "dummy"]
- max_noise = 500
- while lines[-1] and max_noise:
- l = r.readline()
- self.readerr()
- if lines[-1] == "1\n" and l == "\n":
- break
- if l:
- ui.debug(_("remote: "), l)
- lines.append(l)
- max_noise -= 1
- else:
- self.abort(error.RepoError(_("no suitable response from remote hg")))
-
- self.capabilities = set()
- for l in reversed(lines):
- if l.startswith("capabilities:"):
- self.capabilities.update(l[:-1].split(":")[1].split())
- break
-
- def readerr(self):
- while 1:
- size = util.fstat(self.pipee).st_size
- if size == 0: break
- l = self.pipee.readline()
- if not l: break
- self.ui.status(_("remote: "), l)
-
- def abort(self, exception):
- self.cleanup()
- raise exception
-
- def cleanup(self):
- try:
- self.pipeo.close()
- self.pipei.close()
- # read the error descriptor until EOF
- for l in self.pipee:
- self.ui.status(_("remote: "), l)
- self.pipee.close()
- except:
- pass
-
- __del__ = cleanup
-
- def do_cmd(self, cmd, **args):
- self.ui.debug(_("sending %s command\n") % cmd)
- self.pipeo.write("%s\n" % cmd)
- for k, v in args.iteritems():
- self.pipeo.write("%s %d\n" % (k, len(v)))
- self.pipeo.write(v)
- self.pipeo.flush()
-
- return self.pipei
-
- def call(self, cmd, **args):
- self.do_cmd(cmd, **args)
- return self._recv()
-
- def _recv(self):
- l = self.pipei.readline()
- self.readerr()
- try:
- l = int(l)
- except:
- self.abort(error.ResponseError(_("unexpected response:"), l))
- return self.pipei.read(l)
-
- def _send(self, data, flush=False):
- self.pipeo.write("%d\n" % len(data))
- if data:
- self.pipeo.write(data)
- if flush:
- self.pipeo.flush()
- self.readerr()
-
- def lock(self):
- self.call("lock")
- return remotelock(self)
-
- def unlock(self):
- self.call("unlock")
-
- def lookup(self, key):
- self.requirecap('lookup', _('look up remote revision'))
- d = self.call("lookup", key=key)
- success, data = d[:-1].split(" ", 1)
- if int(success):
- return bin(data)
- else:
- self.abort(error.RepoError(data))
-
- def heads(self):
- d = self.call("heads")
- try:
- return map(bin, d[:-1].split(" "))
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def branchmap(self):
- d = self.call("branchmap")
- try:
- branchmap = {}
- for branchpart in d.splitlines():
- branchheads = branchpart.split(' ')
- branchname = urllib.unquote(branchheads[0])
- branchheads = [bin(x) for x in branchheads[1:]]
- branchmap[branchname] = branchheads
- return branchmap
- except:
- raise error.ResponseError(_("unexpected response:"), d)
-
- def branches(self, nodes):
- n = " ".join(map(hex, nodes))
- d = self.call("branches", nodes=n)
- try:
- br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
- return br
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def between(self, pairs):
- n = " ".join(["-".join(map(hex, p)) for p in pairs])
- d = self.call("between", pairs=n)
- try:
- p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
- return p
- except:
- self.abort(error.ResponseError(_("unexpected response:"), d))
-
- def changegroup(self, nodes, kind):
- n = " ".join(map(hex, nodes))
- return self.do_cmd("changegroup", roots=n)
-
- def changegroupsubset(self, bases, heads, kind):
- self.requirecap('changegroupsubset', _('look up remote changes'))
- bases = " ".join(map(hex, bases))
- heads = " ".join(map(hex, heads))
- return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
-
- def unbundle(self, cg, heads, source):
- d = self.call("unbundle", heads=' '.join(map(hex, heads)))
- if d:
- # remote may send "unsynced changes"
- self.abort(error.RepoError(_("push refused: %s") % d))
-
- while 1:
- d = cg.read(4096)
- if not d:
- break
- self._send(d)
-
- self._send("", flush=True)
-
- r = self._recv()
- if r:
- # remote may send "unsynced changes"
- self.abort(error.RepoError(_("push failed: %s") % r))
-
- r = self._recv()
- try:
- return int(r)
- except:
- self.abort(error.ResponseError(_("unexpected response:"), r))
-
- def addchangegroup(self, cg, source, url):
- d = self.call("addchangegroup")
- if d:
- self.abort(error.RepoError(_("push refused: %s") % d))
- while 1:
- d = cg.read(4096)
- if not d:
- break
- self.pipeo.write(d)
- self.readerr()
-
- self.pipeo.flush()
-
- self.readerr()
- r = self._recv()
- if not r:
- return 1
- try:
- return int(r)
- except:
- self.abort(error.ResponseError(_("unexpected response:"), r))
-
- def stream_out(self):
- return self.do_cmd('stream_out')
-
-instance = sshrepository
diff --git a/sys/lib/python/mercurial/sshserver.py b/sys/lib/python/mercurial/sshserver.py
deleted file mode 100644
index d5fccbc43..000000000
--- a/sys/lib/python/mercurial/sshserver.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# sshserver.py - ssh protocol server support for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-from node import bin, hex
-import streamclone, util, hook
-import os, sys, tempfile, urllib
-
-class sshserver(object):
- def __init__(self, ui, repo):
- self.ui = ui
- self.repo = repo
- self.lock = None
- self.fin = sys.stdin
- self.fout = sys.stdout
-
- hook.redirect(True)
- sys.stdout = sys.stderr
-
- # Prevent insertion/deletion of CRs
- util.set_binary(self.fin)
- util.set_binary(self.fout)
-
- def getarg(self):
- argline = self.fin.readline()[:-1]
- arg, l = argline.split()
- val = self.fin.read(int(l))
- return arg, val
-
- def respond(self, v):
- self.fout.write("%d\n" % len(v))
- self.fout.write(v)
- self.fout.flush()
-
- def serve_forever(self):
- try:
- while self.serve_one(): pass
- finally:
- if self.lock is not None:
- self.lock.release()
- sys.exit(0)
-
- def serve_one(self):
- cmd = self.fin.readline()[:-1]
- if cmd:
- impl = getattr(self, 'do_' + cmd, None)
- if impl: impl()
- else: self.respond("")
- return cmd != ''
-
- def do_lookup(self):
- arg, key = self.getarg()
- assert arg == 'key'
- try:
- r = hex(self.repo.lookup(key))
- success = 1
- except Exception, inst:
- r = str(inst)
- success = 0
- self.respond("%s %s\n" % (success, r))
-
- def do_branchmap(self):
- branchmap = self.repo.branchmap()
- heads = []
- for branch, nodes in branchmap.iteritems():
- branchname = urllib.quote(branch)
- branchnodes = [hex(node) for node in nodes]
- heads.append('%s %s' % (branchname, ' '.join(branchnodes)))
- self.respond('\n'.join(heads))
-
- def do_heads(self):
- h = self.repo.heads()
- self.respond(" ".join(map(hex, h)) + "\n")
-
- def do_hello(self):
- '''the hello command returns a set of lines describing various
- interesting things about the server, in an RFC822-like format.
- Currently the only one defined is "capabilities", which
- consists of a line in the form:
-
- capabilities: space separated list of tokens
- '''
-
- caps = ['unbundle', 'lookup', 'changegroupsubset', 'branchmap']
- if self.ui.configbool('server', 'uncompressed'):
- caps.append('stream=%d' % self.repo.changelog.version)
- self.respond("capabilities: %s\n" % (' '.join(caps),))
-
- def do_lock(self):
- '''DEPRECATED - allowing remote client to lock repo is not safe'''
-
- self.lock = self.repo.lock()
- self.respond("")
-
- def do_unlock(self):
- '''DEPRECATED'''
-
- if self.lock:
- self.lock.release()
- self.lock = None
- self.respond("")
-
- def do_branches(self):
- arg, nodes = self.getarg()
- nodes = map(bin, nodes.split(" "))
- r = []
- for b in self.repo.branches(nodes):
- r.append(" ".join(map(hex, b)) + "\n")
- self.respond("".join(r))
-
- def do_between(self):
- arg, pairs = self.getarg()
- pairs = [map(bin, p.split("-")) for p in pairs.split(" ")]
- r = []
- for b in self.repo.between(pairs):
- r.append(" ".join(map(hex, b)) + "\n")
- self.respond("".join(r))
-
- def do_changegroup(self):
- nodes = []
- arg, roots = self.getarg()
- nodes = map(bin, roots.split(" "))
-
- cg = self.repo.changegroup(nodes, 'serve')
- while True:
- d = cg.read(4096)
- if not d:
- break
- self.fout.write(d)
-
- self.fout.flush()
-
- def do_changegroupsubset(self):
- argmap = dict([self.getarg(), self.getarg()])
- bases = [bin(n) for n in argmap['bases'].split(' ')]
- heads = [bin(n) for n in argmap['heads'].split(' ')]
-
- cg = self.repo.changegroupsubset(bases, heads, 'serve')
- while True:
- d = cg.read(4096)
- if not d:
- break
- self.fout.write(d)
-
- self.fout.flush()
-
- def do_addchangegroup(self):
- '''DEPRECATED'''
-
- if not self.lock:
- self.respond("not locked")
- return
-
- self.respond("")
- r = self.repo.addchangegroup(self.fin, 'serve', self.client_url())
- self.respond(str(r))
-
- def client_url(self):
- client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
- return 'remote:ssh:' + client
-
- def do_unbundle(self):
- their_heads = self.getarg()[1].split()
-
- def check_heads():
- heads = map(hex, self.repo.heads())
- return their_heads == [hex('force')] or their_heads == heads
-
- # fail early if possible
- if not check_heads():
- self.respond(_('unsynced changes'))
- return
-
- self.respond('')
-
- # write bundle data to temporary file because it can be big
- tempname = fp = None
- try:
- fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
- fp = os.fdopen(fd, 'wb+')
-
- count = int(self.fin.readline())
- while count:
- fp.write(self.fin.read(count))
- count = int(self.fin.readline())
-
- was_locked = self.lock is not None
- if not was_locked:
- self.lock = self.repo.lock()
- try:
- if not check_heads():
- # someone else committed/pushed/unbundled while we
- # were transferring data
- self.respond(_('unsynced changes'))
- return
- self.respond('')
-
- # push can proceed
-
- fp.seek(0)
- r = self.repo.addchangegroup(fp, 'serve', self.client_url())
- self.respond(str(r))
- finally:
- if not was_locked:
- self.lock.release()
- self.lock = None
- finally:
- if fp is not None:
- fp.close()
- if tempname is not None:
- os.unlink(tempname)
-
- def do_stream_out(self):
- try:
- for chunk in streamclone.stream_out(self.repo):
- self.fout.write(chunk)
- self.fout.flush()
- except streamclone.StreamException, inst:
- self.fout.write(str(inst))
- self.fout.flush()
diff --git a/sys/lib/python/mercurial/statichttprepo.py b/sys/lib/python/mercurial/statichttprepo.py
deleted file mode 100644
index 0913d2fbb..000000000
--- a/sys/lib/python/mercurial/statichttprepo.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# statichttprepo.py - simple http repository class for mercurial
-#
-# This provides read-only repo access to repositories exported via static http
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import changelog, byterange, url, error
-import localrepo, manifest, util, store
-import urllib, urllib2, errno
-
-class httprangereader(object):
- def __init__(self, url, opener):
- # we assume opener has HTTPRangeHandler
- self.url = url
- self.pos = 0
- self.opener = opener
- def seek(self, pos):
- self.pos = pos
- def read(self, bytes=None):
- req = urllib2.Request(self.url)
- end = ''
- if bytes:
- end = self.pos + bytes - 1
- req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
-
- try:
- f = self.opener.open(req)
- data = f.read()
- if hasattr(f, 'getcode'):
- # python 2.6+
- code = f.getcode()
- elif hasattr(f, 'code'):
- # undocumented attribute, seems to be set in 2.4 and 2.5
- code = f.code
- else:
- # Don't know how to check, hope for the best.
- code = 206
- except urllib2.HTTPError, inst:
- num = inst.code == 404 and errno.ENOENT or None
- raise IOError(num, inst)
- except urllib2.URLError, inst:
- raise IOError(None, inst.reason[1])
-
- if code == 200:
- # HTTPRangeHandler does nothing if remote does not support
- # Range headers and returns the full entity. Let's slice it.
- if bytes:
- data = data[self.pos:self.pos + bytes]
- else:
- data = data[self.pos:]
- elif bytes:
- data = data[:bytes]
- self.pos += len(data)
- return data
-
-def build_opener(ui, authinfo):
- # urllib cannot handle URLs with embedded user or passwd
- urlopener = url.opener(ui, authinfo)
- urlopener.add_handler(byterange.HTTPRangeHandler())
-
- def opener(base):
- """return a function that opens files over http"""
- p = base
- def o(path, mode="r"):
- f = "/".join((p, urllib.quote(path)))
- return httprangereader(f, urlopener)
- return o
-
- return opener
-
-class statichttprepository(localrepo.localrepository):
- def __init__(self, ui, path):
- self._url = path
- self.ui = ui
-
- self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg")
-
- opener = build_opener(ui, authinfo)
- self.opener = opener(self.path)
-
- # find requirements
- try:
- requirements = self.opener("requires").read().splitlines()
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
- # check if it is a non-empty old-style repository
- try:
- self.opener("00changelog.i").read(1)
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
- # we do not care about empty old-style repositories here
- msg = _("'%s' does not appear to be an hg repository") % path
- raise error.RepoError(msg)
- requirements = []
-
- # check them
- for r in requirements:
- if r not in self.supported:
- raise error.RepoError(_("requirement '%s' not supported") % r)
-
- # setup store
- def pjoin(a, b):
- return a + '/' + b
- self.store = store.store(requirements, self.path, opener, pjoin)
- self.spath = self.store.path
- self.sopener = self.store.opener
- self.sjoin = self.store.join
-
- self.manifest = manifest.manifest(self.sopener)
- self.changelog = changelog.changelog(self.sopener)
- self._tags = None
- self.nodetagscache = None
- self.encodepats = None
- self.decodepats = None
-
- def url(self):
- return self._url
-
- def local(self):
- return False
-
- def lock(self, wait=True):
- raise util.Abort(_('cannot lock static-http repository'))
-
-def instance(ui, path, create):
- if create:
- raise util.Abort(_('cannot create new static-http repository'))
- return statichttprepository(ui, path[7:])
diff --git a/sys/lib/python/mercurial/store.py b/sys/lib/python/mercurial/store.py
deleted file mode 100644
index eec9dd519..000000000
--- a/sys/lib/python/mercurial/store.py
+++ /dev/null
@@ -1,333 +0,0 @@
-# store.py - repository store handling for Mercurial
-#
-# Copyright 2008 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import osutil, util
-import os, stat
-
-_sha = util.sha1
-
-# This avoids a collision between a file named foo and a dir named
-# foo.i or foo.d
-def encodedir(path):
- if not path.startswith('data/'):
- return path
- return (path
- .replace(".hg/", ".hg.hg/")
- .replace(".i/", ".i.hg/")
- .replace(".d/", ".d.hg/"))
-
-def decodedir(path):
- if not path.startswith('data/'):
- return path
- return (path
- .replace(".d.hg/", ".d/")
- .replace(".i.hg/", ".i/")
- .replace(".hg.hg/", ".hg/"))
-
-def _buildencodefun():
- e = '_'
- win_reserved = [ord(x) for x in '\\:*?"<>|']
- cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
- for x in (range(32) + range(126, 256) + win_reserved):
- cmap[chr(x)] = "~%02x" % x
- for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
- cmap[chr(x)] = e + chr(x).lower()
- dmap = {}
- for k, v in cmap.iteritems():
- dmap[v] = k
- def decode(s):
- i = 0
- while i < len(s):
- for l in xrange(1, 4):
- try:
- yield dmap[s[i:i+l]]
- i += l
- break
- except KeyError:
- pass
- else:
- raise KeyError
- return (lambda s: "".join([cmap[c] for c in encodedir(s)]),
- lambda s: decodedir("".join(list(decode(s)))))
-
-encodefilename, decodefilename = _buildencodefun()
-
-def _build_lower_encodefun():
- win_reserved = [ord(x) for x in '\\:*?"<>|']
- cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
- for x in (range(32) + range(126, 256) + win_reserved):
- cmap[chr(x)] = "~%02x" % x
- for x in range(ord("A"), ord("Z")+1):
- cmap[chr(x)] = chr(x).lower()
- return lambda s: "".join([cmap[c] for c in s])
-
-lowerencode = _build_lower_encodefun()
-
-_windows_reserved_filenames = '''con prn aux nul
- com1 com2 com3 com4 com5 com6 com7 com8 com9
- lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
-def auxencode(path):
- res = []
- for n in path.split('/'):
- if n:
- base = n.split('.')[0]
- if base and (base in _windows_reserved_filenames):
- # encode third letter ('aux' -> 'au~78')
- ec = "~%02x" % ord(n[2])
- n = n[0:2] + ec + n[3:]
- if n[-1] in '. ':
- # encode last period or space ('foo...' -> 'foo..~2e')
- n = n[:-1] + "~%02x" % ord(n[-1])
- res.append(n)
- return '/'.join(res)
-
-MAX_PATH_LEN_IN_HGSTORE = 120
-DIR_PREFIX_LEN = 8
-_MAX_SHORTENED_DIRS_LEN = 8 * (DIR_PREFIX_LEN + 1) - 4
-def hybridencode(path):
- '''encodes path with a length limit
-
- Encodes all paths that begin with 'data/', according to the following.
-
- Default encoding (reversible):
-
- Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
- characters are encoded as '~xx', where xx is the two digit hex code
- of the character (see encodefilename).
- Relevant path components consisting of Windows reserved filenames are
- masked by encoding the third character ('aux' -> 'au~78', see auxencode).
-
- Hashed encoding (not reversible):
-
- If the default-encoded path is longer than MAX_PATH_LEN_IN_HGSTORE, a
- non-reversible hybrid hashing of the path is done instead.
- This encoding uses up to DIR_PREFIX_LEN characters of all directory
- levels of the lowerencoded path, but not more levels than can fit into
- _MAX_SHORTENED_DIRS_LEN.
- Then follows the filler followed by the sha digest of the full path.
- The filler is the beginning of the basename of the lowerencoded path
- (the basename is everything after the last path separator). The filler
- is as long as possible, filling in characters from the basename until
- the encoded path has MAX_PATH_LEN_IN_HGSTORE characters (or all chars
- of the basename have been taken).
- The extension (e.g. '.i' or '.d') is preserved.
-
- The string 'data/' at the beginning is replaced with 'dh/', if the hashed
- encoding was used.
- '''
- if not path.startswith('data/'):
- return path
- # escape directories ending with .i and .d
- path = encodedir(path)
- ndpath = path[len('data/'):]
- res = 'data/' + auxencode(encodefilename(ndpath))
- if len(res) > MAX_PATH_LEN_IN_HGSTORE:
- digest = _sha(path).hexdigest()
- aep = auxencode(lowerencode(ndpath))
- _root, ext = os.path.splitext(aep)
- parts = aep.split('/')
- basename = parts[-1]
- sdirs = []
- for p in parts[:-1]:
- d = p[:DIR_PREFIX_LEN]
- if d[-1] in '. ':
- # Windows can't access dirs ending in period or space
- d = d[:-1] + '_'
- t = '/'.join(sdirs) + '/' + d
- if len(t) > _MAX_SHORTENED_DIRS_LEN:
- break
- sdirs.append(d)
- dirs = '/'.join(sdirs)
- if len(dirs) > 0:
- dirs += '/'
- res = 'dh/' + dirs + digest + ext
- space_left = MAX_PATH_LEN_IN_HGSTORE - len(res)
- if space_left > 0:
- filler = basename[:space_left]
- res = 'dh/' + dirs + filler + digest + ext
- return res
-
-def _calcmode(path):
- try:
- # files in .hg/ will be created using this mode
- mode = os.stat(path).st_mode
- # avoid some useless chmods
- if (0777 & ~util.umask) == (0777 & mode):
- mode = None
- except OSError:
- mode = None
- return mode
-
-_data = 'data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
-
-class basicstore(object):
- '''base class for local repository stores'''
- def __init__(self, path, opener, pathjoiner):
- self.pathjoiner = pathjoiner
- self.path = path
- self.createmode = _calcmode(path)
- op = opener(self.path)
- op.createmode = self.createmode
- self.opener = lambda f, *args, **kw: op(encodedir(f), *args, **kw)
-
- def join(self, f):
- return self.pathjoiner(self.path, encodedir(f))
-
- def _walk(self, relpath, recurse):
- '''yields (unencoded, encoded, size)'''
- path = self.pathjoiner(self.path, relpath)
- striplen = len(self.path) + len(os.sep)
- l = []
- if os.path.isdir(path):
- visit = [path]
- while visit:
- p = visit.pop()
- for f, kind, st in osutil.listdir(p, stat=True):
- fp = self.pathjoiner(p, f)
- if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
- n = util.pconvert(fp[striplen:])
- l.append((decodedir(n), n, st.st_size))
- elif kind == stat.S_IFDIR and recurse:
- visit.append(fp)
- return sorted(l)
-
- def datafiles(self):
- return self._walk('data', True)
-
- def walk(self):
- '''yields (unencoded, encoded, size)'''
- # yield data files first
- for x in self.datafiles():
- yield x
- # yield manifest before changelog
- for x in reversed(self._walk('', False)):
- yield x
-
- def copylist(self):
- return ['requires'] + _data.split()
-
-class encodedstore(basicstore):
- def __init__(self, path, opener, pathjoiner):
- self.pathjoiner = pathjoiner
- self.path = self.pathjoiner(path, 'store')
- self.createmode = _calcmode(self.path)
- op = opener(self.path)
- op.createmode = self.createmode
- self.opener = lambda f, *args, **kw: op(encodefilename(f), *args, **kw)
-
- def datafiles(self):
- for a, b, size in self._walk('data', True):
- try:
- a = decodefilename(a)
- except KeyError:
- a = None
- yield a, b, size
-
- def join(self, f):
- return self.pathjoiner(self.path, encodefilename(f))
-
- def copylist(self):
- return (['requires', '00changelog.i'] +
- [self.pathjoiner('store', f) for f in _data.split()])
-
-class fncache(object):
- # the filename used to be partially encoded
- # hence the encodedir/decodedir dance
- def __init__(self, opener):
- self.opener = opener
- self.entries = None
-
- def _load(self):
- '''fill the entries from the fncache file'''
- self.entries = set()
- try:
- fp = self.opener('fncache', mode='rb')
- except IOError:
- # skip nonexistent file
- return
- for n, line in enumerate(fp):
- if (len(line) < 2) or (line[-1] != '\n'):
- t = _('invalid entry in fncache, line %s') % (n + 1)
- raise util.Abort(t)
- self.entries.add(decodedir(line[:-1]))
- fp.close()
-
- def rewrite(self, files):
- fp = self.opener('fncache', mode='wb')
- for p in files:
- fp.write(encodedir(p) + '\n')
- fp.close()
- self.entries = set(files)
-
- def add(self, fn):
- if self.entries is None:
- self._load()
- self.opener('fncache', 'ab').write(encodedir(fn) + '\n')
-
- def __contains__(self, fn):
- if self.entries is None:
- self._load()
- return fn in self.entries
-
- def __iter__(self):
- if self.entries is None:
- self._load()
- return iter(self.entries)
-
-class fncachestore(basicstore):
- def __init__(self, path, opener, pathjoiner):
- self.pathjoiner = pathjoiner
- self.path = self.pathjoiner(path, 'store')
- self.createmode = _calcmode(self.path)
- op = opener(self.path)
- op.createmode = self.createmode
- fnc = fncache(op)
- self.fncache = fnc
-
- def fncacheopener(path, mode='r', *args, **kw):
- if (mode not in ('r', 'rb')
- and path.startswith('data/')
- and path not in fnc):
- fnc.add(path)
- return op(hybridencode(path), mode, *args, **kw)
- self.opener = fncacheopener
-
- def join(self, f):
- return self.pathjoiner(self.path, hybridencode(f))
-
- def datafiles(self):
- rewrite = False
- existing = []
- pjoin = self.pathjoiner
- spath = self.path
- for f in self.fncache:
- ef = hybridencode(f)
- try:
- st = os.stat(pjoin(spath, ef))
- yield f, ef, st.st_size
- existing.append(f)
- except OSError:
- # nonexistent entry
- rewrite = True
- if rewrite:
- # rewrite fncache to remove nonexistent entries
- # (may be caused by rollback / strip)
- self.fncache.rewrite(existing)
-
- def copylist(self):
- d = _data + ' dh fncache'
- return (['requires', '00changelog.i'] +
- [self.pathjoiner('store', f) for f in d.split()])
-
-def store(requirements, path, opener, pathjoiner=None):
- pathjoiner = pathjoiner or os.path.join
- if 'store' in requirements:
- if 'fncache' in requirements:
- return fncachestore(path, opener, pathjoiner)
- return encodedstore(path, opener, pathjoiner)
- return basicstore(path, opener, pathjoiner)
diff --git a/sys/lib/python/mercurial/streamclone.py b/sys/lib/python/mercurial/streamclone.py
deleted file mode 100644
index 82cd2f730..000000000
--- a/sys/lib/python/mercurial/streamclone.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# streamclone.py - streaming clone server support for mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import util, error
-from i18n import _
-
-from mercurial import store
-
-class StreamException(Exception):
- def __init__(self, code):
- Exception.__init__(self)
- self.code = code
- def __str__(self):
- return '%i\n' % self.code
-
-# if server supports streaming clone, it advertises "stream"
-# capability with value that is version+flags of repo it is serving.
-# client only streams if it can read that repo format.
-
-# stream file format is simple.
-#
-# server writes out line that says how many files, how many total
-# bytes. separator is ascii space, byte counts are strings.
-#
-# then for each file:
-#
-# server writes out line that says filename, how many bytes in
-# file. separator is ascii nul, byte count is string.
-#
-# server writes out raw file data.
-
-def stream_out(repo, untrusted=False):
- '''stream out all metadata files in repository.
- writes to file-like object, must support write() and optional flush().'''
-
- if not repo.ui.configbool('server', 'uncompressed', untrusted=untrusted):
- raise StreamException(1)
-
- entries = []
- total_bytes = 0
- try:
- # get consistent snapshot of repo, lock during scan
- lock = repo.lock()
- try:
- repo.ui.debug(_('scanning\n'))
- for name, ename, size in repo.store.walk():
- # for backwards compat, name was partially encoded
- entries.append((store.encodedir(name), size))
- total_bytes += size
- finally:
- lock.release()
- except error.LockError:
- raise StreamException(2)
-
- yield '0\n'
- repo.ui.debug(_('%d files, %d bytes to transfer\n') %
- (len(entries), total_bytes))
- yield '%d %d\n' % (len(entries), total_bytes)
- for name, size in entries:
- repo.ui.debug(_('sending %s (%d bytes)\n') % (name, size))
- yield '%s\0%d\n' % (name, size)
- for chunk in util.filechunkiter(repo.sopener(name), limit=size):
- yield chunk
diff --git a/sys/lib/python/mercurial/strutil.py b/sys/lib/python/mercurial/strutil.py
deleted file mode 100644
index fab37c419..000000000
--- a/sys/lib/python/mercurial/strutil.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# strutil.py - string utilities for Mercurial
-#
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-def findall(haystack, needle, start=0, end=None):
- if end is None:
- end = len(haystack)
- if end < 0:
- end += len(haystack)
- if start < 0:
- start += len(haystack)
- while start < end:
- c = haystack.find(needle, start, end)
- if c == -1:
- break
- yield c
- start = c + 1
-
-def rfindall(haystack, needle, start=0, end=None):
- if end is None:
- end = len(haystack)
- if end < 0:
- end += len(haystack)
- if start < 0:
- start += len(haystack)
- while end >= 0:
- c = haystack.rfind(needle, start, end)
- if c == -1:
- break
- yield c
- end = c - 1
diff --git a/sys/lib/python/mercurial/subrepo.py b/sys/lib/python/mercurial/subrepo.py
deleted file mode 100644
index 0eb313fe0..000000000
--- a/sys/lib/python/mercurial/subrepo.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# subrepo.py - sub-repository handling for Mercurial
-#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import errno, os
-from i18n import _
-import config, util, node, error
-hg = None
-
-nullstate = ('', '')
-
-def state(ctx):
- p = config.config()
- def read(f, sections=None, remap=None):
- if f in ctx:
- try:
- p.parse(f, ctx[f].data(), sections, remap)
- except IOError, err:
- if err.errno != errno.ENOENT:
- raise
- read('.hgsub')
-
- rev = {}
- if '.hgsubstate' in ctx:
- try:
- for l in ctx['.hgsubstate'].data().splitlines():
- revision, path = l.split()
- rev[path] = revision
- except IOError, err:
- if err.errno != errno.ENOENT:
- raise
-
- state = {}
- for path, src in p[''].items():
- state[path] = (src, rev.get(path, ''))
-
- return state
-
-def writestate(repo, state):
- repo.wwrite('.hgsubstate',
- ''.join(['%s %s\n' % (state[s][1], s)
- for s in sorted(state)]), '')
-
-def submerge(repo, wctx, mctx, actx):
- if mctx == actx: # backwards?
- actx = wctx.p1()
- s1 = wctx.substate
- s2 = mctx.substate
- sa = actx.substate
- sm = {}
-
- for s, l in s1.items():
- a = sa.get(s, nullstate)
- if s in s2:
- r = s2[s]
- if l == r or r == a: # no change or local is newer
- sm[s] = l
- continue
- elif l == a: # other side changed
- wctx.sub(s).get(r)
- sm[s] = r
- elif l[0] != r[0]: # sources differ
- if repo.ui.promptchoice(
- _(' subrepository sources for %s differ\n'
- 'use (l)ocal source (%s) or (r)emote source (%s)?')
- % (s, l[0], r[0]),
- (_('&Local'), _('&Remote')), 0):
- wctx.sub(s).get(r)
- sm[s] = r
- elif l[1] == a[1]: # local side is unchanged
- wctx.sub(s).get(r)
- sm[s] = r
- else:
- wctx.sub(s).merge(r)
- sm[s] = l
- elif l == a: # remote removed, local unchanged
- wctx.sub(s).remove()
- else:
- if repo.ui.promptchoice(
- _(' local changed subrepository %s which remote removed\n'
- 'use (c)hanged version or (d)elete?') % s,
- (_('&Changed'), _('&Delete')), 0):
- wctx.sub(s).remove()
-
- for s, r in s2.items():
- if s in s1:
- continue
- elif s not in sa:
- wctx.sub(s).get(r)
- sm[s] = r
- elif r != sa[s]:
- if repo.ui.promptchoice(
- _(' remote changed subrepository %s which local removed\n'
- 'use (c)hanged version or (d)elete?') % s,
- (_('&Changed'), _('&Delete')), 0) == 0:
- wctx.sub(s).get(r)
- sm[s] = r
-
- # record merged .hgsubstate
- writestate(repo, sm)
-
-def _abssource(repo, push=False):
- if hasattr(repo, '_subparent'):
- source = repo._subsource
- if source.startswith('/') or '://' in source:
- return source
- parent = _abssource(repo._subparent)
- if '://' in parent:
- if parent[-1] == '/':
- parent = parent[:-1]
- return parent + '/' + source
- return os.path.join(parent, repo._subsource)
- if push and repo.ui.config('paths', 'default-push'):
- return repo.ui.config('paths', 'default-push', repo.root)
- return repo.ui.config('paths', 'default', repo.root)
-
-def subrepo(ctx, path):
- # subrepo inherently violates our import layering rules
- # because it wants to make repo objects from deep inside the stack
- # so we manually delay the circular imports to not break
- # scripts that don't use our demand-loading
- global hg
- import hg as h
- hg = h
-
- util.path_auditor(ctx._repo.root)(path)
- state = ctx.substate.get(path, nullstate)
- if state[0].startswith('['): # future expansion
- raise error.Abort('unknown subrepo source %s' % state[0])
- return hgsubrepo(ctx, path, state)
-
-class hgsubrepo(object):
- def __init__(self, ctx, path, state):
- self._path = path
- self._state = state
- r = ctx._repo
- root = r.wjoin(path)
- if os.path.exists(os.path.join(root, '.hg')):
- self._repo = hg.repository(r.ui, root)
- else:
- util.makedirs(root)
- self._repo = hg.repository(r.ui, root, create=True)
- self._repo._subparent = r
- self._repo._subsource = state[0]
-
- def dirty(self):
- r = self._state[1]
- if r == '':
- return True
- w = self._repo[None]
- if w.p1() != self._repo[r]: # version checked out changed
- return True
- return w.dirty() # working directory changed
-
- def commit(self, text, user, date):
- n = self._repo.commit(text, user, date)
- if not n:
- return self._repo['.'].hex() # different version checked out
- return node.hex(n)
-
- def remove(self):
- # we can't fully delete the repository as it may contain
- # local-only history
- self._repo.ui.note(_('removing subrepo %s\n') % self._path)
- hg.clean(self._repo, node.nullid, False)
-
- def get(self, state):
- source, revision = state
- try:
- self._repo.lookup(revision)
- except error.RepoError:
- self._repo._subsource = source
- self._repo.ui.status(_('pulling subrepo %s\n') % self._path)
- srcurl = _abssource(self._repo)
- other = hg.repository(self._repo.ui, srcurl)
- self._repo.pull(other)
-
- hg.clean(self._repo, revision, False)
-
- def merge(self, state):
- hg.merge(self._repo, state[1], remind=False)
-
- def push(self, force):
- # push subrepos depth-first for coherent ordering
- c = self._repo['']
- subs = c.substate # only repos that are committed
- for s in sorted(subs):
- c.sub(s).push(force)
-
- self._repo.ui.status(_('pushing subrepo %s\n') % self._path)
- dsturl = _abssource(self._repo, True)
- other = hg.repository(self._repo.ui, dsturl)
- self._repo.push(other, force)
-
diff --git a/sys/lib/python/mercurial/tags.py b/sys/lib/python/mercurial/tags.py
deleted file mode 100644
index 41a2ddc9a..000000000
--- a/sys/lib/python/mercurial/tags.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# tags.py - read tag info from local repository
-#
-# Copyright 2009 Matt Mackall <mpm@selenic.com>
-# Copyright 2009 Greg Ward <greg@gerg.ca>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-# Currently this module only deals with reading and caching tags.
-# Eventually, it could take care of updating (adding/removing/moving)
-# tags too.
-
-import os
-from node import nullid, bin, hex, short
-from i18n import _
-import encoding
-import error
-
-def _debugalways(ui, *msg):
- ui.write(*msg)
-
-def _debugconditional(ui, *msg):
- ui.debug(*msg)
-
-def _debugnever(ui, *msg):
- pass
-
-_debug = _debugalways
-_debug = _debugnever
-
-def findglobaltags1(ui, repo, alltags, tagtypes):
- '''Find global tags in repo by reading .hgtags from every head that
- has a distinct version of it. Updates the dicts alltags, tagtypes
- in place: alltags maps tag name to (node, hist) pair (see _readtags()
- below), and tagtypes maps tag name to tag type ('global' in this
- case).'''
-
- seen = set()
- fctx = None
- ctxs = [] # list of filectx
- for node in repo.heads():
- try:
- fnode = repo[node].filenode('.hgtags')
- except error.LookupError:
- continue
- if fnode not in seen:
- seen.add(fnode)
- if not fctx:
- fctx = repo.filectx('.hgtags', fileid=fnode)
- else:
- fctx = fctx.filectx(fnode)
- ctxs.append(fctx)
-
- # read the tags file from each head, ending with the tip
- for fctx in reversed(ctxs):
- filetags = _readtags(
- ui, repo, fctx.data().splitlines(), fctx)
- _updatetags(filetags, "global", alltags, tagtypes)
-
-def findglobaltags2(ui, repo, alltags, tagtypes):
- '''Same as findglobaltags1(), but with caching.'''
- # This is so we can be lazy and assume alltags contains only global
- # tags when we pass it to _writetagcache().
- assert len(alltags) == len(tagtypes) == 0, \
- "findglobaltags() should be called first"
-
- (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
- if cachetags is not None:
- assert not shouldwrite
- # XXX is this really 100% correct? are there oddball special
- # cases where a global tag should outrank a local tag but won't,
- # because cachetags does not contain rank info?
- _updatetags(cachetags, 'global', alltags, tagtypes)
- return
-
- _debug(ui, "reading tags from %d head(s): %s\n"
- % (len(heads), map(short, reversed(heads))))
- seen = set() # set of fnode
- fctx = None
- for head in reversed(heads): # oldest to newest
- assert head in repo.changelog.nodemap, \
- "tag cache returned bogus head %s" % short(head)
-
- fnode = tagfnode.get(head)
- if fnode and fnode not in seen:
- seen.add(fnode)
- if not fctx:
- fctx = repo.filectx('.hgtags', fileid=fnode)
- else:
- fctx = fctx.filectx(fnode)
-
- filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
- _updatetags(filetags, 'global', alltags, tagtypes)
-
- # and update the cache (if necessary)
- if shouldwrite:
- _writetagcache(ui, repo, heads, tagfnode, alltags)
-
-# Set this to findglobaltags1 to disable tag caching.
-findglobaltags = findglobaltags1
-
-def readlocaltags(ui, repo, alltags, tagtypes):
- '''Read local tags in repo. Update alltags and tagtypes.'''
- try:
- # localtags is in the local encoding; re-encode to UTF-8 on
- # input for consistency with the rest of this module.
- data = repo.opener("localtags").read()
- filetags = _readtags(
- ui, repo, data.splitlines(), "localtags",
- recode=encoding.fromlocal)
- _updatetags(filetags, "local", alltags, tagtypes)
- except IOError:
- pass
-
-def _readtags(ui, repo, lines, fn, recode=None):
- '''Read tag definitions from a file (or any source of lines).
- Return a mapping from tag name to (node, hist): node is the node id
- from the last line read for that name, and hist is the list of node
- ids previously associated with it (in file order). All node ids are
- binary, not hex.'''
-
- filetags = {} # map tag name to (node, hist)
- count = 0
-
- def warn(msg):
- ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
-
- for line in lines:
- count += 1
- if not line:
- continue
- try:
- (nodehex, name) = line.split(" ", 1)
- except ValueError:
- warn(_("cannot parse entry"))
- continue
- name = name.strip()
- if recode:
- name = recode(name)
- try:
- nodebin = bin(nodehex)
- except TypeError:
- warn(_("node '%s' is not well formed") % nodehex)
- continue
- if nodebin not in repo.changelog.nodemap:
- # silently ignore as pull -r might cause this
- continue
-
- # update filetags
- hist = []
- if name in filetags:
- n, hist = filetags[name]
- hist.append(n)
- filetags[name] = (nodebin, hist)
- return filetags
-
-def _updatetags(filetags, tagtype, alltags, tagtypes):
- '''Incorporate the tag info read from one file into the two
- dictionaries, alltags and tagtypes, that contain all tag
- info (global across all heads plus local).'''
-
- for name, nodehist in filetags.iteritems():
- if name not in alltags:
- alltags[name] = nodehist
- tagtypes[name] = tagtype
- continue
-
- # we prefer alltags[name] if:
- # it supercedes us OR
- # mutual supercedes and it has a higher rank
- # otherwise we win because we're tip-most
- anode, ahist = nodehist
- bnode, bhist = alltags[name]
- if (bnode != anode and anode in bhist and
- (bnode not in ahist or len(bhist) > len(ahist))):
- anode = bnode
- ahist.extend([n for n in bhist if n not in ahist])
- alltags[name] = anode, ahist
- tagtypes[name] = tagtype
-
-
-# The tag cache only stores info about heads, not the tag contents
-# from each head. I.e. it doesn't try to squeeze out the maximum
-# performance, but is simpler has a better chance of actually
-# working correctly. And this gives the biggest performance win: it
-# avoids looking up .hgtags in the manifest for every head, and it
-# can avoid calling heads() at all if there have been no changes to
-# the repo.
-
-def _readtagcache(ui, repo):
- '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
- shouldwrite). If the cache is completely up-to-date, cachetags is a
- dict of the form returned by _readtags(); otherwise, it is None and
- heads and fnodes are set. In that case, heads is the list of all
- heads currently in the repository (ordered from tip to oldest) and
- fnodes is a mapping from head to .hgtags filenode. If those two are
- set, caller is responsible for reading tag info from each head.'''
-
- try:
- cachefile = repo.opener('tags.cache', 'r')
- _debug(ui, 'reading tag cache from %s\n' % cachefile.name)
- except IOError:
- cachefile = None
-
- # The cache file consists of lines like
- # <headrev> <headnode> [<tagnode>]
- # where <headrev> and <headnode> redundantly identify a repository
- # head from the time the cache was written, and <tagnode> is the
- # filenode of .hgtags on that head. Heads with no .hgtags file will
- # have no <tagnode>. The cache is ordered from tip to oldest (which
- # is part of why <headrev> is there: a quick visual check is all
- # that's required to ensure correct order).
- #
- # This information is enough to let us avoid the most expensive part
- # of finding global tags, which is looking up <tagnode> in the
- # manifest for each head.
- cacherevs = [] # list of headrev
- cacheheads = [] # list of headnode
- cachefnode = {} # map headnode to filenode
- if cachefile:
- for line in cachefile:
- if line == "\n":
- break
- line = line.rstrip().split()
- cacherevs.append(int(line[0]))
- headnode = bin(line[1])
- cacheheads.append(headnode)
- if len(line) == 3:
- fnode = bin(line[2])
- cachefnode[headnode] = fnode
-
- tipnode = repo.changelog.tip()
- tiprev = len(repo.changelog) - 1
-
- # Case 1 (common): tip is the same, so nothing has changed.
- # (Unchanged tip trivially means no changesets have been added.
- # But, thanks to localrepository.destroyed(), it also means none
- # have been destroyed by strip or rollback.)
- if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
- _debug(ui, "tag cache: tip unchanged\n")
- tags = _readtags(ui, repo, cachefile, cachefile.name)
- cachefile.close()
- return (None, None, tags, False)
- if cachefile:
- cachefile.close() # ignore rest of file
-
- repoheads = repo.heads()
- # Case 2 (uncommon): empty repo; get out quickly and don't bother
- # writing an empty cache.
- if repoheads == [nullid]:
- return ([], {}, {}, False)
-
- # Case 3 (uncommon): cache file missing or empty.
- if not cacheheads:
- _debug(ui, 'tag cache: cache file missing or empty\n')
-
- # Case 4 (uncommon): tip rev decreased. This should only happen
- # when we're called from localrepository.destroyed(). Refresh the
- # cache so future invocations will not see disappeared heads in the
- # cache.
- elif cacheheads and tiprev < cacherevs[0]:
- _debug(ui,
- 'tag cache: tip rev decremented (from %d to %d), '
- 'so we must be destroying nodes\n'
- % (cacherevs[0], tiprev))
-
- # Case 5 (common): tip has changed, so we've added/replaced heads.
- else:
- _debug(ui,
- 'tag cache: tip has changed (%d:%s); must find new heads\n'
- % (tiprev, short(tipnode)))
-
- # Luckily, the code to handle cases 3, 4, 5 is the same. So the
- # above if/elif/else can disappear once we're confident this thing
- # actually works and we don't need the debug output.
-
- # N.B. in case 4 (nodes destroyed), "new head" really means "newly
- # exposed".
- newheads = [head
- for head in repoheads
- if head not in set(cacheheads)]
- _debug(ui, 'tag cache: found %d head(s) not in cache: %s\n'
- % (len(newheads), map(short, newheads)))
-
- # Now we have to lookup the .hgtags filenode for every new head.
- # This is the most expensive part of finding tags, so performance
- # depends primarily on the size of newheads. Worst case: no cache
- # file, so newheads == repoheads.
- for head in newheads:
- cctx = repo[head]
- try:
- fnode = cctx.filenode('.hgtags')
- cachefnode[head] = fnode
- except error.LookupError:
- # no .hgtags file on this head
- pass
-
- # Caller has to iterate over all heads, but can use the filenodes in
- # cachefnode to get to each .hgtags revision quickly.
- return (repoheads, cachefnode, None, True)
-
-def _writetagcache(ui, repo, heads, tagfnode, cachetags):
-
- cachefile = repo.opener('tags.cache', 'w', atomictemp=True)
- _debug(ui, 'writing cache file %s\n' % cachefile.name)
-
- realheads = repo.heads() # for sanity checks below
- for head in heads:
- # temporary sanity checks; these can probably be removed
- # once this code has been in crew for a few weeks
- assert head in repo.changelog.nodemap, \
- 'trying to write non-existent node %s to tag cache' % short(head)
- assert head in realheads, \
- 'trying to write non-head %s to tag cache' % short(head)
- assert head != nullid, \
- 'trying to write nullid to tag cache'
-
- # This can't fail because of the first assert above. When/if we
- # remove that assert, we might want to catch LookupError here
- # and downgrade it to a warning.
- rev = repo.changelog.rev(head)
-
- fnode = tagfnode.get(head)
- if fnode:
- cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
- else:
- cachefile.write('%d %s\n' % (rev, hex(head)))
-
- # Tag names in the cache are in UTF-8 -- which is the whole reason
- # we keep them in UTF-8 throughout this module. If we converted
- # them local encoding on input, we would lose info writing them to
- # the cache.
- cachefile.write('\n')
- for (name, (node, hist)) in cachetags.iteritems():
- cachefile.write("%s %s\n" % (hex(node), name))
-
- cachefile.rename()
- cachefile.close()
diff --git a/sys/lib/python/mercurial/templatefilters.py b/sys/lib/python/mercurial/templatefilters.py
deleted file mode 100644
index 34358d26b..000000000
--- a/sys/lib/python/mercurial/templatefilters.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# template-filters.py - common template expansion filters
-#
-# Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import cgi, re, os, time, urllib, textwrap
-import util, encoding
-
-def stringify(thing):
- '''turn nested template iterator into string.'''
- if hasattr(thing, '__iter__') and not isinstance(thing, str):
- return "".join([stringify(t) for t in thing if t is not None])
- return str(thing)
-
-agescales = [("second", 1),
- ("minute", 60),
- ("hour", 3600),
- ("day", 3600 * 24),
- ("week", 3600 * 24 * 7),
- ("month", 3600 * 24 * 30),
- ("year", 3600 * 24 * 365)]
-
-agescales.reverse()
-
-def age(date):
- '''turn a (timestamp, tzoff) tuple into an age string.'''
-
- def plural(t, c):
- if c == 1:
- return t
- return t + "s"
- def fmt(t, c):
- return "%d %s" % (c, plural(t, c))
-
- now = time.time()
- then = date[0]
- if then > now:
- return 'in the future'
-
- delta = max(1, int(now - then))
- for t, s in agescales:
- n = delta // s
- if n >= 2 or s == 1:
- return fmt(t, n)
-
-para_re = None
-space_re = None
-
-def fill(text, width):
- '''fill many paragraphs.'''
- global para_re, space_re
- if para_re is None:
- para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M)
- space_re = re.compile(r' +')
-
- def findparas():
- start = 0
- while True:
- m = para_re.search(text, start)
- if not m:
- w = len(text)
- while w > start and text[w-1].isspace(): w -= 1
- yield text[start:w], text[w:]
- break
- yield text[start:m.start(0)], m.group(1)
- start = m.end(1)
-
- return "".join([space_re.sub(' ', textwrap.fill(para, width)) + rest
- for para, rest in findparas()])
-
-def firstline(text):
- '''return the first line of text'''
- try:
- return text.splitlines(True)[0].rstrip('\r\n')
- except IndexError:
- return ''
-
-def nl2br(text):
- '''replace raw newlines with xhtml line breaks.'''
- return text.replace('\n', '<br/>\n')
-
-def obfuscate(text):
- text = unicode(text, encoding.encoding, 'replace')
- return ''.join(['&#%d;' % ord(c) for c in text])
-
-def domain(author):
- '''get domain of author, or empty string if none.'''
- f = author.find('@')
- if f == -1: return ''
- author = author[f+1:]
- f = author.find('>')
- if f >= 0: author = author[:f]
- return author
-
-def person(author):
- '''get name of author, or else username.'''
- if not '@' in author: return author
- f = author.find('<')
- if f == -1: return util.shortuser(author)
- return author[:f].rstrip()
-
-def indent(text, prefix):
- '''indent each non-empty line of text after first with prefix.'''
- lines = text.splitlines()
- num_lines = len(lines)
- def indenter():
- for i in xrange(num_lines):
- l = lines[i]
- if i and l.strip():
- yield prefix
- yield l
- if i < num_lines - 1 or text.endswith('\n'):
- yield '\n'
- return "".join(indenter())
-
-def permissions(flags):
- if "l" in flags:
- return "lrwxrwxrwx"
- if "x" in flags:
- return "-rwxr-xr-x"
- return "-rw-r--r--"
-
-def xmlescape(text):
- text = (text
- .replace('&', '&amp;')
- .replace('<', '&lt;')
- .replace('>', '&gt;')
- .replace('"', '&quot;')
- .replace("'", '&#39;')) # &apos; invalid in HTML
- return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', ' ', text)
-
-_escapes = [
- ('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'), ('\n', '\\n'),
- ('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b'),
-]
-
-def jsonescape(s):
- for k, v in _escapes:
- s = s.replace(k, v)
- return s
-
-def json(obj):
- if obj is None or obj is False or obj is True:
- return {None: 'null', False: 'false', True: 'true'}[obj]
- elif isinstance(obj, int) or isinstance(obj, float):
- return str(obj)
- elif isinstance(obj, str):
- return '"%s"' % jsonescape(obj)
- elif isinstance(obj, unicode):
- return json(obj.encode('utf-8'))
- elif hasattr(obj, 'keys'):
- out = []
- for k, v in obj.iteritems():
- s = '%s: %s' % (json(k), json(v))
- out.append(s)
- return '{' + ', '.join(out) + '}'
- elif hasattr(obj, '__iter__'):
- out = []
- for i in obj:
- out.append(json(i))
- return '[' + ', '.join(out) + ']'
- else:
- raise TypeError('cannot encode type %s' % obj.__class__.__name__)
-
-def stripdir(text):
- '''Treat the text as path and strip a directory level, if possible.'''
- dir = os.path.dirname(text)
- if dir == "":
- return os.path.basename(text)
- else:
- return dir
-
-def nonempty(str):
- return str or "(none)"
-
-filters = {
- "addbreaks": nl2br,
- "basename": os.path.basename,
- "stripdir": stripdir,
- "age": age,
- "date": lambda x: util.datestr(x),
- "domain": domain,
- "email": util.email,
- "escape": lambda x: cgi.escape(x, True),
- "fill68": lambda x: fill(x, width=68),
- "fill76": lambda x: fill(x, width=76),
- "firstline": firstline,
- "tabindent": lambda x: indent(x, '\t'),
- "hgdate": lambda x: "%d %d" % x,
- "isodate": lambda x: util.datestr(x, '%Y-%m-%d %H:%M %1%2'),
- "isodatesec": lambda x: util.datestr(x, '%Y-%m-%d %H:%M:%S %1%2'),
- "json": json,
- "jsonescape": jsonescape,
- "localdate": lambda x: (x[0], util.makedate()[1]),
- "nonempty": nonempty,
- "obfuscate": obfuscate,
- "permissions": permissions,
- "person": person,
- "rfc822date": lambda x: util.datestr(x, "%a, %d %b %Y %H:%M:%S %1%2"),
- "rfc3339date": lambda x: util.datestr(x, "%Y-%m-%dT%H:%M:%S%1:%2"),
- "short": lambda x: x[:12],
- "shortdate": util.shortdate,
- "stringify": stringify,
- "strip": lambda x: x.strip(),
- "urlescape": lambda x: urllib.quote(x),
- "user": lambda x: util.shortuser(x),
- "stringescape": lambda x: x.encode('string_escape'),
- "xmlescape": xmlescape,
-}
diff --git a/sys/lib/python/mercurial/templater.py b/sys/lib/python/mercurial/templater.py
deleted file mode 100644
index 86a674fbb..000000000
--- a/sys/lib/python/mercurial/templater.py
+++ /dev/null
@@ -1,245 +0,0 @@
-# templater.py - template expansion for output
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import re, sys, os
-import util, config, templatefilters
-
-path = ['templates', '../templates']
-stringify = templatefilters.stringify
-
-def parsestring(s, quoted=True):
- '''parse a string using simple c-like syntax.
- string must be in quotes if quoted is True.'''
- if quoted:
- if len(s) < 2 or s[0] != s[-1]:
- raise SyntaxError(_('unmatched quotes'))
- return s[1:-1].decode('string_escape')
-
- return s.decode('string_escape')
-
-class engine(object):
- '''template expansion engine.
-
- template expansion works like this. a map file contains key=value
- pairs. if value is quoted, it is treated as string. otherwise, it
- is treated as name of template file.
-
- templater is asked to expand a key in map. it looks up key, and
- looks for strings like this: {foo}. it expands {foo} by looking up
- foo in map, and substituting it. expansion is recursive: it stops
- when there is no more {foo} to replace.
-
- expansion also allows formatting and filtering.
-
- format uses key to expand each item in list. syntax is
- {key%format}.
-
- filter uses function to transform value. syntax is
- {key|filter1|filter2|...}.'''
-
- template_re = re.compile(r'{([\w\|%]+)}|#([\w\|%]+)#')
-
- def __init__(self, loader, filters={}, defaults={}):
- self.loader = loader
- self.filters = filters
- self.defaults = defaults
- self.cache = {}
-
- def process(self, t, map):
- '''Perform expansion. t is name of map element to expand. map contains
- added elements for use during expansion. Is a generator.'''
- tmpl = self.loader(t)
- iters = [self._process(tmpl, map)]
- while iters:
- try:
- item = iters[0].next()
- except StopIteration:
- iters.pop(0)
- continue
- if isinstance(item, str):
- yield item
- elif item is None:
- yield ''
- elif hasattr(item, '__iter__'):
- iters.insert(0, iter(item))
- else:
- yield str(item)
-
- def _format(self, expr, get, map):
- key, format = expr.split('%')
- v = get(key)
- if not hasattr(v, '__iter__'):
- raise SyntaxError(_("error expanding '%s%%%s'") % (key, format))
- lm = map.copy()
- for i in v:
- lm.update(i)
- yield self.process(format, lm)
-
- def _filter(self, expr, get, map):
- if expr not in self.cache:
- parts = expr.split('|')
- val = parts[0]
- try:
- filters = [self.filters[f] for f in parts[1:]]
- except KeyError, i:
- raise SyntaxError(_("unknown filter '%s'") % i[0])
- def apply(get):
- x = get(val)
- for f in filters:
- x = f(x)
- return x
- self.cache[expr] = apply
- return self.cache[expr](get)
-
- def _process(self, tmpl, map):
- '''Render a template. Returns a generator.'''
-
- def get(key):
- v = map.get(key)
- if v is None:
- v = self.defaults.get(key, '')
- if hasattr(v, '__call__'):
- v = v(**map)
- return v
-
- while tmpl:
- m = self.template_re.search(tmpl)
- if not m:
- yield tmpl
- break
-
- start, end = m.span(0)
- variants = m.groups()
- expr = variants[0] or variants[1]
-
- if start:
- yield tmpl[:start]
- tmpl = tmpl[end:]
-
- if '%' in expr:
- yield self._format(expr, get, map)
- elif '|' in expr:
- yield self._filter(expr, get, map)
- else:
- yield get(expr)
-
-engines = {'default': engine}
-
-class templater(object):
-
- def __init__(self, mapfile, filters={}, defaults={}, cache={},
- minchunk=1024, maxchunk=65536):
- '''set up template engine.
- mapfile is name of file to read map definitions from.
- filters is dict of functions. each transforms a value into another.
- defaults is dict of default map definitions.'''
- self.mapfile = mapfile or 'template'
- self.cache = cache.copy()
- self.map = {}
- self.base = (mapfile and os.path.dirname(mapfile)) or ''
- self.filters = templatefilters.filters.copy()
- self.filters.update(filters)
- self.defaults = defaults
- self.minchunk, self.maxchunk = minchunk, maxchunk
- self.engines = {}
-
- if not mapfile:
- return
- if not os.path.exists(mapfile):
- raise util.Abort(_('style not found: %s') % mapfile)
-
- conf = config.config()
- conf.read(mapfile)
-
- for key, val in conf[''].items():
- if val[0] in "'\"":
- try:
- self.cache[key] = parsestring(val)
- except SyntaxError, inst:
- raise SyntaxError('%s: %s' %
- (conf.source('', key), inst.args[0]))
- else:
- val = 'default', val
- if ':' in val[1]:
- val = val[1].split(':', 1)
- self.map[key] = val[0], os.path.join(self.base, val[1])
-
- def __contains__(self, key):
- return key in self.cache or key in self.map
-
- def load(self, t):
- '''Get the template for the given template name. Use a local cache.'''
- if not t in self.cache:
- try:
- self.cache[t] = open(self.map[t][1]).read()
- except IOError, inst:
- raise IOError(inst.args[0], _('template file %s: %s') %
- (self.map[t][1], inst.args[1]))
- return self.cache[t]
-
- def __call__(self, t, **map):
- ttype = t in self.map and self.map[t][0] or 'default'
- proc = self.engines.get(ttype)
- if proc is None:
- proc = engines[ttype](self.load, self.filters, self.defaults)
- self.engines[ttype] = proc
-
- stream = proc.process(t, map)
- if self.minchunk:
- stream = util.increasingchunks(stream, min=self.minchunk,
- max=self.maxchunk)
- return stream
-
-def templatepath(name=None):
- '''return location of template file or directory (if no name).
- returns None if not found.'''
- normpaths = []
-
- # executable version (py2exe) doesn't support __file__
- if hasattr(sys, 'frozen'):
- module = sys.executable
- else:
- module = __file__
- for f in path:
- if f.startswith('/'):
- p = f
- else:
- fl = f.split('/')
- p = os.path.join(os.path.dirname(module), *fl)
- if name:
- p = os.path.join(p, name)
- if name and os.path.exists(p):
- return os.path.normpath(p)
- elif os.path.isdir(p):
- normpaths.append(os.path.normpath(p))
-
- return normpaths
-
-def stylemap(style, paths=None):
- """Return path to mapfile for a given style.
-
- Searches mapfile in the following locations:
- 1. templatepath/style/map
- 2. templatepath/map-style
- 3. templatepath/map
- """
-
- if paths is None:
- paths = templatepath()
- elif isinstance(paths, str):
- paths = [paths]
-
- locations = style and [os.path.join(style, "map"), "map-" + style] or []
- locations.append("map")
- for path in paths:
- for location in locations:
- mapfile = os.path.join(path, location)
- if os.path.isfile(mapfile):
- return mapfile
-
- raise RuntimeError("No hgweb templates found in %r" % paths)
diff --git a/sys/lib/python/mercurial/templates/atom/changelog.tmpl b/sys/lib/python/mercurial/templates/atom/changelog.tmpl
deleted file mode 100644
index 29902ab21..000000000
--- a/sys/lib/python/mercurial/templates/atom/changelog.tmpl
+++ /dev/null
@@ -1,10 +0,0 @@
-{header}
- <!-- Changelog -->
- <id>{urlbase}{url}</id>
- <link rel="self" href="{urlbase}{url}atom-log"/>
- <link rel="alternate" href="{urlbase}{url}"/>
- <title>{repo|escape} Changelog</title>
- {latestentry%feedupdated}
-
-{entries%changelogentry}
-</feed>
diff --git a/sys/lib/python/mercurial/templates/atom/changelogentry.tmpl b/sys/lib/python/mercurial/templates/atom/changelogentry.tmpl
deleted file mode 100644
index 02c2e9bd0..000000000
--- a/sys/lib/python/mercurial/templates/atom/changelogentry.tmpl
+++ /dev/null
@@ -1,16 +0,0 @@
- <entry>
- <title>{desc|strip|firstline|strip|escape|nonempty}</title>
- <id>{urlbase}{url}#changeset-{node}</id>
- <link href="{urlbase}{url}rev/{node}"/>
- <author>
- <name>{author|person|escape}</name>
- <email>{author|email|obfuscate}</email>
- </author>
- <updated>{date|rfc3339date}</updated>
- <published>{date|rfc3339date}</published>
- <content type="xhtml">
- <div xmlns="http://www.w3.org/1999/xhtml">
- <pre xml:space="preserve">{desc|escape|nonempty}</pre>
- </div>
- </content>
- </entry>
diff --git a/sys/lib/python/mercurial/templates/atom/error.tmpl b/sys/lib/python/mercurial/templates/atom/error.tmpl
deleted file mode 100644
index 5735fbab5..000000000
--- a/sys/lib/python/mercurial/templates/atom/error.tmpl
+++ /dev/null
@@ -1,17 +0,0 @@
-{header}
- <!-- Error -->
- <id>{urlbase}{url}</id>
- <link rel="self" href="{urlbase}{url}atom-log"/>
- <link rel="alternate" href="{urlbase}{url}"/>
- <title>Error</title>
- <updated>1970-01-01T00:00:00+00:00</updated>
- <entry>
- <title>Error</title>
- <id>http://mercurial.selenic.com/#error</id>
- <author>
- <name>mercurial</name>
- </author>
- <updated>1970-01-01T00:00:00+00:00</updated>
- <content type="text">{error|escape}</content>
- </entry>
-</feed>
diff --git a/sys/lib/python/mercurial/templates/atom/filelog.tmpl b/sys/lib/python/mercurial/templates/atom/filelog.tmpl
deleted file mode 100644
index 99d4e9b89..000000000
--- a/sys/lib/python/mercurial/templates/atom/filelog.tmpl
+++ /dev/null
@@ -1,8 +0,0 @@
-{header}
- <id>{urlbase}{url}atom-log/tip/{file|escape}</id>
- <link rel="self" href="{urlbase}{url}atom-log/tip/{file|urlescape}"/>
- <title>{repo|escape}: {file|escape} history</title>
- {latestentry%feedupdated}
-
-{entries%changelogentry}
-</feed>
diff --git a/sys/lib/python/mercurial/templates/atom/header.tmpl b/sys/lib/python/mercurial/templates/atom/header.tmpl
deleted file mode 100644
index 90ffceb77..000000000
--- a/sys/lib/python/mercurial/templates/atom/header.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-<?xml version="1.0" encoding="{encoding}"?>
-<feed xmlns="http://www.w3.org/2005/Atom"> \ No newline at end of file
diff --git a/sys/lib/python/mercurial/templates/atom/map b/sys/lib/python/mercurial/templates/atom/map
deleted file mode 100644
index c016b5590..000000000
--- a/sys/lib/python/mercurial/templates/atom/map
+++ /dev/null
@@ -1,11 +0,0 @@
-default = 'changelog'
-feedupdated = '<updated>{date|rfc3339date}</updated>'
-mimetype = 'application/atom+xml; charset={encoding}'
-header = header.tmpl
-changelog = changelog.tmpl
-changelogentry = changelogentry.tmpl
-filelog = filelog.tmpl
-filelogentry = filelogentry.tmpl
-tags = tags.tmpl
-tagentry = tagentry.tmpl
-error = error.tmpl
diff --git a/sys/lib/python/mercurial/templates/atom/tagentry.tmpl b/sys/lib/python/mercurial/templates/atom/tagentry.tmpl
deleted file mode 100644
index 776d8a948..000000000
--- a/sys/lib/python/mercurial/templates/atom/tagentry.tmpl
+++ /dev/null
@@ -1,8 +0,0 @@
- <entry>
- <title>{tag|escape}</title>
- <link rel="alternate" href="{urlbase}{url}rev/{node}"/>
- <id>{urlbase}{url}#tag-{node}</id>
- <updated>{date|rfc3339date}</updated>
- <published>{date|rfc3339date}</published>
- <content type="text">{tag|strip|escape}</content>
- </entry>
diff --git a/sys/lib/python/mercurial/templates/atom/tags.tmpl b/sys/lib/python/mercurial/templates/atom/tags.tmpl
deleted file mode 100644
index 82294ecb8..000000000
--- a/sys/lib/python/mercurial/templates/atom/tags.tmpl
+++ /dev/null
@@ -1,11 +0,0 @@
-{header}
- <id>{urlbase}{url}</id>
- <link rel="self" href="{urlbase}{url}atom-tags"/>
- <link rel="alternate" href="{urlbase}{url}tags"/>
- <title>{repo|escape}: tags</title>
- <summary>{repo|escape} tag history</summary>
- <author><name>Mercurial SCM</name></author>
- {latestentry%feedupdated}
-
-{entriesnotip%tagentry}
-</feed>
diff --git a/sys/lib/python/mercurial/templates/coal/header.tmpl b/sys/lib/python/mercurial/templates/coal/header.tmpl
deleted file mode 100644
index ed0f42d38..000000000
--- a/sys/lib/python/mercurial/templates/coal/header.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
-<head>
-<link rel="icon" href="{staticurl}hgicon.png" type="image/png" />
-<meta name="robots" content="index, nofollow" />
-<link rel="stylesheet" href="{staticurl}style-coal.css" type="text/css" />
diff --git a/sys/lib/python/mercurial/templates/coal/map b/sys/lib/python/mercurial/templates/coal/map
deleted file mode 100644
index 430580b6c..000000000
--- a/sys/lib/python/mercurial/templates/coal/map
+++ /dev/null
@@ -1,191 +0,0 @@
-default = 'shortlog'
-
-mimetype = 'text/html; charset={encoding}'
-header = header.tmpl
-footer = ../paper/footer.tmpl
-search = ../paper/search.tmpl
-
-changelog = ../paper/shortlog.tmpl
-shortlog = ../paper/shortlog.tmpl
-shortlogentry = ../paper/shortlogentry.tmpl
-graph = ../paper/graph.tmpl
-
-naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> '
-filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
-filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
-filenolink = '{file|escape} '
-fileellipses = '...'
-changelogentry = ../paper/shortlogentry.tmpl
-searchentry = ../paper/shortlogentry.tmpl
-changeset = ../paper/changeset.tmpl
-manifest = ../paper/manifest.tmpl
-
-direntry = '
- <tr class="fileline parity{parity}">
- <td class="name">
- <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">
- <img src="{staticurl}coal-folder.png" alt="dir."/> {basename|escape}/
- </a>
- <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">
- {emptydirs|escape}
- </a>
- </td>
- <td class="size"></td>
- <td class="permissions">drwxr-xr-x</td>
- </tr>'
-
-fileentry = '
- <tr class="fileline parity{parity}">
- <td class="filename">
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- <img src="{staticurl}coal-file.png" alt="file"/> {basename|escape}
- </a>
- </td>
- <td class="size">{size}</td>
- <td class="permissions">{permissions|permissions}</td>
- </tr>'
-
-filerevision = ../paper/filerevision.tmpl
-fileannotate = ../paper/fileannotate.tmpl
-filediff = ../paper/filediff.tmpl
-filelog = ../paper/filelog.tmpl
-fileline = '
- <div class="parity{parity} source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</div>'
-filelogentry = ../paper/filelogentry.tmpl
-
-annotateline = '
- <tr class="parity{parity}">
- <td class="annotate">
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}"
- title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
- </td>
- <td class="source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</td>
- </tr>'
-
-diffblock = '<div class="source bottomline parity{parity}"><pre>{lines}</pre></div>'
-difflineplus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="plusline">{line|escape}</span>'
-difflineminus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="minusline">{line|escape}</span>'
-difflineat = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="atline">{line|escape}</span>'
-diffline = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}'
-
-changelogparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-
-changesetparent = '<a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> '
-
-filerevparent = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rename%filerename}{node|short}</a> '
-filerevchild = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> '
-
-filerename = '{file|escape}@'
-filelogrename = '
- <tr>
- <th>base:</th>
- <td>
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {file|escape}@{node|short}
- </a>
- </td>
- </tr>'
-fileannotateparent = '
- <tr>
- <td class="metatag">parent:</td>
- <td>
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {rename%filerename}{node|short}
- </a>
- </td>
- </tr>'
-changesetchild = ' <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>'
-changelogchild = '
- <tr>
- <th class="child">child</th>
- <td class="child">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
- {node|short}
- </a>
- </td>
- </tr>'
-fileannotatechild = '
- <tr>
- <td class="metatag">child:</td>
- <td>
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {node|short}
- </a>
- </td>
- </tr>'
-tags = ../paper/tags.tmpl
-tagentry = '
- <tr class="tagEntry parity{parity}">
- <td>
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
- {tag|escape}
- </a>
- </td>
- <td class="node">
- {node|short}
- </td>
- </tr>'
-branches = ../paper/branches.tmpl
-branchentry = '
- <tr class="tagEntry parity{parity}">
- <td>
- <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}">
- {branch|escape}
- </a>
- </td>
- <td class="node">
- {node|short}
- </td>
- </tr>'
-changelogtag = '<span class="tag">{name|escape}</span> '
-changesettag = '<span class="tag">{tag|escape}</span> '
-changelogbranchhead = '<span class="branchhead">{name|escape}</span> '
-changelogbranchname = '<span class="branchname">{name|escape}</span> '
-
-filediffparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filelogparent = '
- <tr>
- <th>parent {rev}:</th>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filediffchild = '
- <tr>
- <th class="child">child {rev}:</th>
- <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
- </td>
- </tr>'
-filelogchild = '
- <tr>
- <th>child {rev}:</th>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-
-indexentry = '
- <tr class="parity{parity}">
- <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td>
- <td>{description}</td>
- <td>{contact|obfuscate}</td>
- <td class="age">{lastchange|age} ago</td>
- <td class="indexlinks">{archives%indexarchiveentry}</td>
- </tr>\n'
-indexarchiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}">&nbsp;&darr;{type|escape}</a>'
-index = ../paper/index.tmpl
-archiveentry = '
- <li>
- <a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a>
- </li>'
-notfound = ../paper/notfound.tmpl
-error = ../paper/error.tmpl
-urlparameter = '{separator}{name}={value|urlescape}'
-hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
diff --git a/sys/lib/python/mercurial/templates/gitweb/branches.tmpl b/sys/lib/python/mercurial/templates/gitweb/branches.tmpl
deleted file mode 100644
index 80ceb7e0b..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/branches.tmpl
+++ /dev/null
@@ -1,30 +0,0 @@
-{header}
-<title>{repo|escape}: Branches</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-tags" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-tags" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / branches
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-branches |
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
-<br/>
-</div>
-
-<div class="title">&nbsp;</div>
-<table cellspacing="0">
-{entries%branchentry}
-</table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/changelog.tmpl b/sys/lib/python/mercurial/templates/gitweb/changelog.tmpl
deleted file mode 100644
index 2b587a244..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/changelog.tmpl
+++ /dev/null
@@ -1,39 +0,0 @@
-{header}
-<title>{repo|escape}: Changelog</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changelog
-</div>
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<div class="search">
-<input type="text" name="rev" />
-</div>
-</form>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> |
-changelog |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}
-<br/>
-{changenav%naventry}<br/>
-</div>
-
-{entries%changelogentry}
-
-<div class="page_nav">
-{changenav%naventry}<br/>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/changelogentry.tmpl b/sys/lib/python/mercurial/templates/gitweb/changelogentry.tmpl
deleted file mode 100644
index 2a7c3cb25..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/changelogentry.tmpl
+++ /dev/null
@@ -1,14 +0,0 @@
-<div>
-<a class="title" href="{url}rev/{node|short}{sessionvars%urlparameter}"><span class="age">{date|age} ago</span>{desc|strip|firstline|escape|nonempty}<span class="logtags"> {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></a>
-</div>
-<div class="title_text">
-<div class="log_link">
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a><br/>
-</div>
-<i>{author|obfuscate} [{date|rfc822date}] rev {rev}</i><br/>
-</div>
-<div class="log_body">
-{desc|strip|escape|addbreaks|nonempty}
-<br/>
-<br/>
-</div>
diff --git a/sys/lib/python/mercurial/templates/gitweb/changeset.tmpl b/sys/lib/python/mercurial/templates/gitweb/changeset.tmpl
deleted file mode 100644
index 2487dd66e..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/changeset.tmpl
+++ /dev/null
@@ -1,50 +0,0 @@
-{header}
-<title>{repo|escape}: changeset {rev}:{node|short}</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changeset
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
-changeset |
-<a href="{url}raw-rev/{node|short}">raw</a> {archives%archiveentry}<br/>
-</div>
-
-<div>
-<a class="title" href="{url}raw-rev/{node|short}">{desc|strip|escape|firstline|nonempty} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></a>
-</div>
-<div class="title_text">
-<table cellspacing="0">
-<tr><td>author</td><td>{author|obfuscate}</td></tr>
-<tr><td></td><td>{date|date} ({date|age} ago)</td></tr>
-{branch%changesetbranch}
-<tr><td>changeset {rev}</td><td style="font-family:monospace">{node|short}</td></tr>
-{parent%changesetparent}
-{child%changesetchild}
-</table></div>
-
-<div class="page_body">
-{desc|strip|escape|addbreaks|nonempty}
-</div>
-<div class="list_head"></div>
-<div class="title_text">
-<table cellspacing="0">
-{files}
-</table></div>
-
-<div class="page_body">{diff}</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/error.tmpl b/sys/lib/python/mercurial/templates/gitweb/error.tmpl
deleted file mode 100644
index 25b71fc3a..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/error.tmpl
+++ /dev/null
@@ -1,25 +0,0 @@
-{header}
-<title>{repo|escape}: Error</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / error
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> | <a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | <a href="{url}log{sessionvars%urlparameter}">changelog</a> | <a href="{url}tags{sessionvars%urlparameter}">tags</a> | <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a><br/>
-</div>
-
-<div class="page_body">
-<br/>
-<i>An error occurred while processing your request</i><br/>
-<br/>
-{error|escape}
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/fileannotate.tmpl b/sys/lib/python/mercurial/templates/gitweb/fileannotate.tmpl
deleted file mode 100644
index 87e8cc316..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/fileannotate.tmpl
+++ /dev/null
@@ -1,61 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape}@{node|short} (annotated)</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / annotate
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> |
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
-<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
-<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
-annotate |
-<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
-<a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a><br/>
-</div>
-
-<div class="title">{file|escape}</div>
-
-<div class="title_text">
-<table cellspacing="0">
-<tr>
- <td>author</td>
- <td>{author|obfuscate}</td></tr>
-<tr>
- <td></td>
- <td>{date|date} ({date|age} ago)</td></tr>
-{branch%filerevbranch}
-<tr>
- <td>changeset {rev}</td>
- <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
-{parent%fileannotateparent}
-{child%fileannotatechild}
-<tr>
- <td>permissions</td>
- <td style="font-family:monospace">{permissions|permissions}</td></tr>
-</table>
-</div>
-
-<div class="page_path">
-{desc|strip|escape|addbreaks|nonempty}
-</div>
-<div class="page_body">
-<table>
-{annotate%annotateline}
-</table>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/filediff.tmpl b/sys/lib/python/mercurial/templates/gitweb/filediff.tmpl
deleted file mode 100644
index 967d79098..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/filediff.tmpl
+++ /dev/null
@@ -1,47 +0,0 @@
-{header}
-<title>{repo|escape}: diff {file|escape}</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / diff
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> |
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
-<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
-<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
-<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
-diff |
-<a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a><br/>
-</div>
-
-<div class="title">{file|escape}</div>
-
-<table>
-{branch%filerevbranch}
-<tr>
- <td>changeset {rev}</td>
- <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
-{parent%filediffparent}
-{child%filediffchild}
-</table>
-
-<div class="list_head"></div>
-
-<div class="page_body">
-{diff}
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/filelog.tmpl b/sys/lib/python/mercurial/templates/gitweb/filelog.tmpl
deleted file mode 100644
index 50f36c8eb..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/filelog.tmpl
+++ /dev/null
@@ -1,40 +0,0 @@
-{header}
-<title>{repo|escape}: File revisions</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revisions
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
-revisions |
-<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
-<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
-<a href="{url}rss-log/{node|short}/{file|urlescape}">rss</a>
-<br/>
-{nav%filenaventry}
-</div>
-
-<div class="title" >{file|urlescape}</div>
-
-<table>
-{entries%filelogentry}
-</table>
-
-<div class="page_nav">
-{nav%filenaventry}
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/filerevision.tmpl b/sys/lib/python/mercurial/templates/gitweb/filerevision.tmpl
deleted file mode 100644
index d64c632af..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/filerevision.tmpl
+++ /dev/null
@@ -1,60 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape}@{node|short}</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revision
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> |
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
-file |
-<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
-<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
-<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
-<a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a><br/>
-</div>
-
-<div class="title">{file|escape}</div>
-
-<div class="title_text">
-<table cellspacing="0">
-<tr>
- <td>author</td>
- <td>{author|obfuscate}</td></tr>
-<tr>
- <td></td>
- <td>{date|date} ({date|age} ago)</td></tr>
-{branch%filerevbranch}
-<tr>
- <td>changeset {rev}</td>
- <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
-{parent%filerevparent}
-{child%filerevchild}
-<tr>
- <td>permissions</td>
- <td style="font-family:monospace">{permissions|permissions}</td></tr>
-</table>
-</div>
-
-<div class="page_path">
-{desc|strip|escape|addbreaks|nonempty}
-</div>
-
-<div class="page_body">
-{text%fileline}
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/footer.tmpl b/sys/lib/python/mercurial/templates/gitweb/footer.tmpl
deleted file mode 100644
index a5f74c38d..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/footer.tmpl
+++ /dev/null
@@ -1,11 +0,0 @@
-<div class="page_footer">
-<div class="page_footer_text">{repo|escape}</div>
-<div class="rss_logo">
-<a href="{url}rss-log">RSS</a>
-<a href="{url}atom-log">Atom</a>
-</div>
-<br />
-{motd}
-</div>
-</body>
-</html>
diff --git a/sys/lib/python/mercurial/templates/gitweb/graph.tmpl b/sys/lib/python/mercurial/templates/gitweb/graph.tmpl
deleted file mode 100644
index 52b399ae5..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/graph.tmpl
+++ /dev/null
@@ -1,121 +0,0 @@
-{header}
-<title>{repo|escape}: Graph</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]-->
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / graph
-</div>
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<div class="search">
-<input type="text" name="rev" />
-</div>
-</form>
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> |
-graph |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
-<br/>
-<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
-<a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
-| {changenav%navgraphentry}<br/>
-</div>
-
-<div class="title">&nbsp;</div>
-
-<noscript>The revision graph only works with JavaScript-enabled browsers.</noscript>
-
-<div id="wrapper">
-<ul id="nodebgs"></ul>
-<canvas id="graph" width="224" height="{canvasheight}"></canvas>
-<ul id="graphnodes"></ul>
-</div>
-
-<script type="text/javascript" src="{staticurl}graph.js"></script>
-<script>
-<!-- hide script content
-
-var data = {jsdata|json};
-var graph = new Graph();
-graph.scale({bg_height});
-
-graph.edge = function(x0, y0, x1, y1, color) {
-
- this.setColor(color, 0.0, 0.65);
- this.ctx.beginPath();
- this.ctx.moveTo(x0, y0);
- this.ctx.lineTo(x1, y1);
- this.ctx.stroke();
-
-}
-
-var revlink = '<li style="_STYLE"><span class="desc">';
-revlink += '<a class="list" href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID"><b>_DESC</b></a>';
-revlink += '</span> _TAGS';
-revlink += '<span class="info">_DATE ago, by _USER</span></li>';
-
-graph.vertex = function(x, y, color, parity, cur) {
-
- this.ctx.beginPath();
- color = this.setColor(color, 0.25, 0.75);
- this.ctx.arc(x, y, radius, 0, Math.PI * 2, true);
- this.ctx.fill();
-
- var bg = '<li class="bg parity' + parity + '"></li>';
- var left = (this.columns + 1) * this.bg_height;
- var nstyle = 'padding-left: ' + left + 'px;';
- var item = revlink.replace(/_STYLE/, nstyle);
- item = item.replace(/_PARITY/, 'parity' + parity);
- item = item.replace(/_NODEID/, cur[0]);
- item = item.replace(/_NODEID/, cur[0]);
- item = item.replace(/_DESC/, cur[3]);
- item = item.replace(/_USER/, cur[4]);
- item = item.replace(/_DATE/, cur[5]);
-
- var tagspan = '';
- if (cur[7].length || (cur[6][0] != 'default' || cur[6][1])) {
- tagspan = '<span class="logtags">';
- if (cur[6][1]) {
- tagspan += '<span class="branchtag" title="' + cur[6][0] + '">';
- tagspan += cur[6][0] + '</span> ';
- } else if (!cur[6][1] && cur[6][0] != 'default') {
- tagspan += '<span class="inbranchtag" title="' + cur[6][0] + '">';
- tagspan += cur[6][0] + '</span> ';
- }
- if (cur[7].length) {
- for (var t in cur[7]) {
- var tag = cur[7][t];
- tagspan += '<span class="tagtag">' + tag + '</span> ';
- }
- }
- tagspan += '</span>';
- }
-
- item = item.replace(/_TAGS/, tagspan);
- return [bg, item];
-
-}
-
-graph.render(data);
-
-// stop hiding script -->
-</script>
-
-<div class="page_nav">
-<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
-<a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
-| {changenav%navgraphentry}
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/header.tmpl b/sys/lib/python/mercurial/templates/gitweb/header.tmpl
deleted file mode 100644
index f3df3d7c3..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/header.tmpl
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="{encoding}"?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US" lang="en-US">
-<head>
-<link rel="icon" href="{staticurl}hgicon.png" type="image/png" />
-<meta name="robots" content="index, nofollow"/>
-<link rel="stylesheet" href="{staticurl}style-gitweb.css" type="text/css" />
-
diff --git a/sys/lib/python/mercurial/templates/gitweb/index.tmpl b/sys/lib/python/mercurial/templates/gitweb/index.tmpl
deleted file mode 100644
index 858aaf1c7..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/index.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-{header}
-<title>Mercurial repositories index</title>
-</head>
-<body>
-
-<div class="page_header">
- <a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a>
- Repositories list
-</div>
-
-<table cellspacing="0">
- <tr>
- <td><a href="?sort={sort_name}">Name</a></td>
- <td><a href="?sort={sort_description}">Description</a></td>
- <td><a href="?sort={sort_contact}">Contact</a></td>
- <td><a href="?sort={sort_lastchange}">Last change</a></td>
- <td>&nbsp;</td>
- <td>&nbsp;</td>
- </tr>
- {entries%indexentry}
-</table>
-<div class="page_footer">
-{motd}
-</div>
-</body>
-</html>
diff --git a/sys/lib/python/mercurial/templates/gitweb/manifest.tmpl b/sys/lib/python/mercurial/templates/gitweb/manifest.tmpl
deleted file mode 100644
index 4bfb39f8f..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/manifest.tmpl
+++ /dev/null
@@ -1,38 +0,0 @@
-{header}
-<title>{repo|escape}: files</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-files |
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> {archives%archiveentry}<br/>
-</div>
-
-<div class="title">{path|escape} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></div>
-<table cellspacing="0">
-<tr class="parity{upparity}">
-<td style="font-family:monospace">drwxr-xr-x</td>
-<td style="font-family:monospace"></td>
-<td style="font-family:monospace"></td>
-<td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td>
-<td class="link">&nbsp;</td>
-</tr>
-{dentries%direntry}
-{fentries%fileentry}
-</table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/map b/sys/lib/python/mercurial/templates/gitweb/map
deleted file mode 100644
index fc0fedd57..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/map
+++ /dev/null
@@ -1,248 +0,0 @@
-default = 'summary'
-mimetype = 'text/html; charset={encoding}'
-header = header.tmpl
-footer = footer.tmpl
-search = search.tmpl
-changelog = changelog.tmpl
-summary = summary.tmpl
-error = error.tmpl
-notfound = notfound.tmpl
-naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> '
-filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
-filenodelink = '
- <tr class="parity{parity}">
- <td><a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td>
- <td></td>
- <td class="link">
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
- <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
- <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
- </td>
- </tr>'
-filenolink = '
- <tr class="parity{parity}">
- <td><a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td>
- <td></td>
- <td class="link">
- file |
- annotate |
- <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
- <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
- </td>
- </tr>'
-fileellipses = '...'
-changelogentry = changelogentry.tmpl
-searchentry = changelogentry.tmpl
-changeset = changeset.tmpl
-manifest = manifest.tmpl
-direntry = '
- <tr class="parity{parity}">
- <td style="font-family:monospace">drwxr-xr-x</td>
- <td style="font-family:monospace"></td>
- <td style="font-family:monospace"></td>
- <td>
- <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}</a>
- <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">{emptydirs|escape}</a>
- </td>
- <td class="link">
- <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a>
- </td>
- </tr>'
-fileentry = '
- <tr class="parity{parity}">
- <td style="font-family:monospace">{permissions|permissions}</td>
- <td style="font-family:monospace" align=right>{date|isodate}</td>
- <td style="font-family:monospace" align=right>{size}</td>
- <td class="list">
- <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a>
- </td>
- <td class="link">
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
- <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
- </td>
- </tr>'
-filerevision = filerevision.tmpl
-fileannotate = fileannotate.tmpl
-filediff = filediff.tmpl
-filelog = filelog.tmpl
-fileline = '
- <div style="font-family:monospace" class="parity{parity}">
- <pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</pre>
- </div>'
-annotateline = '
- <tr style="font-family:monospace" class="parity{parity}">
- <td class="linenr" style="text-align: right;">
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}"
- title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
- </td>
- <td><pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a></pre></td>
- <td><pre>{line|escape}</pre></td>
- </tr>'
-difflineplus = '<span style="color:#008800;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
-difflineminus = '<span style="color:#cc0000;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
-difflineat = '<span style="color:#990099;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
-diffline = '<span><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
-changelogparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
- </td>
- </tr>'
-changesetbranch = '<tr><td>branch</td><td>{name}</td></tr>'
-changesetparent = '
- <tr>
- <td>parent {rev}</td>
- <td style="font-family:monospace">
- <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
- </td>
- </tr>'
-filerevbranch = '<tr><td>branch</td><td>{name}</td></tr>'
-filerevparent = '
- <tr>
- <td>parent {rev}</td>
- <td style="font-family:monospace">
- <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {rename%filerename}{node|short}
- </a>
- </td>
- </tr>'
-filerename = '{file|escape}@'
-filelogrename = '| <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">base</a>'
-fileannotateparent = '
- <tr>
- <td>parent {rev}</td>
- <td style="font-family:monospace">
- <a class="list" href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {rename%filerename}{node|short}
- </a>
- </td>
- </tr>'
-changelogchild = '
- <tr>
- <th class="child">child {rev}:</th>
- <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-changesetchild = '
- <tr>
- <td>child {rev}</td>
- <td style="font-family:monospace">
- <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
- </td>
- </tr>'
-filerevchild = '
- <tr>
- <td>child {rev}</td>
- <td style="font-family:monospace">
- <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-fileannotatechild = '
- <tr>
- <td>child {rev}</td>
- <td style="font-family:monospace">
- <a class="list" href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-tags = tags.tmpl
-tagentry = '
- <tr class="parity{parity}">
- <td class="age"><i>{date|age} ago</i></td>
- <td><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}"><b>{tag|escape}</b></a></td>
- <td class="link">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
- <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
- <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
- </td>
- </tr>'
-branches = branches.tmpl
-branchentry = '
- <tr class="parity{parity}">
- <td class="age"><i>{date|age} ago</i></td>
- <td><a class="list" href="{url}shortlog/{node|short}{sessionvars%urlparameter}"><b>{node|short}</b></a></td>
- <td class="{status}">{branch|escape}</td>
- <td class="link">
- <a href="{url}changeset/{node|short}{sessionvars%urlparameter}">changeset</a> |
- <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
- <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
- </td>
- </tr>'
-diffblock = '<pre>{lines}</pre>'
-filediffparent = '
- <tr>
- <td>parent {rev}</td>
- <td style="font-family:monospace">
- <a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {node|short}
- </a>
- </td>
- </tr>'
-filelogparent = '
- <tr>
- <td align="right">parent {rev}:&nbsp;</td>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filediffchild = '
- <tr>
- <td>child {rev}</td>
- <td style="font-family:monospace">
- <a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a>
- </td>
- </tr>'
-filelogchild = '
- <tr>
- <td align="right">child {rev}:&nbsp;</td>
- <td><a href="{url}file{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-shortlog = shortlog.tmpl
-graph = graph.tmpl
-tagtag = '<span class="tagtag" title="{name}">{name}</span> '
-branchtag = '<span class="branchtag" title="{name}">{name}</span> '
-inbranchtag = '<span class="inbranchtag" title="{name}">{name}</span> '
-shortlogentry = '
- <tr class="parity{parity}">
- <td class="age"><i>{date|age} ago</i></td>
- <td><i>{author|person}</i></td>
- <td>
- <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">
- <b>{desc|strip|firstline|escape|nonempty}</b>
- <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span>
- </a>
- </td>
- <td class="link" nowrap>
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
- <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
- </td>
- </tr>'
-filelogentry = '
- <tr class="parity{parity}">
- <td class="age"><i>{date|age} ago</i></td>
- <td>
- <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">
- <b>{desc|strip|firstline|escape|nonempty}</b>
- </a>
- </td>
- <td class="link">
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>&nbsp;|&nbsp;<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a>&nbsp;|&nbsp;<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> {rename%filelogrename}</td>
- </tr>'
-archiveentry = ' | <a href="{url}archive/{node|short}{extension}">{type|escape}</a> '
-indexentry = '
- <tr class="parity{parity}">
- <td>
- <a class="list" href="{url}{sessionvars%urlparameter}">
- <b>{name|escape}</b>
- </a>
- </td>
- <td>{description}</td>
- <td>{contact|obfuscate}</td>
- <td class="age">{lastchange|age} ago</td>
- <td class="indexlinks">{archives%indexarchiveentry}</td>
- <td><div class="rss_logo"><a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a></div></td>
- </tr>\n'
-indexarchiveentry = ' <a href="{url}archive/{node|short}{extension}">{type|escape}</a> '
-index = index.tmpl
-urlparameter = '{separator}{name}={value|urlescape}'
-hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
diff --git a/sys/lib/python/mercurial/templates/gitweb/notfound.tmpl b/sys/lib/python/mercurial/templates/gitweb/notfound.tmpl
deleted file mode 100644
index 073bb11d4..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/notfound.tmpl
+++ /dev/null
@@ -1,18 +0,0 @@
-{header}
-<title>Mercurial repository not found</title>
-</head>
-
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a> Not found: {repo|escape}
-</div>
-
-<div class="page_body">
-The specified repository "{repo|escape}" is unknown, sorry.
-<br/>
-<br/>
-Please go back to the <a href="{url}">main repository list page</a>.
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/search.tmpl b/sys/lib/python/mercurial/templates/gitweb/search.tmpl
deleted file mode 100644
index 0ac85f238..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/search.tmpl
+++ /dev/null
@@ -1,36 +0,0 @@
-{header}
-<title>{repo|escape}: Search</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / search
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<div class="search">
-<input type="text" name="rev" value="{query|escape}" />
-</div>
-</form>
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}
-<br/>
-</div>
-
-<div class="title">searching for {query|escape}</div>
-
-{entries}
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/shortlog.tmpl b/sys/lib/python/mercurial/templates/gitweb/shortlog.tmpl
deleted file mode 100644
index d40259739..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/shortlog.tmpl
+++ /dev/null
@@ -1,41 +0,0 @@
-{header}
-<title>{repo|escape}: Shortlog</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / shortlog
-</div>
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<div class="search">
-<input type="text" name="rev" />
-</div>
-</form>
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-shortlog |
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}
-<br/>
-{changenav%navshortentry}<br/>
-</div>
-
-<div class="title">&nbsp;</div>
-<table cellspacing="0">
-{entries%shortlogentry}
-</table>
-
-<div class="page_nav">
-{changenav%navshortentry}
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/summary.tmpl b/sys/lib/python/mercurial/templates/gitweb/summary.tmpl
deleted file mode 100644
index f6473a5a9..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/summary.tmpl
+++ /dev/null
@@ -1,58 +0,0 @@
-{header}
-<title>{repo|escape}: Summary</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / summary
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<div class="search">
-<input type="text" name="rev" />
-</div>
-</form>
-</div>
-
-<div class="page_nav">
-summary |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}
-<br/>
-</div>
-
-<div class="title">&nbsp;</div>
-<table cellspacing="0">
-<tr><td>description</td><td>{desc}</td></tr>
-<tr><td>owner</td><td>{owner|obfuscate}</td></tr>
-<tr><td>last change</td><td>{lastchange|rfc822date}</td></tr>
-</table>
-
-<div><a class="title" href="{url}shortlog{sessionvars%urlparameter}">changes</a></div>
-<table cellspacing="0">
-{shortlog}
-<tr class="light"><td colspan="4"><a class="list" href="{url}shortlog{sessionvars%urlparameter}">...</a></td></tr>
-</table>
-
-<div><a class="title" href="{url}tags{sessionvars%urlparameter}">tags</a></div>
-<table cellspacing="0">
-{tags}
-<tr class="light"><td colspan="3"><a class="list" href="{url}tags{sessionvars%urlparameter}">...</a></td></tr>
-</table>
-
-<div><a class="title" href="#">branches</a></div>
-<table cellspacing="0">
-{branches%branchentry}
-<tr class="light">
- <td colspan="4"><a class="list" href="#">...</a></td>
-</tr>
-</table>
-{footer}
diff --git a/sys/lib/python/mercurial/templates/gitweb/tags.tmpl b/sys/lib/python/mercurial/templates/gitweb/tags.tmpl
deleted file mode 100644
index b5e64b60e..000000000
--- a/sys/lib/python/mercurial/templates/gitweb/tags.tmpl
+++ /dev/null
@@ -1,30 +0,0 @@
-{header}
-<title>{repo|escape}: Tags</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-tags" title="Atom feed for {repo|escape}"/>
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-tags" title="RSS feed for {repo|escape}"/>
-</head>
-<body>
-
-<div class="page_header">
-<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / tags
-</div>
-
-<div class="page_nav">
-<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
-<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
-<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
-tags |
-<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
-<br/>
-</div>
-
-<div class="title">&nbsp;</div>
-<table cellspacing="0">
-{entries%tagentry}
-</table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/map-cmdline.changelog b/sys/lib/python/mercurial/templates/map-cmdline.changelog
deleted file mode 100644
index 8ae39b59b..000000000
--- a/sys/lib/python/mercurial/templates/map-cmdline.changelog
+++ /dev/null
@@ -1,14 +0,0 @@
-header = '{date|shortdate} {author|person} <{author|email}>\n\n'
-header_verbose = ''
-changeset = '\t* {files|stringify|fill68|tabindent}{desc|fill68|tabindent|strip}\n\t[{node|short}]{tags}\n\n'
-changeset_quiet = '\t* {desc|firstline|fill68|tabindent|strip}\n\n'
-changeset_verbose = '{date|isodate} {author|person} <{author|email}> ({node|short}{tags})\n\n\t* {file_adds|stringify|fill68|tabindent}{file_dels|stringify|fill68|tabindent}{files|stringify|fill68|tabindent}{desc|fill68|tabindent|strip}\n\n'
-start_tags = ' ['
-tag = '{tag}, '
-last_tag = '{tag}]'
-file = '{file}, '
-last_file = '{file}:\n\t'
-file_add = '{file_add}, '
-last_file_add = '{file_add}: new file.\n* '
-file_del = '{file_del}, '
-last_file_del = '{file_del}: deleted file.\n* '
diff --git a/sys/lib/python/mercurial/templates/map-cmdline.compact b/sys/lib/python/mercurial/templates/map-cmdline.compact
deleted file mode 100644
index ee66bff97..000000000
--- a/sys/lib/python/mercurial/templates/map-cmdline.compact
+++ /dev/null
@@ -1,9 +0,0 @@
-changeset = '{rev}{tags}{parents} {node|short} {date|isodate} {author|user}\n {desc|firstline|strip}\n\n'
-changeset_quiet = '{rev}:{node|short}\n'
-changeset_verbose = '{rev}{tags}{parents} {node|short} {date|isodate} {author}\n {desc|strip}\n\n'
-start_tags = '['
-tag = '{tag},'
-last_tag = '{tag}]'
-start_parents = ':'
-parent = '{rev},'
-last_parent = '{rev}'
diff --git a/sys/lib/python/mercurial/templates/map-cmdline.default b/sys/lib/python/mercurial/templates/map-cmdline.default
deleted file mode 100644
index 3ceb2973b..000000000
--- a/sys/lib/python/mercurial/templates/map-cmdline.default
+++ /dev/null
@@ -1,24 +0,0 @@
-changeset = 'changeset: {rev}:{node|short}\n{branches}{tags}{parents}user: {author}\ndate: {date|date}\nsummary: {desc|firstline}\n\n'
-changeset_quiet = '{rev}:{node|short}\n'
-changeset_verbose = 'changeset: {rev}:{node|short}\n{branches}{tags}{parents}user: {author}\ndate: {date|date}\n{files}{file_copies}description:\n{desc|strip}\n\n\n'
-changeset_debug = 'changeset: {rev}:{node}\n{branches}{tags}{parents}{manifest}user: {author}\ndate: {date|date}\n{file_mods}{file_adds}{file_dels}{file_copies}{extras}description:\n{desc|strip}\n\n\n'
-start_files = 'files: '
-file = ' {file}'
-end_files = '\n'
-start_file_mods = 'files: '
-file_mod = ' {file_mod}'
-end_file_mods = '\n'
-start_file_adds = 'files+: '
-file_add = ' {file_add}'
-end_file_adds = '\n'
-start_file_dels = 'files-: '
-file_del = ' {file_del}'
-end_file_dels = '\n'
-start_file_copies = 'copies: '
-file_copy = ' {name} ({source})'
-end_file_copies = '\n'
-parent = 'parent: {rev}:{node|formatnode}\n'
-manifest = 'manifest: {rev}:{node}\n'
-branch = 'branch: {branch}\n'
-tag = 'tag: {tag}\n'
-extra = 'extra: {key}={value|stringescape}\n'
diff --git a/sys/lib/python/mercurial/templates/monoblue/branches.tmpl b/sys/lib/python/mercurial/templates/monoblue/branches.tmpl
deleted file mode 100644
index 2c75da03d..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/branches.tmpl
+++ /dev/null
@@ -1,36 +0,0 @@
-{header}
- <title>{repo|escape}: Branches</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Branches</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li class="current">branches</li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <h2 class="no-link no-border">tags</h2>
- <table cellspacing="0">
-{entries%branchentry}
- </table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/changelog.tmpl b/sys/lib/python/mercurial/templates/monoblue/changelog.tmpl
deleted file mode 100644
index 8b361e585..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/changelog.tmpl
+++ /dev/null
@@ -1,40 +0,0 @@
-{header}
- <title>{repo|escape}: changelog</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changelog</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li class="current">changelog</li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}</li>
- </ul>
- </div>
-
- <h2 class="no-link no-border">changelog</h2>
- <div>
- {entries%changelogentry}
- </div>
-
- <div class="page-path">
-{changenav%naventry}
- </div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/changelogentry.tmpl b/sys/lib/python/mercurial/templates/monoblue/changelogentry.tmpl
deleted file mode 100644
index df03c9758..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/changelogentry.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-<h3 class="changelog"><a class="title" href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}<span class="logtags"> {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></a></h3>
-<ul class="changelog-entry">
- <li class="age">{date|age} ago</li>
- <li>by <span class="name">{author|obfuscate}</span> <span class="revdate">[{date|rfc822date}] rev {rev}</span></li>
- <li class="description">{desc|strip|escape|addbreaks|nonempty}</li>
-</ul>
diff --git a/sys/lib/python/mercurial/templates/monoblue/changeset.tmpl b/sys/lib/python/mercurial/templates/monoblue/changeset.tmpl
deleted file mode 100644
index 8919778f2..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/changeset.tmpl
+++ /dev/null
@@ -1,63 +0,0 @@
-{header}
-<title>{repo|escape}: changeset {rev}:{node|short}</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <ul class="submenu">
- <li class="current">changeset</li>
- <li><a href="{url}raw-rev/{node|short}">raw</a> {archives%archiveentry}</li>
- </ul>
-
- <h2 class="no-link no-border">changeset</h2>
-
- <h3 class="changeset"><a href="{url}raw-rev/{node|short}">{desc|strip|escape|firstline|nonempty} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></a></h3>
- <p class="changeset-age"><span>{date|age} ago</span></p>
-
- <dl class="overview">
- <dt>author</dt>
- <dd>{author|obfuscate}</dd>
- <dt>date</dt>
- <dd>{date|date}</dd>
- {branch%changesetbranch}
- <dt>changeset {rev}</dt>
- <dd>{node|short}</dd>
- {parent%changesetparent}
- {child%changesetchild}
- </dl>
-
- <p class="description">{desc|strip|escape|addbreaks|nonempty}</p>
-
- <table>
- {files}
- </table>
-
- <div class="diff">
- {diff}
- </div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/error.tmpl b/sys/lib/python/mercurial/templates/monoblue/error.tmpl
deleted file mode 100644
index a7f23d20e..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/error.tmpl
+++ /dev/null
@@ -1,34 +0,0 @@
-{header}
- <title>{repo|escape}: Error</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Not found: {repo|escape}</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li class="current">summary</li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <h2 class="no-link no-border">An error occurred while processing your request</h2>
- <p class="normal">{error|escape}</p>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/fileannotate.tmpl b/sys/lib/python/mercurial/templates/monoblue/fileannotate.tmpl
deleted file mode 100644
index 13094f699..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/fileannotate.tmpl
+++ /dev/null
@@ -1,63 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape}@{node|short} (annotated)</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / annotate</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <ul class="submenu">
- <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
- <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li>
- <li class="current">annotate</li>
- <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
- <li><a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a></li>
- </ul>
-
- <h2 class="no-link no-border">{file|escape}@{node|short} (annotated)</h2>
- <h3 class="changeset">{file|escape}</h3>
- <p class="changeset-age"><span>{date|age} ago</span></p>
-
- <dl class="overview">
- <dt>author</dt>
- <dd>{author|obfuscate}</dd>
- <dt>date</dt>
- <dd>{date|date}</dd>
- {branch%filerevbranch}
- <dt>changeset {rev}</dt>
- <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>
- {parent%fileannotateparent}
- {child%fileannotatechild}
- <dt>permissions</dt>
- <dd>{permissions|permissions}</dd>
- </dl>
-
- <p class="description">{desc|strip|escape|addbreaks|nonempty}</p>
-
- <table class="annotated">
- {annotate%annotateline}
- </table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/filediff.tmpl b/sys/lib/python/mercurial/templates/monoblue/filediff.tmpl
deleted file mode 100644
index ccf2e8530..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/filediff.tmpl
+++ /dev/null
@@ -1,54 +0,0 @@
-{header}
-<title>{repo|escape}: diff {file|escape}</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file diff</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <ul class="submenu">
- <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
- <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li>
- <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
- <li class="current">diff</li>
- <li><a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a></li>
- </ul>
-
- <h2 class="no-link no-border">diff: {file|escape}</h2>
- <h3 class="changeset">{file|escape}</h3>
-
- <dl class="overview">
- {branch%filerevbranch}
- <dt>changeset {rev}</dt>
- <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>
- {parent%filediffparent}
- {child%filediffchild}
- </dl>
-
- <div class="diff">
- {diff}
- </div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/filelog.tmpl b/sys/lib/python/mercurial/templates/monoblue/filelog.tmpl
deleted file mode 100644
index 4ebc5cce8..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/filelog.tmpl
+++ /dev/null
@@ -1,49 +0,0 @@
-{header}
-<title>{repo|escape}: File revisions</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revisions</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <ul class="submenu">
- <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
- <li class="current">revisions</li>
- <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
- <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
- <li><a href="{url}rss-log/{node|short}/{file|urlescape}">rss</a></li>
- </ul>
-
- <h2 class="no-link no-border">{file|urlescape}</h2>
-
- <table>
- {entries%filelogentry}
- </table>
-
- <div class="page-path">
- {nav%filenaventry}
- </div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/filerevision.tmpl b/sys/lib/python/mercurial/templates/monoblue/filerevision.tmpl
deleted file mode 100644
index f58c63324..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/filerevision.tmpl
+++ /dev/null
@@ -1,63 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape}@{node|short}</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revision</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <ul class="submenu">
- <li class="current">file</li>
- <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li>
- <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
- <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
- <li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li>
- </ul>
-
- <h2 class="no-link no-border">{file|escape}@{node|short}</h2>
- <h3 class="changeset">{file|escape}</h3>
- <p class="changeset-age"><span>{date|age} ago</span></p>
-
- <dl class="overview">
- <dt>author</dt>
- <dd>{author|obfuscate}</dd>
- <dt>date</dt>
- <dd>{date|date}</dd>
- {branch%filerevbranch}
- <dt>changeset {rev}</dt>
- <dd><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>
- {parent%filerevparent}
- {child%filerevchild}
- <dt>permissions</dt>
- <dd>{permissions|permissions}</dd>
- </dl>
-
- <p class="description">{desc|strip|escape|addbreaks|nonempty}</p>
-
- <div class="source">
- {text%fileline}
- </div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/footer.tmpl b/sys/lib/python/mercurial/templates/monoblue/footer.tmpl
deleted file mode 100644
index cddaa9267..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/footer.tmpl
+++ /dev/null
@@ -1,22 +0,0 @@
- <div class="page-footer">
- <p>Mercurial Repository: {repo|escape}</p>
- <ul class="rss-logo">
- <li><a href="{url}rss-log">RSS</a></li>
- <li><a href="{url}atom-log">Atom</a></li>
- </ul>
- {motd}
- </div>
-
- <div id="powered-by">
- <p><a href="http://mercurial.selenic.com/" title="Mercurial"><img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a></p>
- </div>
-
- <div id="corner-top-left"></div>
- <div id="corner-top-right"></div>
- <div id="corner-bottom-left"></div>
- <div id="corner-bottom-right"></div>
-
-</div>
-
-</body>
-</html>
diff --git a/sys/lib/python/mercurial/templates/monoblue/graph.tmpl b/sys/lib/python/mercurial/templates/monoblue/graph.tmpl
deleted file mode 100644
index ffd6b4771..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/graph.tmpl
+++ /dev/null
@@ -1,118 +0,0 @@
-{header}
- <title>{repo|escape}: graph</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
- <!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]-->
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / graph</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
- <li class="current">graph</li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <h2 class="no-link no-border">graph</h2>
-
- <div id="noscript">The revision graph only works with JavaScript-enabled browsers.</div>
- <div id="wrapper">
- <ul id="nodebgs"></ul>
- <canvas id="graph" width="224" height="{canvasheight}"></canvas>
- <ul id="graphnodes"></ul>
- </div>
-
- <script type="text/javascript" src="{staticurl}graph.js"></script>
- <script>
- <!-- hide script content
-
- document.getElementById('noscript').style.display = 'none';
-
- var data = {jsdata|json};
- var graph = new Graph();
- graph.scale({bg_height});
-
- graph.edge = function(x0, y0, x1, y1, color) {
-
- this.setColor(color, 0.0, 0.65);
- this.ctx.beginPath();
- this.ctx.moveTo(x0, y0);
- this.ctx.lineTo(x1, y1);
- this.ctx.stroke();
-
- }
-
- var revlink = '<li style="_STYLE"><span class="desc">';
- revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>';
- revlink += '</span>_TAGS<span class="info">_DATE ago, by _USER</span></li>';
-
- graph.vertex = function(x, y, color, parity, cur) {
-
- this.ctx.beginPath();
- color = this.setColor(color, 0.25, 0.75);
- this.ctx.arc(x, y, radius, 0, Math.PI * 2, true);
- this.ctx.fill();
-
- var bg = '<li class="bg parity' + parity + '"></li>';
- var left = (this.columns + 1) * this.bg_height;
- var nstyle = 'padding-left: ' + left + 'px;';
- var item = revlink.replace(/_STYLE/, nstyle);
- item = item.replace(/_PARITY/, 'parity' + parity);
- item = item.replace(/_NODEID/, cur[0]);
- item = item.replace(/_NODEID/, cur[0]);
- item = item.replace(/_DESC/, cur[3]);
- item = item.replace(/_USER/, cur[4]);
- item = item.replace(/_DATE/, cur[5]);
-
- var tagspan = '';
- if (cur[7].length || (cur[6][0] != 'default' || cur[6][1])) {
- tagspan = '<span class="logtags">';
- if (cur[6][1]) {
- tagspan += '<span class="branchtag" title="' + cur[6][0] + '">';
- tagspan += cur[6][0] + '</span> ';
- } else if (!cur[6][1] && cur[6][0] != 'default') {
- tagspan += '<span class="inbranchtag" title="' + cur[6][0] + '">';
- tagspan += cur[6][0] + '</span> ';
- }
- if (cur[7].length) {
- for (var t in cur[7]) {
- var tag = cur[7][t];
- tagspan += '<span class="tagtag">' + tag + '</span> ';
- }
- }
- tagspan += '</span>';
- }
-
- item = item.replace(/_TAGS/, tagspan);
- return [bg, item];
-
- }
-
- graph.render(data);
-
- // stop hiding script -->
- </script>
-
- <div class="page-path">
- <a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
- <a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
- | {changenav%navgraphentry}
- </div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/header.tmpl b/sys/lib/python/mercurial/templates/monoblue/header.tmpl
deleted file mode 100644
index dd038847a..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/header.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
-<head>
- <link rel="icon" href="{staticurl}hgicon.png" type="image/png" />
- <meta name="robots" content="index, nofollow"/>
- <link rel="stylesheet" href="{staticurl}style-monoblue.css" type="text/css" />
diff --git a/sys/lib/python/mercurial/templates/monoblue/index.tmpl b/sys/lib/python/mercurial/templates/monoblue/index.tmpl
deleted file mode 100644
index e04f5c27e..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/index.tmpl
+++ /dev/null
@@ -1,39 +0,0 @@
-{header}
- <title>{repo|escape}: Mercurial repositories index</title>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1>Mercurial Repositories</h1>
- <ul class="page-nav">
- </ul>
- </div>
-
- <table cellspacing="0">
- <tr>
- <td><a href="?sort={sort_name}">Name</a></td>
- <td><a href="?sort={sort_description}">Description</a></td>
- <td><a href="?sort={sort_contact}">Contact</a></td>
- <td><a href="?sort={sort_lastchange}">Last change</a></td>
- <td>&nbsp;</td>
- <td>&nbsp;</td>
- </tr>
- {entries%indexentry}
- </table>
- <div class="page-footer">
- {motd}
- </div>
-
- <div id="powered-by">
- <p><a href="http://mercurial.selenic.com/" title="Mercurial"><img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a></p>
- </div>
-
- <div id="corner-top-left"></div>
- <div id="corner-top-right"></div>
- <div id="corner-bottom-left"></div>
- <div id="corner-bottom-right"></div>
-
-</div>
-</body>
-</html>
diff --git a/sys/lib/python/mercurial/templates/monoblue/manifest.tmpl b/sys/lib/python/mercurial/templates/monoblue/manifest.tmpl
deleted file mode 100644
index f9da08199..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/manifest.tmpl
+++ /dev/null
@@ -1,51 +0,0 @@
-{header}
-<title>{repo|escape}: files</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li class="current">files</li>
- </ul>
- </div>
-
- <ul class="submenu">
- <li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> {archives%archiveentry}</li>
- {archives%archiveentry}
- </ul>
-
- <h2 class="no-link no-border">files</h2>
- <p class="files">{path|escape} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></p>
-
- <table>
- <tr class="parity{upparity}">
- <td>drwxr-xr-x</td>
- <td></td>
- <td></td>
- <td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td>
- <td class="link">&nbsp;</td>
- </tr>
- {dentries%direntry}
- {fentries%fileentry}
- </table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/map b/sys/lib/python/mercurial/templates/monoblue/map
deleted file mode 100644
index 146e3f62f..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/map
+++ /dev/null
@@ -1,214 +0,0 @@
-default = 'summary'
-mimetype = 'text/html; charset={encoding}'
-header = header.tmpl
-footer = footer.tmpl
-search = search.tmpl
-changelog = changelog.tmpl
-summary = summary.tmpl
-error = error.tmpl
-notfound = notfound.tmpl
-naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a>'
-filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
-filenodelink = '
- <tr class="parity{parity}">
- <td><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td>
- <td></td>
- <td>
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
- <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
- <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
- </td>
- </tr>'
-filenolink = '
- <tr class="parity{parity}">
- <td>
- <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td><td></td><td>file |
- annotate |
- <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
- <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
- </td>
- </tr>'
-fileellipses = '...'
-changelogentry = changelogentry.tmpl
-searchentry = changelogentry.tmpl
-changeset = changeset.tmpl
-manifest = manifest.tmpl
-direntry = '
- <tr class="parity{parity}">
- <td>drwxr-xr-x</td>
- <td></td>
- <td></td>
- <td><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}</a></td>
- <td><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></td>
- </tr>'
-fileentry = '
- <tr class="parity{parity}">
- <td>{permissions|permissions}</td>
- <td>{date|isodate}</td>
- <td>{size}</td>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a></td>
- <td>
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
- <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
- </td>
- </tr>'
-filerevision = filerevision.tmpl
-fileannotate = fileannotate.tmpl
-filediff = filediff.tmpl
-filelog = filelog.tmpl
-fileline = '
- <div style="font-family:monospace" class="parity{parity}">
- <pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</pre>
- </div>'
-annotateline = '
- <tr class="parity{parity}">
- <td class="linenr">
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}"
- title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
- </td>
- <td class="lineno">
- <a href="#{lineid}" id="{lineid}">{linenumber}</a>
- </td>
- <td class="source">{line|escape}</td>
- </tr>'
-difflineplus = '<span style="color:#008800;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
-difflineminus = '<span style="color:#cc0000;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
-difflineat = '<span style="color:#990099;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
-diffline = '<span><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
-changelogparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
- </td>
- </tr>'
-changesetbranch = '<dt>branch</dt><dd>{name}</dd>'
-changesetparent = '
- <dt>parent {rev}</dt>
- <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>'
-filerevbranch = '<dt>branch</dt><dd>{name}</dd>'
-filerevparent = '
- <dt>parent {rev}</dt>
- <dd>
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {rename%filerename}{node|short}
- </a>
- </dd>'
-filerename = '{file|escape}@'
-filelogrename = '| <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">base</a>'
-fileannotateparent = '
- <dt>parent {rev}</dt>
- <dd>
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {rename%filerename}{node|short}
- </a>
- </dd>'
-changelogchild = '
- <dt>child {rev}:</dt>
- <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>'
-changesetchild = '
- <dt>child {rev}</dt>
- <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>'
-filerevchild = '
- <dt>child {rev}</dt>
- <dd>
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a>
- </dd>'
-fileannotatechild = '
- <dt>child {rev}</dt>
- <dd>
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a>
- </dd>'
-tags = tags.tmpl
-tagentry = '
- <tr class="parity{parity}">
- <td class="nowrap">{date|age} ago</td>
- <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{tag|escape}</a></td>
- <td class="nowrap">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
- <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
- <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
- </td>
- </tr>'
-branches = branches.tmpl
-branchentry = '
- <tr class="parity{parity}">
- <td class="nowrap">{date|age} ago</td>
- <td><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- <td class="{status}">{branch|escape}</td>
- <td class="nowrap">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
- <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
- <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
- </td>
- </tr>'
-diffblock = '<pre>{lines}</pre>'
-filediffparent = '
- <dt>parent {rev}</dt>
- <dd><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></dd>'
-filelogparent = '
- <tr>
- <td align="right">parent {rev}:&nbsp;</td>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filediffchild = '
- <dt>child {rev}</dt>
- <dd><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></dd>'
-filelogchild = '
- <tr>
- <td align="right">child {rev}:&nbsp;</td>
- <td><a href="{url}file{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-shortlog = shortlog.tmpl
-tagtag = '<span class="tagtag" title="{name}">{name}</span> '
-branchtag = '<span class="branchtag" title="{name}">{name}</span> '
-inbranchtag = '<span class="inbranchtag" title="{name}">{name}</span> '
-shortlogentry = '
- <tr class="parity{parity}">
- <td class="nowrap">{date|age} ago</td>
- <td>{author|person}</td>
- <td>
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
- {desc|strip|firstline|escape|nonempty}
- <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span>
- </a>
- </td>
- <td class="nowrap">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
- <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
- </td>
- </tr>'
-filelogentry = '
- <tr class="parity{parity}">
- <td class="nowrap">{date|age} ago</td>
- <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></td>
- <td class="nowrap">
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>&nbsp;|&nbsp;<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a>&nbsp;|&nbsp;<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
- {rename%filelogrename}
- </td>
- </tr>'
-archiveentry = '<li><a href="{url}archive/{node|short}{extension}">{type|escape}</a></li>'
-indexentry = '
- <tr class="parity{parity}">
- <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td>
- <td>{description}</td>
- <td>{contact|obfuscate}</td>
- <td>{lastchange|age} ago</td>
- <td class="indexlinks">{archives%indexarchiveentry}</td>
- <td>
- <div class="rss_logo">
- <a href="{url}rss-log">RSS</a>
- <a href="{url}atom-log">Atom</a>
- </div>
- </td>
- </tr>\n'
-indexarchiveentry = '<a href="{url}archive/{node|short}{extension}">{type|escape}</a> '
-index = index.tmpl
-urlparameter = '{separator}{name}={value|urlescape}'
-hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
-graph = graph.tmpl
diff --git a/sys/lib/python/mercurial/templates/monoblue/notfound.tmpl b/sys/lib/python/mercurial/templates/monoblue/notfound.tmpl
deleted file mode 100644
index 230bc04dc..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/notfound.tmpl
+++ /dev/null
@@ -1,35 +0,0 @@
-{header}
- <title>{repo|escape}: Mercurial repository not found</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Not found: {repo|escape}</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li class="current">summary</li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}</li>
- </ul>
- </div>
-
- <h2 class="no-link no-border">Not Found</h2>
- <p class="normal">The specified repository "{repo|escape}" is unknown, sorry.</p>
- <p class="normal">Please go back to the <a href="{url}">main repository list page</a>.</p>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/search.tmpl b/sys/lib/python/mercurial/templates/monoblue/search.tmpl
deleted file mode 100644
index 7b37c52a9..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/search.tmpl
+++ /dev/null
@@ -1,34 +0,0 @@
-{header}
- <title>{repo|escape}: Search</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / search</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" value="{query|escape}" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}
- </ul>
- </div>
-
- <h2 class="no-link no-border">searching for {query|escape}</h2>
- {entries}
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/shortlog.tmpl b/sys/lib/python/mercurial/templates/monoblue/shortlog.tmpl
deleted file mode 100644
index 5de092783..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/shortlog.tmpl
+++ /dev/null
@@ -1,41 +0,0 @@
-{header}
- <title>{repo|escape}: shortlog</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / shortlog</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li class="current">shortlog</li>
- <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}</li>
- </ul>
- </div>
-
- <h2 class="no-link no-border">shortlog</h2>
-
- <table>
-{entries%shortlogentry}
- </table>
-
- <div class="page-path">
-{changenav%navshortentry}
- </div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/summary.tmpl b/sys/lib/python/mercurial/templates/monoblue/summary.tmpl
deleted file mode 100644
index b1679655f..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/summary.tmpl
+++ /dev/null
@@ -1,66 +0,0 @@
-{header}
- <title>{repo|escape}: Summary</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / summary</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li class="current">summary</li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <h2 class="no-link no-border">Mercurial Repository Overview</h2>
- <dl class="overview">
- <dt>name</dt>
- <dd>{repo|escape}</dd>
- <dt>description</dt>
- <dd>{desc}</dd>
- <dt>owner</dt>
- <dd>{owner|obfuscate}</dd>
- <dt>last change</dt>
- <dd>{lastchange|rfc822date}</dd>
- </dl>
-
- <h2><a href="{url}shortlog{sessionvars%urlparameter}">Changes</a></h2>
- <table>
-{shortlog}
- <tr class="light">
- <td colspan="4"><a class="list" href="{url}shortlog{sessionvars%urlparameter}">...</a></td>
- </tr>
- </table>
-
- <h2><a href="{url}tags{sessionvars%urlparameter}">Tags</a></h2>
- <table>
-{tags}
- <tr class="light">
- <td colspan="3"><a class="list" href="{url}tags{sessionvars%urlparameter}">...</a></td>
- </tr>
- </table>
-
- <h2 class="no-link">Branches</h2>
- <table>
- {branches%branchentry}
- <tr class="light">
- <td colspan="4"><a class="list" href="#">...</a></td>
- </tr>
- </table>
-{footer}
diff --git a/sys/lib/python/mercurial/templates/monoblue/tags.tmpl b/sys/lib/python/mercurial/templates/monoblue/tags.tmpl
deleted file mode 100644
index 54a672107..000000000
--- a/sys/lib/python/mercurial/templates/monoblue/tags.tmpl
+++ /dev/null
@@ -1,36 +0,0 @@
-{header}
- <title>{repo|escape}: Tags</title>
- <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
- <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
-</head>
-
-<body>
-<div id="container">
- <div class="page-header">
- <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Tags</h1>
-
- <form action="{url}log">
- {sessionvars%hiddenformentry}
- <dl class="search">
- <dt><label>Search: </label></dt>
- <dd><input type="text" name="rev" /></dd>
- </dl>
- </form>
-
- <ul class="page-nav">
- <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
- <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
- <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li class="current">tags</li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
- </ul>
- </div>
-
- <h2 class="no-link no-border">tags</h2>
- <table cellspacing="0">
-{entries%tagentry}
- </table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/branches.tmpl b/sys/lib/python/mercurial/templates/paper/branches.tmpl
deleted file mode 100644
index e3648b867..000000000
--- a/sys/lib/python/mercurial/templates/paper/branches.tmpl
+++ /dev/null
@@ -1,45 +0,0 @@
-{header}
-<title>{repo|escape}: branches</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-tags" title="Atom feed for {repo|escape}: branches" />
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-tags" title="RSS feed for {repo|escape}: branches" />
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li class="active">branches</li>
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>branches</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<table class="bigtable">
-<tr>
- <th>branch</th>
- <th>node</th>
-</tr>
-{entries%branchentry}
-</table>
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/changeset.tmpl b/sys/lib/python/mercurial/templates/paper/changeset.tmpl
deleted file mode 100644
index cce7aaee7..000000000
--- a/sys/lib/python/mercurial/templates/paper/changeset.tmpl
+++ /dev/null
@@ -1,71 +0,0 @@
-{header}
-<title>{repo|escape}: {node|short}</title>
-</head>
-<body>
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
- <li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
- <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
- <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
- <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-<ul>
- <li class="active">changeset</li>
- <li><a href="{url}raw-rev/{node|short}{sessionvars%urlparameter}">raw</a></li>
- <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">browse</a></li>
-</ul>
-<ul>
- {archives%archiveentry}
-</ul>
-</div>
-
-<div class="main">
-
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>changeset {rev}:{node|short} {changesetbranch%changelogbranchname} {changesettag}</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<div class="description">{desc|strip|escape|addbreaks|nonempty}</div>
-
-<table id="changesetEntry">
-<tr>
- <th class="author">author</th>
- <td class="author">{author|obfuscate}</td>
-</tr>
-<tr>
- <th class="date">date</th>
- <td class="date">{date|date} ({date|age} ago)</td></tr>
-<tr>
- <th class="author">parents</th>
- <td class="author">{parent%changesetparent}</td>
-</tr>
-<tr>
- <th class="author">children</th>
- <td class="author">{child%changesetchild}</td>
-</tr>
-<tr>
- <th class="files">files</th>
- <td class="files">{files}</td>
-</tr>
-</table>
-
-<div class="overflow">
-<div class="sourcefirst"> line diff</div>
-
-{diff}
-</div>
-
-</div>
-</div>
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/error.tmpl b/sys/lib/python/mercurial/templates/paper/error.tmpl
deleted file mode 100644
index d43102563..000000000
--- a/sys/lib/python/mercurial/templates/paper/error.tmpl
+++ /dev/null
@@ -1,43 +0,0 @@
-{header}
-<title>{repo|escape}: error</title>
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-</div>
-
-<div class="main">
-
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>error</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30"></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<div class="description">
-<p>
-An error occurred while processing your request:
-</p>
-<p>
-{error|escape}
-</p>
-</div>
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/fileannotate.tmpl b/sys/lib/python/mercurial/templates/paper/fileannotate.tmpl
deleted file mode 100644
index 5465a48ca..000000000
--- a/sys/lib/python/mercurial/templates/paper/fileannotate.tmpl
+++ /dev/null
@@ -1,77 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape} annotate</title>
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-
-<ul>
-<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
-<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
-</ul>
-<ul>
-<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
-<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
-<li class="active">annotate</li>
-<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li>
-<li><a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a></li>
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>annotate {file|escape} @ {rev}:{node|short}</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<div class="description">{desc|strip|escape|addbreaks|nonempty}</div>
-
-<table id="changesetEntry">
-<tr>
- <th class="author">author</th>
- <td class="author">{author|obfuscate}</td>
-</tr>
-<tr>
- <th class="date">date</th>
- <td class="date">{date|date} ({date|age} ago)</td>
-</tr>
-<tr>
- <th class="author">parents</th>
- <td class="author">{parent%filerevparent}</td>
-</tr>
-<tr>
- <th class="author">children</th>
- <td class="author">{child%filerevchild}</td>
-</tr>
-{changesettag}
-</table>
-
-<div class="overflow">
-<table class="bigtable">
-<tr>
- <th class="annotate">rev</th>
- <th class="line">&nbsp;&nbsp;line source</th>
-</tr>
-{annotate%annotateline}
-</table>
-</div>
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/filediff.tmpl b/sys/lib/python/mercurial/templates/paper/filediff.tmpl
deleted file mode 100644
index d031b68c4..000000000
--- a/sys/lib/python/mercurial/templates/paper/filediff.tmpl
+++ /dev/null
@@ -1,72 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape} diff</title>
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-<ul>
-<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
-<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
-</ul>
-<ul>
-<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
-<li class="active">diff</li>
-<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
-<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li>
-<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li>
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>diff {file|escape} @ {rev}:{node|short}</h3>
-
-<form class="search" action="{url}log">
-<p>{sessionvars%hiddenformentry}</p>
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<div class="description">{desc|strip|escape|addbreaks|nonempty}</div>
-
-<table id="changesetEntry">
-<tr>
- <th>author</th>
- <td>{author|obfuscate}</td>
-</tr>
-<tr>
- <th>date</th>
- <td>{date|date} ({date|age} ago)</td>
-</tr>
-<tr>
- <th>parents</th>
- <td>{parent%filerevparent}</td>
-</tr>
-<tr>
- <th>children</th>
- <td>{child%filerevchild}</td>
-</tr>
-{changesettag}
-</table>
-
-<div class="overflow">
-<div class="sourcefirst"> line diff</div>
-
-{diff}
-</div>
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/filelog.tmpl b/sys/lib/python/mercurial/templates/paper/filelog.tmpl
deleted file mode 100644
index 3a2d28523..000000000
--- a/sys/lib/python/mercurial/templates/paper/filelog.tmpl
+++ /dev/null
@@ -1,60 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape} history</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}" />
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log/tip/{file|urlescape}" title="RSS feed for {repo|escape}:{file}" />
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-<ul>
-<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
-<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
-</ul>
-<ul>
-<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
-<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
-<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
-<li class="active">file log</li>
-<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li>
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>log {file|escape}</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<div class="navigate">{nav%filenaventry}</div>
-
-<table class="bigtable">
- <tr>
- <th class="age">age</th>
- <th class="author">author</th>
- <th class="description">description</th>
- </tr>
-{entries%filelogentry}
-</table>
-
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/filelogentry.tmpl b/sys/lib/python/mercurial/templates/paper/filelogentry.tmpl
deleted file mode 100644
index 43e068c84..000000000
--- a/sys/lib/python/mercurial/templates/paper/filelogentry.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
- <tr class="parity{parity}">
- <td class="age">{date|age}</td>
- <td class="author">{author|person}</td>
- <td class="description"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a>{inbranch%changelogbranchname}{branches%changelogbranchhead}{tags%changelogtag}</td>
- </tr>
diff --git a/sys/lib/python/mercurial/templates/paper/filerevision.tmpl b/sys/lib/python/mercurial/templates/paper/filerevision.tmpl
deleted file mode 100644
index fec81b7b9..000000000
--- a/sys/lib/python/mercurial/templates/paper/filerevision.tmpl
+++ /dev/null
@@ -1,72 +0,0 @@
-{header}
-<title>{repo|escape}: {node|short} {file|escape}</title>
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-<ul>
-<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
-<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
-</ul>
-<ul>
-<li class="active">file</li>
-<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
-<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
-<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li>
-<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li>
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>view {file|escape} @ {rev}:{node|short}</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<div class="description">{desc|strip|escape|addbreaks|nonempty}</div>
-
-<table id="changesetEntry">
-<tr>
- <th class="author">author</th>
- <td class="author">{author|obfuscate}</td>
-</tr>
-<tr>
- <th class="date">date</th>
- <td class="date">{date|date} ({date|age} ago)</td>
-</tr>
-<tr>
- <th class="author">parents</th>
- <td class="author">{parent%filerevparent}</td>
-</tr>
-<tr>
- <th class="author">children</th>
- <td class="author">{child%filerevchild}</td>
-</tr>
-{changesettag}
-</table>
-
-<div class="overflow">
-<div class="sourcefirst"> line source</div>
-{text%fileline}
-<div class="sourcelast"></div>
-</div>
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/footer.tmpl b/sys/lib/python/mercurial/templates/paper/footer.tmpl
deleted file mode 100644
index 6231a3c19..000000000
--- a/sys/lib/python/mercurial/templates/paper/footer.tmpl
+++ /dev/null
@@ -1,4 +0,0 @@
-{motd}
-
-</body>
-</html>
diff --git a/sys/lib/python/mercurial/templates/paper/graph.tmpl b/sys/lib/python/mercurial/templates/paper/graph.tmpl
deleted file mode 100644
index 78b035c47..000000000
--- a/sys/lib/python/mercurial/templates/paper/graph.tmpl
+++ /dev/null
@@ -1,132 +0,0 @@
-{header}
-<title>{repo|escape}: revision graph</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}: log" />
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}: log" />
-<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]-->
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
-<li class="active">graph</li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-<ul>
-<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
-<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>graph</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<div class="navigate">
-<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
-<a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
-| rev {rev}: {changenav%navgraphentry}
-</div>
-
-<noscript><p>The revision graph only works with JavaScript-enabled browsers.</p></noscript>
-
-<div id="wrapper">
-<ul id="nodebgs"></ul>
-<canvas id="graph" width="224" height="{canvasheight}"></canvas>
-<ul id="graphnodes"></ul>
-</div>
-
-<script type="text/javascript" src="{staticurl}graph.js"></script>
-<script type="text/javascript">
-<!-- hide script content
-
-var data = {jsdata|json};
-var graph = new Graph();
-graph.scale({bg_height});
-
-graph.edge = function(x0, y0, x1, y1, color) {
-
- this.setColor(color, 0.0, 0.65);
- this.ctx.beginPath();
- this.ctx.moveTo(x0, y0);
- this.ctx.lineTo(x1, y1);
- this.ctx.stroke();
-
-}
-
-var revlink = '<li style="_STYLE"><span class="desc">';
-revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>';
-revlink += '</span>_TAGS<span class="info">_DATE ago, by _USER</span></li>';
-
-graph.vertex = function(x, y, color, parity, cur) {
-
- this.ctx.beginPath();
- color = this.setColor(color, 0.25, 0.75);
- this.ctx.arc(x, y, radius, 0, Math.PI * 2, true);
- this.ctx.fill();
-
- var bg = '<li class="bg parity' + parity + '"></li>';
- var left = (this.columns + 1) * this.bg_height;
- var nstyle = 'padding-left: ' + left + 'px;';
- var item = revlink.replace(/_STYLE/, nstyle);
- item = item.replace(/_PARITY/, 'parity' + parity);
- item = item.replace(/_NODEID/, cur[0]);
- item = item.replace(/_NODEID/, cur[0]);
- item = item.replace(/_DESC/, cur[3]);
- item = item.replace(/_USER/, cur[4]);
- item = item.replace(/_DATE/, cur[5]);
-
- var tagspan = '';
- if (cur[7].length || (cur[6][0] != 'default' || cur[6][1])) {
- tagspan = '<span class="logtags">';
- if (cur[6][1]) {
- tagspan += '<span class="branchhead" title="' + cur[6][0] + '">';
- tagspan += cur[6][0] + '</span> ';
- } else if (!cur[6][1] && cur[6][0] != 'default') {
- tagspan += '<span class="branchname" title="' + cur[6][0] + '">';
- tagspan += cur[6][0] + '</span> ';
- }
- if (cur[7].length) {
- for (var t in cur[7]) {
- var tag = cur[7][t];
- tagspan += '<span class="tag">' + tag + '</span> ';
- }
- }
- tagspan += '</span>';
- }
-
- item = item.replace(/_TAGS/, tagspan);
- return [bg, item];
-
-}
-
-graph.render(data);
-
-// stop hiding script -->
-</script>
-
-<div class="navigate">
-<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
-<a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
-| rev {rev}: {changenav%navgraphentry}
-</div>
-
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/header.tmpl b/sys/lib/python/mercurial/templates/paper/header.tmpl
deleted file mode 100644
index 305bc2f35..000000000
--- a/sys/lib/python/mercurial/templates/paper/header.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
-<head>
-<link rel="icon" href="{staticurl}hgicon.png" type="image/png" />
-<meta name="robots" content="index, nofollow" />
-<link rel="stylesheet" href="{staticurl}style-paper.css" type="text/css" />
diff --git a/sys/lib/python/mercurial/templates/paper/index.tmpl b/sys/lib/python/mercurial/templates/paper/index.tmpl
deleted file mode 100644
index 35915b570..000000000
--- a/sys/lib/python/mercurial/templates/paper/index.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-{header}
-<title>Mercurial repositories index</title>
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a>
-</div>
-<div class="main">
-<h2>Mercurial Repositories</h2>
-
-<table class="bigtable">
- <tr>
- <th><a href="?sort={sort_name}">Name</a></th>
- <th><a href="?sort={sort_description}">Description</a></th>
- <th><a href="?sort={sort_contact}">Contact</a></th>
- <th><a href="?sort={sort_lastchange}">Last change</a></th>
- <th>&nbsp;</th>
- </tr>
- {entries%indexentry}
-</table>
-</div>
-</div>
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/manifest.tmpl b/sys/lib/python/mercurial/templates/paper/manifest.tmpl
deleted file mode 100644
index a5e65725d..000000000
--- a/sys/lib/python/mercurial/templates/paper/manifest.tmpl
+++ /dev/null
@@ -1,54 +0,0 @@
-{header}
-<title>{repo|escape}: {node|short} {path|escape}</title>
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-<ul>
-<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
-<li class="active">browse</li>
-</ul>
-<ul>
-{archives%archiveentry}
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>directory {path|escape} @ {rev}:{node|short} {tags%changelogtag}</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<table class="bigtable">
-<tr>
- <th class="name">name</th>
- <th class="size">size</th>
- <th class="permissions">permissions</th>
-</tr>
-<tr class="fileline parity{upparity}">
- <td class="name"><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td>
- <td class="size"></td>
- <td class="permissions">drwxr-xr-x</td>
-</tr>
-{dentries%direntry}
-{fentries%fileentry}
-</table>
-</div>
-</div>
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/map b/sys/lib/python/mercurial/templates/paper/map
deleted file mode 100644
index 96b8a82e6..000000000
--- a/sys/lib/python/mercurial/templates/paper/map
+++ /dev/null
@@ -1,191 +0,0 @@
-default = 'shortlog'
-
-mimetype = 'text/html; charset={encoding}'
-header = header.tmpl
-footer = footer.tmpl
-search = search.tmpl
-
-changelog = shortlog.tmpl
-shortlog = shortlog.tmpl
-shortlogentry = shortlogentry.tmpl
-graph = graph.tmpl
-
-naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> '
-filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
-filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
-filenolink = '{file|escape} '
-fileellipses = '...'
-changelogentry = shortlogentry.tmpl
-searchentry = shortlogentry.tmpl
-changeset = changeset.tmpl
-manifest = manifest.tmpl
-
-direntry = '
- <tr class="fileline parity{parity}">
- <td class="name">
- <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">
- <img src="{staticurl}coal-folder.png" alt="dir."/> {basename|escape}/
- </a>
- <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">
- {emptydirs|escape}
- </a>
- </td>
- <td class="size"></td>
- <td class="permissions">drwxr-xr-x</td>
- </tr>'
-
-fileentry = '
- <tr class="fileline parity{parity}">
- <td class="filename">
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- <img src="{staticurl}coal-file.png" alt="file"/> {basename|escape}
- </a>
- </td>
- <td class="size">{size}</td>
- <td class="permissions">{permissions|permissions}</td>
- </tr>'
-
-filerevision = filerevision.tmpl
-fileannotate = fileannotate.tmpl
-filediff = filediff.tmpl
-filelog = filelog.tmpl
-fileline = '
- <div class="parity{parity} source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</div>'
-filelogentry = filelogentry.tmpl
-
-annotateline = '
- <tr class="parity{parity}">
- <td class="annotate">
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}"
- title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
- </td>
- <td class="source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</td>
- </tr>'
-
-diffblock = '<div class="source bottomline parity{parity}"><pre>{lines}</pre></div>'
-difflineplus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="plusline">{line|escape}</span>'
-difflineminus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="minusline">{line|escape}</span>'
-difflineat = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="atline">{line|escape}</span>'
-diffline = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}'
-
-changelogparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-
-changesetparent = '<a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> '
-
-filerevparent = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rename%filerename}{node|short}</a> '
-filerevchild = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> '
-
-filerename = '{file|escape}@'
-filelogrename = '
- <tr>
- <th>base:</th>
- <td>
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {file|escape}@{node|short}
- </a>
- </td>
- </tr>'
-fileannotateparent = '
- <tr>
- <td class="metatag">parent:</td>
- <td>
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {rename%filerename}{node|short}
- </a>
- </td>
- </tr>'
-changesetchild = ' <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>'
-changelogchild = '
- <tr>
- <th class="child">child</th>
- <td class="child">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
- {node|short}
- </a>
- </td>
- </tr>'
-fileannotatechild = '
- <tr>
- <td class="metatag">child:</td>
- <td>
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {node|short}
- </a>
- </td>
- </tr>'
-tags = tags.tmpl
-tagentry = '
- <tr class="tagEntry parity{parity}">
- <td>
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
- {tag|escape}
- </a>
- </td>
- <td class="node">
- {node|short}
- </td>
- </tr>'
-branches = branches.tmpl
-branchentry = '
- <tr class="tagEntry parity{parity}">
- <td>
- <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}">
- {branch|escape}
- </a>
- </td>
- <td class="node">
- {node|short}
- </td>
- </tr>'
-changelogtag = '<span class="tag">{name|escape}</span> '
-changesettag = '<span class="tag">{tag|escape}</span> '
-changelogbranchhead = '<span class="branchhead">{name|escape}</span> '
-changelogbranchname = '<span class="branchname">{name|escape}</span> '
-
-filediffparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filelogparent = '
- <tr>
- <th>parent {rev}:</th>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filediffchild = '
- <tr>
- <th class="child">child {rev}:</th>
- <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
- </td>
- </tr>'
-filelogchild = '
- <tr>
- <th>child {rev}:</th>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-
-indexentry = '
- <tr class="parity{parity}">
- <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td>
- <td>{description}</td>
- <td>{contact|obfuscate}</td>
- <td class="age">{lastchange|age} ago</td>
- <td class="indexlinks">{archives%indexarchiveentry}</td>
- </tr>\n'
-indexarchiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}">&nbsp;&darr;{type|escape}</a>'
-index = index.tmpl
-archiveentry = '
- <li>
- <a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a>
- </li>'
-notfound = notfound.tmpl
-error = error.tmpl
-urlparameter = '{separator}{name}={value|urlescape}'
-hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
diff --git a/sys/lib/python/mercurial/templates/paper/notfound.tmpl b/sys/lib/python/mercurial/templates/paper/notfound.tmpl
deleted file mode 100644
index e9e6ba420..000000000
--- a/sys/lib/python/mercurial/templates/paper/notfound.tmpl
+++ /dev/null
@@ -1,12 +0,0 @@
-{header}
-<title>Mercurial repository not found</title>
-</head>
-<body>
-
-<h2>Mercurial repository not found</h2>
-
-The specified repository "{repo|escape}" is unknown, sorry.
-
-Please go back to the <a href="{url}">main repository list page</a>.
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/search.tmpl b/sys/lib/python/mercurial/templates/paper/search.tmpl
deleted file mode 100644
index 67b20c8bb..000000000
--- a/sys/lib/python/mercurial/templates/paper/search.tmpl
+++ /dev/null
@@ -1,43 +0,0 @@
-{header}
-<title>{repo|escape}: searching for {query|escape}</title>
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a>
-</div>
-<ul>
-<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>searching for '{query|escape}'</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30"></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<table class="bigtable">
- <tr>
- <th class="age">age</th>
- <th class="author">author</th>
- <th class="description">description</th>
- </tr>
-{entries}
-</table>
-
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/shortlog.tmpl b/sys/lib/python/mercurial/templates/paper/shortlog.tmpl
deleted file mode 100644
index 96fc6d9c2..000000000
--- a/sys/lib/python/mercurial/templates/paper/shortlog.tmpl
+++ /dev/null
@@ -1,57 +0,0 @@
-{header}
-<title>{repo|escape}: log</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}" />
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}" />
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li class="active">log</li>
-<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
-<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-<ul>
-<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
-<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
-</ul>
-<ul>
-{archives%archiveentry}
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>log</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<div class="navigate">rev {rev}: {changenav%navshortentry}</div>
-
-<table class="bigtable">
- <tr>
- <th class="age">age</th>
- <th class="author">author</th>
- <th class="description">description</th>
- </tr>
-{entries%shortlogentry}
-</table>
-
-<div class="navigate">rev {rev}: {changenav%navshortentry}</div>
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/paper/shortlogentry.tmpl b/sys/lib/python/mercurial/templates/paper/shortlogentry.tmpl
deleted file mode 100644
index 43e068c84..000000000
--- a/sys/lib/python/mercurial/templates/paper/shortlogentry.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
- <tr class="parity{parity}">
- <td class="age">{date|age}</td>
- <td class="author">{author|person}</td>
- <td class="description"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a>{inbranch%changelogbranchname}{branches%changelogbranchhead}{tags%changelogtag}</td>
- </tr>
diff --git a/sys/lib/python/mercurial/templates/paper/tags.tmpl b/sys/lib/python/mercurial/templates/paper/tags.tmpl
deleted file mode 100644
index 1566e9f6b..000000000
--- a/sys/lib/python/mercurial/templates/paper/tags.tmpl
+++ /dev/null
@@ -1,45 +0,0 @@
-{header}
-<title>{repo|escape}: tags</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-tags" title="Atom feed for {repo|escape}: tags" />
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-tags" title="RSS feed for {repo|escape}: tags" />
-</head>
-<body>
-
-<div class="container">
-<div class="menu">
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" alt="mercurial" /></a>
-</div>
-<ul>
-<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
-<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
-<li class="active">tags</li>
-<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
-</ul>
-</div>
-
-<div class="main">
-<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
-<h3>tags</h3>
-
-<form class="search" action="{url}log">
-{sessionvars%hiddenformentry}
-<p><input name="rev" id="search1" type="text" size="30" /></p>
-<div id="hint">find changesets by author, revision,
-files, or words in the commit message</div>
-</form>
-
-<table class="bigtable">
-<tr>
- <th>tag</th>
- <th>node</th>
-</tr>
-{entries%tagentry}
-</table>
-</div>
-</div>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/raw/changeset.tmpl b/sys/lib/python/mercurial/templates/raw/changeset.tmpl
deleted file mode 100644
index b59d99b59..000000000
--- a/sys/lib/python/mercurial/templates/raw/changeset.tmpl
+++ /dev/null
@@ -1,9 +0,0 @@
-{header}
-# HG changeset patch
-# User {author}
-# Date {date|hgdate}
-# Node ID {node}
-{parent%changesetparent}
-{desc}
-
-{diff}
diff --git a/sys/lib/python/mercurial/templates/raw/error.tmpl b/sys/lib/python/mercurial/templates/raw/error.tmpl
deleted file mode 100644
index 9407c132d..000000000
--- a/sys/lib/python/mercurial/templates/raw/error.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-{header}
-error: {error}
diff --git a/sys/lib/python/mercurial/templates/raw/fileannotate.tmpl b/sys/lib/python/mercurial/templates/raw/fileannotate.tmpl
deleted file mode 100644
index ad1bed62a..000000000
--- a/sys/lib/python/mercurial/templates/raw/fileannotate.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{header}
-{annotate%annotateline}
-{footer}
-
-
diff --git a/sys/lib/python/mercurial/templates/raw/filediff.tmpl b/sys/lib/python/mercurial/templates/raw/filediff.tmpl
deleted file mode 100644
index c4014bc69..000000000
--- a/sys/lib/python/mercurial/templates/raw/filediff.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{header}
-{diff}
-{footer}
-
-
diff --git a/sys/lib/python/mercurial/templates/raw/index.tmpl b/sys/lib/python/mercurial/templates/raw/index.tmpl
deleted file mode 100644
index 29d7c9e1e..000000000
--- a/sys/lib/python/mercurial/templates/raw/index.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-{header}
-{entries%indexentry}
diff --git a/sys/lib/python/mercurial/templates/raw/manifest.tmpl b/sys/lib/python/mercurial/templates/raw/manifest.tmpl
deleted file mode 100644
index 8d4a934e9..000000000
--- a/sys/lib/python/mercurial/templates/raw/manifest.tmpl
+++ /dev/null
@@ -1,3 +0,0 @@
-{header}
-{dentries%direntry}{fentries%fileentry}
-{footer}
diff --git a/sys/lib/python/mercurial/templates/raw/map b/sys/lib/python/mercurial/templates/raw/map
deleted file mode 100644
index 2c14d095b..000000000
--- a/sys/lib/python/mercurial/templates/raw/map
+++ /dev/null
@@ -1,23 +0,0 @@
-mimetype = 'text/plain; charset={encoding}'
-header = ''
-footer = ''
-changeset = changeset.tmpl
-difflineplus = '{line}'
-difflineminus = '{line}'
-difflineat = '{line}'
-diffline = '{line}'
-changesetparent = '# Parent {node}'
-changesetchild = '# Child {node}'
-filenodelink = ''
-fileline = '{line}'
-diffblock = '{lines}'
-filediff = filediff.tmpl
-fileannotate = fileannotate.tmpl
-annotateline = '{author|user}@{rev}: {line}'
-manifest = manifest.tmpl
-direntry = 'drwxr-xr-x {basename}\n'
-fileentry = '{permissions|permissions} {size} {basename}\n'
-index = index.tmpl
-notfound = notfound.tmpl
-error = error.tmpl
-indexentry = '{url}\n'
diff --git a/sys/lib/python/mercurial/templates/raw/notfound.tmpl b/sys/lib/python/mercurial/templates/raw/notfound.tmpl
deleted file mode 100644
index a7b325110..000000000
--- a/sys/lib/python/mercurial/templates/raw/notfound.tmpl
+++ /dev/null
@@ -1,2 +0,0 @@
-{header}
-error: repository {repo} not found
diff --git a/sys/lib/python/mercurial/templates/rss/changelog.tmpl b/sys/lib/python/mercurial/templates/rss/changelog.tmpl
deleted file mode 100644
index 65b96ad9e..000000000
--- a/sys/lib/python/mercurial/templates/rss/changelog.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-{header}
- <title>{repo|escape} Changelog</title>
- <description>{repo|escape} Changelog</description>
- {entries%changelogentry}
- </channel>
-</rss> \ No newline at end of file
diff --git a/sys/lib/python/mercurial/templates/rss/changelogentry.tmpl b/sys/lib/python/mercurial/templates/rss/changelogentry.tmpl
deleted file mode 100644
index 12fe8e05c..000000000
--- a/sys/lib/python/mercurial/templates/rss/changelogentry.tmpl
+++ /dev/null
@@ -1,7 +0,0 @@
-<item>
- <title>{desc|strip|firstline|strip|escape}</title>
- <guid isPermaLink="true">{urlbase}{url}rev/{node|short}</guid>
- <description><![CDATA[{desc|strip|escape|addbreaks|nonempty}]]></description>
- <author>{author|obfuscate}</author>
- <pubDate>{date|rfc822date}</pubDate>
-</item>
diff --git a/sys/lib/python/mercurial/templates/rss/error.tmpl b/sys/lib/python/mercurial/templates/rss/error.tmpl
deleted file mode 100644
index 87e6009ce..000000000
--- a/sys/lib/python/mercurial/templates/rss/error.tmpl
+++ /dev/null
@@ -1,10 +0,0 @@
-{header}
- <title>Error</title>
- <description>Error</description>
- <item>
- <title>Error</title>
- <description>{error|escape}</description>
- <guid>http://mercurial.selenic.com/#error</guid>
- </item>
- </channel>
-</rss>
diff --git a/sys/lib/python/mercurial/templates/rss/filelog.tmpl b/sys/lib/python/mercurial/templates/rss/filelog.tmpl
deleted file mode 100644
index 31f4dc78b..000000000
--- a/sys/lib/python/mercurial/templates/rss/filelog.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-{header}
- <title>{repo|escape}: {file|escape} history</title>
- <description>{file|escape} revision history</description>
- {entries%filelogentry}
- </channel>
-</rss>
diff --git a/sys/lib/python/mercurial/templates/rss/filelogentry.tmpl b/sys/lib/python/mercurial/templates/rss/filelogentry.tmpl
deleted file mode 100644
index 220dc4a24..000000000
--- a/sys/lib/python/mercurial/templates/rss/filelogentry.tmpl
+++ /dev/null
@@ -1,7 +0,0 @@
-<item>
- <title>{desc|strip|firstline|strip|escape}</title>
- <link>{urlbase}{url}log{{node|short}}/{file|urlescape}</link>
- <description><![CDATA[{desc|strip|escape|addbreaks|nonempty}]]></description>
- <author>{author|obfuscate}</author>
- <pubDate>{date|rfc822date}</pubDate>
-</item>
diff --git a/sys/lib/python/mercurial/templates/rss/header.tmpl b/sys/lib/python/mercurial/templates/rss/header.tmpl
deleted file mode 100644
index ed29196d3..000000000
--- a/sys/lib/python/mercurial/templates/rss/header.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-<?xml version="1.0" encoding="{encoding}"?>
-<rss version="2.0">
- <channel>
- <link>{urlbase}{url}</link>
- <language>en-us</language>
diff --git a/sys/lib/python/mercurial/templates/rss/map b/sys/lib/python/mercurial/templates/rss/map
deleted file mode 100644
index 2f777b796..000000000
--- a/sys/lib/python/mercurial/templates/rss/map
+++ /dev/null
@@ -1,10 +0,0 @@
-default = 'changelog'
-mimetype = 'text/xml; charset={encoding}'
-header = header.tmpl
-changelog = changelog.tmpl
-changelogentry = changelogentry.tmpl
-filelog = filelog.tmpl
-filelogentry = filelogentry.tmpl
-tags = tags.tmpl
-tagentry = tagentry.tmpl
-error = error.tmpl
diff --git a/sys/lib/python/mercurial/templates/rss/tagentry.tmpl b/sys/lib/python/mercurial/templates/rss/tagentry.tmpl
deleted file mode 100644
index 42fa038f6..000000000
--- a/sys/lib/python/mercurial/templates/rss/tagentry.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-<item>
- <title>{tag|escape}</title>
- <link>{urlbase}{url}rev/{node|short}</link>
- <description><![CDATA[{tag|strip|escape|addbreaks}]]></description>
- <pubDate>{date|rfc822date}</pubDate>
-</item>
diff --git a/sys/lib/python/mercurial/templates/rss/tags.tmpl b/sys/lib/python/mercurial/templates/rss/tags.tmpl
deleted file mode 100644
index 93f1e96b5..000000000
--- a/sys/lib/python/mercurial/templates/rss/tags.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-{header}
- <title>{repo|escape}: tags </title>
- <description>{repo|escape} tag history</description>
- {entriesnotip%tagentry}
- </channel>
-</rss>
diff --git a/sys/lib/python/mercurial/templates/spartan/branches.tmpl b/sys/lib/python/mercurial/templates/spartan/branches.tmpl
deleted file mode 100644
index 883050163..000000000
--- a/sys/lib/python/mercurial/templates/spartan/branches.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-{header}
-<title>{repo|escape}: branches</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-branches" title="Atom feed for {repo|escape}: branches">
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-branches" title="RSS feed for {repo|escape}: branches">
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a>
-<a type="application/rss+xml" href="{url}rss-branches">rss</a>
-<a type="application/atom+xml" href="{url}atom-branches">atom</a>
-</div>
-
-<h2>branches:</h2>
-
-<ul id="tagEntries">
-{entries%branchentry}
-</ul>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/changelog.tmpl b/sys/lib/python/mercurial/templates/spartan/changelog.tmpl
deleted file mode 100644
index 710d7b6ef..000000000
--- a/sys/lib/python/mercurial/templates/spartan/changelog.tmpl
+++ /dev/null
@@ -1,43 +0,0 @@
-{header}
-<title>{repo|escape}: changelog</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}">
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}">
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
-{archives%archiveentry}
-<a type="application/rss+xml" href="{url}rss-log">rss</a>
-<a type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}">atom</a>
-</div>
-
-<h2>changelog for {repo|escape}</h2>
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<p>
-<label for="search1">search:</label>
-<input name="rev" id="search1" type="text" size="30">
-navigate: <small class="navigate">{changenav%naventry}</small>
-</p>
-</form>
-
-{entries%changelogentry}
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<p>
-<label for="search2">search:</label>
-<input name="rev" id="search2" type="text" size="30">
-navigate: <small class="navigate">{changenav%naventry}</small>
-</p>
-</form>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/changelogentry.tmpl b/sys/lib/python/mercurial/templates/spartan/changelogentry.tmpl
deleted file mode 100644
index e4d2c0ef5..000000000
--- a/sys/lib/python/mercurial/templates/spartan/changelogentry.tmpl
+++ /dev/null
@@ -1,25 +0,0 @@
-<table class="logEntry parity{parity}">
- <tr>
- <th class="age">{date|age} ago:</th>
- <th class="firstline">{desc|strip|firstline|escape|nonempty}</th>
- </tr>
- <tr>
- <th class="revision">changeset {rev}:</th>
- <td class="node"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>
- {parent%changelogparent}
- {child%changelogchild}
- {changelogtag}
- <tr>
- <th class="author">author:</th>
- <td class="author">{author|obfuscate}</td>
- </tr>
- <tr>
- <th class="date">date:</th>
- <td class="date">{date|date}</td>
- </tr>
- <tr>
- <th class="files"><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>:</th>
- <td class="files">{files}</td>
- </tr>
-</table>
diff --git a/sys/lib/python/mercurial/templates/spartan/changeset.tmpl b/sys/lib/python/mercurial/templates/spartan/changeset.tmpl
deleted file mode 100644
index 4826c08a1..000000000
--- a/sys/lib/python/mercurial/templates/spartan/changeset.tmpl
+++ /dev/null
@@ -1,51 +0,0 @@
-{header}
-<title>{repo|escape}: changeset {node|short}</title>
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
-<a href="{url}raw-rev/{node|short}">raw</a>
-{archives%archiveentry}
-</div>
-
-<h2>changeset: {desc|strip|escape|firstline|nonempty}</h2>
-
-<table id="changesetEntry">
-<tr>
- <th class="changeset">changeset {rev}:</th>
- <td class="changeset"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
-</tr>
-{parent%changesetparent}
-{child%changesetchild}
-{changesettag}
-<tr>
- <th class="author">author:</th>
- <td class="author">{author|obfuscate}</td>
-</tr>
-<tr>
- <th class="date">date:</th>
- <td class="date">{date|date} ({date|age} ago)</td>
-</tr>
-<tr>
- <th class="files">files:</th>
- <td class="files">{files}</td>
-</tr>
-<tr>
- <th class="description">description:</th>
- <td class="description">{desc|strip|escape|addbreaks|nonempty}</td>
-</tr>
-</table>
-
-<div id="changesetDiff">
-{diff}
-</div>
-
-{footer}
-
-
diff --git a/sys/lib/python/mercurial/templates/spartan/error.tmpl b/sys/lib/python/mercurial/templates/spartan/error.tmpl
deleted file mode 100644
index fc2c78866..000000000
--- a/sys/lib/python/mercurial/templates/spartan/error.tmpl
+++ /dev/null
@@ -1,15 +0,0 @@
-{header}
-<title>Mercurial Error</title>
-</head>
-<body>
-
-<h2>Mercurial Error</h2>
-
-<p>
-An error occurred while processing your request:
-</p>
-<p>
-{error|escape}
-</p>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/fileannotate.tmpl b/sys/lib/python/mercurial/templates/spartan/fileannotate.tmpl
deleted file mode 100644
index 2631fbe97..000000000
--- a/sys/lib/python/mercurial/templates/spartan/fileannotate.tmpl
+++ /dev/null
@@ -1,48 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape} annotate</title>
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a>
-<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a>
-<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>
-<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
-<a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a>
-</div>
-
-<h2>Annotate {file|escape}</h2>
-
-<table>
-<tr>
- <td class="metatag">changeset {rev}:</td>
- <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
-{parent%fileannotateparent}
-{child%fileannotatechild}
-<tr>
- <td class="metatag">author:</td>
- <td>{author|obfuscate}</td></tr>
-<tr>
- <td class="metatag">date:</td>
- <td>{date|date} ({date|age} ago)</td>
-</tr>
-<tr>
- <td class="metatag">permissions:</td>
- <td>{permissions|permissions}</td>
-</tr>
-<tr>
- <td class="metatag">description:</td>
- <td>{desc|strip|escape|addbreaks|nonempty}</td>
-</tr>
-</table>
-
-<table cellspacing="0" cellpadding="0">
-{annotate%annotateline}
-</table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/filediff.tmpl b/sys/lib/python/mercurial/templates/spartan/filediff.tmpl
deleted file mode 100644
index 2a6b60e53..000000000
--- a/sys/lib/python/mercurial/templates/spartan/filediff.tmpl
+++ /dev/null
@@ -1,36 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape} diff</title>
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a>
-<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>
-<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
-<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
-<a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a>
-</div>
-
-<h2>{file|escape}</h2>
-
-<table id="filediffEntry">
-<tr>
- <th class="revision">revision {rev}:</th>
- <td class="revision"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
-</tr>
-{parent%filediffparent}
-{child%filediffchild}
-</table>
-
-<div id="fileDiff">
-{diff}
-</div>
-
-{footer}
-
-
diff --git a/sys/lib/python/mercurial/templates/spartan/filelog.tmpl b/sys/lib/python/mercurial/templates/spartan/filelog.tmpl
deleted file mode 100644
index 0cb4e8990..000000000
--- a/sys/lib/python/mercurial/templates/spartan/filelog.tmpl
+++ /dev/null
@@ -1,28 +0,0 @@
-{header}
-<title>{repo|escape}: {file|escape} history</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}">
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log/tip/{file|urlescape}" title="RSS feed for {repo|escape}:{file}">
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>
-<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
-<a type="application/rss+xml" href="{url}rss-log/tip/{file|urlescape}">rss</a>
-<a type="application/atom+xml" href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}">atom</a>
-</div>
-
-<h2>{file|escape} revision history</h2>
-
-<p>navigate: <small class="navigate">{nav%filenaventry}</small></p>
-
-{entries%filelogentry}
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/filelogentry.tmpl b/sys/lib/python/mercurial/templates/spartan/filelogentry.tmpl
deleted file mode 100644
index 526016b84..000000000
--- a/sys/lib/python/mercurial/templates/spartan/filelogentry.tmpl
+++ /dev/null
@@ -1,25 +0,0 @@
-<table class="logEntry parity{parity}">
- <tr>
- <th class="age">{date|age} ago:</th>
- <th class="firstline"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></th>
- </tr>
- <tr>
- <th class="revision">revision {filerev}:</td>
- <td class="node">
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a>
- <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">(diff)</a>
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">(annotate)</a>
- </td>
- </tr>
- {rename%filelogrename}
- <tr>
- <th class="author">author:</th>
- <td class="author">{author|obfuscate}</td>
- </tr>
- <tr>
- <th class="date">date:</th>
- <td class="date">{date|date}</td>
- </tr>
-</table>
-
-
diff --git a/sys/lib/python/mercurial/templates/spartan/filerevision.tmpl b/sys/lib/python/mercurial/templates/spartan/filerevision.tmpl
deleted file mode 100644
index b13695d23..000000000
--- a/sys/lib/python/mercurial/templates/spartan/filerevision.tmpl
+++ /dev/null
@@ -1,46 +0,0 @@
-{header}
-<title>{repo|escape}:{file|escape}</title>
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a>
-<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a>
-<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
-<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
-<a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a>
-</div>
-
-<h2>{file|escape}</h2>
-
-<table>
-<tr>
- <td class="metatag">changeset {rev}:</td>
- <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
-{parent%filerevparent}
-{child%filerevchild}
-<tr>
- <td class="metatag">author:</td>
- <td>{author|obfuscate}</td></tr>
-<tr>
- <td class="metatag">date:</td>
- <td>{date|date} ({date|age} ago)</td></tr>
-<tr>
- <td class="metatag">permissions:</td>
- <td>{permissions|permissions}</td></tr>
-<tr>
- <td class="metatag">description:</td>
- <td>{desc|strip|escape|addbreaks|nonempty}</td>
-</tr>
-</table>
-
-<pre>
-{text%fileline}
-</pre>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/footer.tmpl b/sys/lib/python/mercurial/templates/spartan/footer.tmpl
deleted file mode 100644
index afcb2d08f..000000000
--- a/sys/lib/python/mercurial/templates/spartan/footer.tmpl
+++ /dev/null
@@ -1,8 +0,0 @@
-{motd}
-<div class="logo">
-<a href="http://mercurial.selenic.com/">
-<img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a>
-</div>
-
-</body>
-</html>
diff --git a/sys/lib/python/mercurial/templates/spartan/graph.tmpl b/sys/lib/python/mercurial/templates/spartan/graph.tmpl
deleted file mode 100644
index 62c3ea0d6..000000000
--- a/sys/lib/python/mercurial/templates/spartan/graph.tmpl
+++ /dev/null
@@ -1,96 +0,0 @@
-{header}
-<title>{repo|escape}: graph</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-tags" title="Atom feed for {repo|escape}: tags">
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-tags" title="RSS feed for {repo|escape}: tags">
-<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]-->
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a>
-</div>
-
-<h2>graph</h2>
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<p>
-<label for="search1">search:</label>
-<input name="rev" id="search1" type="text" size="30">
-navigate: <small class="navigate">{changenav%navgraphentry}</small>
-</p>
-</form>
-
-<noscript>The revision graph only works with JavaScript-enabled browsers.</noscript>
-
-<div id="wrapper">
-<ul id="nodebgs"></ul>
-<canvas id="graph" width="224" height="{canvasheight}"></canvas>
-<ul id="graphnodes"></ul>
-</div>
-
-<script type="text/javascript" src="{staticurl}graph.js"></script>
-<script type="text/javascript">
-<!-- hide script content
-
-var data = {jsdata|json};
-var graph = new Graph();
-graph.scale({bg_height});
-
-graph.edge = function(x0, y0, x1, y1, color) {
-
- this.setColor(color, 0.0, 0.65);
- this.ctx.beginPath();
- this.ctx.moveTo(x0, y0);
- this.ctx.lineTo(x1, y1);
- this.ctx.stroke();
-
-}
-
-var revlink = '<li style="_STYLE"><span class="desc">';
-revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>';
-revlink += '</span><span class="info">_DATE ago, by _USER</span></li>';
-
-graph.vertex = function(x, y, color, parity, cur) {
-
- this.ctx.beginPath();
- color = this.setColor(color, 0.25, 0.75);
- this.ctx.arc(x, y, radius, 0, Math.PI * 2, true);
- this.ctx.fill();
-
- var bg = '<li class="bg parity' + parity + '"></li>';
- var left = (this.columns + 1) * this.bg_height;
- var nstyle = 'padding-left: ' + left + 'px;';
- var item = revlink.replace(/_STYLE/, nstyle);
- item = item.replace(/_PARITY/, 'parity' + parity);
- item = item.replace(/_NODEID/, cur[0]);
- item = item.replace(/_NODEID/, cur[0]);
- item = item.replace(/_DESC/, cur[3]);
- item = item.replace(/_USER/, cur[4]);
- item = item.replace(/_DATE/, cur[5]);
-
- return [bg, item];
-
-}
-
-graph.render(data);
-
-// stop hiding script -->
-</script>
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<p>
-<label for="search1">search:</label>
-<input name="rev" id="search1" type="text" size="30">
-navigate: <small class="navigate">{changenav%navgraphentry}</small>
-</p>
-</form>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/header.tmpl b/sys/lib/python/mercurial/templates/spartan/header.tmpl
deleted file mode 100644
index 646b2fe30..000000000
--- a/sys/lib/python/mercurial/templates/spartan/header.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
-<html>
-<head>
-<link rel="icon" href="{staticurl}hgicon.png" type="image/png">
-<meta name="robots" content="index, nofollow" />
-<link rel="stylesheet" href="{staticurl}style.css" type="text/css" />
diff --git a/sys/lib/python/mercurial/templates/spartan/index.tmpl b/sys/lib/python/mercurial/templates/spartan/index.tmpl
deleted file mode 100644
index bf8d4de9f..000000000
--- a/sys/lib/python/mercurial/templates/spartan/index.tmpl
+++ /dev/null
@@ -1,19 +0,0 @@
-{header}
-<title>Mercurial repositories index</title>
-</head>
-<body>
-
-<h2>Mercurial Repositories</h2>
-
-<table>
- <tr>
- <td><a href="?sort={sort_name}">Name</a></td>
- <td><a href="?sort={sort_description}">Description</a></td>
- <td><a href="?sort={sort_contact}">Contact</a></td>
- <td><a href="?sort={sort_lastchange}">Last change</a></td>
- <td>&nbsp;</td>
- </tr>
- {entries%indexentry}
-</table>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/manifest.tmpl b/sys/lib/python/mercurial/templates/spartan/manifest.tmpl
deleted file mode 100644
index 5b2e881a8..000000000
--- a/sys/lib/python/mercurial/templates/spartan/manifest.tmpl
+++ /dev/null
@@ -1,28 +0,0 @@
-{header}
-<title>{repo|escape}: files for changeset {node|short}</title>
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a>
-{archives%archiveentry}
-</div>
-
-<h2>files for changeset {node|short}: {path|escape}</h2>
-
-<table cellpadding="0" cellspacing="0">
-<tr class="parity{upparity}">
- <td><tt>drwxr-xr-x</tt>&nbsp;
- <td>&nbsp;
- <td>&nbsp;
- <td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a>
-</tr>
-{dentries%direntry}
-{fentries%fileentry}
-</table>
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/map b/sys/lib/python/mercurial/templates/spartan/map
deleted file mode 100644
index 4432972ef..000000000
--- a/sys/lib/python/mercurial/templates/spartan/map
+++ /dev/null
@@ -1,178 +0,0 @@
-default = 'shortlog'
-mimetype = 'text/html; charset={encoding}'
-header = header.tmpl
-footer = footer.tmpl
-search = search.tmpl
-changelog = changelog.tmpl
-shortlog = shortlog.tmpl
-shortlogentry = shortlogentry.tmpl
-graph = graph.tmpl
-naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
-filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> '
-filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
-filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
-filenolink = '{file|escape} '
-fileellipses = '...'
-changelogentry = changelogentry.tmpl
-searchentry = changelogentry.tmpl
-changeset = changeset.tmpl
-manifest = manifest.tmpl
-
-direntry = '
- <tr class="parity{parity}">
- <td><tt>drwxr-xr-x</tt>&nbsp;
- <td>&nbsp;
- <td>&nbsp;
- <td>
- <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}/</a>
- <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">
- {emptydirs|urlescape}
- </a>'
-
-fileentry = '
- <tr class="parity{parity}">
- <td><tt>{permissions|permissions}</tt>&nbsp;
- <td align=right><tt class="date">{date|isodate}</tt>&nbsp;
- <td align=right><tt>{size}</tt>&nbsp;
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a>'
-
-filerevision = filerevision.tmpl
-fileannotate = fileannotate.tmpl
-filediff = filediff.tmpl
-filelog = filelog.tmpl
-fileline = '<div class="parity{parity}"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>&nbsp;{line|escape}</div>'
-filelogentry = filelogentry.tmpl
-
-# The &nbsp; ensures that all table cells have content (even if there
-# is an empty line in the annotated file), which in turn ensures that
-# all table rows have equal height.
-annotateline = '
- <tr class="parity{parity}">
- <td class="annotate">
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}"
- title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
- </td>
- <td>
- <a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>
- </td>
- <td><pre>&nbsp;{line|escape}</pre></td>
- </tr>'
-difflineplus = '<span class="plusline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>'
-difflineminus = '<span class="minusline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>'
-difflineat = '<span class="atline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>'
-diffline = '<a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}'
-changelogparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent">
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
- </td>
- </tr>'
-changesetparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filerevparent = '
- <tr>
- <td class="metatag">parent:</td>
- <td>
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {rename%filerename}{node|short}
- </a>
- </td>
- </tr>'
-filerename = '{file|escape}@'
-filelogrename = '
- <tr>
- <th>base:</th>
- <td>
- <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {file|escape}@{node|short}
- </a>
- </td>
- </tr>'
-fileannotateparent = '
- <tr>
- <td class="metatag">parent:</td>
- <td>
- <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
- {rename%filerename}{node|short}
- </a>
- </td>
- </tr>'
-changesetchild = '
- <tr>
- <th class="child">child {rev}:</th>
- <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-changelogchild = '
- <tr>
- <th class="child">child {rev}:</th>
- <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filerevchild = '
- <tr>
- <td class="metatag">child:</td>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-fileannotatechild = '
- <tr>
- <td class="metatag">child:</td>
- <td><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-tags = tags.tmpl
-tagentry = '
- <li class="tagEntry parity{parity}">
- <tt class="node">{node}</tt>
- <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{tag|escape}</a>
- </li>'
-branches = branches.tmpl
-branchentry = '
- <li class="tagEntry parity{parity}">
- <tt class="node">{node}</tt>
- <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}">{branch|escape}</a>
- </li>'
-diffblock = '<pre class="parity{parity}">{lines}</pre>'
-changelogtag = '<tr><th class="tag">tag:</th><td class="tag">{tag|escape}</td></tr>'
-changesettag = '<tr><th class="tag">tag:</th><td class="tag">{tag|escape}</td></tr>'
-filediffparent = '
- <tr>
- <th class="parent">parent {rev}:</th>
- <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filelogparent = '
- <tr>
- <th>parent {rev}:</th>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filediffchild = '
- <tr>
- <th class="child">child {rev}:</th>
- <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-filelogchild = '
- <tr>
- <th>child {rev}:</th>
- <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
- </tr>'
-indexentry = '
- <tr class="parity{parity}">
- <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td>
- <td>{description}</td>
- <td>{contact|obfuscate}</td>
- <td class="age">{lastchange|age} ago</td>
- <td class="indexlinks">
- <a href="{url}rss-log">RSS</a>
- <a href="{url}atom-log">Atom</a>
- {archives%archiveentry}
- </td>
- </tr>'
-index = index.tmpl
-archiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a> '
-notfound = notfound.tmpl
-error = error.tmpl
-urlparameter = '{separator}{name}={value|urlescape}'
-hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
diff --git a/sys/lib/python/mercurial/templates/spartan/notfound.tmpl b/sys/lib/python/mercurial/templates/spartan/notfound.tmpl
deleted file mode 100644
index e9e6ba420..000000000
--- a/sys/lib/python/mercurial/templates/spartan/notfound.tmpl
+++ /dev/null
@@ -1,12 +0,0 @@
-{header}
-<title>Mercurial repository not found</title>
-</head>
-<body>
-
-<h2>Mercurial repository not found</h2>
-
-The specified repository "{repo|escape}" is unknown, sorry.
-
-Please go back to the <a href="{url}">main repository list page</a>.
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/search.tmpl b/sys/lib/python/mercurial/templates/spartan/search.tmpl
deleted file mode 100644
index 4987a5924..000000000
--- a/sys/lib/python/mercurial/templates/spartan/search.tmpl
+++ /dev/null
@@ -1,36 +0,0 @@
-{header}
-<title>{repo|escape}: searching for {query|escape}</title>
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
-{archives%archiveentry}
-</div>
-
-<h2>searching for {query|escape}</h2>
-
-<form>
-{sessionvars%hiddenformentry}
-<p>
-search:
-<input name="rev" type="text" width="30" value="{query|escape}">
-</p>
-</form>
-
-{entries}
-
-<form>
-{sessionvars%hiddenformentry}
-<p>
-search:
-<input name="rev" type="text" width="30" value="{query|escape}">
-</p>
-</form>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/shortlog.tmpl b/sys/lib/python/mercurial/templates/spartan/shortlog.tmpl
deleted file mode 100644
index 1560c63d2..000000000
--- a/sys/lib/python/mercurial/templates/spartan/shortlog.tmpl
+++ /dev/null
@@ -1,43 +0,0 @@
-{header}
-<title>{repo|escape}: shortlog</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-log" title="Atom feed for {repo|escape}">
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-log" title="RSS feed for {repo|escape}">
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}tags{sessionvars%urlparameter}">tags</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a>
-{archives%archiveentry}
-<a type="application/rss+xml" href="{url}rss-log">rss</a>
-<a type="application/rss+xml" href="{url}atom-log" title="Atom feed for {repo|escape}">atom</a>
-</div>
-
-<h2>shortlog for {repo|escape}</h2>
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<p>
-<label for="search1">search:</label>
-<input name="rev" id="search1" type="text" size="30">
-navigate: <small class="navigate">{changenav%navshortentry}</small>
-</p>
-</form>
-
-{entries%shortlogentry}
-
-<form action="{url}log">
-{sessionvars%hiddenformentry}
-<p>
-<label for="search2">search:</label>
-<input name="rev" id="search2" type="text" size="30">
-navigate: <small class="navigate">{changenav%navshortentry}</small>
-</p>
-</form>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/spartan/shortlogentry.tmpl b/sys/lib/python/mercurial/templates/spartan/shortlogentry.tmpl
deleted file mode 100644
index b6857db80..000000000
--- a/sys/lib/python/mercurial/templates/spartan/shortlogentry.tmpl
+++ /dev/null
@@ -1,7 +0,0 @@
-<table class="slogEntry parity{parity}">
- <tr>
- <td class="age">{date|age}</td>
- <td class="author">{author|person}</td>
- <td class="node"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></td>
- </tr>
-</table>
diff --git a/sys/lib/python/mercurial/templates/spartan/tags.tmpl b/sys/lib/python/mercurial/templates/spartan/tags.tmpl
deleted file mode 100644
index 029452d1b..000000000
--- a/sys/lib/python/mercurial/templates/spartan/tags.tmpl
+++ /dev/null
@@ -1,26 +0,0 @@
-{header}
-<title>{repo|escape}: tags</title>
-<link rel="alternate" type="application/atom+xml"
- href="{url}atom-tags" title="Atom feed for {repo|escape}: tags">
-<link rel="alternate" type="application/rss+xml"
- href="{url}rss-tags" title="RSS feed for {repo|escape}: tags">
-</head>
-<body>
-
-<div class="buttons">
-<a href="{url}log{sessionvars%urlparameter}">changelog</a>
-<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
-<a href="{url}graph{sessionvars%urlparameter}">graph</a>
-<a href="{url}branches{sessionvars%urlparameter}">branches</a>
-<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a>
-<a type="application/rss+xml" href="{url}rss-tags">rss</a>
-<a type="application/atom+xml" href="{url}atom-tags">atom</a>
-</div>
-
-<h2>tags:</h2>
-
-<ul id="tagEntries">
-{entries%tagentry}
-</ul>
-
-{footer}
diff --git a/sys/lib/python/mercurial/templates/static/background.png b/sys/lib/python/mercurial/templates/static/background.png
deleted file mode 100644
index af8a0aa4e..000000000
--- a/sys/lib/python/mercurial/templates/static/background.png
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/mercurial/templates/static/coal-file.png b/sys/lib/python/mercurial/templates/static/coal-file.png
deleted file mode 100644
index 7ecf4632e..000000000
--- a/sys/lib/python/mercurial/templates/static/coal-file.png
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/mercurial/templates/static/coal-folder.png b/sys/lib/python/mercurial/templates/static/coal-folder.png
deleted file mode 100644
index d1b8ecc07..000000000
--- a/sys/lib/python/mercurial/templates/static/coal-folder.png
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/mercurial/templates/static/excanvas.js b/sys/lib/python/mercurial/templates/static/excanvas.js
deleted file mode 100644
index 9d71658ad..000000000
--- a/sys/lib/python/mercurial/templates/static/excanvas.js
+++ /dev/null
@@ -1,19 +0,0 @@
-if(!window.CanvasRenderingContext2D){(function(){var I=Math,i=I.round,L=I.sin,M=I.cos,m=10,A=m/2,Q={init:function(a){var b=a||document;if(/MSIE/.test(navigator.userAgent)&&!window.opera){var c=this;b.attachEvent("onreadystatechange",function(){c.r(b)})}},r:function(a){if(a.readyState=="complete"){if(!a.namespaces["s"]){a.namespaces.add("g_vml_","urn:schemas-microsoft-com:vml")}var b=a.createStyleSheet();b.cssText="canvas{display:inline-block;overflow:hidden;text-align:left;width:300px;height:150px}g_vml_\\:*{behavior:url(#default#VML)}";
-var c=a.getElementsByTagName("canvas");for(var d=0;d<c.length;d++){if(!c[d].getContext){this.initElement(c[d])}}}},q:function(a){var b=a.outerHTML,c=a.ownerDocument.createElement(b);if(b.slice(-2)!="/>"){var d="/"+a.tagName,e;while((e=a.nextSibling)&&e.tagName!=d){e.removeNode()}if(e){e.removeNode()}}a.parentNode.replaceChild(c,a);return c},initElement:function(a){a=this.q(a);a.getContext=function(){if(this.l){return this.l}return this.l=new K(this)};a.attachEvent("onpropertychange",V);a.attachEvent("onresize",
-W);var b=a.attributes;if(b.width&&b.width.specified){a.style.width=b.width.nodeValue+"px"}else{a.width=a.clientWidth}if(b.height&&b.height.specified){a.style.height=b.height.nodeValue+"px"}else{a.height=a.clientHeight}return a}};function V(a){var b=a.srcElement;switch(a.propertyName){case "width":b.style.width=b.attributes.width.nodeValue+"px";b.getContext().clearRect();break;case "height":b.style.height=b.attributes.height.nodeValue+"px";b.getContext().clearRect();break}}function W(a){var b=a.srcElement;
-if(b.firstChild){b.firstChild.style.width=b.clientWidth+"px";b.firstChild.style.height=b.clientHeight+"px"}}Q.init();var R=[];for(var E=0;E<16;E++){for(var F=0;F<16;F++){R[E*16+F]=E.toString(16)+F.toString(16)}}function J(){return[[1,0,0],[0,1,0],[0,0,1]]}function G(a,b){var c=J();for(var d=0;d<3;d++){for(var e=0;e<3;e++){var g=0;for(var h=0;h<3;h++){g+=a[d][h]*b[h][e]}c[d][e]=g}}return c}function N(a,b){b.fillStyle=a.fillStyle;b.lineCap=a.lineCap;b.lineJoin=a.lineJoin;b.lineWidth=a.lineWidth;b.miterLimit=
-a.miterLimit;b.shadowBlur=a.shadowBlur;b.shadowColor=a.shadowColor;b.shadowOffsetX=a.shadowOffsetX;b.shadowOffsetY=a.shadowOffsetY;b.strokeStyle=a.strokeStyle;b.d=a.d;b.e=a.e}function O(a){var b,c=1;a=String(a);if(a.substring(0,3)=="rgb"){var d=a.indexOf("(",3),e=a.indexOf(")",d+1),g=a.substring(d+1,e).split(",");b="#";for(var h=0;h<3;h++){b+=R[Number(g[h])]}if(g.length==4&&a.substr(3,1)=="a"){c=g[3]}}else{b=a}return[b,c]}function S(a){switch(a){case "butt":return"flat";case "round":return"round";
-case "square":default:return"square"}}function K(a){this.a=J();this.m=[];this.k=[];this.c=[];this.strokeStyle="#000";this.fillStyle="#000";this.lineWidth=1;this.lineJoin="miter";this.lineCap="butt";this.miterLimit=m*1;this.globalAlpha=1;this.canvas=a;var b=a.ownerDocument.createElement("div");b.style.width=a.clientWidth+"px";b.style.height=a.clientHeight+"px";b.style.overflow="hidden";b.style.position="absolute";a.appendChild(b);this.j=b;this.d=1;this.e=1}var j=K.prototype;j.clearRect=function(){this.j.innerHTML=
-"";this.c=[]};j.beginPath=function(){this.c=[]};j.moveTo=function(a,b){this.c.push({type:"moveTo",x:a,y:b});this.f=a;this.g=b};j.lineTo=function(a,b){this.c.push({type:"lineTo",x:a,y:b});this.f=a;this.g=b};j.bezierCurveTo=function(a,b,c,d,e,g){this.c.push({type:"bezierCurveTo",cp1x:a,cp1y:b,cp2x:c,cp2y:d,x:e,y:g});this.f=e;this.g=g};j.quadraticCurveTo=function(a,b,c,d){var e=this.f+0.6666666666666666*(a-this.f),g=this.g+0.6666666666666666*(b-this.g),h=e+(c-this.f)/3,l=g+(d-this.g)/3;this.bezierCurveTo(e,
-g,h,l,c,d)};j.arc=function(a,b,c,d,e,g){c*=m;var h=g?"at":"wa",l=a+M(d)*c-A,n=b+L(d)*c-A,o=a+M(e)*c-A,f=b+L(e)*c-A;if(l==o&&!g){l+=0.125}this.c.push({type:h,x:a,y:b,radius:c,xStart:l,yStart:n,xEnd:o,yEnd:f})};j.rect=function(a,b,c,d){this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath()};j.strokeRect=function(a,b,c,d){this.beginPath();this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath();this.stroke()};j.fillRect=function(a,
-b,c,d){this.beginPath();this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath();this.fill()};j.createLinearGradient=function(a,b,c,d){var e=new H("gradient");return e};j.createRadialGradient=function(a,b,c,d,e,g){var h=new H("gradientradial");h.n=c;h.o=g;h.i.x=a;h.i.y=b;return h};j.drawImage=function(a,b){var c,d,e,g,h,l,n,o,f=a.runtimeStyle.width,k=a.runtimeStyle.height;a.runtimeStyle.width="auto";a.runtimeStyle.height="auto";var q=a.width,r=a.height;a.runtimeStyle.width=
-f;a.runtimeStyle.height=k;if(arguments.length==3){c=arguments[1];d=arguments[2];h=(l=0);n=(e=q);o=(g=r)}else if(arguments.length==5){c=arguments[1];d=arguments[2];e=arguments[3];g=arguments[4];h=(l=0);n=q;o=r}else if(arguments.length==9){h=arguments[1];l=arguments[2];n=arguments[3];o=arguments[4];c=arguments[5];d=arguments[6];e=arguments[7];g=arguments[8]}else{throw"Invalid number of arguments";}var s=this.b(c,d),t=[],v=10,w=10;t.push(" <g_vml_:group",' coordsize="',m*v,",",m*w,'"',' coordorigin="0,0"',
-' style="width:',v,";height:",w,";position:absolute;");if(this.a[0][0]!=1||this.a[0][1]){var x=[];x.push("M11='",this.a[0][0],"',","M12='",this.a[1][0],"',","M21='",this.a[0][1],"',","M22='",this.a[1][1],"',","Dx='",i(s.x/m),"',","Dy='",i(s.y/m),"'");var p=s,y=this.b(c+e,d),z=this.b(c,d+g),B=this.b(c+e,d+g);p.x=Math.max(p.x,y.x,z.x,B.x);p.y=Math.max(p.y,y.y,z.y,B.y);t.push("padding:0 ",i(p.x/m),"px ",i(p.y/m),"px 0;filter:progid:DXImageTransform.Microsoft.Matrix(",x.join(""),", sizingmethod='clip');")}else{t.push("top:",
-i(s.y/m),"px;left:",i(s.x/m),"px;")}t.push(' ">','<g_vml_:image src="',a.src,'"',' style="width:',m*e,";"," height:",m*g,';"',' cropleft="',h/q,'"',' croptop="',l/r,'"',' cropright="',(q-h-n)/q,'"',' cropbottom="',(r-l-o)/r,'"'," />","</g_vml_:group>");this.j.insertAdjacentHTML("BeforeEnd",t.join(""))};j.stroke=function(a){var b=[],c=O(a?this.fillStyle:this.strokeStyle),d=c[0],e=c[1]*this.globalAlpha,g=10,h=10;b.push("<g_vml_:shape",' fillcolor="',d,'"',' filled="',Boolean(a),'"',' style="position:absolute;width:',
-g,";height:",h,';"',' coordorigin="0 0" coordsize="',m*g," ",m*h,'"',' stroked="',!a,'"',' strokeweight="',this.lineWidth,'"',' strokecolor="',d,'"',' path="');var l={x:null,y:null},n={x:null,y:null};for(var o=0;o<this.c.length;o++){var f=this.c[o];if(f.type=="moveTo"){b.push(" m ");var k=this.b(f.x,f.y);b.push(i(k.x),",",i(k.y))}else if(f.type=="lineTo"){b.push(" l ");var k=this.b(f.x,f.y);b.push(i(k.x),",",i(k.y))}else if(f.type=="close"){b.push(" x ")}else if(f.type=="bezierCurveTo"){b.push(" c ");
-var k=this.b(f.x,f.y),q=this.b(f.cp1x,f.cp1y),r=this.b(f.cp2x,f.cp2y);b.push(i(q.x),",",i(q.y),",",i(r.x),",",i(r.y),",",i(k.x),",",i(k.y))}else if(f.type=="at"||f.type=="wa"){b.push(" ",f.type," ");var k=this.b(f.x,f.y),s=this.b(f.xStart,f.yStart),t=this.b(f.xEnd,f.yEnd);b.push(i(k.x-this.d*f.radius),",",i(k.y-this.e*f.radius)," ",i(k.x+this.d*f.radius),",",i(k.y+this.e*f.radius)," ",i(s.x),",",i(s.y)," ",i(t.x),",",i(t.y))}if(k){if(l.x==null||k.x<l.x){l.x=k.x}if(n.x==null||k.x>n.x){n.x=k.x}if(l.y==
-null||k.y<l.y){l.y=k.y}if(n.y==null||k.y>n.y){n.y=k.y}}}b.push(' ">');if(typeof this.fillStyle=="object"){var v={x:"50%",y:"50%"},w=n.x-l.x,x=n.y-l.y,p=w>x?w:x;v.x=i(this.fillStyle.i.x/w*100+50)+"%";v.y=i(this.fillStyle.i.y/x*100+50)+"%";var y=[];if(this.fillStyle.p=="gradientradial"){var z=this.fillStyle.n/p*100,B=this.fillStyle.o/p*100-z}else{var z=0,B=100}var C={offset:null,color:null},D={offset:null,color:null};this.fillStyle.h.sort(function(T,U){return T.offset-U.offset});for(var o=0;o<this.fillStyle.h.length;o++){var u=
-this.fillStyle.h[o];y.push(u.offset*B+z,"% ",u.color,",");if(u.offset>C.offset||C.offset==null){C.offset=u.offset;C.color=u.color}if(u.offset<D.offset||D.offset==null){D.offset=u.offset;D.color=u.color}}y.pop();b.push("<g_vml_:fill",' color="',D.color,'"',' color2="',C.color,'"',' type="',this.fillStyle.p,'"',' focusposition="',v.x,", ",v.y,'"',' colors="',y.join(""),'"',' opacity="',e,'" />')}else if(a){b.push('<g_vml_:fill color="',d,'" opacity="',e,'" />')}else{b.push("<g_vml_:stroke",' opacity="',
-e,'"',' joinstyle="',this.lineJoin,'"',' miterlimit="',this.miterLimit,'"',' endcap="',S(this.lineCap),'"',' weight="',this.lineWidth,'px"',' color="',d,'" />')}b.push("</g_vml_:shape>");this.j.insertAdjacentHTML("beforeEnd",b.join(""));this.c=[]};j.fill=function(){this.stroke(true)};j.closePath=function(){this.c.push({type:"close"})};j.b=function(a,b){return{x:m*(a*this.a[0][0]+b*this.a[1][0]+this.a[2][0])-A,y:m*(a*this.a[0][1]+b*this.a[1][1]+this.a[2][1])-A}};j.save=function(){var a={};N(this,a);
-this.k.push(a);this.m.push(this.a);this.a=G(J(),this.a)};j.restore=function(){N(this.k.pop(),this);this.a=this.m.pop()};j.translate=function(a,b){var c=[[1,0,0],[0,1,0],[a,b,1]];this.a=G(c,this.a)};j.rotate=function(a){var b=M(a),c=L(a),d=[[b,c,0],[-c,b,0],[0,0,1]];this.a=G(d,this.a)};j.scale=function(a,b){this.d*=a;this.e*=b;var c=[[a,0,0],[0,b,0],[0,0,1]];this.a=G(c,this.a)};j.clip=function(){};j.arcTo=function(){};j.createPattern=function(){return new P};function H(a){this.p=a;this.n=0;this.o=
-0;this.h=[];this.i={x:0,y:0}}H.prototype.addColorStop=function(a,b){b=O(b);this.h.push({offset:1-a,color:b})};function P(){}G_vmlCanvasManager=Q;CanvasRenderingContext2D=K;CanvasGradient=H;CanvasPattern=P})()};
diff --git a/sys/lib/python/mercurial/templates/static/graph.js b/sys/lib/python/mercurial/templates/static/graph.js
deleted file mode 100644
index 0d4dcdde9..000000000
--- a/sys/lib/python/mercurial/templates/static/graph.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// branch_renderer.js - Rendering of branch DAGs on the client side
-//
-// Copyright 2008 Dirkjan Ochtman <dirkjan AT ochtman DOT nl>
-// Copyright 2006 Alexander Schremmer <alex AT alexanderweb DOT de>
-//
-// derived from code written by Scott James Remnant <scott@ubuntu.com>
-// Copyright 2005 Canonical Ltd.
-//
-// This software may be used and distributed according to the terms
-// of the GNU General Public License, incorporated herein by reference.
-
-var colors = [
- [ 1.0, 0.0, 0.0 ],
- [ 1.0, 1.0, 0.0 ],
- [ 0.0, 1.0, 0.0 ],
- [ 0.0, 1.0, 1.0 ],
- [ 0.0, 0.0, 1.0 ],
- [ 1.0, 0.0, 1.0 ]
-];
-
-function Graph() {
-
- this.canvas = document.getElementById('graph');
- if (navigator.userAgent.indexOf('MSIE') >= 0) this.canvas = window.G_vmlCanvasManager.initElement(this.canvas);
- this.ctx = this.canvas.getContext('2d');
- this.ctx.strokeStyle = 'rgb(0, 0, 0)';
- this.ctx.fillStyle = 'rgb(0, 0, 0)';
- this.cur = [0, 0];
- this.line_width = 3;
- this.bg = [0, 4];
- this.cell = [2, 0];
- this.columns = 0;
- this.revlink = '';
-
- this.scale = function(height) {
- this.bg_height = height;
- this.box_size = Math.floor(this.bg_height / 1.2);
- this.cell_height = this.box_size;
- }
-
- function colorPart(num) {
- num *= 255
- num = num < 0 ? 0 : num;
- num = num > 255 ? 255 : num;
- var digits = Math.round(num).toString(16);
- if (num < 16) {
- return '0' + digits;
- } else {
- return digits;
- }
- }
-
- this.setColor = function(color, bg, fg) {
-
- // Set the colour.
- //
- // Picks a distinct colour based on an internal wheel; the bg
- // parameter provides the value that should be assigned to the 'zero'
- // colours and the fg parameter provides the multiplier that should be
- // applied to the foreground colours.
-
- color %= colors.length;
- var red = (colors[color][0] * fg) || bg;
- var green = (colors[color][1] * fg) || bg;
- var blue = (colors[color][2] * fg) || bg;
- red = Math.round(red * 255);
- green = Math.round(green * 255);
- blue = Math.round(blue * 255);
- var s = 'rgb(' + red + ', ' + green + ', ' + blue + ')';
- this.ctx.strokeStyle = s;
- this.ctx.fillStyle = s;
- return s;
-
- }
-
- this.render = function(data) {
-
- var backgrounds = '';
- var nodedata = '';
-
- for (var i in data) {
-
- var parity = i % 2;
- this.cell[1] += this.bg_height;
- this.bg[1] += this.bg_height;
-
- var cur = data[i];
- var node = cur[1];
- var edges = cur[2];
- var fold = false;
-
- for (var j in edges) {
-
- line = edges[j];
- start = line[0];
- end = line[1];
- color = line[2];
-
- if (end > this.columns || start > this.columns) {
- this.columns += 1;
- }
-
- if (start == this.columns && start > end) {
- var fold = true;
- }
-
- x0 = this.cell[0] + this.box_size * start + this.box_size / 2;
- y0 = this.bg[1] - this.bg_height / 2;
- x1 = this.cell[0] + this.box_size * end + this.box_size / 2;
- y1 = this.bg[1] + this.bg_height / 2;
-
- this.edge(x0, y0, x1, y1, color);
-
- }
-
- // Draw the revision node in the right column
-
- column = node[0]
- color = node[1]
-
- radius = this.box_size / 8;
- x = this.cell[0] + this.box_size * column + this.box_size / 2;
- y = this.bg[1] - this.bg_height / 2;
- var add = this.vertex(x, y, color, parity, cur);
- backgrounds += add[0];
- nodedata += add[1];
-
- if (fold) this.columns -= 1;
-
- }
-
- document.getElementById('nodebgs').innerHTML += backgrounds;
- document.getElementById('graphnodes').innerHTML += nodedata;
-
- }
-
-}
diff --git a/sys/lib/python/mercurial/templates/static/hgicon.png b/sys/lib/python/mercurial/templates/static/hgicon.png
deleted file mode 100644
index 60effbc5e..000000000
--- a/sys/lib/python/mercurial/templates/static/hgicon.png
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/mercurial/templates/static/hglogo.png b/sys/lib/python/mercurial/templates/static/hglogo.png
deleted file mode 100644
index adc6e65d1..000000000
--- a/sys/lib/python/mercurial/templates/static/hglogo.png
+++ /dev/null
Binary files differ
diff --git a/sys/lib/python/mercurial/templates/static/style-coal.css b/sys/lib/python/mercurial/templates/static/style-coal.css
deleted file mode 100644
index 9df71c7f1..000000000
--- a/sys/lib/python/mercurial/templates/static/style-coal.css
+++ /dev/null
@@ -1,265 +0,0 @@
-body {
- margin: 0;
- padding: 0;
- background: black url(background.png) repeat-x;
- font-family: sans-serif;
-}
-
-.container {
- padding-right: 150px;
-}
-
-.main {
- position: relative;
- background: white;
- padding: 2em;
- border-right: 15px solid black;
- border-bottom: 15px solid black;
-}
-
-#.main {
- width: 98%;
-}
-
-.overflow {
- width: 100%;
- overflow: auto;
-}
-
-.menu {
- background: #999;
- padding: 10px;
- width: 75px;
- margin: 0;
- font-size: 80%;
- text-align: left;
- position: fixed;
- top: 27px;
- left: auto;
- right: 27px;
-}
-
-#.menu {
- position: absolute !important;
- top:expression(eval(document.body.scrollTop + 27));
-}
-
-.menu ul {
- list-style: none;
- padding: 0;
- margin: 10px 0 0 0;
-}
-
-.menu li {
- margin-bottom: 3px;
- padding: 2px 4px;
- background: white;
- color: black;
- font-weight: normal;
-}
-
-.menu li.active {
- background: black;
- color: white;
-}
-
-.menu img {
- width: 75px;
- height: 90px;
- border: 0;
-}
-
-.menu a { color: black; display: block; }
-
-.search {
- position: absolute;
- top: .7em;
- right: 2em;
-}
-
-form.search div#hint {
- display: none;
- position: absolute;
- top: 40px;
- right: 0px;
- width: 190px;
- padding: 5px;
- background: #ffc;
- font-size: 70%;
- border: 1px solid yellow;
- -moz-border-radius: 5px; /* this works only in camino/firefox */
- -webkit-border-radius: 5px; /* this is just for Safari */
-}
-
-form.search:hover div#hint { display: block; }
-
-a { text-decoration:none; }
-.age { white-space:nowrap; }
-.date { white-space:nowrap; }
-.indexlinks { white-space:nowrap; }
-.parity0 { background-color: #f0f0f0; }
-.parity1 { background-color: white; }
-.plusline { color: green; }
-.minusline { color: #dc143c; } /* crimson */
-.atline { color: purple; }
-
-.navigate {
- text-align: right;
- font-size: 60%;
- margin: 1em 0;
-}
-
-.tag {
- color: #999;
- font-size: 70%;
- font-weight: normal;
- margin-left: .5em;
- vertical-align: baseline;
-}
-
-.branchhead {
- color: #000;
- font-size: 80%;
- font-weight: normal;
- margin-left: .5em;
- vertical-align: baseline;
-}
-
-ul#graphnodes .branchhead {
- font-size: 75%;
-}
-
-.branchname {
- color: #000;
- font-size: 60%;
- font-weight: normal;
- margin-left: .5em;
- vertical-align: baseline;
-}
-
-h3 .branchname {
- font-size: 80%;
-}
-
-/* Common */
-pre { margin: 0; }
-
-h2 { font-size: 120%; border-bottom: 1px solid #999; }
-h2 a { color: #000; }
-h3 {
- margin-top: -.7em;
- font-size: 100%;
-}
-
-/* log and tags tables */
-.bigtable {
- border-bottom: 1px solid #999;
- border-collapse: collapse;
- font-size: 90%;
- width: 100%;
- font-weight: normal;
- text-align: left;
-}
-
-.bigtable td {
- vertical-align: top;
-}
-
-.bigtable th {
- padding: 1px 4px;
- border-bottom: 1px solid #999;
-}
-.bigtable tr { border: none; }
-.bigtable .age { width: 6em; }
-.bigtable .author { width: 12em; }
-.bigtable .description { }
-.bigtable .node { width: 5em; font-family: monospace;}
-.bigtable .lineno { width: 2em; text-align: right;}
-.bigtable .lineno a { color: #999; font-size: smaller; font-family: monospace;}
-.bigtable .permissions { width: 8em; text-align: left;}
-.bigtable .size { width: 5em; text-align: right; }
-.bigtable .annotate { text-align: right; }
-.bigtable td.annotate { font-size: smaller; }
-.bigtable td.source { font-size: inherit; }
-
-.source, .sourcefirst, .sourcelast {
- font-family: monospace;
- white-space: pre;
- padding: 1px 4px;
- font-size: 90%;
-}
-.sourcefirst { border-bottom: 1px solid #999; font-weight: bold; }
-.sourcelast { border-top: 1px solid #999; }
-.source a { color: #999; font-size: smaller; font-family: monospace;}
-.bottomline { border-bottom: 1px solid #999; }
-
-.fileline { font-family: monospace; }
-.fileline img { border: 0; }
-
-.tagEntry .closed { color: #99f; }
-
-/* Changeset entry */
-#changesetEntry {
- border-collapse: collapse;
- font-size: 90%;
- width: 100%;
- margin-bottom: 1em;
-}
-
-#changesetEntry th {
- padding: 1px 4px;
- width: 4em;
- text-align: right;
- font-weight: normal;
- color: #999;
- margin-right: .5em;
- vertical-align: top;
-}
-
-div.description {
- border-left: 3px solid #999;
- margin: 1em 0 1em 0;
- padding: .3em;
-}
-
-/* Graph */
-div#wrapper {
- position: relative;
- border-top: 1px solid black;
- border-bottom: 1px solid black;
- margin: 0;
- padding: 0;
-}
-
-canvas {
- position: absolute;
- z-index: 5;
- top: -0.7em;
- margin: 0;
-}
-
-ul#graphnodes {
- position: absolute;
- z-index: 10;
- top: -1.0em;
- list-style: none inside none;
- padding: 0;
-}
-
-ul#nodebgs {
- list-style: none inside none;
- padding: 0;
- margin: 0;
- top: -0.7em;
-}
-
-ul#graphnodes li, ul#nodebgs li {
- height: 39px;
-}
-
-ul#graphnodes li .info {
- display: block;
- font-size: 70%;
- position: relative;
- top: -3px;
-}
diff --git a/sys/lib/python/mercurial/templates/static/style-gitweb.css b/sys/lib/python/mercurial/templates/static/style-gitweb.css
deleted file mode 100644
index 09fc3dccc..000000000
--- a/sys/lib/python/mercurial/templates/static/style-gitweb.css
+++ /dev/null
@@ -1,123 +0,0 @@
-body { font-family: sans-serif; font-size: 12px; margin:0px; border:solid #d9d8d1; border-width:1px; margin:10px; }
-a { color:#0000cc; }
-a:hover, a:visited, a:active { color:#880000; }
-div.page_header { height:25px; padding:8px; font-size:18px; font-weight:bold; background-color:#d9d8d1; }
-div.page_header a:visited { color:#0000cc; }
-div.page_header a:hover { color:#880000; }
-div.page_nav { padding:8px; }
-div.page_nav a:visited { color:#0000cc; }
-div.page_path { padding:8px; border:solid #d9d8d1; border-width:0px 0px 1px}
-div.page_footer { padding:4px 8px; background-color: #d9d8d1; }
-div.page_footer_text { float:left; color:#555555; font-style:italic; }
-div.page_body { padding:8px; }
-div.title, a.title {
- display:block; padding:6px 8px;
- font-weight:bold; background-color:#edece6; text-decoration:none; color:#000000;
-}
-a.title:hover { background-color: #d9d8d1; }
-div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; }
-div.log_body { padding:8px 8px 8px 150px; }
-.age { white-space:nowrap; }
-span.age { position:relative; float:left; width:142px; font-style:italic; }
-div.log_link {
- padding:0px 8px;
- font-size:10px; font-family:sans-serif; font-style:normal;
- position:relative; float:left; width:136px;
-}
-div.list_head { padding:6px 8px 4px; border:solid #d9d8d1; border-width:1px 0px 0px; font-style:italic; }
-a.list { text-decoration:none; color:#000000; }
-a.list:hover { text-decoration:underline; color:#880000; }
-table { padding:8px 4px; }
-th { padding:2px 5px; font-size:12px; text-align:left; }
-tr.light:hover, .parity0:hover { background-color:#edece6; }
-tr.dark, .parity1 { background-color:#f6f6f0; }
-tr.dark:hover, .parity1:hover { background-color:#edece6; }
-td { padding:2px 5px; font-size:12px; vertical-align:top; }
-td.link { padding:2px 5px; font-family:sans-serif; font-size:10px; }
-td.indexlinks { white-space: nowrap; }
-td.indexlinks a {
- padding: 2px 5px; line-height: 10px;
- border: 1px solid;
- color: #ffffff; background-color: #7777bb;
- border-color: #aaaadd #333366 #333366 #aaaadd;
- font-weight: bold; text-align: center; text-decoration: none;
- font-size: 10px;
-}
-td.indexlinks a:hover { background-color: #6666aa; }
-div.pre { font-family:monospace; font-size:12px; white-space:pre; }
-div.diff_info { font-family:monospace; color:#000099; background-color:#edece6; font-style:italic; }
-div.index_include { border:solid #d9d8d1; border-width:0px 0px 1px; padding:12px 8px; }
-div.search { margin:4px 8px; position:absolute; top:56px; right:12px }
-.linenr { color:#999999; text-decoration:none }
-div.rss_logo { float: right; white-space: nowrap; }
-div.rss_logo a {
- padding:3px 6px; line-height:10px;
- border:1px solid; border-color:#fcc7a5 #7d3302 #3e1a01 #ff954e;
- color:#ffffff; background-color:#ff6600;
- font-weight:bold; font-family:sans-serif; font-size:10px;
- text-align:center; text-decoration:none;
-}
-div.rss_logo a:hover { background-color:#ee5500; }
-pre { margin: 0; }
-span.logtags span {
- padding: 0px 4px;
- font-size: 10px;
- font-weight: normal;
- border: 1px solid;
- background-color: #ffaaff;
- border-color: #ffccff #ff00ee #ff00ee #ffccff;
-}
-span.logtags span.tagtag {
- background-color: #ffffaa;
- border-color: #ffffcc #ffee00 #ffee00 #ffffcc;
-}
-span.logtags span.branchtag {
- background-color: #aaffaa;
- border-color: #ccffcc #00cc33 #00cc33 #ccffcc;
-}
-span.logtags span.inbranchtag {
- background-color: #d5dde6;
- border-color: #e3ecf4 #9398f4 #9398f4 #e3ecf4;
-}
-
-/* Graph */
-div#wrapper {
- position: relative;
- margin: 0;
- padding: 0;
- margin-top: 3px;
-}
-
-canvas {
- position: absolute;
- z-index: 5;
- top: -0.9em;
- margin: 0;
-}
-
-ul#nodebgs {
- list-style: none inside none;
- padding: 0;
- margin: 0;
- top: -0.7em;
-}
-
-ul#graphnodes li, ul#nodebgs li {
- height: 39px;
-}
-
-ul#graphnodes {
- position: absolute;
- z-index: 10;
- top: -0.8em;
- list-style: none inside none;
- padding: 0;
-}
-
-ul#graphnodes li .info {
- display: block;
- font-size: 100%;
- position: relative;
- top: -3px;
- font-style: italic;
-}
diff --git a/sys/lib/python/mercurial/templates/static/style-monoblue.css b/sys/lib/python/mercurial/templates/static/style-monoblue.css
deleted file mode 100644
index 12611ea49..000000000
--- a/sys/lib/python/mercurial/templates/static/style-monoblue.css
+++ /dev/null
@@ -1,472 +0,0 @@
-/*** Initial Settings ***/
-* {
- margin: 0;
- padding: 0;
- font-weight: normal;
- font-style: normal;
-}
-
-html {
- font-size: 100%;
- font-family: sans-serif;
-}
-
-body {
- font-size: 77%;
- margin: 15px 50px;
- background: #4B4B4C;
-}
-
-a {
- color:#0000cc;
- text-decoration: none;
-}
-/*** end of Initial Settings ***/
-
-
-/** common settings **/
-div#container {
- background: #FFFFFF;
- position: relative;
- color: #666;
-}
-
-div.page-header {
- padding: 50px 20px 0;
- background: #006699 top left repeat-x;
- position: relative;
-}
- div.page-header h1 {
- margin: 10px 0 30px;
- font-size: 1.8em;
- font-weight: bold;
- font-family: osaka,'MS P Gothic', Georgia, serif;
- letter-spacing: 1px;
- color: #DDD;
- }
- div.page-header h1 a {
- font-weight: bold;
- color: #FFF;
- }
- div.page-header a {
- text-decoration: none;
- }
-
- div.page-header form {
- position: absolute;
- margin-bottom: 2px;
- bottom: 0;
- right: 20px;
- }
- div.page-header form label {
- color: #DDD;
- }
- div.page-header form input {
- padding: 2px;
- border: solid 1px #DDD;
- }
- div.page-header form dl {
- overflow: hidden;
- }
- div.page-header form dl dt {
- font-size: 1.2em;
- }
- div.page-header form dl dt,
- div.page-header form dl dd {
- margin: 0 0 0 5px;
- float: left;
- height: 24px;
- line-height: 20px;
- }
-
- ul.page-nav {
- margin: 10px 0 0 0;
- list-style-type: none;
- overflow: hidden;
- width: 800px;
- }
- ul.page-nav li {
- margin: 0 2px 0 0;
- float: left;
- width: 80px;
- height: 24px;
- font-size: 1.1em;
- line-height: 24px;
- text-align: center;
- }
- ul.page-nav li.current {
- background: #FFF;
- }
- ul.page-nav li a {
- height: 24px;
- color: #666;
- background: #DDD;
- display: block;
- text-decoration: none;
- }
- ul.page-nav li a:hover {
- color:#333;
- background: #FFF;
- }
-
-ul.submenu {
- margin: 10px 0 -10px 20px;
- list-style-type: none;
-}
-ul.submenu li {
- margin: 0 10px 0 0;
- font-size: 1.2em;
- display: inline;
-}
-
-h2 {
- margin: 20px 0 10px;
- height: 30px;
- line-height: 30px;
- text-indent: 20px;
- background: #FFF;
- font-size: 1.2em;
- border-top: dotted 1px #D5E1E6;
- font-weight: bold;
-}
-h2.no-link {
- color:#006699;
-}
-h2.no-border {
- color: #FFF;
- background: #006699;
- border: 0;
-}
-h2 a {
- font-weight:bold;
- color:#006699;
-}
-
-div.page-path {
- text-align: right;
- padding: 20px 30px 10px 0;
- border:solid #d9d8d1;
- border-width:0px 0px 1px;
- font-size: 1.2em;
-}
-
-div.page-footer {
- margin: 50px 0 0;
- position: relative;
-}
- div.page-footer p {
- position: relative;
- left: 20px;
- bottom: 5px;
- font-size: 1.2em;
- }
-
- ul.rss-logo {
- position: absolute;
- top: -10px;
- right: 20px;
- height: 20px;
- list-style-type: none;
- }
- ul.rss-logo li {
- display: inline;
- }
- ul.rss-logo li a {
- padding: 3px 6px;
- line-height: 10px;
- border:1px solid;
- border-color:#fcc7a5 #7d3302 #3e1a01 #ff954e;
- color:#ffffff;
- background-color:#ff6600;
- font-weight:bold;
- font-family:sans-serif;
- font-size:10px;
- text-align:center;
- text-decoration:none;
- }
- div.rss-logo li a:hover {
- background-color:#ee5500;
- }
-
-p.normal {
- margin: 20px 0 20px 30px;
- font-size: 1.2em;
-}
-
-table {
- margin: 10px 0 0 20px;
- width: 95%;
- border-collapse: collapse;
-}
-table tr td {
- font-size: 1.1em;
-}
-table tr td.nowrap {
- white-space: nowrap;
-}
-/*
-table tr.parity0:hover,
-table tr.parity1:hover {
- background: #D5E1E6;
-}
-*/
-table tr.parity0 {
- background: #F1F6F7;
-}
-table tr.parity1 {
- background: #FFFFFF;
-}
-table tr td {
- padding: 5px 5px;
-}
-table.annotated tr td {
- padding: 0px 5px;
-}
-
-span.logtags span {
- padding: 2px 6px;
- font-weight: normal;
- font-size: 11px;
- border: 1px solid;
- background-color: #ffaaff;
- border-color: #ffccff #ff00ee #ff00ee #ffccff;
-}
-span.logtags span.tagtag {
- background-color: #ffffaa;
- border-color: #ffffcc #ffee00 #ffee00 #ffffcc;
-}
-span.logtags span.branchtag {
- background-color: #aaffaa;
- border-color: #ccffcc #00cc33 #00cc33 #ccffcc;
-}
-span.logtags span.inbranchtag {
- background-color: #d5dde6;
- border-color: #e3ecf4 #9398f4 #9398f4 #e3ecf4;
-}
-
-div.diff pre {
- margin: 10px 0 0 0;
-}
-div.diff pre span {
- font-family: monospace;
- white-space: pre;
- font-size: 1.2em;
- padding: 3px 0;
-}
-td.source {
- white-space: pre;
- font-family: monospace;
- margin: 10px 30px 0;
- font-size: 1.2em;
- font-family: monospace;
-}
- div.source div.parity0,
- div.source div.parity1 {
- padding: 1px;
- font-size: 1.2em;
- }
- div.source div.parity0 {
- background: #F1F6F7;
- }
- div.source div.parity1 {
- background: #FFFFFF;
- }
-div.parity0:hover,
-div.parity1:hover {
- background: #D5E1E6;
-}
-.linenr {
- color: #999;
- text-align: right;
-}
-.lineno {
- text-align: right;
-}
-.lineno a {
- color: #999;
-}
-td.linenr {
- width: 60px;
-}
-
-div#powered-by {
- position: absolute;
- width: 75px;
- top: 15px;
- right: 20px;
- font-size: 1.2em;
-}
-div#powered-by a {
- color: #EEE;
- text-decoration: none;
-}
-div#powered-by a:hover {
- text-decoration: underline;
-}
-/*
-div#monoblue-corner-top-left {
- position: absolute;
- top: 0;
- left: 0;
- width: 10px;
- height: 10px;
- background: url(./monoblue-corner.png) top left no-repeat !important;
- background: none;
-}
-div#monoblue-corner-top-right {
- position: absolute;
- top: 0;
- right: 0;
- width: 10px;
- height: 10px;
- background: url(./monoblue-corner.png) top right no-repeat !important;
- background: none;
-}
-div#monoblue-corner-bottom-left {
- position: absolute;
- bottom: 0;
- left: 0;
- width: 10px;
- height: 10px;
- background: url(./monoblue-corner.png) bottom left no-repeat !important;
- background: none;
-}
-div#monoblue-corner-bottom-right {
- position: absolute;
- bottom: 0;
- right: 0;
- width: 10px;
- height: 10px;
- background: url(./monoblue-corner.png) bottom right no-repeat !important;
- background: none;
-}
-*/
-/** end of common settings **/
-
-/** summary **/
-dl.overview {
- margin: 0 0 0 30px;
- font-size: 1.1em;
- overflow: hidden;
-}
- dl.overview dt,
- dl.overview dd {
- margin: 5px 0;
- float: left;
- }
- dl.overview dt {
- clear: left;
- font-weight: bold;
- width: 150px;
- }
-/** end of summary **/
-
-/** chagelog **/
-h3.changelog {
- margin: 20px 0 5px 30px;
- padding: 0 0 2px;
- font-size: 1.4em;
- border-bottom: dotted 1px #D5E1E6;
-}
-ul.changelog-entry {
- margin: 0 0 10px 30px;
- list-style-type: none;
- position: relative;
-}
-ul.changelog-entry li span.revdate {
- font-size: 1.1em;
-}
-ul.changelog-entry li.age {
- position: absolute;
- top: -25px;
- right: 10px;
- font-size: 1.4em;
- color: #CCC;
- font-weight: bold;
- font-style: italic;
-}
-ul.changelog-entry li span.name {
- font-size: 1.2em;
- font-weight: bold;
-}
-ul.changelog-entry li.description {
- margin: 10px 0 0;
- font-size: 1.1em;
-}
-/** end of changelog **/
-
-/** file **/
-p.files {
- margin: 0 0 0 20px;
- font-size: 2.0em;
- font-weight: bold;
-}
-/** end of file **/
-
-/** changeset **/
-h3.changeset {
- margin: 20px 0 5px 20px;
- padding: 0 0 2px;
- font-size: 1.6em;
- border-bottom: dotted 1px #D5E1E6;
-}
-p.changeset-age {
- position: relative;
-}
-p.changeset-age span {
- position: absolute;
- top: -25px;
- right: 10px;
- font-size: 1.4em;
- color: #CCC;
- font-weight: bold;
- font-style: italic;
-}
-p.description {
- margin: 10px 30px 0 30px;
- padding: 10px;
- border: solid 1px #CCC;
- font-size: 1.2em;
-}
-/** end of changeset **/
-
-/** canvas **/
-div#wrapper {
- position: relative;
- font-size: 1.2em;
-}
-
-canvas {
- position: absolute;
- z-index: 5;
- top: -0.7em;
-}
-
-ul#nodebgs li.parity0 {
- background: #F1F6F7;
-}
-
-ul#nodebgs li.parity1 {
- background: #FFFFFF;
-}
-
-ul#graphnodes {
- position: absolute;
- z-index: 10;
- top: 7px;
- list-style: none inside none;
-}
-
-ul#nodebgs {
- list-style: none inside none;
-}
-
-ul#graphnodes li, ul#nodebgs li {
- height: 39px;
-}
-
-ul#graphnodes li .info {
- display: block;
- position: relative;
-}
-/** end of canvas **/
diff --git a/sys/lib/python/mercurial/templates/static/style-paper.css b/sys/lib/python/mercurial/templates/static/style-paper.css
deleted file mode 100644
index edb9beab6..000000000
--- a/sys/lib/python/mercurial/templates/static/style-paper.css
+++ /dev/null
@@ -1,254 +0,0 @@
-body {
- margin: 0;
- padding: 0;
- background: white;
- font-family: sans-serif;
-}
-
-.container {
- padding-left: 115px;
-}
-
-.main {
- position: relative;
- background: white;
- padding: 2em 2em 2em 0;
-}
-
-#.main {
- width: 98%;
-}
-
-.overflow {
- width: 100%;
- overflow: auto;
-}
-
-.menu {
- width: 90px;
- margin: 0;
- font-size: 80%;
- text-align: left;
- position: absolute;
- top: 20px;
- left: 20px;
- right: auto;
-}
-
-.menu ul {
- list-style: none;
- padding: 0;
- margin: 10px 0 0 0;
- border-left: 2px solid #999;
-}
-
-.menu li {
- margin-bottom: 3px;
- padding: 2px 4px;
- background: white;
- color: black;
- font-weight: normal;
-}
-
-.menu li.active {
- font-weight: bold;
-}
-
-.menu img {
- width: 75px;
- height: 90px;
- border: 0;
-}
-
-.menu a { color: black; display: block; }
-
-.search {
- position: absolute;
- top: .7em;
- right: 2em;
-}
-
-form.search div#hint {
- display: none;
- position: absolute;
- top: 40px;
- right: 0px;
- width: 190px;
- padding: 5px;
- background: #ffc;
- font-size: 70%;
- border: 1px solid yellow;
- -moz-border-radius: 5px; /* this works only in camino/firefox */
- -webkit-border-radius: 5px; /* this is just for Safari */
-}
-
-form.search:hover div#hint { display: block; }
-
-a { text-decoration:none; }
-.age { white-space:nowrap; }
-.date { white-space:nowrap; }
-.indexlinks { white-space:nowrap; }
-.parity0 { background-color: #f0f0f0; }
-.parity1 { background-color: white; }
-.plusline { color: green; }
-.minusline { color: #dc143c; } /* crimson */
-.atline { color: purple; }
-
-.navigate {
- text-align: right;
- font-size: 60%;
- margin: 1em 0;
-}
-
-.tag {
- color: #999;
- font-size: 70%;
- font-weight: normal;
- margin-left: .5em;
- vertical-align: baseline;
-}
-
-.branchhead {
- color: #000;
- font-size: 80%;
- font-weight: normal;
- margin-left: .5em;
- vertical-align: baseline;
-}
-
-ul#graphnodes .branchhead {
- font-size: 75%;
-}
-
-.branchname {
- color: #000;
- font-size: 60%;
- font-weight: normal;
- margin-left: .5em;
- vertical-align: baseline;
-}
-
-h3 .branchname {
- font-size: 80%;
-}
-
-/* Common */
-pre { margin: 0; }
-
-h2 { font-size: 120%; border-bottom: 1px solid #999; }
-h2 a { color: #000; }
-h3 {
- margin-top: -.7em;
- font-size: 100%;
-}
-
-/* log and tags tables */
-.bigtable {
- border-bottom: 1px solid #999;
- border-collapse: collapse;
- font-size: 90%;
- width: 100%;
- font-weight: normal;
- text-align: left;
-}
-
-.bigtable td {
- vertical-align: top;
-}
-
-.bigtable th {
- padding: 1px 4px;
- border-bottom: 1px solid #999;
-}
-.bigtable tr { border: none; }
-.bigtable .age { width: 6em; }
-.bigtable .author { width: 12em; }
-.bigtable .description { }
-.bigtable .node { width: 5em; font-family: monospace;}
-.bigtable .permissions { width: 8em; text-align: left;}
-.bigtable .size { width: 5em; text-align: right; }
-.bigtable .annotate { text-align: right; }
-.bigtable td.annotate { font-size: smaller; }
-.bigtable td.source { font-size: inherit; }
-
-.source, .sourcefirst, .sourcelast {
- font-family: monospace;
- white-space: pre;
- padding: 1px 4px;
- font-size: 90%;
-}
-.sourcefirst { border-bottom: 1px solid #999; font-weight: bold; }
-.sourcelast { border-top: 1px solid #999; }
-.source a { color: #999; font-size: smaller; font-family: monospace;}
-.bottomline { border-bottom: 1px solid #999; }
-
-.fileline { font-family: monospace; }
-.fileline img { border: 0; }
-
-.tagEntry .closed { color: #99f; }
-
-/* Changeset entry */
-#changesetEntry {
- border-collapse: collapse;
- font-size: 90%;
- width: 100%;
- margin-bottom: 1em;
-}
-
-#changesetEntry th {
- padding: 1px 4px;
- width: 4em;
- text-align: right;
- font-weight: normal;
- color: #999;
- margin-right: .5em;
- vertical-align: top;
-}
-
-div.description {
- border-left: 2px solid #999;
- margin: 1em 0 1em 0;
- padding: .3em;
-}
-
-/* Graph */
-div#wrapper {
- position: relative;
- border-top: 1px solid black;
- border-bottom: 1px solid black;
- margin: 0;
- padding: 0;
-}
-
-canvas {
- position: absolute;
- z-index: 5;
- top: -0.7em;
- margin: 0;
-}
-
-ul#graphnodes {
- position: absolute;
- z-index: 10;
- top: -1.0em;
- list-style: none inside none;
- padding: 0;
-}
-
-ul#nodebgs {
- list-style: none inside none;
- padding: 0;
- margin: 0;
- top: -0.7em;
-}
-
-ul#graphnodes li, ul#nodebgs li {
- height: 39px;
-}
-
-ul#graphnodes li .info {
- display: block;
- font-size: 70%;
- position: relative;
- top: -3px;
-}
diff --git a/sys/lib/python/mercurial/templates/static/style.css b/sys/lib/python/mercurial/templates/static/style.css
deleted file mode 100644
index 66bd96d49..000000000
--- a/sys/lib/python/mercurial/templates/static/style.css
+++ /dev/null
@@ -1,105 +0,0 @@
-a { text-decoration:none; }
-.age { white-space:nowrap; }
-.date { white-space:nowrap; }
-.indexlinks { white-space:nowrap; }
-.parity0 { background-color: #ddd; }
-.parity1 { background-color: #eee; }
-.lineno { width: 60px; color: #aaa; font-size: smaller;
- text-align: right; }
-.plusline { color: green; }
-.minusline { color: red; }
-.atline { color: purple; }
-.annotate { font-size: smaller; text-align: right; padding-right: 1em; }
-.buttons a {
- background-color: #666;
- padding: 2pt;
- color: white;
- font-family: sans;
- font-weight: bold;
-}
-.navigate a {
- background-color: #ccc;
- padding: 2pt;
- font-family: sans;
- color: black;
-}
-
-.metatag {
- background-color: #888;
- color: white;
- text-align: right;
-}
-
-/* Common */
-pre { margin: 0; }
-
-.logo {
- float: right;
- clear: right;
-}
-
-/* Changelog/Filelog entries */
-.logEntry { width: 100%; }
-.logEntry .age { width: 15%; }
-.logEntry th { font-weight: normal; text-align: right; vertical-align: top; }
-.logEntry th.age, .logEntry th.firstline { font-weight: bold; }
-.logEntry th.firstline { text-align: left; width: inherit; }
-
-/* Shortlog entries */
-.slogEntry { width: 100%; }
-.slogEntry .age { width: 8em; }
-.slogEntry td { font-weight: normal; text-align: left; vertical-align: top; }
-.slogEntry td.author { width: 15em; }
-
-/* Tag entries */
-#tagEntries { list-style: none; margin: 0; padding: 0; }
-#tagEntries .tagEntry { list-style: none; margin: 0; padding: 0; }
-
-/* Changeset entry */
-#changesetEntry { }
-#changesetEntry th { font-weight: normal; background-color: #888; color: #fff; text-align: right; }
-#changesetEntry th.files, #changesetEntry th.description { vertical-align: top; }
-
-/* File diff view */
-#filediffEntry { }
-#filediffEntry th { font-weight: normal; background-color: #888; color: #fff; text-align: right; }
-
-/* Graph */
-div#wrapper {
- position: relative;
- margin: 0;
- padding: 0;
-}
-
-canvas {
- position: absolute;
- z-index: 5;
- top: -0.6em;
- margin: 0;
-}
-
-ul#nodebgs {
- list-style: none inside none;
- padding: 0;
- margin: 0;
- top: -0.7em;
-}
-
-ul#graphnodes li, ul#nodebgs li {
- height: 39px;
-}
-
-ul#graphnodes {
- position: absolute;
- z-index: 10;
- top: -0.85em;
- list-style: none inside none;
- padding: 0;
-}
-
-ul#graphnodes li .info {
- display: block;
- font-size: 70%;
- position: relative;
- top: -1px;
-}
diff --git a/sys/lib/python/mercurial/templates/template-vars.txt b/sys/lib/python/mercurial/templates/template-vars.txt
deleted file mode 100644
index f434d7295..000000000
--- a/sys/lib/python/mercurial/templates/template-vars.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-repo the name of the repo
-rev a changeset.manifest revision
-node a changeset node
-changesets total number of changesets
-file a filename
-filerev a file revision
-filerevs total number of file revisions
-up the directory of the relevant file
-path a path in the manifest, starting with "/"
-basename a short pathname
-date a date string
-age age in hours, days, etc
-line a line of text (escaped)
-desc a description (escaped, with breaks)
-shortdesc a short description (escaped)
-author a name or email addressv(obfuscated)
-parent a list of the parent
-child a list of the children
-tags a list of tag
-
-header the global page header
-footer the global page footer
-
-files a list of file links
-file_copies a list of pairs of name, source filenames
-dirs a set of directory links
-diff a diff of one or more files
-annotate an annotated file
-entries the entries relevant to the page
-
-Templates and commands:
- changelog(rev) - a page for browsing changesets
- naventry - a link for jumping to a changeset number
- filenodelink - jump to file diff
- fileellipses - printed after maxfiles
- changelogentry - an entry in the log
- manifest - browse a manifest as a directory tree
diff --git a/sys/lib/python/mercurial/transaction.py b/sys/lib/python/mercurial/transaction.py
deleted file mode 100644
index 8eabacc54..000000000
--- a/sys/lib/python/mercurial/transaction.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# transaction.py - simple journalling scheme for mercurial
-#
-# This transaction scheme is intended to gracefully handle program
-# errors and interruptions. More serious failures like system crashes
-# can be recovered with an fsck-like tool. As the whole repository is
-# effectively log-structured, this should amount to simply truncating
-# anything that isn't referenced in the changelog.
-#
-# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import os, errno
-import error
-
-def active(func):
- def _active(self, *args, **kwds):
- if self.count == 0:
- raise error.Abort(_(
- 'cannot use transaction when it is already committed/aborted'))
- return func(self, *args, **kwds)
- return _active
-
-def _playback(journal, report, opener, entries, unlink=True):
- for f, o, ignore in entries:
- if o or not unlink:
- try:
- opener(f, 'a').truncate(o)
- except:
- report(_("failed to truncate %s\n") % f)
- raise
- else:
- try:
- fn = opener(f).name
- os.unlink(fn)
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- raise
- os.unlink(journal)
-
-class transaction(object):
- def __init__(self, report, opener, journal, after=None, createmode=None):
- self.journal = None
-
- self.count = 1
- self.report = report
- self.opener = opener
- self.after = after
- self.entries = []
- self.map = {}
- self.journal = journal
- self._queue = []
-
- self.file = open(self.journal, "w")
- if createmode is not None:
- os.chmod(self.journal, createmode & 0666)
-
- def __del__(self):
- if self.journal:
- if self.entries: self._abort()
- self.file.close()
-
- @active
- def startgroup(self):
- self._queue.append([])
-
- @active
- def endgroup(self):
- q = self._queue.pop()
- d = ''.join(['%s\0%d\n' % (x[0], x[1]) for x in q])
- self.entries.extend(q)
- self.file.write(d)
- self.file.flush()
-
- @active
- def add(self, file, offset, data=None):
- if file in self.map: return
-
- if self._queue:
- self._queue[-1].append((file, offset, data))
- return
-
- self.entries.append((file, offset, data))
- self.map[file] = len(self.entries) - 1
- # add enough data to the journal to do the truncate
- self.file.write("%s\0%d\n" % (file, offset))
- self.file.flush()
-
- @active
- def find(self, file):
- if file in self.map:
- return self.entries[self.map[file]]
- return None
-
- @active
- def replace(self, file, offset, data=None):
- '''
- replace can only replace already committed entries
- that are not pending in the queue
- '''
-
- if file not in self.map:
- raise KeyError(file)
- index = self.map[file]
- self.entries[index] = (file, offset, data)
- self.file.write("%s\0%d\n" % (file, offset))
- self.file.flush()
-
- @active
- def nest(self):
- self.count += 1
- return self
-
- def running(self):
- return self.count > 0
-
- @active
- def close(self):
- '''commit the transaction'''
- self.count -= 1
- if self.count != 0:
- return
- self.file.close()
- self.entries = []
- if self.after:
- self.after()
- else:
- os.unlink(self.journal)
- self.journal = None
-
- @active
- def abort(self):
- '''abort the transaction (generally called on error, or when the
- transaction is not explicitly committed before going out of
- scope)'''
- self._abort()
-
- def _abort(self):
- self.count = 0
- self.file.close()
-
- if not self.entries: return
-
- self.report(_("transaction abort!\n"))
-
- try:
- try:
- _playback(self.journal, self.report, self.opener, self.entries, False)
- self.report(_("rollback completed\n"))
- except:
- self.report(_("rollback failed - please run hg recover\n"))
- finally:
- self.journal = None
-
-
-def rollback(opener, file, report):
- entries = []
-
- for l in open(file).readlines():
- f, o = l.split('\0')
- entries.append((f, int(o), None))
-
- _playback(file, report, opener, entries)
diff --git a/sys/lib/python/mercurial/ui.py b/sys/lib/python/mercurial/ui.py
deleted file mode 100644
index bd122f74a..000000000
--- a/sys/lib/python/mercurial/ui.py
+++ /dev/null
@@ -1,381 +0,0 @@
-# ui.py - user interface bits for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import errno, getpass, os, socket, sys, tempfile, traceback
-import config, util, error
-
-_booleans = {'1': True, 'yes': True, 'true': True, 'on': True,
- '0': False, 'no': False, 'false': False, 'off': False}
-
-class ui(object):
- def __init__(self, src=None):
- self._buffers = []
- self.quiet = self.verbose = self.debugflag = self._traceback = False
- self._reportuntrusted = True
- self._ocfg = config.config() # overlay
- self._tcfg = config.config() # trusted
- self._ucfg = config.config() # untrusted
- self._trustusers = set()
- self._trustgroups = set()
-
- if src:
- self._tcfg = src._tcfg.copy()
- self._ucfg = src._ucfg.copy()
- self._ocfg = src._ocfg.copy()
- self._trustusers = src._trustusers.copy()
- self._trustgroups = src._trustgroups.copy()
- self.fixconfig()
- else:
- # we always trust global config files
- for f in util.rcpath():
- self.readconfig(f, trust=True)
-
- def copy(self):
- return self.__class__(self)
-
- def _is_trusted(self, fp, f):
- st = util.fstat(fp)
- if util.isowner(st):
- return True
-
- tusers, tgroups = self._trustusers, self._trustgroups
- if '*' in tusers or '*' in tgroups:
- return True
-
- user = util.username(st.st_uid)
- group = util.groupname(st.st_gid)
- if user in tusers or group in tgroups or user == util.username():
- return True
-
- if self._reportuntrusted:
- self.warn(_('Not trusting file %s from untrusted '
- 'user %s, group %s\n') % (f, user, group))
- return False
-
- def readconfig(self, filename, root=None, trust=False,
- sections=None, remap=None):
- try:
- fp = open(filename)
- except IOError:
- if not sections: # ignore unless we were looking for something
- return
- raise
-
- cfg = config.config()
- trusted = sections or trust or self._is_trusted(fp, filename)
-
- try:
- cfg.read(filename, fp, sections=sections, remap=remap)
- except error.ConfigError, inst:
- if trusted:
- raise
- self.warn(_("Ignored: %s\n") % str(inst))
-
- if trusted:
- self._tcfg.update(cfg)
- self._tcfg.update(self._ocfg)
- self._ucfg.update(cfg)
- self._ucfg.update(self._ocfg)
-
- if root is None:
- root = os.path.expanduser('~')
- self.fixconfig(root=root)
-
- def fixconfig(self, root=None):
- # translate paths relative to root (or home) into absolute paths
- root = root or os.getcwd()
- for c in self._tcfg, self._ucfg, self._ocfg:
- for n, p in c.items('paths'):
- if p and "://" not in p and not os.path.isabs(p):
- c.set("paths", n, os.path.normpath(os.path.join(root, p)))
-
- # update ui options
- self.debugflag = self.configbool('ui', 'debug')
- self.verbose = self.debugflag or self.configbool('ui', 'verbose')
- self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
- if self.verbose and self.quiet:
- self.quiet = self.verbose = False
- self._reportuntrusted = self.configbool("ui", "report_untrusted", True)
- self._traceback = self.configbool('ui', 'traceback', False)
-
- # update trust information
- self._trustusers.update(self.configlist('trusted', 'users'))
- self._trustgroups.update(self.configlist('trusted', 'groups'))
-
- def setconfig(self, section, name, value):
- for cfg in (self._ocfg, self._tcfg, self._ucfg):
- cfg.set(section, name, value)
- self.fixconfig()
-
- def _data(self, untrusted):
- return untrusted and self._ucfg or self._tcfg
-
- def configsource(self, section, name, untrusted=False):
- return self._data(untrusted).source(section, name) or 'none'
-
- def config(self, section, name, default=None, untrusted=False):
- value = self._data(untrusted).get(section, name, default)
- if self.debugflag and not untrusted and self._reportuntrusted:
- uvalue = self._ucfg.get(section, name)
- if uvalue is not None and uvalue != value:
- self.debug(_("ignoring untrusted configuration option "
- "%s.%s = %s\n") % (section, name, uvalue))
- return value
-
- def configbool(self, section, name, default=False, untrusted=False):
- v = self.config(section, name, None, untrusted)
- if v is None:
- return default
- if v.lower() not in _booleans:
- raise error.ConfigError(_("%s.%s not a boolean ('%s')")
- % (section, name, v))
- return _booleans[v.lower()]
-
- def configlist(self, section, name, default=None, untrusted=False):
- """Return a list of comma/space separated strings"""
- result = self.config(section, name, untrusted=untrusted)
- if result is None:
- result = default or []
- if isinstance(result, basestring):
- result = result.replace(",", " ").split()
- return result
-
- def has_section(self, section, untrusted=False):
- '''tell whether section exists in config.'''
- return section in self._data(untrusted)
-
- def configitems(self, section, untrusted=False):
- items = self._data(untrusted).items(section)
- if self.debugflag and not untrusted and self._reportuntrusted:
- for k, v in self._ucfg.items(section):
- if self._tcfg.get(section, k) != v:
- self.debug(_("ignoring untrusted configuration option "
- "%s.%s = %s\n") % (section, k, v))
- return items
-
- def walkconfig(self, untrusted=False):
- cfg = self._data(untrusted)
- for section in cfg.sections():
- for name, value in self.configitems(section, untrusted):
- yield section, name, str(value).replace('\n', '\\n')
-
- def username(self):
- """Return default username to be used in commits.
-
- Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
- and stop searching if one of these is set.
- If not found and ui.askusername is True, ask the user, else use
- ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
- """
- user = os.environ.get("HGUSER")
- if user is None:
- user = self.config("ui", "username")
- if user is None:
- user = os.environ.get("EMAIL")
- if user is None and self.configbool("ui", "askusername"):
- user = self.prompt(_("enter a commit username:"), default=None)
- if user is None:
- try:
- user = '%s@%s' % (util.getuser(), socket.getfqdn())
- self.warn(_("No username found, using '%s' instead\n") % user)
- except KeyError:
- pass
- if not user:
- raise util.Abort(_("Please specify a username."))
- if "\n" in user:
- raise util.Abort(_("username %s contains a newline\n") % repr(user))
- return user
-
- def shortuser(self, user):
- """Return a short representation of a user name or email address."""
- if not self.verbose: user = util.shortuser(user)
- return user
-
- def _path(self, loc):
- p = self.config('paths', loc)
- if p and '%%' in p:
- self.warn('(deprecated \'%%\' in path %s=%s from %s)\n' %
- (loc, p, self.configsource('paths', loc)))
- p = p.replace('%%', '%')
- return p
-
- def expandpath(self, loc, default=None):
- """Return repository location relative to cwd or from [paths]"""
- if "://" in loc or os.path.isdir(os.path.join(loc, '.hg')):
- return loc
-
- path = self._path(loc)
- if not path and default is not None:
- path = self._path(default)
- return path or loc
-
- def pushbuffer(self):
- self._buffers.append([])
-
- def popbuffer(self):
- return "".join(self._buffers.pop())
-
- def write(self, *args):
- if self._buffers:
- self._buffers[-1].extend([str(a) for a in args])
- else:
- for a in args:
- sys.stdout.write(str(a))
-
- def write_err(self, *args):
- try:
- if not sys.stdout.closed: sys.stdout.flush()
- for a in args:
- sys.stderr.write(str(a))
- # stderr may be buffered under win32 when redirected to files,
- # including stdout.
- if not sys.stderr.closed: sys.stderr.flush()
- except IOError, inst:
- if inst.errno != errno.EPIPE:
- raise
-
- def flush(self):
- try: sys.stdout.flush()
- except: pass
- try: sys.stderr.flush()
- except: pass
-
- def interactive(self):
- i = self.configbool("ui", "interactive", None)
- if i is None:
- return sys.stdin.isatty()
- return i
-
- def _readline(self, prompt=''):
- if sys.stdin.isatty():
- try:
- # magically add command line editing support, where
- # available
- import readline
- # force demandimport to really load the module
- readline.read_history_file
- # windows sometimes raises something other than ImportError
- except Exception:
- pass
- line = raw_input(prompt)
- # When stdin is in binary mode on Windows, it can cause
- # raw_input() to emit an extra trailing carriage return
- if os.linesep == '\r\n' and line and line[-1] == '\r':
- line = line[:-1]
- return line
-
- def prompt(self, msg, default="y"):
- """Prompt user with msg, read response.
- If ui is not interactive, the default is returned.
- """
- if not self.interactive():
- self.write(msg, ' ', default, "\n")
- return default
- try:
- r = self._readline(msg + ' ')
- if not r:
- return default
- return r
- except EOFError:
- raise util.Abort(_('response expected'))
-
- def promptchoice(self, msg, choices, default=0):
- """Prompt user with msg, read response, and ensure it matches
- one of the provided choices. The index of the choice is returned.
- choices is a sequence of acceptable responses with the format:
- ('&None', 'E&xec', 'Sym&link') Responses are case insensitive.
- If ui is not interactive, the default is returned.
- """
- resps = [s[s.index('&')+1].lower() for s in choices]
- while True:
- r = self.prompt(msg, resps[default])
- if r.lower() in resps:
- return resps.index(r.lower())
- self.write(_("unrecognized response\n"))
-
-
- def getpass(self, prompt=None, default=None):
- if not self.interactive(): return default
- try:
- return getpass.getpass(prompt or _('password: '))
- except EOFError:
- raise util.Abort(_('response expected'))
- def status(self, *msg):
- if not self.quiet: self.write(*msg)
- def warn(self, *msg):
- self.write_err(*msg)
- def note(self, *msg):
- if self.verbose: self.write(*msg)
- def debug(self, *msg):
- if self.debugflag: self.write(*msg)
- def edit(self, text, user):
- (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
- text=True)
- try:
- f = os.fdopen(fd, "w")
- f.write(text)
- f.close()
-
- editor = self.geteditor()
-
- util.system("%s \"%s\"" % (editor, name),
- environ={'HGUSER': user},
- onerr=util.Abort, errprefix=_("edit failed"))
-
- f = open(name)
- t = f.read()
- f.close()
- finally:
- os.unlink(name)
-
- return t
-
- def traceback(self):
- '''print exception traceback if traceback printing enabled.
- only to call in exception handler. returns true if traceback
- printed.'''
- if self._traceback:
- traceback.print_exc()
- return self._traceback
-
- def geteditor(self):
- '''return editor to use'''
- return (os.environ.get("HGEDITOR") or
- self.config("ui", "editor") or
- os.environ.get("VISUAL") or
- os.environ.get("EDITOR", "vi"))
-
- def progress(self, topic, pos, item="", unit="", total=None):
- '''show a progress message
-
- With stock hg, this is simply a debug message that is hidden
- by default, but with extensions or GUI tools it may be
- visible. 'topic' is the current operation, 'item' is a
- non-numeric marker of the current position (ie the currently
- in-process file), 'pos' is the current numeric position (ie
- revision, bytes, etc.), units is a corresponding unit label,
- and total is the highest expected pos.
-
- Multiple nested topics may be active at a time. All topics
- should be marked closed by setting pos to None at termination.
- '''
-
- if pos == None or not self.debugflag:
- return
-
- if units:
- units = ' ' + units
- if item:
- item = ' ' + item
-
- if total:
- pct = 100.0 * pos / total
- ui.debug('%s:%s %s/%s%s (%4.2g%%)\n'
- % (topic, item, pos, total, units, pct))
- else:
- ui.debug('%s:%s %s%s\n' % (topic, item, pos, units))
diff --git a/sys/lib/python/mercurial/url.py b/sys/lib/python/mercurial/url.py
deleted file mode 100644
index 131d95550..000000000
--- a/sys/lib/python/mercurial/url.py
+++ /dev/null
@@ -1,533 +0,0 @@
-# url.py - HTTP handling for mercurial
-#
-# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
-# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-import urllib, urllib2, urlparse, httplib, os, re, socket, cStringIO
-from i18n import _
-import keepalive, util
-
-def hidepassword(url):
- '''hide user credential in a url string'''
- scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
- netloc = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc)
- return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
-
-def removeauth(url):
- '''remove all authentication information from a url string'''
- scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
- netloc = netloc[netloc.find('@')+1:]
- return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
-
-def netlocsplit(netloc):
- '''split [user[:passwd]@]host[:port] into 4-tuple.'''
-
- a = netloc.find('@')
- if a == -1:
- user, passwd = None, None
- else:
- userpass, netloc = netloc[:a], netloc[a+1:]
- c = userpass.find(':')
- if c == -1:
- user, passwd = urllib.unquote(userpass), None
- else:
- user = urllib.unquote(userpass[:c])
- passwd = urllib.unquote(userpass[c+1:])
- c = netloc.find(':')
- if c == -1:
- host, port = netloc, None
- else:
- host, port = netloc[:c], netloc[c+1:]
- return host, port, user, passwd
-
-def netlocunsplit(host, port, user=None, passwd=None):
- '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
- if port:
- hostport = host + ':' + port
- else:
- hostport = host
- if user:
- if passwd:
- userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
- else:
- userpass = urllib.quote(user)
- return userpass + '@' + hostport
- return hostport
-
-_safe = ('abcdefghijklmnopqrstuvwxyz'
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
- '0123456789' '_.-/')
-_safeset = None
-_hex = None
-def quotepath(path):
- '''quote the path part of a URL
-
- This is similar to urllib.quote, but it also tries to avoid
- quoting things twice (inspired by wget):
-
- >>> quotepath('abc def')
- 'abc%20def'
- >>> quotepath('abc%20def')
- 'abc%20def'
- >>> quotepath('abc%20 def')
- 'abc%20%20def'
- >>> quotepath('abc def%20')
- 'abc%20def%20'
- >>> quotepath('abc def%2')
- 'abc%20def%252'
- >>> quotepath('abc def%')
- 'abc%20def%25'
- '''
- global _safeset, _hex
- if _safeset is None:
- _safeset = set(_safe)
- _hex = set('abcdefABCDEF0123456789')
- l = list(path)
- for i in xrange(len(l)):
- c = l[i]
- if c == '%' and i + 2 < len(l) and (l[i+1] in _hex and l[i+2] in _hex):
- pass
- elif c not in _safeset:
- l[i] = '%%%02X' % ord(c)
- return ''.join(l)
-
-class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
- def __init__(self, ui):
- urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
- self.ui = ui
-
- def find_user_password(self, realm, authuri):
- authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
- self, realm, authuri)
- user, passwd = authinfo
- if user and passwd:
- self._writedebug(user, passwd)
- return (user, passwd)
-
- if not user:
- auth = self.readauthtoken(authuri)
- if auth:
- user, passwd = auth.get('username'), auth.get('password')
- if not user or not passwd:
- if not self.ui.interactive():
- raise util.Abort(_('http authorization required'))
-
- self.ui.write(_("http authorization required\n"))
- self.ui.status(_("realm: %s\n") % realm)
- if user:
- self.ui.status(_("user: %s\n") % user)
- else:
- user = self.ui.prompt(_("user:"), default=None)
-
- if not passwd:
- passwd = self.ui.getpass()
-
- self.add_password(realm, authuri, user, passwd)
- self._writedebug(user, passwd)
- return (user, passwd)
-
- def _writedebug(self, user, passwd):
- msg = _('http auth: user %s, password %s\n')
- self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
-
- def readauthtoken(self, uri):
- # Read configuration
- config = dict()
- for key, val in self.ui.configitems('auth'):
- group, setting = key.split('.', 1)
- gdict = config.setdefault(group, dict())
- gdict[setting] = val
-
- # Find the best match
- scheme, hostpath = uri.split('://', 1)
- bestlen = 0
- bestauth = None
- for auth in config.itervalues():
- prefix = auth.get('prefix')
- if not prefix: continue
- p = prefix.split('://', 1)
- if len(p) > 1:
- schemes, prefix = [p[0]], p[1]
- else:
- schemes = (auth.get('schemes') or 'https').split()
- if (prefix == '*' or hostpath.startswith(prefix)) and \
- len(prefix) > bestlen and scheme in schemes:
- bestlen = len(prefix)
- bestauth = auth
- return bestauth
-
-class proxyhandler(urllib2.ProxyHandler):
- def __init__(self, ui):
- proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
- # XXX proxyauthinfo = None
-
- if proxyurl:
- # proxy can be proper url or host[:port]
- if not (proxyurl.startswith('http:') or
- proxyurl.startswith('https:')):
- proxyurl = 'http://' + proxyurl + '/'
- snpqf = urlparse.urlsplit(proxyurl)
- proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
- hpup = netlocsplit(proxynetloc)
-
- proxyhost, proxyport, proxyuser, proxypasswd = hpup
- if not proxyuser:
- proxyuser = ui.config("http_proxy", "user")
- proxypasswd = ui.config("http_proxy", "passwd")
-
- # see if we should use a proxy for this url
- no_list = [ "localhost", "127.0.0.1" ]
- no_list.extend([p.lower() for
- p in ui.configlist("http_proxy", "no")])
- no_list.extend([p.strip().lower() for
- p in os.getenv("no_proxy", '').split(',')
- if p.strip()])
- # "http_proxy.always" config is for running tests on localhost
- if ui.configbool("http_proxy", "always"):
- self.no_list = []
- else:
- self.no_list = no_list
-
- proxyurl = urlparse.urlunsplit((
- proxyscheme, netlocunsplit(proxyhost, proxyport,
- proxyuser, proxypasswd or ''),
- proxypath, proxyquery, proxyfrag))
- proxies = {'http': proxyurl, 'https': proxyurl}
- ui.debug(_('proxying through http://%s:%s\n') %
- (proxyhost, proxyport))
- else:
- proxies = {}
-
- # urllib2 takes proxy values from the environment and those
- # will take precedence if found, so drop them
- for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
- try:
- if env in os.environ:
- del os.environ[env]
- except OSError:
- pass
-
- urllib2.ProxyHandler.__init__(self, proxies)
- self.ui = ui
-
- def proxy_open(self, req, proxy, type_):
- host = req.get_host().split(':')[0]
- if host in self.no_list:
- return None
-
- # work around a bug in Python < 2.4.2
- # (it leaves a "\n" at the end of Proxy-authorization headers)
- baseclass = req.__class__
- class _request(baseclass):
- def add_header(self, key, val):
- if key.lower() == 'proxy-authorization':
- val = val.strip()
- return baseclass.add_header(self, key, val)
- req.__class__ = _request
-
- return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_)
-
-class httpsendfile(file):
- def __len__(self):
- return os.fstat(self.fileno()).st_size
-
-def _gen_sendfile(connection):
- def _sendfile(self, data):
- # send a file
- if isinstance(data, httpsendfile):
- # if auth required, some data sent twice, so rewind here
- data.seek(0)
- for chunk in util.filechunkiter(data):
- connection.send(self, chunk)
- else:
- connection.send(self, data)
- return _sendfile
-
-has_https = hasattr(urllib2, 'HTTPSHandler')
-if has_https:
- try:
- # avoid using deprecated/broken FakeSocket in python 2.6
- import ssl
- _ssl_wrap_socket = ssl.wrap_socket
- except ImportError:
- def _ssl_wrap_socket(sock, key_file, cert_file):
- ssl = socket.ssl(sock, key_file, cert_file)
- return httplib.FakeSocket(sock, ssl)
-
-class httpconnection(keepalive.HTTPConnection):
- # must be able to send big bundle as stream.
- send = _gen_sendfile(keepalive.HTTPConnection)
-
- def _proxytunnel(self):
- proxyheaders = dict(
- [(x, self.headers[x]) for x in self.headers
- if x.lower().startswith('proxy-')])
- self._set_hostport(self.host, self.port)
- self.send('CONNECT %s:%d HTTP/1.0\r\n' % (self.realhost, self.realport))
- for header in proxyheaders.iteritems():
- self.send('%s: %s\r\n' % header)
- self.send('\r\n')
-
- # majority of the following code is duplicated from
- # httplib.HTTPConnection as there are no adequate places to
- # override functions to provide the needed functionality
- res = self.response_class(self.sock,
- strict=self.strict,
- method=self._method)
-
- while True:
- version, status, reason = res._read_status()
- if status != httplib.CONTINUE:
- break
- while True:
- skip = res.fp.readline().strip()
- if not skip:
- break
- res.status = status
- res.reason = reason.strip()
-
- if res.status == 200:
- while True:
- line = res.fp.readline()
- if line == '\r\n':
- break
- return True
-
- if version == 'HTTP/1.0':
- res.version = 10
- elif version.startswith('HTTP/1.'):
- res.version = 11
- elif version == 'HTTP/0.9':
- res.version = 9
- else:
- raise httplib.UnknownProtocol(version)
-
- if res.version == 9:
- res.length = None
- res.chunked = 0
- res.will_close = 1
- res.msg = httplib.HTTPMessage(cStringIO.StringIO())
- return False
-
- res.msg = httplib.HTTPMessage(res.fp)
- res.msg.fp = None
-
- # are we using the chunked-style of transfer encoding?
- trenc = res.msg.getheader('transfer-encoding')
- if trenc and trenc.lower() == "chunked":
- res.chunked = 1
- res.chunk_left = None
- else:
- res.chunked = 0
-
- # will the connection close at the end of the response?
- res.will_close = res._check_close()
-
- # do we have a Content-Length?
- # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
- length = res.msg.getheader('content-length')
- if length and not res.chunked:
- try:
- res.length = int(length)
- except ValueError:
- res.length = None
- else:
- if res.length < 0: # ignore nonsensical negative lengths
- res.length = None
- else:
- res.length = None
-
- # does the body have a fixed length? (of zero)
- if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
- 100 <= status < 200 or # 1xx codes
- res._method == 'HEAD'):
- res.length = 0
-
- # if the connection remains open, and we aren't using chunked, and
- # a content-length was not provided, then assume that the connection
- # WILL close.
- if (not res.will_close and
- not res.chunked and
- res.length is None):
- res.will_close = 1
-
- self.proxyres = res
-
- return False
-
- def connect(self):
- if has_https and self.realhost: # use CONNECT proxy
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.sock.connect((self.host, self.port))
- if self._proxytunnel():
- # we do not support client x509 certificates
- self.sock = _ssl_wrap_socket(self.sock, None, None)
- else:
- keepalive.HTTPConnection.connect(self)
-
- def getresponse(self):
- proxyres = getattr(self, 'proxyres', None)
- if proxyres:
- if proxyres.will_close:
- self.close()
- self.proxyres = None
- return proxyres
- return keepalive.HTTPConnection.getresponse(self)
-
-class httphandler(keepalive.HTTPHandler):
- def http_open(self, req):
- return self.do_open(httpconnection, req)
-
- def _start_transaction(self, h, req):
- if req.get_selector() == req.get_full_url(): # has proxy
- urlparts = urlparse.urlparse(req.get_selector())
- if urlparts[0] == 'https': # only use CONNECT for HTTPS
- if ':' in urlparts[1]:
- realhost, realport = urlparts[1].split(':')
- realport = int(realport)
- else:
- realhost = urlparts[1]
- realport = 443
-
- h.realhost = realhost
- h.realport = realport
- h.headers = req.headers.copy()
- h.headers.update(self.parent.addheaders)
- return keepalive.HTTPHandler._start_transaction(self, h, req)
-
- h.realhost = None
- h.realport = None
- h.headers = None
- return keepalive.HTTPHandler._start_transaction(self, h, req)
-
- def __del__(self):
- self.close_all()
-
-if has_https:
- class httpsconnection(httplib.HTTPSConnection):
- response_class = keepalive.HTTPResponse
- # must be able to send big bundle as stream.
- send = _gen_sendfile(httplib.HTTPSConnection)
-
- class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler):
- def __init__(self, ui):
- keepalive.KeepAliveHandler.__init__(self)
- urllib2.HTTPSHandler.__init__(self)
- self.ui = ui
- self.pwmgr = passwordmgr(self.ui)
-
- def https_open(self, req):
- self.auth = self.pwmgr.readauthtoken(req.get_full_url())
- return self.do_open(self._makeconnection, req)
-
- def _makeconnection(self, host, port=443, *args, **kwargs):
- keyfile = None
- certfile = None
-
- if args: # key_file
- keyfile = args.pop(0)
- if args: # cert_file
- certfile = args.pop(0)
-
- # if the user has specified different key/cert files in
- # hgrc, we prefer these
- if self.auth and 'key' in self.auth and 'cert' in self.auth:
- keyfile = self.auth['key']
- certfile = self.auth['cert']
-
- # let host port take precedence
- if ':' in host and '[' not in host or ']:' in host:
- host, port = host.rsplit(':', 1)
- port = int(port)
- if '[' in host:
- host = host[1:-1]
-
- return httpsconnection(host, port, keyfile, certfile, *args, **kwargs)
-
-# In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
-# it doesn't know about the auth type requested. This can happen if
-# somebody is using BasicAuth and types a bad password.
-class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
- def http_error_auth_reqed(self, auth_header, host, req, headers):
- try:
- return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
- self, auth_header, host, req, headers)
- except ValueError, inst:
- arg = inst.args[0]
- if arg.startswith("AbstractDigestAuthHandler doesn't know "):
- return
- raise
-
-def getauthinfo(path):
- scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
- if not urlpath:
- urlpath = '/'
- if scheme != 'file':
- # XXX: why are we quoting the path again with some smart
- # heuristic here? Anyway, it cannot be done with file://
- # urls since path encoding is os/fs dependent (see
- # urllib.pathname2url() for details).
- urlpath = quotepath(urlpath)
- host, port, user, passwd = netlocsplit(netloc)
-
- # urllib cannot handle URLs with embedded user or passwd
- url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
- urlpath, query, frag))
- if user:
- netloc = host
- if port:
- netloc += ':' + port
- # Python < 2.4.3 uses only the netloc to search for a password
- authinfo = (None, (url, netloc), user, passwd or '')
- else:
- authinfo = None
- return url, authinfo
-
-handlerfuncs = []
-
-def opener(ui, authinfo=None):
- '''
- construct an opener suitable for urllib2
- authinfo will be added to the password manager
- '''
- handlers = [httphandler()]
- if has_https:
- handlers.append(httpshandler(ui))
-
- handlers.append(proxyhandler(ui))
-
- passmgr = passwordmgr(ui)
- if authinfo is not None:
- passmgr.add_password(*authinfo)
- user, passwd = authinfo[2:4]
- ui.debug(_('http auth: user %s, password %s\n') %
- (user, passwd and '*' * len(passwd) or 'not set'))
-
- handlers.extend((urllib2.HTTPBasicAuthHandler(passmgr),
- httpdigestauthhandler(passmgr)))
- handlers.extend([h(ui, passmgr) for h in handlerfuncs])
- opener = urllib2.build_opener(*handlers)
-
- # 1.0 here is the _protocol_ version
- opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
- opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
- return opener
-
-scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
-
-def open(ui, url, data=None):
- scheme = None
- m = scheme_re.search(url)
- if m:
- scheme = m.group(1).lower()
- if not scheme:
- path = util.normpath(os.path.abspath(url))
- url = 'file://' + urllib.pathname2url(path)
- authinfo = None
- else:
- url, authinfo = getauthinfo(url)
- return opener(ui, authinfo).open(url, data)
diff --git a/sys/lib/python/mercurial/util.py b/sys/lib/python/mercurial/util.py
deleted file mode 100644
index 02ff43d7f..000000000
--- a/sys/lib/python/mercurial/util.py
+++ /dev/null
@@ -1,1284 +0,0 @@
-# util.py - Mercurial utility functions and platform specfic implementations
-#
-# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-"""Mercurial utility functions and platform specfic implementations.
-
-This contains helper routines that are independent of the SCM core and
-hide platform-specific details from the core.
-"""
-
-from i18n import _
-import error, osutil
-import cStringIO, errno, re, shutil, sys, tempfile, traceback
-import os, stat, time, calendar, random, textwrap
-import imp
-
-# Python compatibility
-
-def sha1(s):
- return _fastsha1(s)
-
-def _fastsha1(s):
- # This function will import sha1 from hashlib or sha (whichever is
- # available) and overwrite itself with it on the first call.
- # Subsequent calls will go directly to the imported function.
- try:
- from hashlib import sha1 as _sha1
- except ImportError:
- from sha import sha as _sha1
- global _fastsha1, sha1
- _fastsha1 = sha1 = _sha1
- return _sha1(s)
-
-import subprocess
-closefds = os.name == 'posix'
-def popen2(cmd):
- # Setting bufsize to -1 lets the system decide the buffer size.
- # The default for bufsize is 0, meaning unbuffered. This leads to
- # poor performance on Mac OS X: http://bugs.python.org/issue4194
- p = subprocess.Popen(cmd, shell=True, bufsize=-1,
- close_fds=closefds,
- stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- return p.stdin, p.stdout
-def popen3(cmd):
- p = subprocess.Popen(cmd, shell=True, bufsize=-1,
- close_fds=closefds,
- stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- return p.stdin, p.stdout, p.stderr
-
-def version():
- """Return version information if available."""
- try:
- import __version__
- return __version__.version
- except ImportError:
- return 'unknown'
-
-# used by parsedate
-defaultdateformats = (
- '%Y-%m-%d %H:%M:%S',
- '%Y-%m-%d %I:%M:%S%p',
- '%Y-%m-%d %H:%M',
- '%Y-%m-%d %I:%M%p',
- '%Y-%m-%d',
- '%m-%d',
- '%m/%d',
- '%m/%d/%y',
- '%m/%d/%Y',
- '%a %b %d %H:%M:%S %Y',
- '%a %b %d %I:%M:%S%p %Y',
- '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
- '%b %d %H:%M:%S %Y',
- '%b %d %I:%M:%S%p %Y',
- '%b %d %H:%M:%S',
- '%b %d %I:%M:%S%p',
- '%b %d %H:%M',
- '%b %d %I:%M%p',
- '%b %d %Y',
- '%b %d',
- '%H:%M:%S',
- '%I:%M:%SP',
- '%H:%M',
- '%I:%M%p',
-)
-
-extendeddateformats = defaultdateformats + (
- "%Y",
- "%Y-%m",
- "%b",
- "%b %Y",
- )
-
-def cachefunc(func):
- '''cache the result of function calls'''
- # XXX doesn't handle keywords args
- cache = {}
- if func.func_code.co_argcount == 1:
- # we gain a small amount of time because
- # we don't need to pack/unpack the list
- def f(arg):
- if arg not in cache:
- cache[arg] = func(arg)
- return cache[arg]
- else:
- def f(*args):
- if args not in cache:
- cache[args] = func(*args)
- return cache[args]
-
- return f
-
-def lrucachefunc(func):
- '''cache most recent results of function calls'''
- cache = {}
- order = []
- if func.func_code.co_argcount == 1:
- def f(arg):
- if arg not in cache:
- if len(cache) > 20:
- del cache[order.pop(0)]
- cache[arg] = func(arg)
- else:
- order.remove(arg)
- order.append(arg)
- return cache[arg]
- else:
- def f(*args):
- if args not in cache:
- if len(cache) > 20:
- del cache[order.pop(0)]
- cache[args] = func(*args)
- else:
- order.remove(args)
- order.append(args)
- return cache[args]
-
- return f
-
-class propertycache(object):
- def __init__(self, func):
- self.func = func
- self.name = func.__name__
- def __get__(self, obj, type=None):
- result = self.func(obj)
- setattr(obj, self.name, result)
- return result
-
-def pipefilter(s, cmd):
- '''filter string S through command CMD, returning its output'''
- p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
- stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- pout, perr = p.communicate(s)
- return pout
-
-def tempfilter(s, cmd):
- '''filter string S through a pair of temporary files with CMD.
- CMD is used as a template to create the real command to be run,
- with the strings INFILE and OUTFILE replaced by the real names of
- the temporary files generated.'''
- inname, outname = None, None
- try:
- infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
- fp = os.fdopen(infd, 'wb')
- fp.write(s)
- fp.close()
- outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
- os.close(outfd)
- cmd = cmd.replace('INFILE', inname)
- cmd = cmd.replace('OUTFILE', outname)
- code = os.system(cmd)
- if sys.platform == 'OpenVMS' and code & 1:
- code = 0
- if code: raise Abort(_("command '%s' failed: %s") %
- (cmd, explain_exit(code)))
- return open(outname, 'rb').read()
- finally:
- try:
- if inname: os.unlink(inname)
- except: pass
- try:
- if outname: os.unlink(outname)
- except: pass
-
-filtertable = {
- 'tempfile:': tempfilter,
- 'pipe:': pipefilter,
- }
-
-def filter(s, cmd):
- "filter a string through a command that transforms its input to its output"
- for name, fn in filtertable.iteritems():
- if cmd.startswith(name):
- return fn(s, cmd[len(name):].lstrip())
- return pipefilter(s, cmd)
-
-def binary(s):
- """return true if a string is binary data"""
- return bool(s and '\0' in s)
-
-def increasingchunks(source, min=1024, max=65536):
- '''return no less than min bytes per chunk while data remains,
- doubling min after each chunk until it reaches max'''
- def log2(x):
- if not x:
- return 0
- i = 0
- while x:
- x >>= 1
- i += 1
- return i - 1
-
- buf = []
- blen = 0
- for chunk in source:
- buf.append(chunk)
- blen += len(chunk)
- if blen >= min:
- if min < max:
- min = min << 1
- nmin = 1 << log2(blen)
- if nmin > min:
- min = nmin
- if min > max:
- min = max
- yield ''.join(buf)
- blen = 0
- buf = []
- if buf:
- yield ''.join(buf)
-
-Abort = error.Abort
-
-def always(fn): return True
-def never(fn): return False
-
-def pathto(root, n1, n2):
- '''return the relative path from one place to another.
- root should use os.sep to separate directories
- n1 should use os.sep to separate directories
- n2 should use "/" to separate directories
- returns an os.sep-separated path.
-
- If n1 is a relative path, it's assumed it's
- relative to root.
- n2 should always be relative to root.
- '''
- if not n1: return localpath(n2)
- if os.path.isabs(n1):
- if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
- return os.path.join(root, localpath(n2))
- n2 = '/'.join((pconvert(root), n2))
- a, b = splitpath(n1), n2.split('/')
- a.reverse()
- b.reverse()
- while a and b and a[-1] == b[-1]:
- a.pop()
- b.pop()
- b.reverse()
- return os.sep.join((['..'] * len(a)) + b) or '.'
-
-def canonpath(root, cwd, myname):
- """return the canonical path of myname, given cwd and root"""
- if root == os.sep:
- rootsep = os.sep
- elif endswithsep(root):
- rootsep = root
- else:
- rootsep = root + os.sep
- name = myname
- if not os.path.isabs(name):
- name = os.path.join(root, cwd, name)
- name = os.path.normpath(name)
- audit_path = path_auditor(root)
- if name != rootsep and name.startswith(rootsep):
- name = name[len(rootsep):]
- audit_path(name)
- return pconvert(name)
- elif name == root:
- return ''
- else:
- # Determine whether `name' is in the hierarchy at or beneath `root',
- # by iterating name=dirname(name) until that causes no change (can't
- # check name == '/', because that doesn't work on windows). For each
- # `name', compare dev/inode numbers. If they match, the list `rel'
- # holds the reversed list of components making up the relative file
- # name we want.
- root_st = os.stat(root)
- rel = []
- while True:
- try:
- name_st = os.stat(name)
- except OSError:
- break
- if samestat(name_st, root_st):
- if not rel:
- # name was actually the same as root (maybe a symlink)
- return ''
- rel.reverse()
- name = os.path.join(*rel)
- audit_path(name)
- return pconvert(name)
- dirname, basename = os.path.split(name)
- rel.append(basename)
- if dirname == name:
- break
- name = dirname
-
- raise Abort('%s not under root' % myname)
-
-_hgexecutable = None
-
-def main_is_frozen():
- """return True if we are a frozen executable.
-
- The code supports py2exe (most common, Windows only) and tools/freeze
- (portable, not much used).
- """
- return (hasattr(sys, "frozen") or # new py2exe
- hasattr(sys, "importers") or # old py2exe
- imp.is_frozen("__main__")) # tools/freeze
-
-def hgexecutable():
- """return location of the 'hg' executable.
-
- Defaults to $HG or 'hg' in the search path.
- """
- if _hgexecutable is None:
- hg = os.environ.get('HG')
- if hg:
- set_hgexecutable(hg)
- elif main_is_frozen():
- set_hgexecutable(sys.executable)
- else:
- set_hgexecutable(find_exe('hg') or 'hg')
- return _hgexecutable
-
-def set_hgexecutable(path):
- """set location of the 'hg' executable"""
- global _hgexecutable
- _hgexecutable = path
-
-def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
- '''enhanced shell command execution.
- run with environment maybe modified, maybe in different dir.
-
- if command fails and onerr is None, return status. if ui object,
- print error message and return status, else raise onerr object as
- exception.'''
- def py2shell(val):
- 'convert python object into string that is useful to shell'
- if val is None or val is False:
- return '0'
- if val is True:
- return '1'
- return str(val)
- oldenv = {}
- for k in environ:
- oldenv[k] = os.environ.get(k)
- if cwd is not None:
- oldcwd = os.getcwd()
- origcmd = cmd
- if os.name == 'nt':
- cmd = '"%s"' % cmd
- try:
- for k, v in environ.iteritems():
- os.environ[k] = py2shell(v)
- os.environ['HG'] = hgexecutable()
- if cwd is not None and oldcwd != cwd:
- os.chdir(cwd)
- rc = os.system(cmd)
- if sys.platform == 'OpenVMS' and rc & 1:
- rc = 0
- if rc and onerr:
- errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
- explain_exit(rc)[0])
- if errprefix:
- errmsg = '%s: %s' % (errprefix, errmsg)
- try:
- onerr.warn(errmsg + '\n')
- except AttributeError:
- raise onerr(errmsg)
- return rc
- finally:
- for k, v in oldenv.iteritems():
- if v is None:
- del os.environ[k]
- else:
- os.environ[k] = v
- if cwd is not None and oldcwd != cwd:
- os.chdir(oldcwd)
-
-def checksignature(func):
- '''wrap a function with code to check for calling errors'''
- def check(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except TypeError:
- if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
- raise error.SignatureError
- raise
-
- return check
-
-# os.path.lexists is not available on python2.3
-def lexists(filename):
- "test whether a file with this name exists. does not follow symlinks"
- try:
- os.lstat(filename)
- except:
- return False
- return True
-
-def rename(src, dst):
- """forcibly rename a file"""
- try:
- os.rename(src, dst)
- except OSError, err: # FIXME: check err (EEXIST ?)
-
- # On windows, rename to existing file is not allowed, so we
- # must delete destination first. But if a file is open, unlink
- # schedules it for delete but does not delete it. Rename
- # happens immediately even for open files, so we rename
- # destination to a temporary name, then delete that. Then
- # rename is safe to do.
- # The temporary name is chosen at random to avoid the situation
- # where a file is left lying around from a previous aborted run.
- # The usual race condition this introduces can't be avoided as
- # we need the name to rename into, and not the file itself. Due
- # to the nature of the operation however, any races will at worst
- # lead to the rename failing and the current operation aborting.
-
- def tempname(prefix):
- for tries in xrange(10):
- temp = '%s-%08x' % (prefix, random.randint(0, 0xffffffff))
- if not os.path.exists(temp):
- return temp
- raise IOError, (errno.EEXIST, "No usable temporary filename found")
-
- temp = tempname(dst)
- os.rename(dst, temp)
- os.unlink(temp)
- os.rename(src, dst)
-
-def unlink(f):
- """unlink and remove the directory if it is empty"""
- os.unlink(f)
- # try removing directories that might now be empty
- try:
- os.removedirs(os.path.dirname(f))
- except OSError:
- pass
-
-def copyfile(src, dest):
- "copy a file, preserving mode and atime/mtime"
- if os.path.islink(src):
- try:
- os.unlink(dest)
- except:
- pass
- os.symlink(os.readlink(src), dest)
- else:
- try:
- shutil.copyfile(src, dest)
- shutil.copystat(src, dest)
- except shutil.Error, inst:
- raise Abort(str(inst))
-
-def copyfiles(src, dst, hardlink=None):
- """Copy a directory tree using hardlinks if possible"""
-
- if hardlink is None:
- hardlink = (os.stat(src).st_dev ==
- os.stat(os.path.dirname(dst)).st_dev)
-
- if os.path.isdir(src):
- os.mkdir(dst)
- for name, kind in osutil.listdir(src):
- srcname = os.path.join(src, name)
- dstname = os.path.join(dst, name)
- copyfiles(srcname, dstname, hardlink)
- else:
- if hardlink:
- try:
- os_link(src, dst)
- except (IOError, OSError):
- hardlink = False
- shutil.copy(src, dst)
- else:
- shutil.copy(src, dst)
-
-class path_auditor(object):
- '''ensure that a filesystem path contains no banned components.
- the following properties of a path are checked:
-
- - under top-level .hg
- - starts at the root of a windows drive
- - contains ".."
- - traverses a symlink (e.g. a/symlink_here/b)
- - inside a nested repository'''
-
- def __init__(self, root):
- self.audited = set()
- self.auditeddir = set()
- self.root = root
-
- def __call__(self, path):
- if path in self.audited:
- return
- normpath = os.path.normcase(path)
- parts = splitpath(normpath)
- if (os.path.splitdrive(path)[0]
- or parts[0].lower() in ('.hg', '.hg.', '')
- or os.pardir in parts):
- raise Abort(_("path contains illegal component: %s") % path)
- if '.hg' in path.lower():
- lparts = [p.lower() for p in parts]
- for p in '.hg', '.hg.':
- if p in lparts[1:]:
- pos = lparts.index(p)
- base = os.path.join(*parts[:pos])
- raise Abort(_('path %r is inside repo %r') % (path, base))
- def check(prefix):
- curpath = os.path.join(self.root, prefix)
- try:
- st = os.lstat(curpath)
- except OSError, err:
- # EINVAL can be raised as invalid path syntax under win32.
- # They must be ignored for patterns can be checked too.
- if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
- raise
- else:
- if stat.S_ISLNK(st.st_mode):
- raise Abort(_('path %r traverses symbolic link %r') %
- (path, prefix))
- elif (stat.S_ISDIR(st.st_mode) and
- os.path.isdir(os.path.join(curpath, '.hg'))):
- raise Abort(_('path %r is inside repo %r') %
- (path, prefix))
- parts.pop()
- prefixes = []
- while parts:
- prefix = os.sep.join(parts)
- if prefix in self.auditeddir:
- break
- check(prefix)
- prefixes.append(prefix)
- parts.pop()
-
- self.audited.add(path)
- # only add prefixes to the cache after checking everything: we don't
- # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
- self.auditeddir.update(prefixes)
-
-def nlinks(pathname):
- """Return number of hardlinks for the given file."""
- return os.lstat(pathname).st_nlink
-
-if hasattr(os, 'link'):
- os_link = os.link
-else:
- def os_link(src, dst):
- raise OSError(0, _("Hardlinks not supported"))
-
-def lookup_reg(key, name=None, scope=None):
- return None
-
-if os.name == 'nt':
- from windows import *
-else:
- from posix import *
-
-def makelock(info, pathname):
- try:
- return os.symlink(info, pathname)
- except OSError, why:
- if why.errno == errno.EEXIST:
- raise
- except AttributeError: # no symlink in os
- pass
-
- ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
- os.write(ld, info)
- os.close(ld)
-
-def readlock(pathname):
- try:
- return os.readlink(pathname)
- except OSError, why:
- if why.errno not in (errno.EINVAL, errno.ENOSYS):
- raise
- except AttributeError: # no symlink in os
- pass
- return posixfile(pathname).read()
-
-def fstat(fp):
- '''stat file object that may not have fileno method.'''
- try:
- return os.fstat(fp.fileno())
- except AttributeError:
- return os.stat(fp.name)
-
-# File system features
-
-def checkcase(path):
- """
- Check whether the given path is on a case-sensitive filesystem
-
- Requires a path (like /foo/.hg) ending with a foldable final
- directory component.
- """
- s1 = os.stat(path)
- d, b = os.path.split(path)
- p2 = os.path.join(d, b.upper())
- if path == p2:
- p2 = os.path.join(d, b.lower())
- try:
- s2 = os.stat(p2)
- if s2 == s1:
- return False
- return True
- except:
- return True
-
-_fspathcache = {}
-def fspath(name, root):
- '''Get name in the case stored in the filesystem
-
- The name is either relative to root, or it is an absolute path starting
- with root. Note that this function is unnecessary, and should not be
- called, for case-sensitive filesystems (simply because it's expensive).
- '''
- # If name is absolute, make it relative
- if name.lower().startswith(root.lower()):
- l = len(root)
- if name[l] == os.sep or name[l] == os.altsep:
- l = l + 1
- name = name[l:]
-
- if not os.path.exists(os.path.join(root, name)):
- return None
-
- seps = os.sep
- if os.altsep:
- seps = seps + os.altsep
- # Protect backslashes. This gets silly very quickly.
- seps.replace('\\','\\\\')
- pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
- dir = os.path.normcase(os.path.normpath(root))
- result = []
- for part, sep in pattern.findall(name):
- if sep:
- result.append(sep)
- continue
-
- if dir not in _fspathcache:
- _fspathcache[dir] = os.listdir(dir)
- contents = _fspathcache[dir]
-
- lpart = part.lower()
- for n in contents:
- if n.lower() == lpart:
- result.append(n)
- break
- else:
- # Cannot happen, as the file exists!
- result.append(part)
- dir = os.path.join(dir, lpart)
-
- return ''.join(result)
-
-def checkexec(path):
- """
- Check whether the given path is on a filesystem with UNIX-like exec flags
-
- Requires a directory (like /foo/.hg)
- """
-
- # VFAT on some Linux versions can flip mode but it doesn't persist
- # a FS remount. Frequently we can detect it if files are created
- # with exec bit on.
-
- try:
- EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
- fh, fn = tempfile.mkstemp("", "", path)
- try:
- os.close(fh)
- m = os.stat(fn).st_mode & 0777
- new_file_has_exec = m & EXECFLAGS
- os.chmod(fn, m ^ EXECFLAGS)
- exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
- finally:
- os.unlink(fn)
- except (IOError, OSError):
- # we don't care, the user probably won't be able to commit anyway
- return False
- return not (new_file_has_exec or exec_flags_cannot_flip)
-
-def checklink(path):
- """check whether the given path is on a symlink-capable filesystem"""
- # mktemp is not racy because symlink creation will fail if the
- # file already exists
- name = tempfile.mktemp(dir=path)
- try:
- os.symlink(".", name)
- os.unlink(name)
- return True
- except (OSError, AttributeError):
- return False
-
-def needbinarypatch():
- """return True if patches should be applied in binary mode by default."""
- return os.name == 'nt'
-
-def endswithsep(path):
- '''Check path ends with os.sep or os.altsep.'''
- return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
-
-def splitpath(path):
- '''Split path by os.sep.
- Note that this function does not use os.altsep because this is
- an alternative of simple "xxx.split(os.sep)".
- It is recommended to use os.path.normpath() before using this
- function if need.'''
- return path.split(os.sep)
-
-def gui():
- '''Are we running in a GUI?'''
- return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
-
-def mktempcopy(name, emptyok=False, createmode=None):
- """Create a temporary file with the same contents from name
-
- The permission bits are copied from the original file.
-
- If the temporary file is going to be truncated immediately, you
- can use emptyok=True as an optimization.
-
- Returns the name of the temporary file.
- """
- d, fn = os.path.split(name)
- fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
- os.close(fd)
- # Temporary files are created with mode 0600, which is usually not
- # what we want. If the original file already exists, just copy
- # its mode. Otherwise, manually obey umask.
- try:
- st_mode = os.lstat(name).st_mode & 0777
- except OSError, inst:
- if inst.errno != errno.ENOENT:
- raise
- st_mode = createmode
- if st_mode is None:
- st_mode = ~umask
- st_mode &= 0666
- os.chmod(temp, st_mode)
- if emptyok:
- return temp
- try:
- try:
- ifp = posixfile(name, "rb")
- except IOError, inst:
- if inst.errno == errno.ENOENT:
- return temp
- if not getattr(inst, 'filename', None):
- inst.filename = name
- raise
- ofp = posixfile(temp, "wb")
- for chunk in filechunkiter(ifp):
- ofp.write(chunk)
- ifp.close()
- ofp.close()
- except:
- try: os.unlink(temp)
- except: pass
- raise
- return temp
-
-class atomictempfile(object):
- """file-like object that atomically updates a file
-
- All writes will be redirected to a temporary copy of the original
- file. When rename is called, the copy is renamed to the original
- name, making the changes visible.
- """
- def __init__(self, name, mode, createmode):
- self.__name = name
- self._fp = None
- self.temp = mktempcopy(name, emptyok=('w' in mode),
- createmode=createmode)
- self._fp = posixfile(self.temp, mode)
-
- def __getattr__(self, name):
- return getattr(self._fp, name)
-
- def rename(self):
- if not self._fp.closed:
- self._fp.close()
- rename(self.temp, localpath(self.__name))
-
- def __del__(self):
- if not self._fp:
- return
- if not self._fp.closed:
- try:
- os.unlink(self.temp)
- except: pass
- self._fp.close()
-
-def makedirs(name, mode=None):
- """recursive directory creation with parent mode inheritance"""
- try:
- os.mkdir(name)
- if mode is not None:
- os.chmod(name, mode)
- return
- except OSError, err:
- if err.errno == errno.EEXIST:
- return
- if err.errno != errno.ENOENT:
- raise
- parent = os.path.abspath(os.path.dirname(name))
- makedirs(parent, mode)
- makedirs(name, mode)
-
-class opener(object):
- """Open files relative to a base directory
-
- This class is used to hide the details of COW semantics and
- remote file access from higher level code.
- """
- def __init__(self, base, audit=True):
- self.base = base
- if audit:
- self.audit_path = path_auditor(base)
- else:
- self.audit_path = always
- self.createmode = None
-
- @propertycache
- def _can_symlink(self):
- return checklink(self.base)
-
- def _fixfilemode(self, name):
- if self.createmode is None:
- return
- os.chmod(name, self.createmode & 0666)
-
- def __call__(self, path, mode="r", text=False, atomictemp=False):
- self.audit_path(path)
- f = os.path.join(self.base, path)
-
- if not text and "b" not in mode:
- mode += "b" # for that other OS
-
- nlink = -1
- if mode not in ("r", "rb"):
- try:
- nlink = nlinks(f)
- except OSError:
- nlink = 0
- d = os.path.dirname(f)
- if not os.path.isdir(d):
- makedirs(d, self.createmode)
- if atomictemp:
- return atomictempfile(f, mode, self.createmode)
- if nlink > 1:
- rename(mktempcopy(f), f)
- fp = posixfile(f, mode)
- if nlink == 0:
- self._fixfilemode(f)
- return fp
-
- def symlink(self, src, dst):
- self.audit_path(dst)
- linkname = os.path.join(self.base, dst)
- try:
- os.unlink(linkname)
- except OSError:
- pass
-
- dirname = os.path.dirname(linkname)
- if not os.path.exists(dirname):
- makedirs(dirname, self.createmode)
-
- if self._can_symlink:
- try:
- os.symlink(src, linkname)
- except OSError, err:
- raise OSError(err.errno, _('could not symlink to %r: %s') %
- (src, err.strerror), linkname)
- else:
- f = self(dst, "w")
- f.write(src)
- f.close()
- self._fixfilemode(dst)
-
-class chunkbuffer(object):
- """Allow arbitrary sized chunks of data to be efficiently read from an
- iterator over chunks of arbitrary size."""
-
- def __init__(self, in_iter):
- """in_iter is the iterator that's iterating over the input chunks.
- targetsize is how big a buffer to try to maintain."""
- self.iter = iter(in_iter)
- self.buf = ''
- self.targetsize = 2**16
-
- def read(self, l):
- """Read L bytes of data from the iterator of chunks of data.
- Returns less than L bytes if the iterator runs dry."""
- if l > len(self.buf) and self.iter:
- # Clamp to a multiple of self.targetsize
- targetsize = max(l, self.targetsize)
- collector = cStringIO.StringIO()
- collector.write(self.buf)
- collected = len(self.buf)
- for chunk in self.iter:
- collector.write(chunk)
- collected += len(chunk)
- if collected >= targetsize:
- break
- if collected < targetsize:
- self.iter = False
- self.buf = collector.getvalue()
- if len(self.buf) == l:
- s, self.buf = str(self.buf), ''
- else:
- s, self.buf = self.buf[:l], buffer(self.buf, l)
- return s
-
-def filechunkiter(f, size=65536, limit=None):
- """Create a generator that produces the data in the file size
- (default 65536) bytes at a time, up to optional limit (default is
- to read all data). Chunks may be less than size bytes if the
- chunk is the last chunk in the file, or the file is a socket or
- some other type of file that sometimes reads less data than is
- requested."""
- assert size >= 0
- assert limit is None or limit >= 0
- while True:
- if limit is None: nbytes = size
- else: nbytes = min(limit, size)
- s = nbytes and f.read(nbytes)
- if not s: break
- if limit: limit -= len(s)
- yield s
-
-def makedate():
- lt = time.localtime()
- if lt[8] == 1 and time.daylight:
- tz = time.altzone
- else:
- tz = time.timezone
- return time.mktime(lt), tz
-
-def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
- """represent a (unixtime, offset) tuple as a localized time.
- unixtime is seconds since the epoch, and offset is the time zone's
- number of seconds away from UTC. if timezone is false, do not
- append time zone to string."""
- t, tz = date or makedate()
- if "%1" in format or "%2" in format:
- sign = (tz > 0) and "-" or "+"
- minutes = abs(tz) // 60
- format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
- format = format.replace("%2", "%02d" % (minutes % 60))
- s = time.strftime(format, time.gmtime(float(t) - tz))
- return s
-
-def shortdate(date=None):
- """turn (timestamp, tzoff) tuple into iso 8631 date."""
- return datestr(date, format='%Y-%m-%d')
-
-def strdate(string, format, defaults=[]):
- """parse a localized time string and return a (unixtime, offset) tuple.
- if the string cannot be parsed, ValueError is raised."""
- def timezone(string):
- tz = string.split()[-1]
- if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
- sign = (tz[0] == "+") and 1 or -1
- hours = int(tz[1:3])
- minutes = int(tz[3:5])
- return -sign * (hours * 60 + minutes) * 60
- if tz == "GMT" or tz == "UTC":
- return 0
- return None
-
- # NOTE: unixtime = localunixtime + offset
- offset, date = timezone(string), string
- if offset != None:
- date = " ".join(string.split()[:-1])
-
- # add missing elements from defaults
- for part in defaults:
- found = [True for p in part if ("%"+p) in format]
- if not found:
- date += "@" + defaults[part]
- format += "@%" + part[0]
-
- timetuple = time.strptime(date, format)
- localunixtime = int(calendar.timegm(timetuple))
- if offset is None:
- # local timezone
- unixtime = int(time.mktime(timetuple))
- offset = unixtime - localunixtime
- else:
- unixtime = localunixtime + offset
- return unixtime, offset
-
-def parsedate(date, formats=None, defaults=None):
- """parse a localized date/time string and return a (unixtime, offset) tuple.
-
- The date may be a "unixtime offset" string or in one of the specified
- formats. If the date already is a (unixtime, offset) tuple, it is returned.
- """
- if not date:
- return 0, 0
- if isinstance(date, tuple) and len(date) == 2:
- return date
- if not formats:
- formats = defaultdateformats
- date = date.strip()
- try:
- when, offset = map(int, date.split(' '))
- except ValueError:
- # fill out defaults
- if not defaults:
- defaults = {}
- now = makedate()
- for part in "d mb yY HI M S".split():
- if part not in defaults:
- if part[0] in "HMS":
- defaults[part] = "00"
- else:
- defaults[part] = datestr(now, "%" + part[0])
-
- for format in formats:
- try:
- when, offset = strdate(date, format, defaults)
- except (ValueError, OverflowError):
- pass
- else:
- break
- else:
- raise Abort(_('invalid date: %r ') % date)
- # validate explicit (probably user-specified) date and
- # time zone offset. values must fit in signed 32 bits for
- # current 32-bit linux runtimes. timezones go from UTC-12
- # to UTC+14
- if abs(when) > 0x7fffffff:
- raise Abort(_('date exceeds 32 bits: %d') % when)
- if offset < -50400 or offset > 43200:
- raise Abort(_('impossible time zone offset: %d') % offset)
- return when, offset
-
-def matchdate(date):
- """Return a function that matches a given date match specifier
-
- Formats include:
-
- '{date}' match a given date to the accuracy provided
-
- '<{date}' on or before a given date
-
- '>{date}' on or after a given date
-
- """
-
- def lower(date):
- d = dict(mb="1", d="1")
- return parsedate(date, extendeddateformats, d)[0]
-
- def upper(date):
- d = dict(mb="12", HI="23", M="59", S="59")
- for days in "31 30 29".split():
- try:
- d["d"] = days
- return parsedate(date, extendeddateformats, d)[0]
- except:
- pass
- d["d"] = "28"
- return parsedate(date, extendeddateformats, d)[0]
-
- date = date.strip()
- if date[0] == "<":
- when = upper(date[1:])
- return lambda x: x <= when
- elif date[0] == ">":
- when = lower(date[1:])
- return lambda x: x >= when
- elif date[0] == "-":
- try:
- days = int(date[1:])
- except ValueError:
- raise Abort(_("invalid day spec: %s") % date[1:])
- when = makedate()[0] - days * 3600 * 24
- return lambda x: x >= when
- elif " to " in date:
- a, b = date.split(" to ")
- start, stop = lower(a), upper(b)
- return lambda x: x >= start and x <= stop
- else:
- start, stop = lower(date), upper(date)
- return lambda x: x >= start and x <= stop
-
-def shortuser(user):
- """Return a short representation of a user name or email address."""
- f = user.find('@')
- if f >= 0:
- user = user[:f]
- f = user.find('<')
- if f >= 0:
- user = user[f+1:]
- f = user.find(' ')
- if f >= 0:
- user = user[:f]
- f = user.find('.')
- if f >= 0:
- user = user[:f]
- return user
-
-def email(author):
- '''get email of author.'''
- r = author.find('>')
- if r == -1: r = None
- return author[author.find('<')+1:r]
-
-def ellipsis(text, maxlength=400):
- """Trim string to at most maxlength (default: 400) characters."""
- if len(text) <= maxlength:
- return text
- else:
- return "%s..." % (text[:maxlength-3])
-
-def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
- '''yield every hg repository under path, recursively.'''
- def errhandler(err):
- if err.filename == path:
- raise err
- if followsym and hasattr(os.path, 'samestat'):
- def _add_dir_if_not_there(dirlst, dirname):
- match = False
- samestat = os.path.samestat
- dirstat = os.stat(dirname)
- for lstdirstat in dirlst:
- if samestat(dirstat, lstdirstat):
- match = True
- break
- if not match:
- dirlst.append(dirstat)
- return not match
- else:
- followsym = False
-
- if (seen_dirs is None) and followsym:
- seen_dirs = []
- _add_dir_if_not_there(seen_dirs, path)
- for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
- if '.hg' in dirs:
- yield root # found a repository
- qroot = os.path.join(root, '.hg', 'patches')
- if os.path.isdir(os.path.join(qroot, '.hg')):
- yield qroot # we have a patch queue repo here
- if recurse:
- # avoid recursing inside the .hg directory
- dirs.remove('.hg')
- else:
- dirs[:] = [] # don't descend further
- elif followsym:
- newdirs = []
- for d in dirs:
- fname = os.path.join(root, d)
- if _add_dir_if_not_there(seen_dirs, fname):
- if os.path.islink(fname):
- for hgname in walkrepos(fname, True, seen_dirs):
- yield hgname
- else:
- newdirs.append(d)
- dirs[:] = newdirs
-
-_rcpath = None
-
-def os_rcpath():
- '''return default os-specific hgrc search path'''
- path = system_rcpath()
- path.extend(user_rcpath())
- path = [os.path.normpath(f) for f in path]
- return path
-
-def rcpath():
- '''return hgrc search path. if env var HGRCPATH is set, use it.
- for each item in path, if directory, use files ending in .rc,
- else use item.
- make HGRCPATH empty to only look in .hg/hgrc of current repo.
- if no HGRCPATH, use default os-specific path.'''
- global _rcpath
- if _rcpath is None:
- if 'HGRCPATH' in os.environ:
- _rcpath = []
- for p in os.environ['HGRCPATH'].split(os.pathsep):
- if not p: continue
- if os.path.isdir(p):
- for f, kind in osutil.listdir(p):
- if f.endswith('.rc'):
- _rcpath.append(os.path.join(p, f))
- else:
- _rcpath.append(p)
- else:
- _rcpath = os_rcpath()
- return _rcpath
-
-def bytecount(nbytes):
- '''return byte count formatted as readable string, with units'''
-
- units = (
- (100, 1<<30, _('%.0f GB')),
- (10, 1<<30, _('%.1f GB')),
- (1, 1<<30, _('%.2f GB')),
- (100, 1<<20, _('%.0f MB')),
- (10, 1<<20, _('%.1f MB')),
- (1, 1<<20, _('%.2f MB')),
- (100, 1<<10, _('%.0f KB')),
- (10, 1<<10, _('%.1f KB')),
- (1, 1<<10, _('%.2f KB')),
- (1, 1, _('%.0f bytes')),
- )
-
- for multiplier, divisor, format in units:
- if nbytes >= divisor * multiplier:
- return format % (nbytes / float(divisor))
- return units[-1][2] % nbytes
-
-def drop_scheme(scheme, path):
- sc = scheme + ':'
- if path.startswith(sc):
- path = path[len(sc):]
- if path.startswith('//'):
- path = path[2:]
- return path
-
-def uirepr(s):
- # Avoid double backslash in Windows path repr()
- return repr(s).replace('\\\\', '\\')
-
-def termwidth():
- if 'COLUMNS' in os.environ:
- try:
- return int(os.environ['COLUMNS'])
- except ValueError:
- pass
- try:
- import termios, array, fcntl
- for dev in (sys.stdout, sys.stdin):
- try:
- try:
- fd = dev.fileno()
- except AttributeError:
- continue
- if not os.isatty(fd):
- continue
- arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
- return array.array('h', arri)[1]
- except ValueError:
- pass
- except ImportError:
- pass
- return 80
-
-def wrap(line, hangindent, width=None):
- if width is None:
- width = termwidth() - 2
- padding = '\n' + ' ' * hangindent
- return padding.join(textwrap.wrap(line, width=width - hangindent))
-
-def iterlines(iterator):
- for chunk in iterator:
- for line in chunk.splitlines():
- yield line
diff --git a/sys/lib/python/mercurial/verify.py b/sys/lib/python/mercurial/verify.py
deleted file mode 100644
index 17daf7662..000000000
--- a/sys/lib/python/mercurial/verify.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# verify.py - repository integrity checking for Mercurial
-#
-# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from node import nullid, short
-from i18n import _
-import revlog, util, error
-
-def verify(repo):
- lock = repo.lock()
- try:
- return _verify(repo)
- finally:
- lock.release()
-
-def _verify(repo):
- mflinkrevs = {}
- filelinkrevs = {}
- filenodes = {}
- revisions = 0
- badrevs = set()
- errors = [0]
- warnings = [0]
- ui = repo.ui
- cl = repo.changelog
- mf = repo.manifest
-
- if not repo.cancopy():
- raise util.Abort(_("cannot verify bundle or remote repos"))
-
- def err(linkrev, msg, filename=None):
- if linkrev != None:
- badrevs.add(linkrev)
- else:
- linkrev = '?'
- msg = "%s: %s" % (linkrev, msg)
- if filename:
- msg = "%s@%s" % (filename, msg)
- ui.warn(" " + msg + "\n")
- errors[0] += 1
-
- def exc(linkrev, msg, inst, filename=None):
- if isinstance(inst, KeyboardInterrupt):
- ui.warn(_("interrupted"))
- raise
- err(linkrev, "%s: %s" % (msg, inst), filename)
-
- def warn(msg):
- ui.warn(msg + "\n")
- warnings[0] += 1
-
- def checklog(obj, name, linkrev):
- if not len(obj) and (havecl or havemf):
- err(linkrev, _("empty or missing %s") % name)
- return
-
- d = obj.checksize()
- if d[0]:
- err(None, _("data length off by %d bytes") % d[0], name)
- if d[1]:
- err(None, _("index contains %d extra bytes") % d[1], name)
-
- if obj.version != revlog.REVLOGV0:
- if not revlogv1:
- warn(_("warning: `%s' uses revlog format 1") % name)
- elif revlogv1:
- warn(_("warning: `%s' uses revlog format 0") % name)
-
- def checkentry(obj, i, node, seen, linkrevs, f):
- lr = obj.linkrev(obj.rev(node))
- if lr < 0 or (havecl and lr not in linkrevs):
- if lr < 0 or lr >= len(cl):
- msg = _("rev %d points to nonexistent changeset %d")
- else:
- msg = _("rev %d points to unexpected changeset %d")
- err(None, msg % (i, lr), f)
- if linkrevs:
- warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
- lr = None # can't be trusted
-
- try:
- p1, p2 = obj.parents(node)
- if p1 not in seen and p1 != nullid:
- err(lr, _("unknown parent 1 %s of %s") %
- (short(p1), short(n)), f)
- if p2 not in seen and p2 != nullid:
- err(lr, _("unknown parent 2 %s of %s") %
- (short(p2), short(p1)), f)
- except Exception, inst:
- exc(lr, _("checking parents of %s") % short(node), inst, f)
-
- if node in seen:
- err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f)
- seen[n] = i
- return lr
-
- revlogv1 = cl.version != revlog.REVLOGV0
- if ui.verbose or not revlogv1:
- ui.status(_("repository uses revlog format %d\n") %
- (revlogv1 and 1 or 0))
-
- havecl = len(cl) > 0
- havemf = len(mf) > 0
-
- ui.status(_("checking changesets\n"))
- seen = {}
- checklog(cl, "changelog", 0)
- for i in repo:
- n = cl.node(i)
- checkentry(cl, i, n, seen, [i], "changelog")
-
- try:
- changes = cl.read(n)
- mflinkrevs.setdefault(changes[0], []).append(i)
- for f in changes[3]:
- filelinkrevs.setdefault(f, []).append(i)
- except Exception, inst:
- exc(i, _("unpacking changeset %s") % short(n), inst)
-
- ui.status(_("checking manifests\n"))
- seen = {}
- checklog(mf, "manifest", 0)
- for i in mf:
- n = mf.node(i)
- lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
- if n in mflinkrevs:
- del mflinkrevs[n]
- else:
- err(lr, _("%s not in changesets") % short(n), "manifest")
-
- try:
- for f, fn in mf.readdelta(n).iteritems():
- if not f:
- err(lr, _("file without name in manifest"))
- elif f != "/dev/null":
- fns = filenodes.setdefault(f, {})
- if fn not in fns:
- fns[fn] = i
- except Exception, inst:
- exc(lr, _("reading manifest delta %s") % short(n), inst)
-
- ui.status(_("crosschecking files in changesets and manifests\n"))
-
- if havemf:
- for c,m in sorted([(c, m) for m in mflinkrevs for c in mflinkrevs[m]]):
- err(c, _("changeset refers to unknown manifest %s") % short(m))
- mflinkrevs = None # del is bad here due to scope issues
-
- for f in sorted(filelinkrevs):
- if f not in filenodes:
- lr = filelinkrevs[f][0]
- err(lr, _("in changeset but not in manifest"), f)
-
- if havecl:
- for f in sorted(filenodes):
- if f not in filelinkrevs:
- try:
- fl = repo.file(f)
- lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
- except:
- lr = None
- err(lr, _("in manifest but not in changeset"), f)
-
- ui.status(_("checking files\n"))
-
- storefiles = set()
- for f, f2, size in repo.store.datafiles():
- if not f:
- err(None, _("cannot decode filename '%s'") % f2)
- elif size > 0:
- storefiles.add(f)
-
- files = sorted(set(filenodes) | set(filelinkrevs))
- for f in files:
- try:
- linkrevs = filelinkrevs[f]
- except KeyError:
- # in manifest but not in changelog
- linkrevs = []
-
- if linkrevs:
- lr = linkrevs[0]
- else:
- lr = None
-
- try:
- fl = repo.file(f)
- except error.RevlogError, e:
- err(lr, _("broken revlog! (%s)") % e, f)
- continue
-
- for ff in fl.files():
- try:
- storefiles.remove(ff)
- except KeyError:
- err(lr, _("missing revlog!"), ff)
-
- checklog(fl, f, lr)
- seen = {}
- for i in fl:
- revisions += 1
- n = fl.node(i)
- lr = checkentry(fl, i, n, seen, linkrevs, f)
- if f in filenodes:
- if havemf and n not in filenodes[f]:
- err(lr, _("%s not in manifests") % (short(n)), f)
- else:
- del filenodes[f][n]
-
- # verify contents
- try:
- t = fl.read(n)
- rp = fl.renamed(n)
- if len(t) != fl.size(i):
- if len(fl.revision(n)) != fl.size(i):
- err(lr, _("unpacked size is %s, %s expected") %
- (len(t), fl.size(i)), f)
- except Exception, inst:
- exc(lr, _("unpacking %s") % short(n), inst, f)
-
- # check renames
- try:
- if rp:
- fl2 = repo.file(rp[0])
- if not len(fl2):
- err(lr, _("empty or missing copy source revlog %s:%s")
- % (rp[0], short(rp[1])), f)
- elif rp[1] == nullid:
- ui.note(_("warning: %s@%s: copy source"
- " revision is nullid %s:%s\n")
- % (f, lr, rp[0], short(rp[1])))
- else:
- fl2.rev(rp[1])
- except Exception, inst:
- exc(lr, _("checking rename of %s") % short(n), inst, f)
-
- # cross-check
- if f in filenodes:
- fns = [(mf.linkrev(l), n) for n,l in filenodes[f].iteritems()]
- for lr, node in sorted(fns):
- err(lr, _("%s in manifests not found") % short(node), f)
-
- for f in storefiles:
- warn(_("warning: orphan revlog '%s'") % f)
-
- ui.status(_("%d files, %d changesets, %d total revisions\n") %
- (len(files), len(cl), revisions))
- if warnings[0]:
- ui.warn(_("%d warnings encountered!\n") % warnings[0])
- if errors[0]:
- ui.warn(_("%d integrity errors encountered!\n") % errors[0])
- if badrevs:
- ui.warn(_("(first damaged changeset appears to be %d)\n")
- % min(badrevs))
- return 1
diff --git a/sys/lib/python/mercurial/win32.py b/sys/lib/python/mercurial/win32.py
deleted file mode 100644
index 08e35b011..000000000
--- a/sys/lib/python/mercurial/win32.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# win32.py - utility functions that use win32 API
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-"""Utility functions that use win32 API.
-
-Mark Hammond's win32all package allows better functionality on
-Windows. This module overrides definitions in util.py. If not
-available, import of this module will fail, and generic code will be
-used.
-"""
-
-import win32api
-
-import errno, os, sys, pywintypes, win32con, win32file, win32process
-import winerror
-import osutil, encoding
-from win32com.shell import shell, shellcon
-
-def os_link(src, dst):
- try:
- win32file.CreateHardLink(dst, src)
- # CreateHardLink sometimes succeeds on mapped drives but
- # following nlinks() returns 1. Check it now and bail out.
- if nlinks(src) < 2:
- try:
- win32file.DeleteFile(dst)
- except:
- pass
- # Fake hardlinking error
- raise OSError(errno.EINVAL, 'Hardlinking not supported')
- except pywintypes.error, details:
- raise OSError(errno.EINVAL, 'target implements hardlinks improperly')
- except NotImplementedError: # Another fake error win Win98
- raise OSError(errno.EINVAL, 'Hardlinking not supported')
-
-def nlinks(pathname):
- """Return number of hardlinks for the given file."""
- try:
- fh = win32file.CreateFile(pathname,
- win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
- None, win32file.OPEN_EXISTING, 0, None)
- res = win32file.GetFileInformationByHandle(fh)
- fh.Close()
- return res[7]
- except pywintypes.error:
- return os.lstat(pathname).st_nlink
-
-def testpid(pid):
- '''return True if pid is still running or unable to
- determine, False otherwise'''
- try:
- handle = win32api.OpenProcess(
- win32con.PROCESS_QUERY_INFORMATION, False, pid)
- if handle:
- status = win32process.GetExitCodeProcess(handle)
- return status == win32con.STILL_ACTIVE
- except pywintypes.error, details:
- return details[0] != winerror.ERROR_INVALID_PARAMETER
- return True
-
-def lookup_reg(key, valname=None, scope=None):
- ''' Look up a key/value name in the Windows registry.
-
- valname: value name. If unspecified, the default value for the key
- is used.
- scope: optionally specify scope for registry lookup, this can be
- a sequence of scopes to look up in order. Default (CURRENT_USER,
- LOCAL_MACHINE).
- '''
- try:
- from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, \
- QueryValueEx, OpenKey
- except ImportError:
- return None
-
- if scope is None:
- scope = (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE)
- elif not isinstance(scope, (list, tuple)):
- scope = (scope,)
- for s in scope:
- try:
- val = QueryValueEx(OpenKey(s, key), valname)[0]
- # never let a Unicode string escape into the wild
- return encoding.tolocal(val.encode('UTF-8'))
- except EnvironmentError:
- pass
-
-def system_rcpath_win32():
- '''return default os-specific hgrc search path'''
- proc = win32api.GetCurrentProcess()
- try:
- # This will fail on windows < NT
- filename = win32process.GetModuleFileNameEx(proc, 0)
- except:
- filename = win32api.GetModuleFileName(0)
- # Use mercurial.ini found in directory with hg.exe
- progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
- if os.path.isfile(progrc):
- return [progrc]
- # else look for a system rcpath in the registry
- try:
- value = win32api.RegQueryValue(
- win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Mercurial')
- rcpath = []
- for p in value.split(os.pathsep):
- if p.lower().endswith('mercurial.ini'):
- rcpath.append(p)
- elif os.path.isdir(p):
- for f, kind in osutil.listdir(p):
- if f.endswith('.rc'):
- rcpath.append(os.path.join(p, f))
- return rcpath
- except pywintypes.error:
- return []
-
-def user_rcpath_win32():
- '''return os-specific hgrc search path to the user dir'''
- userdir = os.path.expanduser('~')
- if sys.getwindowsversion()[3] != 2 and userdir == '~':
- # We are on win < nt: fetch the APPDATA directory location and use
- # the parent directory as the user home dir.
- appdir = shell.SHGetPathFromIDList(
- shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA))
- userdir = os.path.dirname(appdir)
- return [os.path.join(userdir, 'mercurial.ini'),
- os.path.join(userdir, '.hgrc')]
-
-def getuser():
- '''return name of current user'''
- return win32api.GetUserName()
-
-def set_signal_handler_win32():
- """Register a termination handler for console events including
- CTRL+C. python signal handlers do not work well with socket
- operations.
- """
- def handler(event):
- win32process.ExitProcess(1)
- win32api.SetConsoleCtrlHandler(handler)
-
diff --git a/sys/lib/python/mercurial/windows.py b/sys/lib/python/mercurial/windows.py
deleted file mode 100644
index 5a903f1d6..000000000
--- a/sys/lib/python/mercurial/windows.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# windows.py - Windows utility function implementations for Mercurial
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2, incorporated herein by reference.
-
-from i18n import _
-import osutil, error
-import errno, msvcrt, os, re, sys
-
-nulldev = 'NUL:'
-umask = 002
-
-# wrap osutil.posixfile to provide friendlier exceptions
-def posixfile(name, mode='r', buffering=-1):
- try:
- return osutil.posixfile(name, mode, buffering)
- except WindowsError, err:
- raise IOError(err.errno, err.strerror)
-posixfile.__doc__ = osutil.posixfile.__doc__
-
-class winstdout(object):
- '''stdout on windows misbehaves if sent through a pipe'''
-
- def __init__(self, fp):
- self.fp = fp
-
- def __getattr__(self, key):
- return getattr(self.fp, key)
-
- def close(self):
- try:
- self.fp.close()
- except: pass
-
- def write(self, s):
- try:
- # This is workaround for "Not enough space" error on
- # writing large size of data to console.
- limit = 16000
- l = len(s)
- start = 0
- self.softspace = 0;
- while start < l:
- end = start + limit
- self.fp.write(s[start:end])
- start = end
- except IOError, inst:
- if inst.errno != 0: raise
- self.close()
- raise IOError(errno.EPIPE, 'Broken pipe')
-
- def flush(self):
- try:
- return self.fp.flush()
- except IOError, inst:
- if inst.errno != errno.EINVAL: raise
- self.close()
- raise IOError(errno.EPIPE, 'Broken pipe')
-
-sys.stdout = winstdout(sys.stdout)
-
-def _is_win_9x():
- '''return true if run on windows 95, 98 or me.'''
- try:
- return sys.getwindowsversion()[3] == 1
- except AttributeError:
- return 'command' in os.environ.get('comspec', '')
-
-def openhardlinks():
- return not _is_win_9x() and "win32api" in globals()
-
-def system_rcpath():
- try:
- return system_rcpath_win32()
- except:
- return [r'c:\mercurial\mercurial.ini']
-
-def user_rcpath():
- '''return os-specific hgrc search path to the user dir'''
- try:
- path = user_rcpath_win32()
- except:
- home = os.path.expanduser('~')
- path = [os.path.join(home, 'mercurial.ini'),
- os.path.join(home, '.hgrc')]
- userprofile = os.environ.get('USERPROFILE')
- if userprofile:
- path.append(os.path.join(userprofile, 'mercurial.ini'))
- path.append(os.path.join(userprofile, '.hgrc'))
- return path
-
-def parse_patch_output(output_line):
- """parses the output produced by patch and returns the filename"""
- pf = output_line[14:]
- if pf[0] == '`':
- pf = pf[1:-1] # Remove the quotes
- return pf
-
-def sshargs(sshcmd, host, user, port):
- '''Build argument list for ssh or Plink'''
- pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
- args = user and ("%s@%s" % (user, host)) or host
- return port and ("%s %s %s" % (args, pflag, port)) or args
-
-def testpid(pid):
- '''return False if pid dead, True if running or not known'''
- return True
-
-def set_flags(f, l, x):
- pass
-
-def set_binary(fd):
- # When run without console, pipes may expose invalid
- # fileno(), usually set to -1.
- if hasattr(fd, 'fileno') and fd.fileno() >= 0:
- msvcrt.setmode(fd.fileno(), os.O_BINARY)
-
-def pconvert(path):
- return '/'.join(path.split(os.sep))
-
-def localpath(path):
- return path.replace('/', '\\')
-
-def normpath(path):
- return pconvert(os.path.normpath(path))
-
-def realpath(path):
- '''
- Returns the true, canonical file system path equivalent to the given
- path.
- '''
- # TODO: There may be a more clever way to do this that also handles other,
- # less common file systems.
- return os.path.normpath(os.path.normcase(os.path.realpath(path)))
-
-def samestat(s1, s2):
- return False
-
-# A sequence of backslashes is special iff it precedes a double quote:
-# - if there's an even number of backslashes, the double quote is not
-# quoted (i.e. it ends the quoted region)
-# - if there's an odd number of backslashes, the double quote is quoted
-# - in both cases, every pair of backslashes is unquoted into a single
-# backslash
-# (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
-# So, to quote a string, we must surround it in double quotes, double
-# the number of backslashes that preceed double quotes and add another
-# backslash before every double quote (being careful with the double
-# quote we've appended to the end)
-_quotere = None
-def shellquote(s):
- global _quotere
- if _quotere is None:
- _quotere = re.compile(r'(\\*)("|\\$)')
- return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
-
-def quotecommand(cmd):
- """Build a command string suitable for os.popen* calls."""
- # The extra quotes are needed because popen* runs the command
- # through the current COMSPEC. cmd.exe suppress enclosing quotes.
- return '"' + cmd + '"'
-
-def popen(command, mode='r'):
- # Work around "popen spawned process may not write to stdout
- # under windows"
- # http://bugs.python.org/issue1366
- command += " 2> %s" % nulldev
- return os.popen(quotecommand(command), mode)
-
-def explain_exit(code):
- return _("exited with status %d") % code, code
-
-# if you change this stub into a real check, please try to implement the
-# username and groupname functions above, too.
-def isowner(st):
- return True
-
-def find_exe(command):
- '''Find executable for command searching like cmd.exe does.
- If command is a basename then PATH is searched for command.
- PATH isn't searched if command is an absolute or relative path.
- An extension from PATHEXT is found and added if not present.
- If command isn't found None is returned.'''
- pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
- pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
- if os.path.splitext(command)[1].lower() in pathexts:
- pathexts = ['']
-
- def findexisting(pathcommand):
- 'Will append extension (if needed) and return existing file'
- for ext in pathexts:
- executable = pathcommand + ext
- if os.path.exists(executable):
- return executable
- return None
-
- if os.sep in command:
- return findexisting(command)
-
- for path in os.environ.get('PATH', '').split(os.pathsep):
- executable = findexisting(os.path.join(path, command))
- if executable is not None:
- return executable
- return None
-
-def set_signal_handler():
- try:
- set_signal_handler_win32()
- except NameError:
- pass
-
-def statfiles(files):
- '''Stat each file in files and yield stat or None if file does not exist.
- Cluster and cache stat per directory to minimize number of OS stat calls.'''
- ncase = os.path.normcase
- sep = os.sep
- dircache = {} # dirname -> filename -> status | None if file does not exist
- for nf in files:
- nf = ncase(nf)
- dir, base = os.path.split(nf)
- if not dir:
- dir = '.'
- cache = dircache.get(dir, None)
- if cache is None:
- try:
- dmap = dict([(ncase(n), s)
- for n, k, s in osutil.listdir(dir, True)])
- except OSError, err:
- # handle directory not found in Python version prior to 2.5
- # Python <= 2.4 returns native Windows code 3 in errno
- # Python >= 2.5 returns ENOENT and adds winerror field
- # EINVAL is raised if dir is not a directory.
- if err.errno not in (3, errno.ENOENT, errno.EINVAL,
- errno.ENOTDIR):
- raise
- dmap = {}
- cache = dircache.setdefault(dir, dmap)
- yield cache.get(base, None)
-
-def getuser():
- '''return name of current user'''
- raise error.Abort(_('user name not available - set USERNAME '
- 'environment variable'))
-
-def username(uid=None):
- """Return the name of the user with the given uid.
-
- If uid is None, return the name of the current user."""
- return None
-
-def groupname(gid=None):
- """Return the name of the group with the given gid.
-
- If gid is None, return the name of the current group."""
- return None
-
-def _removedirs(name):
- """special version of os.removedirs that does not remove symlinked
- directories or junction points if they actually contain files"""
- if osutil.listdir(name):
- return
- os.rmdir(name)
- head, tail = os.path.split(name)
- if not tail:
- head, tail = os.path.split(head)
- while head and tail:
- try:
- if osutil.listdir(name):
- return
- os.rmdir(head)
- except:
- break
- head, tail = os.path.split(head)
-
-def unlink(f):
- """unlink and remove the directory if it is empty"""
- os.unlink(f)
- # try removing directories that might now be empty
- try:
- _removedirs(os.path.dirname(f))
- except OSError:
- pass
-
-try:
- # override functions with win32 versions if possible
- from win32 import *
-except ImportError:
- pass
-
-expandglobs = True
diff --git a/sys/lib/python/mhlib.py b/sys/lib/python/mhlib.py
deleted file mode 100644
index 1a9037521..000000000
--- a/sys/lib/python/mhlib.py
+++ /dev/null
@@ -1,1001 +0,0 @@
-"""MH interface -- purely object-oriented (well, almost)
-
-Executive summary:
-
-import mhlib
-
-mh = mhlib.MH() # use default mailbox directory and profile
-mh = mhlib.MH(mailbox) # override mailbox location (default from profile)
-mh = mhlib.MH(mailbox, profile) # override mailbox and profile
-
-mh.error(format, ...) # print error message -- can be overridden
-s = mh.getprofile(key) # profile entry (None if not set)
-path = mh.getpath() # mailbox pathname
-name = mh.getcontext() # name of current folder
-mh.setcontext(name) # set name of current folder
-
-list = mh.listfolders() # names of top-level folders
-list = mh.listallfolders() # names of all folders, including subfolders
-list = mh.listsubfolders(name) # direct subfolders of given folder
-list = mh.listallsubfolders(name) # all subfolders of given folder
-
-mh.makefolder(name) # create new folder
-mh.deletefolder(name) # delete folder -- must have no subfolders
-
-f = mh.openfolder(name) # new open folder object
-
-f.error(format, ...) # same as mh.error(format, ...)
-path = f.getfullname() # folder's full pathname
-path = f.getsequencesfilename() # full pathname of folder's sequences file
-path = f.getmessagefilename(n) # full pathname of message n in folder
-
-list = f.listmessages() # list of messages in folder (as numbers)
-n = f.getcurrent() # get current message
-f.setcurrent(n) # set current message
-list = f.parsesequence(seq) # parse msgs syntax into list of messages
-n = f.getlast() # get last message (0 if no messagse)
-f.setlast(n) # set last message (internal use only)
-
-dict = f.getsequences() # dictionary of sequences in folder {name: list}
-f.putsequences(dict) # write sequences back to folder
-
-f.createmessage(n, fp) # add message from file f as number n
-f.removemessages(list) # remove messages in list from folder
-f.refilemessages(list, tofolder) # move messages in list to other folder
-f.movemessage(n, tofolder, ton) # move one message to a given destination
-f.copymessage(n, tofolder, ton) # copy one message to a given destination
-
-m = f.openmessage(n) # new open message object (costs a file descriptor)
-m is a derived class of mimetools.Message(rfc822.Message), with:
-s = m.getheadertext() # text of message's headers
-s = m.getheadertext(pred) # text of message's headers, filtered by pred
-s = m.getbodytext() # text of message's body, decoded
-s = m.getbodytext(0) # text of message's body, not decoded
-"""
-
-# XXX To do, functionality:
-# - annotate messages
-# - send messages
-#
-# XXX To do, organization:
-# - move IntSet to separate file
-# - move most Message functionality to module mimetools
-
-
-# Customizable defaults
-
-MH_PROFILE = '~/.mh_profile'
-PATH = '~/Mail'
-MH_SEQUENCES = '.mh_sequences'
-FOLDER_PROTECT = 0700
-
-
-# Imported modules
-
-import os
-import sys
-import re
-import mimetools
-import multifile
-import shutil
-from bisect import bisect
-
-__all__ = ["MH","Error","Folder","Message"]
-
-# Exported constants
-
-class Error(Exception):
- pass
-
-
-class MH:
- """Class representing a particular collection of folders.
- Optional constructor arguments are the pathname for the directory
- containing the collection, and the MH profile to use.
- If either is omitted or empty a default is used; the default
- directory is taken from the MH profile if it is specified there."""
-
- def __init__(self, path = None, profile = None):
- """Constructor."""
- if profile is None: profile = MH_PROFILE
- self.profile = os.path.expanduser(profile)
- if path is None: path = self.getprofile('Path')
- if not path: path = PATH
- if not os.path.isabs(path) and path[0] != '~':
- path = os.path.join('~', path)
- path = os.path.expanduser(path)
- if not os.path.isdir(path): raise Error, 'MH() path not found'
- self.path = path
-
- def __repr__(self):
- """String representation."""
- return 'MH(%r, %r)' % (self.path, self.profile)
-
- def error(self, msg, *args):
- """Routine to print an error. May be overridden by a derived class."""
- sys.stderr.write('MH error: %s\n' % (msg % args))
-
- def getprofile(self, key):
- """Return a profile entry, None if not found."""
- return pickline(self.profile, key)
-
- def getpath(self):
- """Return the path (the name of the collection's directory)."""
- return self.path
-
- def getcontext(self):
- """Return the name of the current folder."""
- context = pickline(os.path.join(self.getpath(), 'context'),
- 'Current-Folder')
- if not context: context = 'inbox'
- return context
-
- def setcontext(self, context):
- """Set the name of the current folder."""
- fn = os.path.join(self.getpath(), 'context')
- f = open(fn, "w")
- f.write("Current-Folder: %s\n" % context)
- f.close()
-
- def listfolders(self):
- """Return the names of the top-level folders."""
- folders = []
- path = self.getpath()
- for name in os.listdir(path):
- fullname = os.path.join(path, name)
- if os.path.isdir(fullname):
- folders.append(name)
- folders.sort()
- return folders
-
- def listsubfolders(self, name):
- """Return the names of the subfolders in a given folder
- (prefixed with the given folder name)."""
- fullname = os.path.join(self.path, name)
- # Get the link count so we can avoid listing folders
- # that have no subfolders.
- nlinks = os.stat(fullname).st_nlink
- if nlinks <= 2:
- return []
- subfolders = []
- subnames = os.listdir(fullname)
- for subname in subnames:
- fullsubname = os.path.join(fullname, subname)
- if os.path.isdir(fullsubname):
- name_subname = os.path.join(name, subname)
- subfolders.append(name_subname)
- # Stop looking for subfolders when
- # we've seen them all
- nlinks = nlinks - 1
- if nlinks <= 2:
- break
- subfolders.sort()
- return subfolders
-
- def listallfolders(self):
- """Return the names of all folders and subfolders, recursively."""
- return self.listallsubfolders('')
-
- def listallsubfolders(self, name):
- """Return the names of subfolders in a given folder, recursively."""
- fullname = os.path.join(self.path, name)
- # Get the link count so we can avoid listing folders
- # that have no subfolders.
- nlinks = os.stat(fullname).st_nlink
- if nlinks <= 2:
- return []
- subfolders = []
- subnames = os.listdir(fullname)
- for subname in subnames:
- if subname[0] == ',' or isnumeric(subname): continue
- fullsubname = os.path.join(fullname, subname)
- if os.path.isdir(fullsubname):
- name_subname = os.path.join(name, subname)
- subfolders.append(name_subname)
- if not os.path.islink(fullsubname):
- subsubfolders = self.listallsubfolders(
- name_subname)
- subfolders = subfolders + subsubfolders
- # Stop looking for subfolders when
- # we've seen them all
- nlinks = nlinks - 1
- if nlinks <= 2:
- break
- subfolders.sort()
- return subfolders
-
- def openfolder(self, name):
- """Return a new Folder object for the named folder."""
- return Folder(self, name)
-
- def makefolder(self, name):
- """Create a new folder (or raise os.error if it cannot be created)."""
- protect = pickline(self.profile, 'Folder-Protect')
- if protect and isnumeric(protect):
- mode = int(protect, 8)
- else:
- mode = FOLDER_PROTECT
- os.mkdir(os.path.join(self.getpath(), name), mode)
-
- def deletefolder(self, name):
- """Delete a folder. This removes files in the folder but not
- subdirectories. Raise os.error if deleting the folder itself fails."""
- fullname = os.path.join(self.getpath(), name)
- for subname in os.listdir(fullname):
- fullsubname = os.path.join(fullname, subname)
- try:
- os.unlink(fullsubname)
- except os.error:
- self.error('%s not deleted, continuing...' %
- fullsubname)
- os.rmdir(fullname)
-
-
-numericprog = re.compile('^[1-9][0-9]*$')
-def isnumeric(str):
- return numericprog.match(str) is not None
-
-class Folder:
- """Class representing a particular folder."""
-
- def __init__(self, mh, name):
- """Constructor."""
- self.mh = mh
- self.name = name
- if not os.path.isdir(self.getfullname()):
- raise Error, 'no folder %s' % name
-
- def __repr__(self):
- """String representation."""
- return 'Folder(%r, %r)' % (self.mh, self.name)
-
- def error(self, *args):
- """Error message handler."""
- self.mh.error(*args)
-
- def getfullname(self):
- """Return the full pathname of the folder."""
- return os.path.join(self.mh.path, self.name)
-
- def getsequencesfilename(self):
- """Return the full pathname of the folder's sequences file."""
- return os.path.join(self.getfullname(), MH_SEQUENCES)
-
- def getmessagefilename(self, n):
- """Return the full pathname of a message in the folder."""
- return os.path.join(self.getfullname(), str(n))
-
- def listsubfolders(self):
- """Return list of direct subfolders."""
- return self.mh.listsubfolders(self.name)
-
- def listallsubfolders(self):
- """Return list of all subfolders."""
- return self.mh.listallsubfolders(self.name)
-
- def listmessages(self):
- """Return the list of messages currently present in the folder.
- As a side effect, set self.last to the last message (or 0)."""
- messages = []
- match = numericprog.match
- append = messages.append
- for name in os.listdir(self.getfullname()):
- if match(name):
- append(name)
- messages = map(int, messages)
- messages.sort()
- if messages:
- self.last = messages[-1]
- else:
- self.last = 0
- return messages
-
- def getsequences(self):
- """Return the set of sequences for the folder."""
- sequences = {}
- fullname = self.getsequencesfilename()
- try:
- f = open(fullname, 'r')
- except IOError:
- return sequences
- while 1:
- line = f.readline()
- if not line: break
- fields = line.split(':')
- if len(fields) != 2:
- self.error('bad sequence in %s: %s' %
- (fullname, line.strip()))
- key = fields[0].strip()
- value = IntSet(fields[1].strip(), ' ').tolist()
- sequences[key] = value
- return sequences
-
- def putsequences(self, sequences):
- """Write the set of sequences back to the folder."""
- fullname = self.getsequencesfilename()
- f = None
- for key, seq in sequences.iteritems():
- s = IntSet('', ' ')
- s.fromlist(seq)
- if not f: f = open(fullname, 'w')
- f.write('%s: %s\n' % (key, s.tostring()))
- if not f:
- try:
- os.unlink(fullname)
- except os.error:
- pass
- else:
- f.close()
-
- def getcurrent(self):
- """Return the current message. Raise Error when there is none."""
- seqs = self.getsequences()
- try:
- return max(seqs['cur'])
- except (ValueError, KeyError):
- raise Error, "no cur message"
-
- def setcurrent(self, n):
- """Set the current message."""
- updateline(self.getsequencesfilename(), 'cur', str(n), 0)
-
- def parsesequence(self, seq):
- """Parse an MH sequence specification into a message list.
- Attempt to mimic mh-sequence(5) as close as possible.
- Also attempt to mimic observed behavior regarding which
- conditions cause which error messages."""
- # XXX Still not complete (see mh-format(5)).
- # Missing are:
- # - 'prev', 'next' as count
- # - Sequence-Negation option
- all = self.listmessages()
- # Observed behavior: test for empty folder is done first
- if not all:
- raise Error, "no messages in %s" % self.name
- # Common case first: all is frequently the default
- if seq == 'all':
- return all
- # Test for X:Y before X-Y because 'seq:-n' matches both
- i = seq.find(':')
- if i >= 0:
- head, dir, tail = seq[:i], '', seq[i+1:]
- if tail[:1] in '-+':
- dir, tail = tail[:1], tail[1:]
- if not isnumeric(tail):
- raise Error, "bad message list %s" % seq
- try:
- count = int(tail)
- except (ValueError, OverflowError):
- # Can't use sys.maxint because of i+count below
- count = len(all)
- try:
- anchor = self._parseindex(head, all)
- except Error, msg:
- seqs = self.getsequences()
- if not head in seqs:
- if not msg:
- msg = "bad message list %s" % seq
- raise Error, msg, sys.exc_info()[2]
- msgs = seqs[head]
- if not msgs:
- raise Error, "sequence %s empty" % head
- if dir == '-':
- return msgs[-count:]
- else:
- return msgs[:count]
- else:
- if not dir:
- if head in ('prev', 'last'):
- dir = '-'
- if dir == '-':
- i = bisect(all, anchor)
- return all[max(0, i-count):i]
- else:
- i = bisect(all, anchor-1)
- return all[i:i+count]
- # Test for X-Y next
- i = seq.find('-')
- if i >= 0:
- begin = self._parseindex(seq[:i], all)
- end = self._parseindex(seq[i+1:], all)
- i = bisect(all, begin-1)
- j = bisect(all, end)
- r = all[i:j]
- if not r:
- raise Error, "bad message list %s" % seq
- return r
- # Neither X:Y nor X-Y; must be a number or a (pseudo-)sequence
- try:
- n = self._parseindex(seq, all)
- except Error, msg:
- seqs = self.getsequences()
- if not seq in seqs:
- if not msg:
- msg = "bad message list %s" % seq
- raise Error, msg
- return seqs[seq]
- else:
- if n not in all:
- if isnumeric(seq):
- raise Error, "message %d doesn't exist" % n
- else:
- raise Error, "no %s message" % seq
- else:
- return [n]
-
- def _parseindex(self, seq, all):
- """Internal: parse a message number (or cur, first, etc.)."""
- if isnumeric(seq):
- try:
- return int(seq)
- except (OverflowError, ValueError):
- return sys.maxint
- if seq in ('cur', '.'):
- return self.getcurrent()
- if seq == 'first':
- return all[0]
- if seq == 'last':
- return all[-1]
- if seq == 'next':
- n = self.getcurrent()
- i = bisect(all, n)
- try:
- return all[i]
- except IndexError:
- raise Error, "no next message"
- if seq == 'prev':
- n = self.getcurrent()
- i = bisect(all, n-1)
- if i == 0:
- raise Error, "no prev message"
- try:
- return all[i-1]
- except IndexError:
- raise Error, "no prev message"
- raise Error, None
-
- def openmessage(self, n):
- """Open a message -- returns a Message object."""
- return Message(self, n)
-
- def removemessages(self, list):
- """Remove one or more messages -- may raise os.error."""
- errors = []
- deleted = []
- for n in list:
- path = self.getmessagefilename(n)
- commapath = self.getmessagefilename(',' + str(n))
- try:
- os.unlink(commapath)
- except os.error:
- pass
- try:
- os.rename(path, commapath)
- except os.error, msg:
- errors.append(msg)
- else:
- deleted.append(n)
- if deleted:
- self.removefromallsequences(deleted)
- if errors:
- if len(errors) == 1:
- raise os.error, errors[0]
- else:
- raise os.error, ('multiple errors:', errors)
-
- def refilemessages(self, list, tofolder, keepsequences=0):
- """Refile one or more messages -- may raise os.error.
- 'tofolder' is an open folder object."""
- errors = []
- refiled = {}
- for n in list:
- ton = tofolder.getlast() + 1
- path = self.getmessagefilename(n)
- topath = tofolder.getmessagefilename(ton)
- try:
- os.rename(path, topath)
- except os.error:
- # Try copying
- try:
- shutil.copy2(path, topath)
- os.unlink(path)
- except (IOError, os.error), msg:
- errors.append(msg)
- try:
- os.unlink(topath)
- except os.error:
- pass
- continue
- tofolder.setlast(ton)
- refiled[n] = ton
- if refiled:
- if keepsequences:
- tofolder._copysequences(self, refiled.items())
- self.removefromallsequences(refiled.keys())
- if errors:
- if len(errors) == 1:
- raise os.error, errors[0]
- else:
- raise os.error, ('multiple errors:', errors)
-
- def _copysequences(self, fromfolder, refileditems):
- """Helper for refilemessages() to copy sequences."""
- fromsequences = fromfolder.getsequences()
- tosequences = self.getsequences()
- changed = 0
- for name, seq in fromsequences.items():
- try:
- toseq = tosequences[name]
- new = 0
- except KeyError:
- toseq = []
- new = 1
- for fromn, ton in refileditems:
- if fromn in seq:
- toseq.append(ton)
- changed = 1
- if new and toseq:
- tosequences[name] = toseq
- if changed:
- self.putsequences(tosequences)
-
- def movemessage(self, n, tofolder, ton):
- """Move one message over a specific destination message,
- which may or may not already exist."""
- path = self.getmessagefilename(n)
- # Open it to check that it exists
- f = open(path)
- f.close()
- del f
- topath = tofolder.getmessagefilename(ton)
- backuptopath = tofolder.getmessagefilename(',%d' % ton)
- try:
- os.rename(topath, backuptopath)
- except os.error:
- pass
- try:
- os.rename(path, topath)
- except os.error:
- # Try copying
- ok = 0
- try:
- tofolder.setlast(None)
- shutil.copy2(path, topath)
- ok = 1
- finally:
- if not ok:
- try:
- os.unlink(topath)
- except os.error:
- pass
- os.unlink(path)
- self.removefromallsequences([n])
-
- def copymessage(self, n, tofolder, ton):
- """Copy one message over a specific destination message,
- which may or may not already exist."""
- path = self.getmessagefilename(n)
- # Open it to check that it exists
- f = open(path)
- f.close()
- del f
- topath = tofolder.getmessagefilename(ton)
- backuptopath = tofolder.getmessagefilename(',%d' % ton)
- try:
- os.rename(topath, backuptopath)
- except os.error:
- pass
- ok = 0
- try:
- tofolder.setlast(None)
- shutil.copy2(path, topath)
- ok = 1
- finally:
- if not ok:
- try:
- os.unlink(topath)
- except os.error:
- pass
-
- def createmessage(self, n, txt):
- """Create a message, with text from the open file txt."""
- path = self.getmessagefilename(n)
- backuppath = self.getmessagefilename(',%d' % n)
- try:
- os.rename(path, backuppath)
- except os.error:
- pass
- ok = 0
- BUFSIZE = 16*1024
- try:
- f = open(path, "w")
- while 1:
- buf = txt.read(BUFSIZE)
- if not buf:
- break
- f.write(buf)
- f.close()
- ok = 1
- finally:
- if not ok:
- try:
- os.unlink(path)
- except os.error:
- pass
-
- def removefromallsequences(self, list):
- """Remove one or more messages from all sequences (including last)
- -- but not from 'cur'!!!"""
- if hasattr(self, 'last') and self.last in list:
- del self.last
- sequences = self.getsequences()
- changed = 0
- for name, seq in sequences.items():
- if name == 'cur':
- continue
- for n in list:
- if n in seq:
- seq.remove(n)
- changed = 1
- if not seq:
- del sequences[name]
- if changed:
- self.putsequences(sequences)
-
- def getlast(self):
- """Return the last message number."""
- if not hasattr(self, 'last'):
- self.listmessages() # Set self.last
- return self.last
-
- def setlast(self, last):
- """Set the last message number."""
- if last is None:
- if hasattr(self, 'last'):
- del self.last
- else:
- self.last = last
-
-class Message(mimetools.Message):
-
- def __init__(self, f, n, fp = None):
- """Constructor."""
- self.folder = f
- self.number = n
- if fp is None:
- path = f.getmessagefilename(n)
- fp = open(path, 'r')
- mimetools.Message.__init__(self, fp)
-
- def __repr__(self):
- """String representation."""
- return 'Message(%s, %s)' % (repr(self.folder), self.number)
-
- def getheadertext(self, pred = None):
- """Return the message's header text as a string. If an
- argument is specified, it is used as a filter predicate to
- decide which headers to return (its argument is the header
- name converted to lower case)."""
- if pred is None:
- return ''.join(self.headers)
- headers = []
- hit = 0
- for line in self.headers:
- if not line[0].isspace():
- i = line.find(':')
- if i > 0:
- hit = pred(line[:i].lower())
- if hit: headers.append(line)
- return ''.join(headers)
-
- def getbodytext(self, decode = 1):
- """Return the message's body text as string. This undoes a
- Content-Transfer-Encoding, but does not interpret other MIME
- features (e.g. multipart messages). To suppress decoding,
- pass 0 as an argument."""
- self.fp.seek(self.startofbody)
- encoding = self.getencoding()
- if not decode or encoding in ('', '7bit', '8bit', 'binary'):
- return self.fp.read()
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- output = StringIO()
- mimetools.decode(self.fp, output, encoding)
- return output.getvalue()
-
- def getbodyparts(self):
- """Only for multipart messages: return the message's body as a
- list of SubMessage objects. Each submessage object behaves
- (almost) as a Message object."""
- if self.getmaintype() != 'multipart':
- raise Error, 'Content-Type is not multipart/*'
- bdry = self.getparam('boundary')
- if not bdry:
- raise Error, 'multipart/* without boundary param'
- self.fp.seek(self.startofbody)
- mf = multifile.MultiFile(self.fp)
- mf.push(bdry)
- parts = []
- while mf.next():
- n = "%s.%r" % (self.number, 1 + len(parts))
- part = SubMessage(self.folder, n, mf)
- parts.append(part)
- mf.pop()
- return parts
-
- def getbody(self):
- """Return body, either a string or a list of messages."""
- if self.getmaintype() == 'multipart':
- return self.getbodyparts()
- else:
- return self.getbodytext()
-
-
-class SubMessage(Message):
-
- def __init__(self, f, n, fp):
- """Constructor."""
- Message.__init__(self, f, n, fp)
- if self.getmaintype() == 'multipart':
- self.body = Message.getbodyparts(self)
- else:
- self.body = Message.getbodytext(self)
- self.bodyencoded = Message.getbodytext(self, decode=0)
- # XXX If this is big, should remember file pointers
-
- def __repr__(self):
- """String representation."""
- f, n, fp = self.folder, self.number, self.fp
- return 'SubMessage(%s, %s, %s)' % (f, n, fp)
-
- def getbodytext(self, decode = 1):
- if not decode:
- return self.bodyencoded
- if type(self.body) == type(''):
- return self.body
-
- def getbodyparts(self):
- if type(self.body) == type([]):
- return self.body
-
- def getbody(self):
- return self.body
-
-
-class IntSet:
- """Class implementing sets of integers.
-
- This is an efficient representation for sets consisting of several
- continuous ranges, e.g. 1-100,200-400,402-1000 is represented
- internally as a list of three pairs: [(1,100), (200,400),
- (402,1000)]. The internal representation is always kept normalized.
-
- The constructor has up to three arguments:
- - the string used to initialize the set (default ''),
- - the separator between ranges (default ',')
- - the separator between begin and end of a range (default '-')
- The separators must be strings (not regexprs) and should be different.
-
- The tostring() function yields a string that can be passed to another
- IntSet constructor; __repr__() is a valid IntSet constructor itself.
- """
-
- # XXX The default begin/end separator means that negative numbers are
- # not supported very well.
- #
- # XXX There are currently no operations to remove set elements.
-
- def __init__(self, data = None, sep = ',', rng = '-'):
- self.pairs = []
- self.sep = sep
- self.rng = rng
- if data: self.fromstring(data)
-
- def reset(self):
- self.pairs = []
-
- def __cmp__(self, other):
- return cmp(self.pairs, other.pairs)
-
- def __hash__(self):
- return hash(self.pairs)
-
- def __repr__(self):
- return 'IntSet(%r, %r, %r)' % (self.tostring(), self.sep, self.rng)
-
- def normalize(self):
- self.pairs.sort()
- i = 1
- while i < len(self.pairs):
- alo, ahi = self.pairs[i-1]
- blo, bhi = self.pairs[i]
- if ahi >= blo-1:
- self.pairs[i-1:i+1] = [(alo, max(ahi, bhi))]
- else:
- i = i+1
-
- def tostring(self):
- s = ''
- for lo, hi in self.pairs:
- if lo == hi: t = repr(lo)
- else: t = repr(lo) + self.rng + repr(hi)
- if s: s = s + (self.sep + t)
- else: s = t
- return s
-
- def tolist(self):
- l = []
- for lo, hi in self.pairs:
- m = range(lo, hi+1)
- l = l + m
- return l
-
- def fromlist(self, list):
- for i in list:
- self.append(i)
-
- def clone(self):
- new = IntSet()
- new.pairs = self.pairs[:]
- return new
-
- def min(self):
- return self.pairs[0][0]
-
- def max(self):
- return self.pairs[-1][-1]
-
- def contains(self, x):
- for lo, hi in self.pairs:
- if lo <= x <= hi: return True
- return False
-
- def append(self, x):
- for i in range(len(self.pairs)):
- lo, hi = self.pairs[i]
- if x < lo: # Need to insert before
- if x+1 == lo:
- self.pairs[i] = (x, hi)
- else:
- self.pairs.insert(i, (x, x))
- if i > 0 and x-1 == self.pairs[i-1][1]:
- # Merge with previous
- self.pairs[i-1:i+1] = [
- (self.pairs[i-1][0],
- self.pairs[i][1])
- ]
- return
- if x <= hi: # Already in set
- return
- i = len(self.pairs) - 1
- if i >= 0:
- lo, hi = self.pairs[i]
- if x-1 == hi:
- self.pairs[i] = lo, x
- return
- self.pairs.append((x, x))
-
- def addpair(self, xlo, xhi):
- if xlo > xhi: return
- self.pairs.append((xlo, xhi))
- self.normalize()
-
- def fromstring(self, data):
- new = []
- for part in data.split(self.sep):
- list = []
- for subp in part.split(self.rng):
- s = subp.strip()
- list.append(int(s))
- if len(list) == 1:
- new.append((list[0], list[0]))
- elif len(list) == 2 and list[0] <= list[1]:
- new.append((list[0], list[1]))
- else:
- raise ValueError, 'bad data passed to IntSet'
- self.pairs = self.pairs + new
- self.normalize()
-
-
-# Subroutines to read/write entries in .mh_profile and .mh_sequences
-
-def pickline(file, key, casefold = 1):
- try:
- f = open(file, 'r')
- except IOError:
- return None
- pat = re.escape(key) + ':'
- prog = re.compile(pat, casefold and re.IGNORECASE)
- while 1:
- line = f.readline()
- if not line: break
- if prog.match(line):
- text = line[len(key)+1:]
- while 1:
- line = f.readline()
- if not line or not line[0].isspace():
- break
- text = text + line
- return text.strip()
- return None
-
-def updateline(file, key, value, casefold = 1):
- try:
- f = open(file, 'r')
- lines = f.readlines()
- f.close()
- except IOError:
- lines = []
- pat = re.escape(key) + ':(.*)\n'
- prog = re.compile(pat, casefold and re.IGNORECASE)
- if value is None:
- newline = None
- else:
- newline = '%s: %s\n' % (key, value)
- for i in range(len(lines)):
- line = lines[i]
- if prog.match(line):
- if newline is None:
- del lines[i]
- else:
- lines[i] = newline
- break
- else:
- if newline is not None:
- lines.append(newline)
- tempfile = file + "~"
- f = open(tempfile, 'w')
- for line in lines:
- f.write(line)
- f.close()
- os.rename(tempfile, file)
-
-
-# Test program
-
-def test():
- global mh, f
- os.system('rm -rf $HOME/Mail/@test')
- mh = MH()
- def do(s): print s; print eval(s)
- do('mh.listfolders()')
- do('mh.listallfolders()')
- testfolders = ['@test', '@test/test1', '@test/test2',
- '@test/test1/test11', '@test/test1/test12',
- '@test/test1/test11/test111']
- for t in testfolders: do('mh.makefolder(%r)' % (t,))
- do('mh.listsubfolders(\'@test\')')
- do('mh.listallsubfolders(\'@test\')')
- f = mh.openfolder('@test')
- do('f.listsubfolders()')
- do('f.listallsubfolders()')
- do('f.getsequences()')
- seqs = f.getsequences()
- seqs['foo'] = IntSet('1-10 12-20', ' ').tolist()
- print seqs
- f.putsequences(seqs)
- do('f.getsequences()')
- for t in reversed(testfolders): do('mh.deletefolder(%r)' % (t,))
- do('mh.getcontext()')
- context = mh.getcontext()
- f = mh.openfolder(context)
- do('f.getcurrent()')
- for seq in ('first', 'last', 'cur', '.', 'prev', 'next',
- 'first:3', 'last:3', 'cur:3', 'cur:-3',
- 'prev:3', 'next:3',
- '1:3', '1:-3', '100:3', '100:-3', '10000:3', '10000:-3',
- 'all'):
- try:
- do('f.parsesequence(%r)' % (seq,))
- except Error, msg:
- print "Error:", msg
- stuff = os.popen("pick %r 2>/dev/null" % (seq,)).read()
- list = map(int, stuff.split())
- print list, "<-- pick"
- do('f.listmessages()')
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/mimetools.py b/sys/lib/python/mimetools.py
deleted file mode 100644
index 8c1cc1990..000000000
--- a/sys/lib/python/mimetools.py
+++ /dev/null
@@ -1,241 +0,0 @@
-"""Various tools used by MIME-reading or MIME-writing programs."""
-
-
-import os
-import rfc822
-import tempfile
-
-__all__ = ["Message","choose_boundary","encode","decode","copyliteral",
- "copybinary"]
-
-class Message(rfc822.Message):
- """A derived class of rfc822.Message that knows about MIME headers and
- contains some hooks for decoding encoded and multipart messages."""
-
- def __init__(self, fp, seekable = 1):
- rfc822.Message.__init__(self, fp, seekable)
- self.encodingheader = \
- self.getheader('content-transfer-encoding')
- self.typeheader = \
- self.getheader('content-type')
- self.parsetype()
- self.parseplist()
-
- def parsetype(self):
- str = self.typeheader
- if str is None:
- str = 'text/plain'
- if ';' in str:
- i = str.index(';')
- self.plisttext = str[i:]
- str = str[:i]
- else:
- self.plisttext = ''
- fields = str.split('/')
- for i in range(len(fields)):
- fields[i] = fields[i].strip().lower()
- self.type = '/'.join(fields)
- self.maintype = fields[0]
- self.subtype = '/'.join(fields[1:])
-
- def parseplist(self):
- str = self.plisttext
- self.plist = []
- while str[:1] == ';':
- str = str[1:]
- if ';' in str:
- # XXX Should parse quotes!
- end = str.index(';')
- else:
- end = len(str)
- f = str[:end]
- if '=' in f:
- i = f.index('=')
- f = f[:i].strip().lower() + \
- '=' + f[i+1:].strip()
- self.plist.append(f.strip())
- str = str[end:]
-
- def getplist(self):
- return self.plist
-
- def getparam(self, name):
- name = name.lower() + '='
- n = len(name)
- for p in self.plist:
- if p[:n] == name:
- return rfc822.unquote(p[n:])
- return None
-
- def getparamnames(self):
- result = []
- for p in self.plist:
- i = p.find('=')
- if i >= 0:
- result.append(p[:i].lower())
- return result
-
- def getencoding(self):
- if self.encodingheader is None:
- return '7bit'
- return self.encodingheader.lower()
-
- def gettype(self):
- return self.type
-
- def getmaintype(self):
- return self.maintype
-
- def getsubtype(self):
- return self.subtype
-
-
-
-
-# Utility functions
-# -----------------
-
-try:
- import thread
-except ImportError:
- import dummy_thread as thread
-_counter_lock = thread.allocate_lock()
-del thread
-
-_counter = 0
-def _get_next_counter():
- global _counter
- _counter_lock.acquire()
- _counter += 1
- result = _counter
- _counter_lock.release()
- return result
-
-_prefix = None
-
-def choose_boundary():
- """Return a string usable as a multipart boundary.
-
- The string chosen is unique within a single program run, and
- incorporates the user id (if available), process id (if available),
- and current time. So it's very unlikely the returned string appears
- in message text, but there's no guarantee.
-
- The boundary contains dots so you have to quote it in the header."""
-
- global _prefix
- import time
- if _prefix is None:
- import socket
- try:
- hostid = socket.gethostbyname(socket.gethostname())
- except socket.gaierror:
- hostid = '127.0.0.1'
- try:
- uid = repr(os.getuid())
- except AttributeError:
- uid = '1'
- try:
- pid = repr(os.getpid())
- except AttributeError:
- pid = '1'
- _prefix = hostid + '.' + uid + '.' + pid
- return "%s.%.3f.%d" % (_prefix, time.time(), _get_next_counter())
-
-
-# Subroutines for decoding some common content-transfer-types
-
-def decode(input, output, encoding):
- """Decode common content-transfer-encodings (base64, quopri, uuencode)."""
- if encoding == 'base64':
- import base64
- return base64.decode(input, output)
- if encoding == 'quoted-printable':
- import quopri
- return quopri.decode(input, output)
- if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
- import uu
- return uu.decode(input, output)
- if encoding in ('7bit', '8bit'):
- return output.write(input.read())
- if encoding in decodetab:
- pipethrough(input, decodetab[encoding], output)
- else:
- raise ValueError, \
- 'unknown Content-Transfer-Encoding: %s' % encoding
-
-def encode(input, output, encoding):
- """Encode common content-transfer-encodings (base64, quopri, uuencode)."""
- if encoding == 'base64':
- import base64
- return base64.encode(input, output)
- if encoding == 'quoted-printable':
- import quopri
- return quopri.encode(input, output, 0)
- if encoding in ('uuencode', 'x-uuencode', 'uue', 'x-uue'):
- import uu
- return uu.encode(input, output)
- if encoding in ('7bit', '8bit'):
- return output.write(input.read())
- if encoding in encodetab:
- pipethrough(input, encodetab[encoding], output)
- else:
- raise ValueError, \
- 'unknown Content-Transfer-Encoding: %s' % encoding
-
-# The following is no longer used for standard encodings
-
-# XXX This requires that uudecode and mmencode are in $PATH
-
-uudecode_pipe = '''(
-TEMP=/tmp/@uu.$$
-sed "s%^begin [0-7][0-7]* .*%begin 600 $TEMP%" | uudecode
-cat $TEMP
-rm $TEMP
-)'''
-
-decodetab = {
- 'uuencode': uudecode_pipe,
- 'x-uuencode': uudecode_pipe,
- 'uue': uudecode_pipe,
- 'x-uue': uudecode_pipe,
- 'quoted-printable': 'mmencode -u -q',
- 'base64': 'mmencode -u -b',
-}
-
-encodetab = {
- 'x-uuencode': 'uuencode tempfile',
- 'uuencode': 'uuencode tempfile',
- 'x-uue': 'uuencode tempfile',
- 'uue': 'uuencode tempfile',
- 'quoted-printable': 'mmencode -q',
- 'base64': 'mmencode -b',
-}
-
-def pipeto(input, command):
- pipe = os.popen(command, 'w')
- copyliteral(input, pipe)
- pipe.close()
-
-def pipethrough(input, command, output):
- (fd, tempname) = tempfile.mkstemp()
- temp = os.fdopen(fd, 'w')
- copyliteral(input, temp)
- temp.close()
- pipe = os.popen(command + ' <' + tempname, 'r')
- copybinary(pipe, output)
- pipe.close()
- os.unlink(tempname)
-
-def copyliteral(input, output):
- while 1:
- line = input.readline()
- if not line: break
- output.write(line)
-
-def copybinary(input, output):
- BUFSIZE = 8192
- while 1:
- line = input.read(BUFSIZE)
- if not line: break
- output.write(line)
diff --git a/sys/lib/python/mimetypes.py b/sys/lib/python/mimetypes.py
deleted file mode 100644
index b0d2f1817..000000000
--- a/sys/lib/python/mimetypes.py
+++ /dev/null
@@ -1,533 +0,0 @@
-"""Guess the MIME type of a file.
-
-This module defines two useful functions:
-
-guess_type(url, strict=1) -- guess the MIME type and encoding of a URL.
-
-guess_extension(type, strict=1) -- guess the extension for a given MIME type.
-
-It also contains the following, for tuning the behavior:
-
-Data:
-
-knownfiles -- list of files to parse
-inited -- flag set when init() has been called
-suffix_map -- dictionary mapping suffixes to suffixes
-encodings_map -- dictionary mapping suffixes to encodings
-types_map -- dictionary mapping suffixes to types
-
-Functions:
-
-init([files]) -- parse a list of files, default knownfiles
-read_mime_types(file) -- parse one file, return a dictionary or None
-"""
-
-import os
-import posixpath
-import urllib
-
-__all__ = [
- "guess_type","guess_extension","guess_all_extensions",
- "add_type","read_mime_types","init"
-]
-
-knownfiles = [
- "/etc/mime.types",
- "/etc/httpd/mime.types", # Mac OS X
- "/etc/httpd/conf/mime.types", # Apache
- "/etc/apache/mime.types", # Apache 1
- "/etc/apache2/mime.types", # Apache 2
- "/usr/local/etc/httpd/conf/mime.types",
- "/usr/local/lib/netscape/mime.types",
- "/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
- "/usr/local/etc/mime.types", # Apache 1.3
- ]
-
-inited = False
-
-
-class MimeTypes:
- """MIME-types datastore.
-
- This datastore can handle information from mime.types-style files
- and supports basic determination of MIME type from a filename or
- URL, and can guess a reasonable extension given a MIME type.
- """
-
- def __init__(self, filenames=(), strict=True):
- if not inited:
- init()
- self.encodings_map = encodings_map.copy()
- self.suffix_map = suffix_map.copy()
- self.types_map = ({}, {}) # dict for (non-strict, strict)
- self.types_map_inv = ({}, {})
- for (ext, type) in types_map.items():
- self.add_type(type, ext, True)
- for (ext, type) in common_types.items():
- self.add_type(type, ext, False)
- for name in filenames:
- self.read(name, strict)
-
- def add_type(self, type, ext, strict=True):
- """Add a mapping between a type and an extension.
-
- When the extension is already known, the new
- type will replace the old one. When the type
- is already known the extension will be added
- to the list of known extensions.
-
- If strict is true, information will be added to
- list of standard types, else to the list of non-standard
- types.
- """
- self.types_map[strict][ext] = type
- exts = self.types_map_inv[strict].setdefault(type, [])
- if ext not in exts:
- exts.append(ext)
-
- def guess_type(self, url, strict=True):
- """Guess the type of a file based on its URL.
-
- Return value is a tuple (type, encoding) where type is None if
- the type can't be guessed (no or unknown suffix) or a string
- of the form type/subtype, usable for a MIME Content-type
- header; and encoding is None for no encoding or the name of
- the program used to encode (e.g. compress or gzip). The
- mappings are table driven. Encoding suffixes are case
- sensitive; type suffixes are first tried case sensitive, then
- case insensitive.
-
- The suffixes .tgz, .taz and .tz (case sensitive!) are all
- mapped to '.tar.gz'. (This is table-driven too, using the
- dictionary suffix_map.)
-
- Optional `strict' argument when False adds a bunch of commonly found,
- but non-standard types.
- """
- scheme, url = urllib.splittype(url)
- if scheme == 'data':
- # syntax of data URLs:
- # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
- # mediatype := [ type "/" subtype ] *( ";" parameter )
- # data := *urlchar
- # parameter := attribute "=" value
- # type/subtype defaults to "text/plain"
- comma = url.find(',')
- if comma < 0:
- # bad data URL
- return None, None
- semi = url.find(';', 0, comma)
- if semi >= 0:
- type = url[:semi]
- else:
- type = url[:comma]
- if '=' in type or '/' not in type:
- type = 'text/plain'
- return type, None # never compressed, so encoding is None
- base, ext = posixpath.splitext(url)
- while ext in self.suffix_map:
- base, ext = posixpath.splitext(base + self.suffix_map[ext])
- if ext in self.encodings_map:
- encoding = self.encodings_map[ext]
- base, ext = posixpath.splitext(base)
- else:
- encoding = None
- types_map = self.types_map[True]
- if ext in types_map:
- return types_map[ext], encoding
- elif ext.lower() in types_map:
- return types_map[ext.lower()], encoding
- elif strict:
- return None, encoding
- types_map = self.types_map[False]
- if ext in types_map:
- return types_map[ext], encoding
- elif ext.lower() in types_map:
- return types_map[ext.lower()], encoding
- else:
- return None, encoding
-
- def guess_all_extensions(self, type, strict=True):
- """Guess the extensions for a file based on its MIME type.
-
- Return value is a list of strings giving the possible filename
- extensions, including the leading dot ('.'). The extension is not
- guaranteed to have been associated with any particular data stream,
- but would be mapped to the MIME type `type' by guess_type().
-
- Optional `strict' argument when false adds a bunch of commonly found,
- but non-standard types.
- """
- type = type.lower()
- extensions = self.types_map_inv[True].get(type, [])
- if not strict:
- for ext in self.types_map_inv[False].get(type, []):
- if ext not in extensions:
- extensions.append(ext)
- return extensions
-
- def guess_extension(self, type, strict=True):
- """Guess the extension for a file based on its MIME type.
-
- Return value is a string giving a filename extension,
- including the leading dot ('.'). The extension is not
- guaranteed to have been associated with any particular data
- stream, but would be mapped to the MIME type `type' by
- guess_type(). If no extension can be guessed for `type', None
- is returned.
-
- Optional `strict' argument when false adds a bunch of commonly found,
- but non-standard types.
- """
- extensions = self.guess_all_extensions(type, strict)
- if not extensions:
- return None
- return extensions[0]
-
- def read(self, filename, strict=True):
- """
- Read a single mime.types-format file, specified by pathname.
-
- If strict is true, information will be added to
- list of standard types, else to the list of non-standard
- types.
- """
- fp = open(filename)
- self.readfp(fp, strict)
- fp.close()
-
- def readfp(self, fp, strict=True):
- """
- Read a single mime.types-format file.
-
- If strict is true, information will be added to
- list of standard types, else to the list of non-standard
- types.
- """
- while 1:
- line = fp.readline()
- if not line:
- break
- words = line.split()
- for i in range(len(words)):
- if words[i][0] == '#':
- del words[i:]
- break
- if not words:
- continue
- type, suffixes = words[0], words[1:]
- for suff in suffixes:
- self.add_type(type, '.' + suff, strict)
-
-def guess_type(url, strict=True):
- """Guess the type of a file based on its URL.
-
- Return value is a tuple (type, encoding) where type is None if the
- type can't be guessed (no or unknown suffix) or a string of the
- form type/subtype, usable for a MIME Content-type header; and
- encoding is None for no encoding or the name of the program used
- to encode (e.g. compress or gzip). The mappings are table
- driven. Encoding suffixes are case sensitive; type suffixes are
- first tried case sensitive, then case insensitive.
-
- The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
- to ".tar.gz". (This is table-driven too, using the dictionary
- suffix_map).
-
- Optional `strict' argument when false adds a bunch of commonly found, but
- non-standard types.
- """
- init()
- return guess_type(url, strict)
-
-
-def guess_all_extensions(type, strict=True):
- """Guess the extensions for a file based on its MIME type.
-
- Return value is a list of strings giving the possible filename
- extensions, including the leading dot ('.'). The extension is not
- guaranteed to have been associated with any particular data
- stream, but would be mapped to the MIME type `type' by
- guess_type(). If no extension can be guessed for `type', None
- is returned.
-
- Optional `strict' argument when false adds a bunch of commonly found,
- but non-standard types.
- """
- init()
- return guess_all_extensions(type, strict)
-
-def guess_extension(type, strict=True):
- """Guess the extension for a file based on its MIME type.
-
- Return value is a string giving a filename extension, including the
- leading dot ('.'). The extension is not guaranteed to have been
- associated with any particular data stream, but would be mapped to the
- MIME type `type' by guess_type(). If no extension can be guessed for
- `type', None is returned.
-
- Optional `strict' argument when false adds a bunch of commonly found,
- but non-standard types.
- """
- init()
- return guess_extension(type, strict)
-
-def add_type(type, ext, strict=True):
- """Add a mapping between a type and an extension.
-
- When the extension is already known, the new
- type will replace the old one. When the type
- is already known the extension will be added
- to the list of known extensions.
-
- If strict is true, information will be added to
- list of standard types, else to the list of non-standard
- types.
- """
- init()
- return add_type(type, ext, strict)
-
-
-def init(files=None):
- global guess_all_extensions, guess_extension, guess_type
- global suffix_map, types_map, encodings_map, common_types
- global add_type, inited
- inited = True
- db = MimeTypes()
- if files is None:
- files = knownfiles
- for file in files:
- if os.path.isfile(file):
- db.readfp(open(file))
- encodings_map = db.encodings_map
- suffix_map = db.suffix_map
- types_map = db.types_map[True]
- guess_all_extensions = db.guess_all_extensions
- guess_extension = db.guess_extension
- guess_type = db.guess_type
- add_type = db.add_type
- common_types = db.types_map[False]
-
-
-def read_mime_types(file):
- try:
- f = open(file)
- except IOError:
- return None
- db = MimeTypes()
- db.readfp(f, True)
- return db.types_map[True]
-
-
-def _default_mime_types():
- global suffix_map
- global encodings_map
- global types_map
- global common_types
-
- suffix_map = {
- '.tgz': '.tar.gz',
- '.taz': '.tar.gz',
- '.tz': '.tar.gz',
- }
-
- encodings_map = {
- '.gz': 'gzip',
- '.Z': 'compress',
- }
-
- # Before adding new types, make sure they are either registered with IANA,
- # at http://www.isi.edu/in-notes/iana/assignments/media-types
- # or extensions, i.e. using the x- prefix
-
- # If you add to these, please keep them sorted!
- types_map = {
- '.a' : 'application/octet-stream',
- '.ai' : 'application/postscript',
- '.aif' : 'audio/x-aiff',
- '.aifc' : 'audio/x-aiff',
- '.aiff' : 'audio/x-aiff',
- '.au' : 'audio/basic',
- '.avi' : 'video/x-msvideo',
- '.bat' : 'text/plain',
- '.bcpio' : 'application/x-bcpio',
- '.bin' : 'application/octet-stream',
- '.bmp' : 'image/x-ms-bmp',
- '.c' : 'text/plain',
- # Duplicates :(
- '.cdf' : 'application/x-cdf',
- '.cdf' : 'application/x-netcdf',
- '.cpio' : 'application/x-cpio',
- '.csh' : 'application/x-csh',
- '.css' : 'text/css',
- '.dll' : 'application/octet-stream',
- '.doc' : 'application/msword',
- '.dot' : 'application/msword',
- '.dvi' : 'application/x-dvi',
- '.eml' : 'message/rfc822',
- '.eps' : 'application/postscript',
- '.etx' : 'text/x-setext',
- '.exe' : 'application/octet-stream',
- '.gif' : 'image/gif',
- '.gtar' : 'application/x-gtar',
- '.h' : 'text/plain',
- '.hdf' : 'application/x-hdf',
- '.htm' : 'text/html',
- '.html' : 'text/html',
- '.ief' : 'image/ief',
- '.jpe' : 'image/jpeg',
- '.jpeg' : 'image/jpeg',
- '.jpg' : 'image/jpeg',
- '.js' : 'application/x-javascript',
- '.ksh' : 'text/plain',
- '.latex' : 'application/x-latex',
- '.m1v' : 'video/mpeg',
- '.man' : 'application/x-troff-man',
- '.me' : 'application/x-troff-me',
- '.mht' : 'message/rfc822',
- '.mhtml' : 'message/rfc822',
- '.mif' : 'application/x-mif',
- '.mov' : 'video/quicktime',
- '.movie' : 'video/x-sgi-movie',
- '.mp2' : 'audio/mpeg',
- '.mp3' : 'audio/mpeg',
- '.mpa' : 'video/mpeg',
- '.mpe' : 'video/mpeg',
- '.mpeg' : 'video/mpeg',
- '.mpg' : 'video/mpeg',
- '.ms' : 'application/x-troff-ms',
- '.nc' : 'application/x-netcdf',
- '.nws' : 'message/rfc822',
- '.o' : 'application/octet-stream',
- '.obj' : 'application/octet-stream',
- '.oda' : 'application/oda',
- '.p12' : 'application/x-pkcs12',
- '.p7c' : 'application/pkcs7-mime',
- '.pbm' : 'image/x-portable-bitmap',
- '.pdf' : 'application/pdf',
- '.pfx' : 'application/x-pkcs12',
- '.pgm' : 'image/x-portable-graymap',
- '.pl' : 'text/plain',
- '.png' : 'image/png',
- '.pnm' : 'image/x-portable-anymap',
- '.pot' : 'application/vnd.ms-powerpoint',
- '.ppa' : 'application/vnd.ms-powerpoint',
- '.ppm' : 'image/x-portable-pixmap',
- '.pps' : 'application/vnd.ms-powerpoint',
- '.ppt' : 'application/vnd.ms-powerpoint',
- '.ps' : 'application/postscript',
- '.pwz' : 'application/vnd.ms-powerpoint',
- '.py' : 'text/x-python',
- '.pyc' : 'application/x-python-code',
- '.pyo' : 'application/x-python-code',
- '.qt' : 'video/quicktime',
- '.ra' : 'audio/x-pn-realaudio',
- '.ram' : 'application/x-pn-realaudio',
- '.ras' : 'image/x-cmu-raster',
- '.rdf' : 'application/xml',
- '.rgb' : 'image/x-rgb',
- '.roff' : 'application/x-troff',
- '.rtx' : 'text/richtext',
- '.sgm' : 'text/x-sgml',
- '.sgml' : 'text/x-sgml',
- '.sh' : 'application/x-sh',
- '.shar' : 'application/x-shar',
- '.snd' : 'audio/basic',
- '.so' : 'application/octet-stream',
- '.src' : 'application/x-wais-source',
- '.sv4cpio': 'application/x-sv4cpio',
- '.sv4crc' : 'application/x-sv4crc',
- '.swf' : 'application/x-shockwave-flash',
- '.t' : 'application/x-troff',
- '.tar' : 'application/x-tar',
- '.tcl' : 'application/x-tcl',
- '.tex' : 'application/x-tex',
- '.texi' : 'application/x-texinfo',
- '.texinfo': 'application/x-texinfo',
- '.tif' : 'image/tiff',
- '.tiff' : 'image/tiff',
- '.tr' : 'application/x-troff',
- '.tsv' : 'text/tab-separated-values',
- '.txt' : 'text/plain',
- '.ustar' : 'application/x-ustar',
- '.vcf' : 'text/x-vcard',
- '.wav' : 'audio/x-wav',
- '.wiz' : 'application/msword',
- '.wsdl' : 'application/xml',
- '.xbm' : 'image/x-xbitmap',
- '.xlb' : 'application/vnd.ms-excel',
- # Duplicates :(
- '.xls' : 'application/excel',
- '.xls' : 'application/vnd.ms-excel',
- '.xml' : 'text/xml',
- '.xpdl' : 'application/xml',
- '.xpm' : 'image/x-xpixmap',
- '.xsl' : 'application/xml',
- '.xwd' : 'image/x-xwindowdump',
- '.zip' : 'application/zip',
- }
-
- # These are non-standard types, commonly found in the wild. They will
- # only match if strict=0 flag is given to the API methods.
-
- # Please sort these too
- common_types = {
- '.jpg' : 'image/jpg',
- '.mid' : 'audio/midi',
- '.midi': 'audio/midi',
- '.pct' : 'image/pict',
- '.pic' : 'image/pict',
- '.pict': 'image/pict',
- '.rtf' : 'application/rtf',
- '.xul' : 'text/xul'
- }
-
-
-_default_mime_types()
-
-
-if __name__ == '__main__':
- import sys
- import getopt
-
- USAGE = """\
-Usage: mimetypes.py [options] type
-
-Options:
- --help / -h -- print this message and exit
- --lenient / -l -- additionally search of some common, but non-standard
- types.
- --extension / -e -- guess extension instead of type
-
-More than one type argument may be given.
-"""
-
- def usage(code, msg=''):
- print USAGE
- if msg: print msg
- sys.exit(code)
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'hle',
- ['help', 'lenient', 'extension'])
- except getopt.error, msg:
- usage(1, msg)
-
- strict = 1
- extension = 0
- for opt, arg in opts:
- if opt in ('-h', '--help'):
- usage(0)
- elif opt in ('-l', '--lenient'):
- strict = 0
- elif opt in ('-e', '--extension'):
- extension = 1
- for gtype in args:
- if extension:
- guess = guess_extension(gtype, strict)
- if not guess: print "I don't know anything about type", gtype
- else: print guess
- else:
- guess, encoding = guess_type(gtype, strict)
- if not guess: print "I don't know anything about type", gtype
- else: print 'type:', guess, 'encoding:', encoding
diff --git a/sys/lib/python/mimify.py b/sys/lib/python/mimify.py
deleted file mode 100755
index b6f61439d..000000000
--- a/sys/lib/python/mimify.py
+++ /dev/null
@@ -1,464 +0,0 @@
-#! /usr/bin/env python
-
-"""Mimification and unmimification of mail messages.
-
-Decode quoted-printable parts of a mail message or encode using
-quoted-printable.
-
-Usage:
- mimify(input, output)
- unmimify(input, output, decode_base64 = 0)
-to encode and decode respectively. Input and output may be the name
-of a file or an open file object. Only a readline() method is used
-on the input file, only a write() method is used on the output file.
-When using file names, the input and output file names may be the
-same.
-
-Interactive usage:
- mimify.py -e [infile [outfile]]
- mimify.py -d [infile [outfile]]
-to encode and decode respectively. Infile defaults to standard
-input and outfile to standard output.
-"""
-
-# Configure
-MAXLEN = 200 # if lines longer than this, encode as quoted-printable
-CHARSET = 'ISO-8859-1' # default charset for non-US-ASCII mail
-QUOTE = '> ' # string replies are quoted with
-# End configure
-
-import re
-
-__all__ = ["mimify","unmimify","mime_encode_header","mime_decode_header"]
-
-qp = re.compile('^content-transfer-encoding:\\s*quoted-printable', re.I)
-base64_re = re.compile('^content-transfer-encoding:\\s*base64', re.I)
-mp = re.compile('^content-type:.*multipart/.*boundary="?([^;"\n]*)', re.I|re.S)
-chrset = re.compile('^(content-type:.*charset=")(us-ascii|iso-8859-[0-9]+)(".*)', re.I|re.S)
-he = re.compile('^-*\n')
-mime_code = re.compile('=([0-9a-f][0-9a-f])', re.I)
-mime_head = re.compile('=\\?iso-8859-1\\?q\\?([^? \t\n]+)\\?=', re.I)
-repl = re.compile('^subject:\\s+re: ', re.I)
-
-class File:
- """A simple fake file object that knows about limited read-ahead and
- boundaries. The only supported method is readline()."""
-
- def __init__(self, file, boundary):
- self.file = file
- self.boundary = boundary
- self.peek = None
-
- def readline(self):
- if self.peek is not None:
- return ''
- line = self.file.readline()
- if not line:
- return line
- if self.boundary:
- if line == self.boundary + '\n':
- self.peek = line
- return ''
- if line == self.boundary + '--\n':
- self.peek = line
- return ''
- return line
-
-class HeaderFile:
- def __init__(self, file):
- self.file = file
- self.peek = None
-
- def readline(self):
- if self.peek is not None:
- line = self.peek
- self.peek = None
- else:
- line = self.file.readline()
- if not line:
- return line
- if he.match(line):
- return line
- while 1:
- self.peek = self.file.readline()
- if len(self.peek) == 0 or \
- (self.peek[0] != ' ' and self.peek[0] != '\t'):
- return line
- line = line + self.peek
- self.peek = None
-
-def mime_decode(line):
- """Decode a single line of quoted-printable text to 8bit."""
- newline = ''
- pos = 0
- while 1:
- res = mime_code.search(line, pos)
- if res is None:
- break
- newline = newline + line[pos:res.start(0)] + \
- chr(int(res.group(1), 16))
- pos = res.end(0)
- return newline + line[pos:]
-
-def mime_decode_header(line):
- """Decode a header line to 8bit."""
- newline = ''
- pos = 0
- while 1:
- res = mime_head.search(line, pos)
- if res is None:
- break
- match = res.group(1)
- # convert underscores to spaces (before =XX conversion!)
- match = ' '.join(match.split('_'))
- newline = newline + line[pos:res.start(0)] + mime_decode(match)
- pos = res.end(0)
- return newline + line[pos:]
-
-def unmimify_part(ifile, ofile, decode_base64 = 0):
- """Convert a quoted-printable part of a MIME mail message to 8bit."""
- multipart = None
- quoted_printable = 0
- is_base64 = 0
- is_repl = 0
- if ifile.boundary and ifile.boundary[:2] == QUOTE:
- prefix = QUOTE
- else:
- prefix = ''
-
- # read header
- hfile = HeaderFile(ifile)
- while 1:
- line = hfile.readline()
- if not line:
- return
- if prefix and line[:len(prefix)] == prefix:
- line = line[len(prefix):]
- pref = prefix
- else:
- pref = ''
- line = mime_decode_header(line)
- if qp.match(line):
- quoted_printable = 1
- continue # skip this header
- if decode_base64 and base64_re.match(line):
- is_base64 = 1
- continue
- ofile.write(pref + line)
- if not prefix and repl.match(line):
- # we're dealing with a reply message
- is_repl = 1
- mp_res = mp.match(line)
- if mp_res:
- multipart = '--' + mp_res.group(1)
- if he.match(line):
- break
- if is_repl and (quoted_printable or multipart):
- is_repl = 0
-
- # read body
- while 1:
- line = ifile.readline()
- if not line:
- return
- line = re.sub(mime_head, '\\1', line)
- if prefix and line[:len(prefix)] == prefix:
- line = line[len(prefix):]
- pref = prefix
- else:
- pref = ''
-## if is_repl and len(line) >= 4 and line[:4] == QUOTE+'--' and line[-3:] != '--\n':
-## multipart = line[:-1]
- while multipart:
- if line == multipart + '--\n':
- ofile.write(pref + line)
- multipart = None
- line = None
- break
- if line == multipart + '\n':
- ofile.write(pref + line)
- nifile = File(ifile, multipart)
- unmimify_part(nifile, ofile, decode_base64)
- line = nifile.peek
- if not line:
- # premature end of file
- break
- continue
- # not a boundary between parts
- break
- if line and quoted_printable:
- while line[-2:] == '=\n':
- line = line[:-2]
- newline = ifile.readline()
- if newline[:len(QUOTE)] == QUOTE:
- newline = newline[len(QUOTE):]
- line = line + newline
- line = mime_decode(line)
- if line and is_base64 and not pref:
- import base64
- line = base64.decodestring(line)
- if line:
- ofile.write(pref + line)
-
-def unmimify(infile, outfile, decode_base64 = 0):
- """Convert quoted-printable parts of a MIME mail message to 8bit."""
- if type(infile) == type(''):
- ifile = open(infile)
- if type(outfile) == type('') and infile == outfile:
- import os
- d, f = os.path.split(infile)
- os.rename(infile, os.path.join(d, ',' + f))
- else:
- ifile = infile
- if type(outfile) == type(''):
- ofile = open(outfile, 'w')
- else:
- ofile = outfile
- nifile = File(ifile, None)
- unmimify_part(nifile, ofile, decode_base64)
- ofile.flush()
-
-mime_char = re.compile('[=\177-\377]') # quote these chars in body
-mime_header_char = re.compile('[=?\177-\377]') # quote these in header
-
-def mime_encode(line, header):
- """Code a single line as quoted-printable.
- If header is set, quote some extra characters."""
- if header:
- reg = mime_header_char
- else:
- reg = mime_char
- newline = ''
- pos = 0
- if len(line) >= 5 and line[:5] == 'From ':
- # quote 'From ' at the start of a line for stupid mailers
- newline = ('=%02x' % ord('F')).upper()
- pos = 1
- while 1:
- res = reg.search(line, pos)
- if res is None:
- break
- newline = newline + line[pos:res.start(0)] + \
- ('=%02x' % ord(res.group(0))).upper()
- pos = res.end(0)
- line = newline + line[pos:]
-
- newline = ''
- while len(line) >= 75:
- i = 73
- while line[i] == '=' or line[i-1] == '=':
- i = i - 1
- i = i + 1
- newline = newline + line[:i] + '=\n'
- line = line[i:]
- return newline + line
-
-mime_header = re.compile('([ \t(]|^)([-a-zA-Z0-9_+]*[\177-\377][-a-zA-Z0-9_+\177-\377]*)(?=[ \t)]|\n)')
-
-def mime_encode_header(line):
- """Code a single header line as quoted-printable."""
- newline = ''
- pos = 0
- while 1:
- res = mime_header.search(line, pos)
- if res is None:
- break
- newline = '%s%s%s=?%s?Q?%s?=' % \
- (newline, line[pos:res.start(0)], res.group(1),
- CHARSET, mime_encode(res.group(2), 1))
- pos = res.end(0)
- return newline + line[pos:]
-
-mv = re.compile('^mime-version:', re.I)
-cte = re.compile('^content-transfer-encoding:', re.I)
-iso_char = re.compile('[\177-\377]')
-
-def mimify_part(ifile, ofile, is_mime):
- """Convert an 8bit part of a MIME mail message to quoted-printable."""
- has_cte = is_qp = is_base64 = 0
- multipart = None
- must_quote_body = must_quote_header = has_iso_chars = 0
-
- header = []
- header_end = ''
- message = []
- message_end = ''
- # read header
- hfile = HeaderFile(ifile)
- while 1:
- line = hfile.readline()
- if not line:
- break
- if not must_quote_header and iso_char.search(line):
- must_quote_header = 1
- if mv.match(line):
- is_mime = 1
- if cte.match(line):
- has_cte = 1
- if qp.match(line):
- is_qp = 1
- elif base64_re.match(line):
- is_base64 = 1
- mp_res = mp.match(line)
- if mp_res:
- multipart = '--' + mp_res.group(1)
- if he.match(line):
- header_end = line
- break
- header.append(line)
-
- # read body
- while 1:
- line = ifile.readline()
- if not line:
- break
- if multipart:
- if line == multipart + '--\n':
- message_end = line
- break
- if line == multipart + '\n':
- message_end = line
- break
- if is_base64:
- message.append(line)
- continue
- if is_qp:
- while line[-2:] == '=\n':
- line = line[:-2]
- newline = ifile.readline()
- if newline[:len(QUOTE)] == QUOTE:
- newline = newline[len(QUOTE):]
- line = line + newline
- line = mime_decode(line)
- message.append(line)
- if not has_iso_chars:
- if iso_char.search(line):
- has_iso_chars = must_quote_body = 1
- if not must_quote_body:
- if len(line) > MAXLEN:
- must_quote_body = 1
-
- # convert and output header and body
- for line in header:
- if must_quote_header:
- line = mime_encode_header(line)
- chrset_res = chrset.match(line)
- if chrset_res:
- if has_iso_chars:
- # change us-ascii into iso-8859-1
- if chrset_res.group(2).lower() == 'us-ascii':
- line = '%s%s%s' % (chrset_res.group(1),
- CHARSET,
- chrset_res.group(3))
- else:
- # change iso-8859-* into us-ascii
- line = '%sus-ascii%s' % chrset_res.group(1, 3)
- if has_cte and cte.match(line):
- line = 'Content-Transfer-Encoding: '
- if is_base64:
- line = line + 'base64\n'
- elif must_quote_body:
- line = line + 'quoted-printable\n'
- else:
- line = line + '7bit\n'
- ofile.write(line)
- if (must_quote_header or must_quote_body) and not is_mime:
- ofile.write('Mime-Version: 1.0\n')
- ofile.write('Content-Type: text/plain; ')
- if has_iso_chars:
- ofile.write('charset="%s"\n' % CHARSET)
- else:
- ofile.write('charset="us-ascii"\n')
- if must_quote_body and not has_cte:
- ofile.write('Content-Transfer-Encoding: quoted-printable\n')
- ofile.write(header_end)
-
- for line in message:
- if must_quote_body:
- line = mime_encode(line, 0)
- ofile.write(line)
- ofile.write(message_end)
-
- line = message_end
- while multipart:
- if line == multipart + '--\n':
- # read bit after the end of the last part
- while 1:
- line = ifile.readline()
- if not line:
- return
- if must_quote_body:
- line = mime_encode(line, 0)
- ofile.write(line)
- if line == multipart + '\n':
- nifile = File(ifile, multipart)
- mimify_part(nifile, ofile, 1)
- line = nifile.peek
- if not line:
- # premature end of file
- break
- ofile.write(line)
- continue
- # unexpectedly no multipart separator--copy rest of file
- while 1:
- line = ifile.readline()
- if not line:
- return
- if must_quote_body:
- line = mime_encode(line, 0)
- ofile.write(line)
-
-def mimify(infile, outfile):
- """Convert 8bit parts of a MIME mail message to quoted-printable."""
- if type(infile) == type(''):
- ifile = open(infile)
- if type(outfile) == type('') and infile == outfile:
- import os
- d, f = os.path.split(infile)
- os.rename(infile, os.path.join(d, ',' + f))
- else:
- ifile = infile
- if type(outfile) == type(''):
- ofile = open(outfile, 'w')
- else:
- ofile = outfile
- nifile = File(ifile, None)
- mimify_part(nifile, ofile, 0)
- ofile.flush()
-
-import sys
-if __name__ == '__main__' or (len(sys.argv) > 0 and sys.argv[0] == 'mimify'):
- import getopt
- usage = 'Usage: mimify [-l len] -[ed] [infile [outfile]]'
-
- decode_base64 = 0
- opts, args = getopt.getopt(sys.argv[1:], 'l:edb')
- if len(args) not in (0, 1, 2):
- print usage
- sys.exit(1)
- if (('-e', '') in opts) == (('-d', '') in opts) or \
- ((('-b', '') in opts) and (('-d', '') not in opts)):
- print usage
- sys.exit(1)
- for o, a in opts:
- if o == '-e':
- encode = mimify
- elif o == '-d':
- encode = unmimify
- elif o == '-l':
- try:
- MAXLEN = int(a)
- except (ValueError, OverflowError):
- print usage
- sys.exit(1)
- elif o == '-b':
- decode_base64 = 1
- if len(args) == 0:
- encode_args = (sys.stdin, sys.stdout)
- elif len(args) == 1:
- encode_args = (args[0], sys.stdout)
- else:
- encode_args = (args[0], args[1])
- if decode_base64:
- encode_args = encode_args + (decode_base64,)
- encode(*encode_args)
diff --git a/sys/lib/python/modulefinder.py b/sys/lib/python/modulefinder.py
deleted file mode 100644
index 25e14827c..000000000
--- a/sys/lib/python/modulefinder.py
+++ /dev/null
@@ -1,595 +0,0 @@
-"""Find modules used by a script, using introspection."""
-
-# This module should be kept compatible with Python 2.2, see PEP 291.
-
-import dis
-import imp
-import marshal
-import os
-import sys
-import new
-
-if hasattr(sys.__stdout__, "newlines"):
- READ_MODE = "U" # universal line endings
-else:
- # remain compatible with Python < 2.3
- READ_MODE = "r"
-
-LOAD_CONST = dis.opname.index('LOAD_CONST')
-IMPORT_NAME = dis.opname.index('IMPORT_NAME')
-STORE_NAME = dis.opname.index('STORE_NAME')
-STORE_GLOBAL = dis.opname.index('STORE_GLOBAL')
-STORE_OPS = [STORE_NAME, STORE_GLOBAL]
-
-# Modulefinder does a good job at simulating Python's, but it can not
-# handle __path__ modifications packages make at runtime. Therefore there
-# is a mechanism whereby you can register extra paths in this map for a
-# package, and it will be honored.
-
-# Note this is a mapping is lists of paths.
-packagePathMap = {}
-
-# A Public interface
-def AddPackagePath(packagename, path):
- paths = packagePathMap.get(packagename, [])
- paths.append(path)
- packagePathMap[packagename] = paths
-
-replacePackageMap = {}
-
-# This ReplacePackage mechanism allows modulefinder to work around the
-# way the _xmlplus package injects itself under the name "xml" into
-# sys.modules at runtime by calling ReplacePackage("_xmlplus", "xml")
-# before running ModuleFinder.
-
-def ReplacePackage(oldname, newname):
- replacePackageMap[oldname] = newname
-
-
-class Module:
-
- def __init__(self, name, file=None, path=None):
- self.__name__ = name
- self.__file__ = file
- self.__path__ = path
- self.__code__ = None
- # The set of global names that are assigned to in the module.
- # This includes those names imported through starimports of
- # Python modules.
- self.globalnames = {}
- # The set of starimports this module did that could not be
- # resolved, ie. a starimport from a non-Python module.
- self.starimports = {}
-
- def __repr__(self):
- s = "Module(%r" % (self.__name__,)
- if self.__file__ is not None:
- s = s + ", %r" % (self.__file__,)
- if self.__path__ is not None:
- s = s + ", %r" % (self.__path__,)
- s = s + ")"
- return s
-
-class ModuleFinder:
-
- def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
- if path is None:
- path = sys.path
- self.path = path
- self.modules = {}
- self.badmodules = {}
- self.debug = debug
- self.indent = 0
- self.excludes = excludes
- self.replace_paths = replace_paths
- self.processed_paths = [] # Used in debugging only
-
- def msg(self, level, str, *args):
- if level <= self.debug:
- for i in range(self.indent):
- print " ",
- print str,
- for arg in args:
- print repr(arg),
- print
-
- def msgin(self, *args):
- level = args[0]
- if level <= self.debug:
- self.indent = self.indent + 1
- self.msg(*args)
-
- def msgout(self, *args):
- level = args[0]
- if level <= self.debug:
- self.indent = self.indent - 1
- self.msg(*args)
-
- def run_script(self, pathname):
- self.msg(2, "run_script", pathname)
- fp = open(pathname, READ_MODE)
- stuff = ("", "r", imp.PY_SOURCE)
- self.load_module('__main__', fp, pathname, stuff)
-
- def load_file(self, pathname):
- dir, name = os.path.split(pathname)
- name, ext = os.path.splitext(name)
- fp = open(pathname, READ_MODE)
- stuff = (ext, "r", imp.PY_SOURCE)
- self.load_module(name, fp, pathname, stuff)
-
- def import_hook(self, name, caller=None, fromlist=None):
- self.msg(3, "import_hook", name, caller, fromlist)
- parent = self.determine_parent(caller)
- q, tail = self.find_head_package(parent, name)
- m = self.load_tail(q, tail)
- if not fromlist:
- return q
- if m.__path__:
- self.ensure_fromlist(m, fromlist)
- return None
-
- def determine_parent(self, caller):
- self.msgin(4, "determine_parent", caller)
- if not caller:
- self.msgout(4, "determine_parent -> None")
- return None
- pname = caller.__name__
- if caller.__path__:
- parent = self.modules[pname]
- assert caller is parent
- self.msgout(4, "determine_parent ->", parent)
- return parent
- if '.' in pname:
- i = pname.rfind('.')
- pname = pname[:i]
- parent = self.modules[pname]
- assert parent.__name__ == pname
- self.msgout(4, "determine_parent ->", parent)
- return parent
- self.msgout(4, "determine_parent -> None")
- return None
-
- def find_head_package(self, parent, name):
- self.msgin(4, "find_head_package", parent, name)
- if '.' in name:
- i = name.find('.')
- head = name[:i]
- tail = name[i+1:]
- else:
- head = name
- tail = ""
- if parent:
- qname = "%s.%s" % (parent.__name__, head)
- else:
- qname = head
- q = self.import_module(head, qname, parent)
- if q:
- self.msgout(4, "find_head_package ->", (q, tail))
- return q, tail
- if parent:
- qname = head
- parent = None
- q = self.import_module(head, qname, parent)
- if q:
- self.msgout(4, "find_head_package ->", (q, tail))
- return q, tail
- self.msgout(4, "raise ImportError: No module named", qname)
- raise ImportError, "No module named " + qname
-
- def load_tail(self, q, tail):
- self.msgin(4, "load_tail", q, tail)
- m = q
- while tail:
- i = tail.find('.')
- if i < 0: i = len(tail)
- head, tail = tail[:i], tail[i+1:]
- mname = "%s.%s" % (m.__name__, head)
- m = self.import_module(head, mname, m)
- if not m:
- self.msgout(4, "raise ImportError: No module named", mname)
- raise ImportError, "No module named " + mname
- self.msgout(4, "load_tail ->", m)
- return m
-
- def ensure_fromlist(self, m, fromlist, recursive=0):
- self.msg(4, "ensure_fromlist", m, fromlist, recursive)
- for sub in fromlist:
- if sub == "*":
- if not recursive:
- all = self.find_all_submodules(m)
- if all:
- self.ensure_fromlist(m, all, 1)
- elif not hasattr(m, sub):
- subname = "%s.%s" % (m.__name__, sub)
- submod = self.import_module(sub, subname, m)
- if not submod:
- raise ImportError, "No module named " + subname
-
- def find_all_submodules(self, m):
- if not m.__path__:
- return
- modules = {}
- # 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
- # But we must also collect Python extension modules - although
- # we cannot separate normal dlls from Python extensions.
- suffixes = []
- for triple in imp.get_suffixes():
- suffixes.append(triple[0])
- for dir in m.__path__:
- try:
- names = os.listdir(dir)
- except os.error:
- self.msg(2, "can't list directory", dir)
- continue
- for name in names:
- mod = None
- for suff in suffixes:
- n = len(suff)
- if name[-n:] == suff:
- mod = name[:-n]
- break
- if mod and mod != "__init__":
- modules[mod] = mod
- return modules.keys()
-
- def import_module(self, partname, fqname, parent):
- self.msgin(3, "import_module", partname, fqname, parent)
- try:
- m = self.modules[fqname]
- except KeyError:
- pass
- else:
- self.msgout(3, "import_module ->", m)
- return m
- if self.badmodules.has_key(fqname):
- self.msgout(3, "import_module -> None")
- return None
- if parent and parent.__path__ is None:
- self.msgout(3, "import_module -> None")
- return None
- try:
- fp, pathname, stuff = self.find_module(partname,
- parent and parent.__path__, parent)
- except ImportError:
- self.msgout(3, "import_module ->", None)
- return None
- try:
- m = self.load_module(fqname, fp, pathname, stuff)
- finally:
- if fp: fp.close()
- if parent:
- setattr(parent, partname, m)
- self.msgout(3, "import_module ->", m)
- return m
-
- def load_module(self, fqname, fp, pathname, (suffix, mode, type)):
- self.msgin(2, "load_module", fqname, fp and "fp", pathname)
- if type == imp.PKG_DIRECTORY:
- m = self.load_package(fqname, pathname)
- self.msgout(2, "load_module ->", m)
- return m
- if type == imp.PY_SOURCE:
- co = compile(fp.read()+'\n', pathname, 'exec')
- elif type == imp.PY_COMPILED:
- if fp.read(4) != imp.get_magic():
- self.msgout(2, "raise ImportError: Bad magic number", pathname)
- raise ImportError, "Bad magic number in %s" % pathname
- fp.read(4)
- co = marshal.load(fp)
- else:
- co = None
- m = self.add_module(fqname)
- m.__file__ = pathname
- if co:
- if self.replace_paths:
- co = self.replace_paths_in_code(co)
- m.__code__ = co
- self.scan_code(co, m)
- self.msgout(2, "load_module ->", m)
- return m
-
- def _add_badmodule(self, name, caller):
- if name not in self.badmodules:
- self.badmodules[name] = {}
- self.badmodules[name][caller.__name__] = 1
-
- def _safe_import_hook(self, name, caller, fromlist):
- # wrapper for self.import_hook() that won't raise ImportError
- if name in self.badmodules:
- self._add_badmodule(name, caller)
- return
- try:
- self.import_hook(name, caller)
- except ImportError, msg:
- self.msg(2, "ImportError:", str(msg))
- self._add_badmodule(name, caller)
- else:
- if fromlist:
- for sub in fromlist:
- if sub in self.badmodules:
- self._add_badmodule(sub, caller)
- continue
- try:
- self.import_hook(name, caller, [sub])
- except ImportError, msg:
- self.msg(2, "ImportError:", str(msg))
- fullname = name + "." + sub
- self._add_badmodule(fullname, caller)
-
- def scan_code(self, co, m):
- code = co.co_code
- n = len(code)
- i = 0
- fromlist = None
- while i < n:
- c = code[i]
- i = i+1
- op = ord(c)
- if op >= dis.HAVE_ARGUMENT:
- oparg = ord(code[i]) + ord(code[i+1])*256
- i = i+2
- if op == LOAD_CONST:
- # An IMPORT_NAME is always preceded by a LOAD_CONST, it's
- # a tuple of "from" names, or None for a regular import.
- # The tuple may contain "*" for "from <mod> import *"
- fromlist = co.co_consts[oparg]
- elif op == IMPORT_NAME:
- assert fromlist is None or type(fromlist) is tuple
- name = co.co_names[oparg]
- have_star = 0
- if fromlist is not None:
- if "*" in fromlist:
- have_star = 1
- fromlist = [f for f in fromlist if f != "*"]
- self._safe_import_hook(name, m, fromlist)
- if have_star:
- # We've encountered an "import *". If it is a Python module,
- # the code has already been parsed and we can suck out the
- # global names.
- mm = None
- if m.__path__:
- # At this point we don't know whether 'name' is a
- # submodule of 'm' or a global module. Let's just try
- # the full name first.
- mm = self.modules.get(m.__name__ + "." + name)
- if mm is None:
- mm = self.modules.get(name)
- if mm is not None:
- m.globalnames.update(mm.globalnames)
- m.starimports.update(mm.starimports)
- if mm.__code__ is None:
- m.starimports[name] = 1
- else:
- m.starimports[name] = 1
- elif op in STORE_OPS:
- # keep track of all global names that are assigned to
- name = co.co_names[oparg]
- m.globalnames[name] = 1
- for c in co.co_consts:
- if isinstance(c, type(co)):
- self.scan_code(c, m)
-
- def load_package(self, fqname, pathname):
- self.msgin(2, "load_package", fqname, pathname)
- newname = replacePackageMap.get(fqname)
- if newname:
- fqname = newname
- m = self.add_module(fqname)
- m.__file__ = pathname
- m.__path__ = [pathname]
-
- # As per comment at top of file, simulate runtime __path__ additions.
- m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
-
- fp, buf, stuff = self.find_module("__init__", m.__path__)
- self.load_module(fqname, fp, buf, stuff)
- self.msgout(2, "load_package ->", m)
- return m
-
- def add_module(self, fqname):
- if self.modules.has_key(fqname):
- return self.modules[fqname]
- self.modules[fqname] = m = Module(fqname)
- return m
-
- def find_module(self, name, path, parent=None):
- if parent is not None:
- # assert path is not None
- fullname = parent.__name__+'.'+name
- else:
- fullname = name
- if fullname in self.excludes:
- self.msgout(3, "find_module -> Excluded", fullname)
- raise ImportError, name
-
- if path is None:
- if name in sys.builtin_module_names:
- return (None, None, ("", "", imp.C_BUILTIN))
-
- path = self.path
- return imp.find_module(name, path)
-
- def report(self):
- """Print a report to stdout, listing the found modules with their
- paths, as well as modules that are missing, or seem to be missing.
- """
- print
- print " %-25s %s" % ("Name", "File")
- print " %-25s %s" % ("----", "----")
- # Print modules found
- keys = self.modules.keys()
- keys.sort()
- for key in keys:
- m = self.modules[key]
- if m.__path__:
- print "P",
- else:
- print "m",
- print "%-25s" % key, m.__file__ or ""
-
- # Print missing modules
- missing, maybe = self.any_missing_maybe()
- if missing:
- print
- print "Missing modules:"
- for name in missing:
- mods = self.badmodules[name].keys()
- mods.sort()
- print "?", name, "imported from", ', '.join(mods)
- # Print modules that may be missing, but then again, maybe not...
- if maybe:
- print
- print "Submodules thay appear to be missing, but could also be",
- print "global names in the parent package:"
- for name in maybe:
- mods = self.badmodules[name].keys()
- mods.sort()
- print "?", name, "imported from", ', '.join(mods)
-
- def any_missing(self):
- """Return a list of modules that appear to be missing. Use
- any_missing_maybe() if you want to know which modules are
- certain to be missing, and which *may* be missing.
- """
- missing, maybe = self.any_missing_maybe()
- return missing + maybe
-
- def any_missing_maybe(self):
- """Return two lists, one with modules that are certainly missing
- and one with modules that *may* be missing. The latter names could
- either be submodules *or* just global names in the package.
-
- The reason it can't always be determined is that it's impossible to
- tell which names are imported when "from module import *" is done
- with an extension module, short of actually importing it.
- """
- missing = []
- maybe = []
- for name in self.badmodules:
- if name in self.excludes:
- continue
- i = name.rfind(".")
- if i < 0:
- missing.append(name)
- continue
- subname = name[i+1:]
- pkgname = name[:i]
- pkg = self.modules.get(pkgname)
- if pkg is not None:
- if pkgname in self.badmodules[name]:
- # The package tried to import this module itself and
- # failed. It's definitely missing.
- missing.append(name)
- elif subname in pkg.globalnames:
- # It's a global in the package: definitely not missing.
- pass
- elif pkg.starimports:
- # It could be missing, but the package did an "import *"
- # from a non-Python module, so we simply can't be sure.
- maybe.append(name)
- else:
- # It's not a global in the package, the package didn't
- # do funny star imports, it's very likely to be missing.
- # The symbol could be inserted into the package from the
- # outside, but since that's not good style we simply list
- # it missing.
- missing.append(name)
- else:
- missing.append(name)
- missing.sort()
- maybe.sort()
- return missing, maybe
-
- def replace_paths_in_code(self, co):
- new_filename = original_filename = os.path.normpath(co.co_filename)
- for f, r in self.replace_paths:
- if original_filename.startswith(f):
- new_filename = r + original_filename[len(f):]
- break
-
- if self.debug and original_filename not in self.processed_paths:
- if new_filename != original_filename:
- self.msgout(2, "co_filename %r changed to %r" \
- % (original_filename,new_filename,))
- else:
- self.msgout(2, "co_filename %r remains unchanged" \
- % (original_filename,))
- self.processed_paths.append(original_filename)
-
- consts = list(co.co_consts)
- for i in range(len(consts)):
- if isinstance(consts[i], type(co)):
- consts[i] = self.replace_paths_in_code(consts[i])
-
- return new.code(co.co_argcount, co.co_nlocals, co.co_stacksize,
- co.co_flags, co.co_code, tuple(consts), co.co_names,
- co.co_varnames, new_filename, co.co_name,
- co.co_firstlineno, co.co_lnotab,
- co.co_freevars, co.co_cellvars)
-
-
-def test():
- # Parse command line
- import getopt
- try:
- opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
- except getopt.error, msg:
- print msg
- return
-
- # Process options
- debug = 1
- domods = 0
- addpath = []
- exclude = []
- for o, a in opts:
- if o == '-d':
- debug = debug + 1
- if o == '-m':
- domods = 1
- if o == '-p':
- addpath = addpath + a.split(os.pathsep)
- if o == '-q':
- debug = 0
- if o == '-x':
- exclude.append(a)
-
- # Provide default arguments
- if not args:
- script = "hello.py"
- else:
- script = args[0]
-
- # Set the path based on sys.path and the script directory
- path = sys.path[:]
- path[0] = os.path.dirname(script)
- path = addpath + path
- if debug > 1:
- print "path:"
- for item in path:
- print " ", repr(item)
-
- # Create the module finder and turn its crank
- mf = ModuleFinder(path, debug, exclude)
- for arg in args[1:]:
- if arg == '-m':
- domods = 1
- continue
- if domods:
- if arg[-2:] == '.*':
- mf.import_hook(arg[:-2], None, ["*"])
- else:
- mf.import_hook(arg)
- else:
- mf.load_file(arg)
- mf.run_script(script)
- mf.report()
- return mf # for -i debugging
-
-
-if __name__ == '__main__':
- try:
- mf = test()
- except KeyboardInterrupt:
- print "\n[interrupt]"
diff --git a/sys/lib/python/msilib/__init__.py b/sys/lib/python/msilib/__init__.py
deleted file mode 100644
index 4be82b033..000000000
--- a/sys/lib/python/msilib/__init__.py
+++ /dev/null
@@ -1,463 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-# Copyright (C) 2005 Martin v. Löwis
-# Licensed to PSF under a Contributor Agreement.
-from _msi import *
-import sets, os, string, re
-
-Win64=0
-
-# Partially taken from Wine
-datasizemask= 0x00ff
-type_valid= 0x0100
-type_localizable= 0x0200
-
-typemask= 0x0c00
-type_long= 0x0000
-type_short= 0x0400
-type_string= 0x0c00
-type_binary= 0x0800
-
-type_nullable= 0x1000
-type_key= 0x2000
-# XXX temporary, localizable?
-knownbits = datasizemask | type_valid | type_localizable | \
- typemask | type_nullable | type_key
-
-class Table:
- def __init__(self, name):
- self.name = name
- self.fields = []
-
- def add_field(self, index, name, type):
- self.fields.append((index,name,type))
-
- def sql(self):
- fields = []
- keys = []
- self.fields.sort()
- fields = [None]*len(self.fields)
- for index, name, type in self.fields:
- index -= 1
- unk = type & ~knownbits
- if unk:
- print "%s.%s unknown bits %x" % (self.name, name, unk)
- size = type & datasizemask
- dtype = type & typemask
- if dtype == type_string:
- if size:
- tname="CHAR(%d)" % size
- else:
- tname="CHAR"
- elif dtype == type_short:
- assert size==2
- tname = "SHORT"
- elif dtype == type_long:
- assert size==4
- tname="LONG"
- elif dtype == type_binary:
- assert size==0
- tname="OBJECT"
- else:
- tname="unknown"
- print "%s.%sunknown integer type %d" % (self.name, name, size)
- if type & type_nullable:
- flags = ""
- else:
- flags = " NOT NULL"
- if type & type_localizable:
- flags += " LOCALIZABLE"
- fields[index] = "`%s` %s%s" % (name, tname, flags)
- if type & type_key:
- keys.append("`%s`" % name)
- fields = ", ".join(fields)
- keys = ", ".join(keys)
- return "CREATE TABLE %s (%s PRIMARY KEY %s)" % (self.name, fields, keys)
-
- def create(self, db):
- v = db.OpenView(self.sql())
- v.Execute(None)
- v.Close()
-
-class _Unspecified:pass
-def change_sequence(seq, action, seqno=_Unspecified, cond = _Unspecified):
- "Change the sequence number of an action in a sequence list"
- for i in range(len(seq)):
- if seq[i][0] == action:
- if cond is _Unspecified:
- cond = seq[i][1]
- if seqno is _Unspecified:
- seqno = seq[i][2]
- seq[i] = (action, cond, seqno)
- return
- raise ValueError, "Action not found in sequence"
-
-def add_data(db, table, values):
- v = db.OpenView("SELECT * FROM `%s`" % table)
- count = v.GetColumnInfo(MSICOLINFO_NAMES).GetFieldCount()
- r = CreateRecord(count)
- for value in values:
- assert len(value) == count, value
- for i in range(count):
- field = value[i]
- if isinstance(field, (int, long)):
- r.SetInteger(i+1,field)
- elif isinstance(field, basestring):
- r.SetString(i+1,field)
- elif field is None:
- pass
- elif isinstance(field, Binary):
- r.SetStream(i+1, field.name)
- else:
- raise TypeError, "Unsupported type %s" % field.__class__.__name__
- try:
- v.Modify(MSIMODIFY_INSERT, r)
- except Exception, e:
- raise MSIError("Could not insert "+repr(values)+" into "+table)
-
- r.ClearData()
- v.Close()
-
-
-def add_stream(db, name, path):
- v = db.OpenView("INSERT INTO _Streams (Name, Data) VALUES ('%s', ?)" % name)
- r = CreateRecord(1)
- r.SetStream(1, path)
- v.Execute(r)
- v.Close()
-
-def init_database(name, schema,
- ProductName, ProductCode, ProductVersion,
- Manufacturer):
- try:
- os.unlink(name)
- except OSError:
- pass
- ProductCode = ProductCode.upper()
- # Create the database
- db = OpenDatabase(name, MSIDBOPEN_CREATE)
- # Create the tables
- for t in schema.tables:
- t.create(db)
- # Fill the validation table
- add_data(db, "_Validation", schema._Validation_records)
- # Initialize the summary information, allowing atmost 20 properties
- si = db.GetSummaryInformation(20)
- si.SetProperty(PID_TITLE, "Installation Database")
- si.SetProperty(PID_SUBJECT, ProductName)
- si.SetProperty(PID_AUTHOR, Manufacturer)
- if Win64:
- si.SetProperty(PID_TEMPLATE, "Intel64;1033")
- else:
- si.SetProperty(PID_TEMPLATE, "Intel;1033")
- si.SetProperty(PID_REVNUMBER, gen_uuid())
- si.SetProperty(PID_WORDCOUNT, 2) # long file names, compressed, original media
- si.SetProperty(PID_PAGECOUNT, 200)
- si.SetProperty(PID_APPNAME, "Python MSI Library")
- # XXX more properties
- si.Persist()
- add_data(db, "Property", [
- ("ProductName", ProductName),
- ("ProductCode", ProductCode),
- ("ProductVersion", ProductVersion),
- ("Manufacturer", Manufacturer),
- ("ProductLanguage", "1033")])
- db.Commit()
- return db
-
-def add_tables(db, module):
- for table in module.tables:
- add_data(db, table, getattr(module, table))
-
-def make_id(str):
- #str = str.replace(".", "_") # colons are allowed
- str = str.replace(" ", "_")
- str = str.replace("-", "_")
- if str[0] in string.digits:
- str = "_"+str
- assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", str), "FILE"+str
- return str
-
-def gen_uuid():
- return "{"+UuidCreate().upper()+"}"
-
-class CAB:
- def __init__(self, name):
- self.name = name
- self.files = []
- self.filenames = sets.Set()
- self.index = 0
-
- def gen_id(self, file):
- logical = _logical = make_id(file)
- pos = 1
- while logical in self.filenames:
- logical = "%s.%d" % (_logical, pos)
- pos += 1
- self.filenames.add(logical)
- return logical
-
- def append(self, full, file, logical):
- if os.path.isdir(full):
- return
- if not logical:
- logical = self.gen_id(file)
- self.index += 1
- self.files.append((full, logical))
- return self.index, logical
-
- def commit(self, db):
- from tempfile import mktemp
- filename = mktemp()
- FCICreate(filename, self.files)
- add_data(db, "Media",
- [(1, self.index, None, "#"+self.name, None, None)])
- add_stream(db, self.name, filename)
- os.unlink(filename)
- db.Commit()
-
-_directories = sets.Set()
-class Directory:
- def __init__(self, db, cab, basedir, physical, _logical, default, componentflags=None):
- """Create a new directory in the Directory table. There is a current component
- at each point in time for the directory, which is either explicitly created
- through start_component, or implicitly when files are added for the first
- time. Files are added into the current component, and into the cab file.
- To create a directory, a base directory object needs to be specified (can be
- None), the path to the physical directory, and a logical directory name.
- Default specifies the DefaultDir slot in the directory table. componentflags
- specifies the default flags that new components get."""
- index = 1
- _logical = make_id(_logical)
- logical = _logical
- while logical in _directories:
- logical = "%s%d" % (_logical, index)
- index += 1
- _directories.add(logical)
- self.db = db
- self.cab = cab
- self.basedir = basedir
- self.physical = physical
- self.logical = logical
- self.component = None
- self.short_names = sets.Set()
- self.ids = sets.Set()
- self.keyfiles = {}
- self.componentflags = componentflags
- if basedir:
- self.absolute = os.path.join(basedir.absolute, physical)
- blogical = basedir.logical
- else:
- self.absolute = physical
- blogical = None
- add_data(db, "Directory", [(logical, blogical, default)])
-
- def start_component(self, component = None, feature = None, flags = None, keyfile = None, uuid=None):
- """Add an entry to the Component table, and make this component the current for this
- directory. If no component name is given, the directory name is used. If no feature
- is given, the current feature is used. If no flags are given, the directory's default
- flags are used. If no keyfile is given, the KeyPath is left null in the Component
- table."""
- if flags is None:
- flags = self.componentflags
- if uuid is None:
- uuid = gen_uuid()
- else:
- uuid = uuid.upper()
- if component is None:
- component = self.logical
- self.component = component
- if Win64:
- flags |= 256
- if keyfile:
- keyid = self.cab.gen_id(self.absolute, keyfile)
- self.keyfiles[keyfile] = keyid
- else:
- keyid = None
- add_data(self.db, "Component",
- [(component, uuid, self.logical, flags, None, keyid)])
- if feature is None:
- feature = current_feature
- add_data(self.db, "FeatureComponents",
- [(feature.id, component)])
-
- def make_short(self, file):
- parts = file.split(".")
- if len(parts)>1:
- suffix = parts[-1].upper()
- else:
- suffix = None
- prefix = parts[0].upper()
- if len(prefix) <= 8 and (not suffix or len(suffix)<=3):
- if suffix:
- file = prefix+"."+suffix
- else:
- file = prefix
- assert file not in self.short_names
- else:
- prefix = prefix[:6]
- if suffix:
- suffix = suffix[:3]
- pos = 1
- while 1:
- if suffix:
- file = "%s~%d.%s" % (prefix, pos, suffix)
- else:
- file = "%s~%d" % (prefix, pos)
- if file not in self.short_names: break
- pos += 1
- assert pos < 10000
- if pos in (10, 100, 1000):
- prefix = prefix[:-1]
- self.short_names.add(file)
- assert not re.search(r'[\?|><:/*"+,;=\[\]]', file) # restrictions on short names
- return file
-
- def add_file(self, file, src=None, version=None, language=None):
- """Add a file to the current component of the directory, starting a new one
- one if there is no current component. By default, the file name in the source
- and the file table will be identical. If the src file is specified, it is
- interpreted relative to the current directory. Optionally, a version and a
- language can be specified for the entry in the File table."""
- if not self.component:
- self.start_component(self.logical, current_feature, 0)
- if not src:
- # Allow relative paths for file if src is not specified
- src = file
- file = os.path.basename(file)
- absolute = os.path.join(self.absolute, src)
- assert not re.search(r'[\?|><:/*]"', file) # restrictions on long names
- if self.keyfiles.has_key(file):
- logical = self.keyfiles[file]
- else:
- logical = None
- sequence, logical = self.cab.append(absolute, file, logical)
- assert logical not in self.ids
- self.ids.add(logical)
- short = self.make_short(file)
- full = "%s|%s" % (short, file)
- filesize = os.stat(absolute).st_size
- # constants.msidbFileAttributesVital
- # Compressed omitted, since it is the database default
- # could add r/o, system, hidden
- attributes = 512
- add_data(self.db, "File",
- [(logical, self.component, full, filesize, version,
- language, attributes, sequence)])
- #if not version:
- # # Add hash if the file is not versioned
- # filehash = FileHash(absolute, 0)
- # add_data(self.db, "MsiFileHash",
- # [(logical, 0, filehash.IntegerData(1),
- # filehash.IntegerData(2), filehash.IntegerData(3),
- # filehash.IntegerData(4))])
- # Automatically remove .pyc/.pyo files on uninstall (2)
- # XXX: adding so many RemoveFile entries makes installer unbelievably
- # slow. So instead, we have to use wildcard remove entries
- if file.endswith(".py"):
- add_data(self.db, "RemoveFile",
- [(logical+"c", self.component, "%sC|%sc" % (short, file),
- self.logical, 2),
- (logical+"o", self.component, "%sO|%so" % (short, file),
- self.logical, 2)])
- return logical
-
- def glob(self, pattern, exclude = None):
- """Add a list of files to the current component as specified in the
- glob pattern. Individual files can be excluded in the exclude list."""
- files = glob.glob1(self.absolute, pattern)
- for f in files:
- if exclude and f in exclude: continue
- self.add_file(f)
- return files
-
- def remove_pyc(self):
- "Remove .pyc/.pyo files on uninstall"
- add_data(self.db, "RemoveFile",
- [(self.component+"c", self.component, "*.pyc", self.logical, 2),
- (self.component+"o", self.component, "*.pyo", self.logical, 2)])
-
-class Binary:
- def __init__(self, fname):
- self.name = fname
- def __repr__(self):
- return 'msilib.Binary(os.path.join(dirname,"%s"))' % self.name
-
-class Feature:
- def __init__(self, db, id, title, desc, display, level = 1,
- parent=None, directory = None, attributes=0):
- self.id = id
- if parent:
- parent = parent.id
- add_data(db, "Feature",
- [(id, parent, title, desc, display,
- level, directory, attributes)])
- def set_current(self):
- global current_feature
- current_feature = self
-
-class Control:
- def __init__(self, dlg, name):
- self.dlg = dlg
- self.name = name
-
- def event(self, event, argument, condition = "1", ordering = None):
- add_data(self.dlg.db, "ControlEvent",
- [(self.dlg.name, self.name, event, argument,
- condition, ordering)])
-
- def mapping(self, event, attribute):
- add_data(self.dlg.db, "EventMapping",
- [(self.dlg.name, self.name, event, attribute)])
-
- def condition(self, action, condition):
- add_data(self.dlg.db, "ControlCondition",
- [(self.dlg.name, self.name, action, condition)])
-
-class RadioButtonGroup(Control):
- def __init__(self, dlg, name, property):
- self.dlg = dlg
- self.name = name
- self.property = property
- self.index = 1
-
- def add(self, name, x, y, w, h, text, value = None):
- if value is None:
- value = name
- add_data(self.dlg.db, "RadioButton",
- [(self.property, self.index, value,
- x, y, w, h, text, None)])
- self.index += 1
-
-class Dialog:
- def __init__(self, db, name, x, y, w, h, attr, title, first, default, cancel):
- self.db = db
- self.name = name
- self.x, self.y, self.w, self.h = x,y,w,h
- add_data(db, "Dialog", [(name, x,y,w,h,attr,title,first,default,cancel)])
-
- def control(self, name, type, x, y, w, h, attr, prop, text, next, help):
- add_data(self.db, "Control",
- [(self.name, name, type, x, y, w, h, attr, prop, text, next, help)])
- return Control(self, name)
-
- def text(self, name, x, y, w, h, attr, text):
- return self.control(name, "Text", x, y, w, h, attr, None,
- text, None, None)
-
- def bitmap(self, name, x, y, w, h, text):
- return self.control(name, "Bitmap", x, y, w, h, 1, None, text, None, None)
-
- def line(self, name, x, y, w, h):
- return self.control(name, "Line", x, y, w, h, 1, None, None, None, None)
-
- def pushbutton(self, name, x, y, w, h, attr, text, next):
- return self.control(name, "PushButton", x, y, w, h, attr, None, text, next, None)
-
- def radiogroup(self, name, x, y, w, h, attr, prop, text, next):
- add_data(self.db, "Control",
- [(self.name, name, "RadioButtonGroup",
- x, y, w, h, attr, prop, text, next, None)])
- return RadioButtonGroup(self, name, prop)
-
- def checkbox(self, name, x, y, w, h, attr, prop, text, next):
- return self.control(name, "CheckBox", x, y, w, h, attr, prop, text, next, None)
diff --git a/sys/lib/python/msilib/schema.py b/sys/lib/python/msilib/schema.py
deleted file mode 100644
index 51dd17774..000000000
--- a/sys/lib/python/msilib/schema.py
+++ /dev/null
@@ -1,1007 +0,0 @@
-from . import Table
-
-_Validation = Table('_Validation')
-_Validation.add_field(1,'Table',11552)
-_Validation.add_field(2,'Column',11552)
-_Validation.add_field(3,'Nullable',3332)
-_Validation.add_field(4,'MinValue',4356)
-_Validation.add_field(5,'MaxValue',4356)
-_Validation.add_field(6,'KeyTable',7679)
-_Validation.add_field(7,'KeyColumn',5378)
-_Validation.add_field(8,'Category',7456)
-_Validation.add_field(9,'Set',7679)
-_Validation.add_field(10,'Description',7679)
-
-ActionText = Table('ActionText')
-ActionText.add_field(1,'Action',11592)
-ActionText.add_field(2,'Description',7936)
-ActionText.add_field(3,'Template',7936)
-
-AdminExecuteSequence = Table('AdminExecuteSequence')
-AdminExecuteSequence.add_field(1,'Action',11592)
-AdminExecuteSequence.add_field(2,'Condition',7679)
-AdminExecuteSequence.add_field(3,'Sequence',5378)
-
-Condition = Table('Condition')
-Condition.add_field(1,'Feature_',11558)
-Condition.add_field(2,'Level',9474)
-Condition.add_field(3,'Condition',7679)
-
-AdminUISequence = Table('AdminUISequence')
-AdminUISequence.add_field(1,'Action',11592)
-AdminUISequence.add_field(2,'Condition',7679)
-AdminUISequence.add_field(3,'Sequence',5378)
-
-AdvtExecuteSequence = Table('AdvtExecuteSequence')
-AdvtExecuteSequence.add_field(1,'Action',11592)
-AdvtExecuteSequence.add_field(2,'Condition',7679)
-AdvtExecuteSequence.add_field(3,'Sequence',5378)
-
-AdvtUISequence = Table('AdvtUISequence')
-AdvtUISequence.add_field(1,'Action',11592)
-AdvtUISequence.add_field(2,'Condition',7679)
-AdvtUISequence.add_field(3,'Sequence',5378)
-
-AppId = Table('AppId')
-AppId.add_field(1,'AppId',11558)
-AppId.add_field(2,'RemoteServerName',7679)
-AppId.add_field(3,'LocalService',7679)
-AppId.add_field(4,'ServiceParameters',7679)
-AppId.add_field(5,'DllSurrogate',7679)
-AppId.add_field(6,'ActivateAtStorage',5378)
-AppId.add_field(7,'RunAsInteractiveUser',5378)
-
-AppSearch = Table('AppSearch')
-AppSearch.add_field(1,'Property',11592)
-AppSearch.add_field(2,'Signature_',11592)
-
-Property = Table('Property')
-Property.add_field(1,'Property',11592)
-Property.add_field(2,'Value',3840)
-
-BBControl = Table('BBControl')
-BBControl.add_field(1,'Billboard_',11570)
-BBControl.add_field(2,'BBControl',11570)
-BBControl.add_field(3,'Type',3378)
-BBControl.add_field(4,'X',1282)
-BBControl.add_field(5,'Y',1282)
-BBControl.add_field(6,'Width',1282)
-BBControl.add_field(7,'Height',1282)
-BBControl.add_field(8,'Attributes',4356)
-BBControl.add_field(9,'Text',7986)
-
-Billboard = Table('Billboard')
-Billboard.add_field(1,'Billboard',11570)
-Billboard.add_field(2,'Feature_',3366)
-Billboard.add_field(3,'Action',7474)
-Billboard.add_field(4,'Ordering',5378)
-
-Feature = Table('Feature')
-Feature.add_field(1,'Feature',11558)
-Feature.add_field(2,'Feature_Parent',7462)
-Feature.add_field(3,'Title',8000)
-Feature.add_field(4,'Description',8191)
-Feature.add_field(5,'Display',5378)
-Feature.add_field(6,'Level',1282)
-Feature.add_field(7,'Directory_',7496)
-Feature.add_field(8,'Attributes',1282)
-
-Binary = Table('Binary')
-Binary.add_field(1,'Name',11592)
-Binary.add_field(2,'Data',2304)
-
-BindImage = Table('BindImage')
-BindImage.add_field(1,'File_',11592)
-BindImage.add_field(2,'Path',7679)
-
-File = Table('File')
-File.add_field(1,'File',11592)
-File.add_field(2,'Component_',3400)
-File.add_field(3,'FileName',4095)
-File.add_field(4,'FileSize',260)
-File.add_field(5,'Version',7496)
-File.add_field(6,'Language',7444)
-File.add_field(7,'Attributes',5378)
-File.add_field(8,'Sequence',1282)
-
-CCPSearch = Table('CCPSearch')
-CCPSearch.add_field(1,'Signature_',11592)
-
-CheckBox = Table('CheckBox')
-CheckBox.add_field(1,'Property',11592)
-CheckBox.add_field(2,'Value',7488)
-
-Class = Table('Class')
-Class.add_field(1,'CLSID',11558)
-Class.add_field(2,'Context',11552)
-Class.add_field(3,'Component_',11592)
-Class.add_field(4,'ProgId_Default',7679)
-Class.add_field(5,'Description',8191)
-Class.add_field(6,'AppId_',7462)
-Class.add_field(7,'FileTypeMask',7679)
-Class.add_field(8,'Icon_',7496)
-Class.add_field(9,'IconIndex',5378)
-Class.add_field(10,'DefInprocHandler',7456)
-Class.add_field(11,'Argument',7679)
-Class.add_field(12,'Feature_',3366)
-Class.add_field(13,'Attributes',5378)
-
-Component = Table('Component')
-Component.add_field(1,'Component',11592)
-Component.add_field(2,'ComponentId',7462)
-Component.add_field(3,'Directory_',3400)
-Component.add_field(4,'Attributes',1282)
-Component.add_field(5,'Condition',7679)
-Component.add_field(6,'KeyPath',7496)
-
-Icon = Table('Icon')
-Icon.add_field(1,'Name',11592)
-Icon.add_field(2,'Data',2304)
-
-ProgId = Table('ProgId')
-ProgId.add_field(1,'ProgId',11775)
-ProgId.add_field(2,'ProgId_Parent',7679)
-ProgId.add_field(3,'Class_',7462)
-ProgId.add_field(4,'Description',8191)
-ProgId.add_field(5,'Icon_',7496)
-ProgId.add_field(6,'IconIndex',5378)
-
-ComboBox = Table('ComboBox')
-ComboBox.add_field(1,'Property',11592)
-ComboBox.add_field(2,'Order',9474)
-ComboBox.add_field(3,'Value',3392)
-ComboBox.add_field(4,'Text',8000)
-
-CompLocator = Table('CompLocator')
-CompLocator.add_field(1,'Signature_',11592)
-CompLocator.add_field(2,'ComponentId',3366)
-CompLocator.add_field(3,'Type',5378)
-
-Complus = Table('Complus')
-Complus.add_field(1,'Component_',11592)
-Complus.add_field(2,'ExpType',13570)
-
-Directory = Table('Directory')
-Directory.add_field(1,'Directory',11592)
-Directory.add_field(2,'Directory_Parent',7496)
-Directory.add_field(3,'DefaultDir',4095)
-
-Control = Table('Control')
-Control.add_field(1,'Dialog_',11592)
-Control.add_field(2,'Control',11570)
-Control.add_field(3,'Type',3348)
-Control.add_field(4,'X',1282)
-Control.add_field(5,'Y',1282)
-Control.add_field(6,'Width',1282)
-Control.add_field(7,'Height',1282)
-Control.add_field(8,'Attributes',4356)
-Control.add_field(9,'Property',7474)
-Control.add_field(10,'Text',7936)
-Control.add_field(11,'Control_Next',7474)
-Control.add_field(12,'Help',7986)
-
-Dialog = Table('Dialog')
-Dialog.add_field(1,'Dialog',11592)
-Dialog.add_field(2,'HCentering',1282)
-Dialog.add_field(3,'VCentering',1282)
-Dialog.add_field(4,'Width',1282)
-Dialog.add_field(5,'Height',1282)
-Dialog.add_field(6,'Attributes',4356)
-Dialog.add_field(7,'Title',8064)
-Dialog.add_field(8,'Control_First',3378)
-Dialog.add_field(9,'Control_Default',7474)
-Dialog.add_field(10,'Control_Cancel',7474)
-
-ControlCondition = Table('ControlCondition')
-ControlCondition.add_field(1,'Dialog_',11592)
-ControlCondition.add_field(2,'Control_',11570)
-ControlCondition.add_field(3,'Action',11570)
-ControlCondition.add_field(4,'Condition',11775)
-
-ControlEvent = Table('ControlEvent')
-ControlEvent.add_field(1,'Dialog_',11592)
-ControlEvent.add_field(2,'Control_',11570)
-ControlEvent.add_field(3,'Event',11570)
-ControlEvent.add_field(4,'Argument',11775)
-ControlEvent.add_field(5,'Condition',15871)
-ControlEvent.add_field(6,'Ordering',5378)
-
-CreateFolder = Table('CreateFolder')
-CreateFolder.add_field(1,'Directory_',11592)
-CreateFolder.add_field(2,'Component_',11592)
-
-CustomAction = Table('CustomAction')
-CustomAction.add_field(1,'Action',11592)
-CustomAction.add_field(2,'Type',1282)
-CustomAction.add_field(3,'Source',7496)
-CustomAction.add_field(4,'Target',7679)
-
-DrLocator = Table('DrLocator')
-DrLocator.add_field(1,'Signature_',11592)
-DrLocator.add_field(2,'Parent',15688)
-DrLocator.add_field(3,'Path',15871)
-DrLocator.add_field(4,'Depth',5378)
-
-DuplicateFile = Table('DuplicateFile')
-DuplicateFile.add_field(1,'FileKey',11592)
-DuplicateFile.add_field(2,'Component_',3400)
-DuplicateFile.add_field(3,'File_',3400)
-DuplicateFile.add_field(4,'DestName',8191)
-DuplicateFile.add_field(5,'DestFolder',7496)
-
-Environment = Table('Environment')
-Environment.add_field(1,'Environment',11592)
-Environment.add_field(2,'Name',4095)
-Environment.add_field(3,'Value',8191)
-Environment.add_field(4,'Component_',3400)
-
-Error = Table('Error')
-Error.add_field(1,'Error',9474)
-Error.add_field(2,'Message',7936)
-
-EventMapping = Table('EventMapping')
-EventMapping.add_field(1,'Dialog_',11592)
-EventMapping.add_field(2,'Control_',11570)
-EventMapping.add_field(3,'Event',11570)
-EventMapping.add_field(4,'Attribute',3378)
-
-Extension = Table('Extension')
-Extension.add_field(1,'Extension',11775)
-Extension.add_field(2,'Component_',11592)
-Extension.add_field(3,'ProgId_',7679)
-Extension.add_field(4,'MIME_',7488)
-Extension.add_field(5,'Feature_',3366)
-
-MIME = Table('MIME')
-MIME.add_field(1,'ContentType',11584)
-MIME.add_field(2,'Extension_',3583)
-MIME.add_field(3,'CLSID',7462)
-
-FeatureComponents = Table('FeatureComponents')
-FeatureComponents.add_field(1,'Feature_',11558)
-FeatureComponents.add_field(2,'Component_',11592)
-
-FileSFPCatalog = Table('FileSFPCatalog')
-FileSFPCatalog.add_field(1,'File_',11592)
-FileSFPCatalog.add_field(2,'SFPCatalog_',11775)
-
-SFPCatalog = Table('SFPCatalog')
-SFPCatalog.add_field(1,'SFPCatalog',11775)
-SFPCatalog.add_field(2,'Catalog',2304)
-SFPCatalog.add_field(3,'Dependency',7424)
-
-Font = Table('Font')
-Font.add_field(1,'File_',11592)
-Font.add_field(2,'FontTitle',7552)
-
-IniFile = Table('IniFile')
-IniFile.add_field(1,'IniFile',11592)
-IniFile.add_field(2,'FileName',4095)
-IniFile.add_field(3,'DirProperty',7496)
-IniFile.add_field(4,'Section',3936)
-IniFile.add_field(5,'Key',3968)
-IniFile.add_field(6,'Value',4095)
-IniFile.add_field(7,'Action',1282)
-IniFile.add_field(8,'Component_',3400)
-
-IniLocator = Table('IniLocator')
-IniLocator.add_field(1,'Signature_',11592)
-IniLocator.add_field(2,'FileName',3583)
-IniLocator.add_field(3,'Section',3424)
-IniLocator.add_field(4,'Key',3456)
-IniLocator.add_field(5,'Field',5378)
-IniLocator.add_field(6,'Type',5378)
-
-InstallExecuteSequence = Table('InstallExecuteSequence')
-InstallExecuteSequence.add_field(1,'Action',11592)
-InstallExecuteSequence.add_field(2,'Condition',7679)
-InstallExecuteSequence.add_field(3,'Sequence',5378)
-
-InstallUISequence = Table('InstallUISequence')
-InstallUISequence.add_field(1,'Action',11592)
-InstallUISequence.add_field(2,'Condition',7679)
-InstallUISequence.add_field(3,'Sequence',5378)
-
-IsolatedComponent = Table('IsolatedComponent')
-IsolatedComponent.add_field(1,'Component_Shared',11592)
-IsolatedComponent.add_field(2,'Component_Application',11592)
-
-LaunchCondition = Table('LaunchCondition')
-LaunchCondition.add_field(1,'Condition',11775)
-LaunchCondition.add_field(2,'Description',4095)
-
-ListBox = Table('ListBox')
-ListBox.add_field(1,'Property',11592)
-ListBox.add_field(2,'Order',9474)
-ListBox.add_field(3,'Value',3392)
-ListBox.add_field(4,'Text',8000)
-
-ListView = Table('ListView')
-ListView.add_field(1,'Property',11592)
-ListView.add_field(2,'Order',9474)
-ListView.add_field(3,'Value',3392)
-ListView.add_field(4,'Text',8000)
-ListView.add_field(5,'Binary_',7496)
-
-LockPermissions = Table('LockPermissions')
-LockPermissions.add_field(1,'LockObject',11592)
-LockPermissions.add_field(2,'Table',11552)
-LockPermissions.add_field(3,'Domain',15871)
-LockPermissions.add_field(4,'User',11775)
-LockPermissions.add_field(5,'Permission',4356)
-
-Media = Table('Media')
-Media.add_field(1,'DiskId',9474)
-Media.add_field(2,'LastSequence',1282)
-Media.add_field(3,'DiskPrompt',8000)
-Media.add_field(4,'Cabinet',7679)
-Media.add_field(5,'VolumeLabel',7456)
-Media.add_field(6,'Source',7496)
-
-MoveFile = Table('MoveFile')
-MoveFile.add_field(1,'FileKey',11592)
-MoveFile.add_field(2,'Component_',3400)
-MoveFile.add_field(3,'SourceName',8191)
-MoveFile.add_field(4,'DestName',8191)
-MoveFile.add_field(5,'SourceFolder',7496)
-MoveFile.add_field(6,'DestFolder',3400)
-MoveFile.add_field(7,'Options',1282)
-
-MsiAssembly = Table('MsiAssembly')
-MsiAssembly.add_field(1,'Component_',11592)
-MsiAssembly.add_field(2,'Feature_',3366)
-MsiAssembly.add_field(3,'File_Manifest',7496)
-MsiAssembly.add_field(4,'File_Application',7496)
-MsiAssembly.add_field(5,'Attributes',5378)
-
-MsiAssemblyName = Table('MsiAssemblyName')
-MsiAssemblyName.add_field(1,'Component_',11592)
-MsiAssemblyName.add_field(2,'Name',11775)
-MsiAssemblyName.add_field(3,'Value',3583)
-
-MsiDigitalCertificate = Table('MsiDigitalCertificate')
-MsiDigitalCertificate.add_field(1,'DigitalCertificate',11592)
-MsiDigitalCertificate.add_field(2,'CertData',2304)
-
-MsiDigitalSignature = Table('MsiDigitalSignature')
-MsiDigitalSignature.add_field(1,'Table',11552)
-MsiDigitalSignature.add_field(2,'SignObject',11592)
-MsiDigitalSignature.add_field(3,'DigitalCertificate_',3400)
-MsiDigitalSignature.add_field(4,'Hash',6400)
-
-MsiFileHash = Table('MsiFileHash')
-MsiFileHash.add_field(1,'File_',11592)
-MsiFileHash.add_field(2,'Options',1282)
-MsiFileHash.add_field(3,'HashPart1',260)
-MsiFileHash.add_field(4,'HashPart2',260)
-MsiFileHash.add_field(5,'HashPart3',260)
-MsiFileHash.add_field(6,'HashPart4',260)
-
-MsiPatchHeaders = Table('MsiPatchHeaders')
-MsiPatchHeaders.add_field(1,'StreamRef',11558)
-MsiPatchHeaders.add_field(2,'Header',2304)
-
-ODBCAttribute = Table('ODBCAttribute')
-ODBCAttribute.add_field(1,'Driver_',11592)
-ODBCAttribute.add_field(2,'Attribute',11560)
-ODBCAttribute.add_field(3,'Value',8191)
-
-ODBCDriver = Table('ODBCDriver')
-ODBCDriver.add_field(1,'Driver',11592)
-ODBCDriver.add_field(2,'Component_',3400)
-ODBCDriver.add_field(3,'Description',3583)
-ODBCDriver.add_field(4,'File_',3400)
-ODBCDriver.add_field(5,'File_Setup',7496)
-
-ODBCDataSource = Table('ODBCDataSource')
-ODBCDataSource.add_field(1,'DataSource',11592)
-ODBCDataSource.add_field(2,'Component_',3400)
-ODBCDataSource.add_field(3,'Description',3583)
-ODBCDataSource.add_field(4,'DriverDescription',3583)
-ODBCDataSource.add_field(5,'Registration',1282)
-
-ODBCSourceAttribute = Table('ODBCSourceAttribute')
-ODBCSourceAttribute.add_field(1,'DataSource_',11592)
-ODBCSourceAttribute.add_field(2,'Attribute',11552)
-ODBCSourceAttribute.add_field(3,'Value',8191)
-
-ODBCTranslator = Table('ODBCTranslator')
-ODBCTranslator.add_field(1,'Translator',11592)
-ODBCTranslator.add_field(2,'Component_',3400)
-ODBCTranslator.add_field(3,'Description',3583)
-ODBCTranslator.add_field(4,'File_',3400)
-ODBCTranslator.add_field(5,'File_Setup',7496)
-
-Patch = Table('Patch')
-Patch.add_field(1,'File_',11592)
-Patch.add_field(2,'Sequence',9474)
-Patch.add_field(3,'PatchSize',260)
-Patch.add_field(4,'Attributes',1282)
-Patch.add_field(5,'Header',6400)
-Patch.add_field(6,'StreamRef_',7462)
-
-PatchPackage = Table('PatchPackage')
-PatchPackage.add_field(1,'PatchId',11558)
-PatchPackage.add_field(2,'Media_',1282)
-
-PublishComponent = Table('PublishComponent')
-PublishComponent.add_field(1,'ComponentId',11558)
-PublishComponent.add_field(2,'Qualifier',11775)
-PublishComponent.add_field(3,'Component_',11592)
-PublishComponent.add_field(4,'AppData',8191)
-PublishComponent.add_field(5,'Feature_',3366)
-
-RadioButton = Table('RadioButton')
-RadioButton.add_field(1,'Property',11592)
-RadioButton.add_field(2,'Order',9474)
-RadioButton.add_field(3,'Value',3392)
-RadioButton.add_field(4,'X',1282)
-RadioButton.add_field(5,'Y',1282)
-RadioButton.add_field(6,'Width',1282)
-RadioButton.add_field(7,'Height',1282)
-RadioButton.add_field(8,'Text',8000)
-RadioButton.add_field(9,'Help',7986)
-
-Registry = Table('Registry')
-Registry.add_field(1,'Registry',11592)
-Registry.add_field(2,'Root',1282)
-Registry.add_field(3,'Key',4095)
-Registry.add_field(4,'Name',8191)
-Registry.add_field(5,'Value',7936)
-Registry.add_field(6,'Component_',3400)
-
-RegLocator = Table('RegLocator')
-RegLocator.add_field(1,'Signature_',11592)
-RegLocator.add_field(2,'Root',1282)
-RegLocator.add_field(3,'Key',3583)
-RegLocator.add_field(4,'Name',7679)
-RegLocator.add_field(5,'Type',5378)
-
-RemoveFile = Table('RemoveFile')
-RemoveFile.add_field(1,'FileKey',11592)
-RemoveFile.add_field(2,'Component_',3400)
-RemoveFile.add_field(3,'FileName',8191)
-RemoveFile.add_field(4,'DirProperty',3400)
-RemoveFile.add_field(5,'InstallMode',1282)
-
-RemoveIniFile = Table('RemoveIniFile')
-RemoveIniFile.add_field(1,'RemoveIniFile',11592)
-RemoveIniFile.add_field(2,'FileName',4095)
-RemoveIniFile.add_field(3,'DirProperty',7496)
-RemoveIniFile.add_field(4,'Section',3936)
-RemoveIniFile.add_field(5,'Key',3968)
-RemoveIniFile.add_field(6,'Value',8191)
-RemoveIniFile.add_field(7,'Action',1282)
-RemoveIniFile.add_field(8,'Component_',3400)
-
-RemoveRegistry = Table('RemoveRegistry')
-RemoveRegistry.add_field(1,'RemoveRegistry',11592)
-RemoveRegistry.add_field(2,'Root',1282)
-RemoveRegistry.add_field(3,'Key',4095)
-RemoveRegistry.add_field(4,'Name',8191)
-RemoveRegistry.add_field(5,'Component_',3400)
-
-ReserveCost = Table('ReserveCost')
-ReserveCost.add_field(1,'ReserveKey',11592)
-ReserveCost.add_field(2,'Component_',3400)
-ReserveCost.add_field(3,'ReserveFolder',7496)
-ReserveCost.add_field(4,'ReserveLocal',260)
-ReserveCost.add_field(5,'ReserveSource',260)
-
-SelfReg = Table('SelfReg')
-SelfReg.add_field(1,'File_',11592)
-SelfReg.add_field(2,'Cost',5378)
-
-ServiceControl = Table('ServiceControl')
-ServiceControl.add_field(1,'ServiceControl',11592)
-ServiceControl.add_field(2,'Name',4095)
-ServiceControl.add_field(3,'Event',1282)
-ServiceControl.add_field(4,'Arguments',8191)
-ServiceControl.add_field(5,'Wait',5378)
-ServiceControl.add_field(6,'Component_',3400)
-
-ServiceInstall = Table('ServiceInstall')
-ServiceInstall.add_field(1,'ServiceInstall',11592)
-ServiceInstall.add_field(2,'Name',3583)
-ServiceInstall.add_field(3,'DisplayName',8191)
-ServiceInstall.add_field(4,'ServiceType',260)
-ServiceInstall.add_field(5,'StartType',260)
-ServiceInstall.add_field(6,'ErrorControl',260)
-ServiceInstall.add_field(7,'LoadOrderGroup',7679)
-ServiceInstall.add_field(8,'Dependencies',7679)
-ServiceInstall.add_field(9,'StartName',7679)
-ServiceInstall.add_field(10,'Password',7679)
-ServiceInstall.add_field(11,'Arguments',7679)
-ServiceInstall.add_field(12,'Component_',3400)
-ServiceInstall.add_field(13,'Description',8191)
-
-Shortcut = Table('Shortcut')
-Shortcut.add_field(1,'Shortcut',11592)
-Shortcut.add_field(2,'Directory_',3400)
-Shortcut.add_field(3,'Name',3968)
-Shortcut.add_field(4,'Component_',3400)
-Shortcut.add_field(5,'Target',3400)
-Shortcut.add_field(6,'Arguments',7679)
-Shortcut.add_field(7,'Description',8191)
-Shortcut.add_field(8,'Hotkey',5378)
-Shortcut.add_field(9,'Icon_',7496)
-Shortcut.add_field(10,'IconIndex',5378)
-Shortcut.add_field(11,'ShowCmd',5378)
-Shortcut.add_field(12,'WkDir',7496)
-
-Signature = Table('Signature')
-Signature.add_field(1,'Signature',11592)
-Signature.add_field(2,'FileName',3583)
-Signature.add_field(3,'MinVersion',7444)
-Signature.add_field(4,'MaxVersion',7444)
-Signature.add_field(5,'MinSize',4356)
-Signature.add_field(6,'MaxSize',4356)
-Signature.add_field(7,'MinDate',4356)
-Signature.add_field(8,'MaxDate',4356)
-Signature.add_field(9,'Languages',7679)
-
-TextStyle = Table('TextStyle')
-TextStyle.add_field(1,'TextStyle',11592)
-TextStyle.add_field(2,'FaceName',3360)
-TextStyle.add_field(3,'Size',1282)
-TextStyle.add_field(4,'Color',4356)
-TextStyle.add_field(5,'StyleBits',5378)
-
-TypeLib = Table('TypeLib')
-TypeLib.add_field(1,'LibID',11558)
-TypeLib.add_field(2,'Language',9474)
-TypeLib.add_field(3,'Component_',11592)
-TypeLib.add_field(4,'Version',4356)
-TypeLib.add_field(5,'Description',8064)
-TypeLib.add_field(6,'Directory_',7496)
-TypeLib.add_field(7,'Feature_',3366)
-TypeLib.add_field(8,'Cost',4356)
-
-UIText = Table('UIText')
-UIText.add_field(1,'Key',11592)
-UIText.add_field(2,'Text',8191)
-
-Upgrade = Table('Upgrade')
-Upgrade.add_field(1,'UpgradeCode',11558)
-Upgrade.add_field(2,'VersionMin',15636)
-Upgrade.add_field(3,'VersionMax',15636)
-Upgrade.add_field(4,'Language',15871)
-Upgrade.add_field(5,'Attributes',8452)
-Upgrade.add_field(6,'Remove',7679)
-Upgrade.add_field(7,'ActionProperty',3400)
-
-Verb = Table('Verb')
-Verb.add_field(1,'Extension_',11775)
-Verb.add_field(2,'Verb',11552)
-Verb.add_field(3,'Sequence',5378)
-Verb.add_field(4,'Command',8191)
-Verb.add_field(5,'Argument',8191)
-
-tables=[_Validation, ActionText, AdminExecuteSequence, Condition, AdminUISequence, AdvtExecuteSequence, AdvtUISequence, AppId, AppSearch, Property, BBControl, Billboard, Feature, Binary, BindImage, File, CCPSearch, CheckBox, Class, Component, Icon, ProgId, ComboBox, CompLocator, Complus, Directory, Control, Dialog, ControlCondition, ControlEvent, CreateFolder, CustomAction, DrLocator, DuplicateFile, Environment, Error, EventMapping, Extension, MIME, FeatureComponents, FileSFPCatalog, SFPCatalog, Font, IniFile, IniLocator, InstallExecuteSequence, InstallUISequence, IsolatedComponent, LaunchCondition, ListBox, ListView, LockPermissions, Media, MoveFile, MsiAssembly, MsiAssemblyName, MsiDigitalCertificate, MsiDigitalSignature, MsiFileHash, MsiPatchHeaders, ODBCAttribute, ODBCDriver, ODBCDataSource, ODBCSourceAttribute, ODBCTranslator, Patch, PatchPackage, PublishComponent, RadioButton, Registry, RegLocator, RemoveFile, RemoveIniFile, RemoveRegistry, ReserveCost, SelfReg, ServiceControl, ServiceInstall, Shortcut, Signature, TextStyle, TypeLib, UIText, Upgrade, Verb]
-
-_Validation_records = [
-(u'_Validation',u'Table',u'N',None, None, None, None, u'Identifier',None, u'Name of table',),
-(u'_Validation',u'Column',u'N',None, None, None, None, u'Identifier',None, u'Name of column',),
-(u'_Validation',u'Description',u'Y',None, None, None, None, u'Text',None, u'Description of column',),
-(u'_Validation',u'Set',u'Y',None, None, None, None, u'Text',None, u'Set of values that are permitted',),
-(u'_Validation',u'Category',u'Y',None, None, None, None, None, u'Text;Formatted;Template;Condition;Guid;Path;Version;Language;Identifier;Binary;UpperCase;LowerCase;Filename;Paths;AnyPath;WildCardFilename;RegPath;KeyFormatted;CustomSource;Property;Cabinet;Shortcut;URL',u'String category',),
-(u'_Validation',u'KeyColumn',u'Y',1,32,None, None, None, None, u'Column to which foreign key connects',),
-(u'_Validation',u'KeyTable',u'Y',None, None, None, None, u'Identifier',None, u'For foreign key, Name of table to which data must link',),
-(u'_Validation',u'MaxValue',u'Y',-2147483647,2147483647,None, None, None, None, u'Maximum value allowed',),
-(u'_Validation',u'MinValue',u'Y',-2147483647,2147483647,None, None, None, None, u'Minimum value allowed',),
-(u'_Validation',u'Nullable',u'N',None, None, None, None, None, u'Y;N;@',u'Whether the column is nullable',),
-(u'ActionText',u'Description',u'Y',None, None, None, None, u'Text',None, u'Localized description displayed in progress dialog and log when action is executing.',),
-(u'ActionText',u'Action',u'N',None, None, None, None, u'Identifier',None, u'Name of action to be described.',),
-(u'ActionText',u'Template',u'Y',None, None, None, None, u'Template',None, u'Optional localized format template used to format action data records for display during action execution.',),
-(u'AdminExecuteSequence',u'Action',u'N',None, None, None, None, u'Identifier',None, u'Name of action to invoke, either in the engine or the handler DLL.',),
-(u'AdminExecuteSequence',u'Condition',u'Y',None, None, None, None, u'Condition',None, u'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
-(u'AdminExecuteSequence',u'Sequence',u'Y',-4,32767,None, None, None, None, u'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
-(u'Condition',u'Condition',u'Y',None, None, None, None, u'Condition',None, u'Expression evaluated to determine if Level in the Feature table is to change.',),
-(u'Condition',u'Feature_',u'N',None, None, u'Feature',1,u'Identifier',None, u'Reference to a Feature entry in Feature table.',),
-(u'Condition',u'Level',u'N',0,32767,None, None, None, None, u'New selection Level to set in Feature table if Condition evaluates to TRUE.',),
-(u'AdminUISequence',u'Action',u'N',None, None, None, None, u'Identifier',None, u'Name of action to invoke, either in the engine or the handler DLL.',),
-(u'AdminUISequence',u'Condition',u'Y',None, None, None, None, u'Condition',None, u'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
-(u'AdminUISequence',u'Sequence',u'Y',-4,32767,None, None, None, None, u'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
-(u'AdvtExecuteSequence',u'Action',u'N',None, None, None, None, u'Identifier',None, u'Name of action to invoke, either in the engine or the handler DLL.',),
-(u'AdvtExecuteSequence',u'Condition',u'Y',None, None, None, None, u'Condition',None, u'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
-(u'AdvtExecuteSequence',u'Sequence',u'Y',-4,32767,None, None, None, None, u'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
-(u'AdvtUISequence',u'Action',u'N',None, None, None, None, u'Identifier',None, u'Name of action to invoke, either in the engine or the handler DLL.',),
-(u'AdvtUISequence',u'Condition',u'Y',None, None, None, None, u'Condition',None, u'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
-(u'AdvtUISequence',u'Sequence',u'Y',-4,32767,None, None, None, None, u'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
-(u'AppId',u'AppId',u'N',None, None, None, None, u'Guid',None, None, ),
-(u'AppId',u'ActivateAtStorage',u'Y',0,1,None, None, None, None, None, ),
-(u'AppId',u'DllSurrogate',u'Y',None, None, None, None, u'Text',None, None, ),
-(u'AppId',u'LocalService',u'Y',None, None, None, None, u'Text',None, None, ),
-(u'AppId',u'RemoteServerName',u'Y',None, None, None, None, u'Formatted',None, None, ),
-(u'AppId',u'RunAsInteractiveUser',u'Y',0,1,None, None, None, None, None, ),
-(u'AppId',u'ServiceParameters',u'Y',None, None, None, None, u'Text',None, None, ),
-(u'AppSearch',u'Property',u'N',None, None, None, None, u'Identifier',None, u'The property associated with a Signature',),
-(u'AppSearch',u'Signature_',u'N',None, None, u'Signature;RegLocator;IniLocator;DrLocator;CompLocator',1,u'Identifier',None, u'The Signature_ represents a unique file signature and is also the foreign key in the Signature, RegLocator, IniLocator, CompLocator and the DrLocator tables.',),
-(u'Property',u'Property',u'N',None, None, None, None, u'Identifier',None, u'Name of property, uppercase if settable by launcher or loader.',),
-(u'Property',u'Value',u'N',None, None, None, None, u'Text',None, u'String value for property. Never null or empty.',),
-(u'BBControl',u'Type',u'N',None, None, None, None, u'Identifier',None, u'The type of the control.',),
-(u'BBControl',u'Y',u'N',0,32767,None, None, None, None, u'Vertical coordinate of the upper left corner of the bounding rectangle of the control.',),
-(u'BBControl',u'Text',u'Y',None, None, None, None, u'Text',None, u'A string used to set the initial text contained within a control (if appropriate).',),
-(u'BBControl',u'BBControl',u'N',None, None, None, None, u'Identifier',None, u'Name of the control. This name must be unique within a billboard, but can repeat on different billboard.',),
-(u'BBControl',u'Attributes',u'Y',0,2147483647,None, None, None, None, u'A 32-bit word that specifies the attribute flags to be applied to this control.',),
-(u'BBControl',u'Billboard_',u'N',None, None, u'Billboard',1,u'Identifier',None, u'External key to the Billboard table, name of the billboard.',),
-(u'BBControl',u'Height',u'N',0,32767,None, None, None, None, u'Height of the bounding rectangle of the control.',),
-(u'BBControl',u'Width',u'N',0,32767,None, None, None, None, u'Width of the bounding rectangle of the control.',),
-(u'BBControl',u'X',u'N',0,32767,None, None, None, None, u'Horizontal coordinate of the upper left corner of the bounding rectangle of the control.',),
-(u'Billboard',u'Action',u'Y',None, None, None, None, u'Identifier',None, u'The name of an action. The billboard is displayed during the progress messages received from this action.',),
-(u'Billboard',u'Billboard',u'N',None, None, None, None, u'Identifier',None, u'Name of the billboard.',),
-(u'Billboard',u'Feature_',u'N',None, None, u'Feature',1,u'Identifier',None, u'An external key to the Feature Table. The billboard is shown only if this feature is being installed.',),
-(u'Billboard',u'Ordering',u'Y',0,32767,None, None, None, None, u'A positive integer. If there is more than one billboard corresponding to an action they will be shown in the order defined by this column.',),
-(u'Feature',u'Description',u'Y',None, None, None, None, u'Text',None, u'Longer descriptive text describing a visible feature item.',),
-(u'Feature',u'Attributes',u'N',None, None, None, None, None, u'0;1;2;4;5;6;8;9;10;16;17;18;20;21;22;24;25;26;32;33;34;36;37;38;48;49;50;52;53;54',u'Feature attributes',),
-(u'Feature',u'Feature',u'N',None, None, None, None, u'Identifier',None, u'Primary key used to identify a particular feature record.',),
-(u'Feature',u'Directory_',u'Y',None, None, u'Directory',1,u'UpperCase',None, u'The name of the Directory that can be configured by the UI. A non-null value will enable the browse button.',),
-(u'Feature',u'Level',u'N',0,32767,None, None, None, None, u'The install level at which record will be initially selected. An install level of 0 will disable an item and prevent its display.',),
-(u'Feature',u'Title',u'Y',None, None, None, None, u'Text',None, u'Short text identifying a visible feature item.',),
-(u'Feature',u'Display',u'Y',0,32767,None, None, None, None, u'Numeric sort order, used to force a specific display ordering.',),
-(u'Feature',u'Feature_Parent',u'Y',None, None, u'Feature',1,u'Identifier',None, u'Optional key of a parent record in the same table. If the parent is not selected, then the record will not be installed. Null indicates a root item.',),
-(u'Binary',u'Name',u'N',None, None, None, None, u'Identifier',None, u'Unique key identifying the binary data.',),
-(u'Binary',u'Data',u'N',None, None, None, None, u'Binary',None, u'The unformatted binary data.',),
-(u'BindImage',u'File_',u'N',None, None, u'File',1,u'Identifier',None, u'The index into the File table. This must be an executable file.',),
-(u'BindImage',u'Path',u'Y',None, None, None, None, u'Paths',None, u'A list of ; delimited paths that represent the paths to be searched for the import DLLS. The list is usually a list of properties each enclosed within square brackets [] .',),
-(u'File',u'Sequence',u'N',1,32767,None, None, None, None, u'Sequence with respect to the media images; order must track cabinet order.',),
-(u'File',u'Attributes',u'Y',0,32767,None, None, None, None, u'Integer containing bit flags representing file attributes (with the decimal value of each bit position in parentheses)',),
-(u'File',u'File',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token, must match identifier in cabinet. For uncompressed files, this field is ignored.',),
-(u'File',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key referencing Component that controls the file.',),
-(u'File',u'FileName',u'N',None, None, None, None, u'Filename',None, u'File name used for installation, may be localized. This may contain a "short name|long name" pair.',),
-(u'File',u'FileSize',u'N',0,2147483647,None, None, None, None, u'Size of file in bytes (long integer).',),
-(u'File',u'Language',u'Y',None, None, None, None, u'Language',None, u'List of decimal language Ids, comma-separated if more than one.',),
-(u'File',u'Version',u'Y',None, None, u'File',1,u'Version',None, u'Version string for versioned files; Blank for unversioned files.',),
-(u'CCPSearch',u'Signature_',u'N',None, None, u'Signature;RegLocator;IniLocator;DrLocator;CompLocator',1,u'Identifier',None, u'The Signature_ represents a unique file signature and is also the foreign key in the Signature, RegLocator, IniLocator, CompLocator and the DrLocator tables.',),
-(u'CheckBox',u'Property',u'N',None, None, None, None, u'Identifier',None, u'A named property to be tied to the item.',),
-(u'CheckBox',u'Value',u'Y',None, None, None, None, u'Formatted',None, u'The value string associated with the item.',),
-(u'Class',u'Description',u'Y',None, None, None, None, u'Text',None, u'Localized description for the Class.',),
-(u'Class',u'Attributes',u'Y',None, 32767,None, None, None, None, u'Class registration attributes.',),
-(u'Class',u'Feature_',u'N',None, None, u'Feature',1,u'Identifier',None, u'Required foreign key into the Feature Table, specifying the feature to validate or install in order for the CLSID factory to be operational.',),
-(u'Class',u'AppId_',u'Y',None, None, u'AppId',1,u'Guid',None, u'Optional AppID containing DCOM information for associated application (string GUID).',),
-(u'Class',u'Argument',u'Y',None, None, None, None, u'Formatted',None, u'optional argument for LocalServers.',),
-(u'Class',u'CLSID',u'N',None, None, None, None, u'Guid',None, u'The CLSID of an OLE factory.',),
-(u'Class',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Required foreign key into the Component Table, specifying the component for which to return a path when called through LocateComponent.',),
-(u'Class',u'Context',u'N',None, None, None, None, u'Identifier',None, u'The numeric server context for this server. CLSCTX_xxxx',),
-(u'Class',u'DefInprocHandler',u'Y',None, None, None, None, u'Filename',u'1;2;3',u'Optional default inproc handler. Only optionally provided if Context=CLSCTX_LOCAL_SERVER. Typically "ole32.dll" or "mapi32.dll"',),
-(u'Class',u'FileTypeMask',u'Y',None, None, None, None, u'Text',None, u'Optional string containing information for the HKCRthis CLSID) key. If multiple patterns exist, they must be delimited by a semicolon, and numeric subkeys will be generated: 0,1,2...',),
-(u'Class',u'Icon_',u'Y',None, None, u'Icon',1,u'Identifier',None, u'Optional foreign key into the Icon Table, specifying the icon file associated with this CLSID. Will be written under the DefaultIcon key.',),
-(u'Class',u'IconIndex',u'Y',-32767,32767,None, None, None, None, u'Optional icon index.',),
-(u'Class',u'ProgId_Default',u'Y',None, None, u'ProgId',1,u'Text',None, u'Optional ProgId associated with this CLSID.',),
-(u'Component',u'Condition',u'Y',None, None, None, None, u'Condition',None, u"A conditional statement that will disable this component if the specified condition evaluates to the 'True' state. If a component is disabled, it will not be installed, regardless of the 'Action' state associated with the component.",),
-(u'Component',u'Attributes',u'N',None, None, None, None, None, None, u'Remote execution option, one of irsEnum',),
-(u'Component',u'Component',u'N',None, None, None, None, u'Identifier',None, u'Primary key used to identify a particular component record.',),
-(u'Component',u'ComponentId',u'Y',None, None, None, None, u'Guid',None, u'A string GUID unique to this component, version, and language.',),
-(u'Component',u'Directory_',u'N',None, None, u'Directory',1,u'Identifier',None, u'Required key of a Directory table record. This is actually a property name whose value contains the actual path, set either by the AppSearch action or with the default setting obtained from the Directory table.',),
-(u'Component',u'KeyPath',u'Y',None, None, u'File;Registry;ODBCDataSource',1,u'Identifier',None, u'Either the primary key into the File table, Registry table, or ODBCDataSource table. This extract path is stored when the component is installed, and is used to detect the presence of the component and to return the path to it.',),
-(u'Icon',u'Name',u'N',None, None, None, None, u'Identifier',None, u'Primary key. Name of the icon file.',),
-(u'Icon',u'Data',u'N',None, None, None, None, u'Binary',None, u'Binary stream. The binary icon data in PE (.DLL or .EXE) or icon (.ICO) format.',),
-(u'ProgId',u'Description',u'Y',None, None, None, None, u'Text',None, u'Localized description for the Program identifier.',),
-(u'ProgId',u'Icon_',u'Y',None, None, u'Icon',1,u'Identifier',None, u'Optional foreign key into the Icon Table, specifying the icon file associated with this ProgId. Will be written under the DefaultIcon key.',),
-(u'ProgId',u'IconIndex',u'Y',-32767,32767,None, None, None, None, u'Optional icon index.',),
-(u'ProgId',u'ProgId',u'N',None, None, None, None, u'Text',None, u'The Program Identifier. Primary key.',),
-(u'ProgId',u'Class_',u'Y',None, None, u'Class',1,u'Guid',None, u'The CLSID of an OLE factory corresponding to the ProgId.',),
-(u'ProgId',u'ProgId_Parent',u'Y',None, None, u'ProgId',1,u'Text',None, u'The Parent Program Identifier. If specified, the ProgId column becomes a version independent prog id.',),
-(u'ComboBox',u'Text',u'Y',None, None, None, None, u'Formatted',None, u'The visible text to be assigned to the item. Optional. If this entry or the entire column is missing, the text is the same as the value.',),
-(u'ComboBox',u'Property',u'N',None, None, None, None, u'Identifier',None, u'A named property to be tied to this item. All the items tied to the same property become part of the same combobox.',),
-(u'ComboBox',u'Value',u'N',None, None, None, None, u'Formatted',None, u'The value string associated with this item. Selecting the line will set the associated property to this value.',),
-(u'ComboBox',u'Order',u'N',1,32767,None, None, None, None, u'A positive integer used to determine the ordering of the items within one list.\tThe integers do not have to be consecutive.',),
-(u'CompLocator',u'Type',u'Y',0,1,None, None, None, None, u'A boolean value that determines if the registry value is a filename or a directory location.',),
-(u'CompLocator',u'Signature_',u'N',None, None, None, None, u'Identifier',None, u'The table key. The Signature_ represents a unique file signature and is also the foreign key in the Signature table.',),
-(u'CompLocator',u'ComponentId',u'N',None, None, None, None, u'Guid',None, u'A string GUID unique to this component, version, and language.',),
-(u'Complus',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key referencing Component that controls the ComPlus component.',),
-(u'Complus',u'ExpType',u'Y',0,32767,None, None, None, None, u'ComPlus component attributes.',),
-(u'Directory',u'Directory',u'N',None, None, None, None, u'Identifier',None, u'Unique identifier for directory entry, primary key. If a property by this name is defined, it contains the full path to the directory.',),
-(u'Directory',u'DefaultDir',u'N',None, None, None, None, u'DefaultDir',None, u"The default sub-path under parent's path.",),
-(u'Directory',u'Directory_Parent',u'Y',None, None, u'Directory',1,u'Identifier',None, u'Reference to the entry in this table specifying the default parent directory. A record parented to itself or with a Null parent represents a root of the install tree.',),
-(u'Control',u'Type',u'N',None, None, None, None, u'Identifier',None, u'The type of the control.',),
-(u'Control',u'Y',u'N',0,32767,None, None, None, None, u'Vertical coordinate of the upper left corner of the bounding rectangle of the control.',),
-(u'Control',u'Text',u'Y',None, None, None, None, u'Formatted',None, u'A string used to set the initial text contained within a control (if appropriate).',),
-(u'Control',u'Property',u'Y',None, None, None, None, u'Identifier',None, u'The name of a defined property to be linked to this control. ',),
-(u'Control',u'Attributes',u'Y',0,2147483647,None, None, None, None, u'A 32-bit word that specifies the attribute flags to be applied to this control.',),
-(u'Control',u'Height',u'N',0,32767,None, None, None, None, u'Height of the bounding rectangle of the control.',),
-(u'Control',u'Width',u'N',0,32767,None, None, None, None, u'Width of the bounding rectangle of the control.',),
-(u'Control',u'X',u'N',0,32767,None, None, None, None, u'Horizontal coordinate of the upper left corner of the bounding rectangle of the control.',),
-(u'Control',u'Control',u'N',None, None, None, None, u'Identifier',None, u'Name of the control. This name must be unique within a dialog, but can repeat on different dialogs. ',),
-(u'Control',u'Control_Next',u'Y',None, None, u'Control',2,u'Identifier',None, u'The name of an other control on the same dialog. This link defines the tab order of the controls. The links have to form one or more cycles!',),
-(u'Control',u'Dialog_',u'N',None, None, u'Dialog',1,u'Identifier',None, u'External key to the Dialog table, name of the dialog.',),
-(u'Control',u'Help',u'Y',None, None, None, None, u'Text',None, u'The help strings used with the button. The text is optional. ',),
-(u'Dialog',u'Attributes',u'Y',0,2147483647,None, None, None, None, u'A 32-bit word that specifies the attribute flags to be applied to this dialog.',),
-(u'Dialog',u'Height',u'N',0,32767,None, None, None, None, u'Height of the bounding rectangle of the dialog.',),
-(u'Dialog',u'Width',u'N',0,32767,None, None, None, None, u'Width of the bounding rectangle of the dialog.',),
-(u'Dialog',u'Dialog',u'N',None, None, None, None, u'Identifier',None, u'Name of the dialog.',),
-(u'Dialog',u'Control_Cancel',u'Y',None, None, u'Control',2,u'Identifier',None, u'Defines the cancel control. Hitting escape or clicking on the close icon on the dialog is equivalent to pushing this button.',),
-(u'Dialog',u'Control_Default',u'Y',None, None, u'Control',2,u'Identifier',None, u'Defines the default control. Hitting return is equivalent to pushing this button.',),
-(u'Dialog',u'Control_First',u'N',None, None, u'Control',2,u'Identifier',None, u'Defines the control that has the focus when the dialog is created.',),
-(u'Dialog',u'HCentering',u'N',0,100,None, None, None, None, u'Horizontal position of the dialog on a 0-100 scale. 0 means left end, 100 means right end of the screen, 50 center.',),
-(u'Dialog',u'Title',u'Y',None, None, None, None, u'Formatted',None, u"A text string specifying the title to be displayed in the title bar of the dialog's window.",),
-(u'Dialog',u'VCentering',u'N',0,100,None, None, None, None, u'Vertical position of the dialog on a 0-100 scale. 0 means top end, 100 means bottom end of the screen, 50 center.',),
-(u'ControlCondition',u'Action',u'N',None, None, None, None, None, u'Default;Disable;Enable;Hide;Show',u'The desired action to be taken on the specified control.',),
-(u'ControlCondition',u'Condition',u'N',None, None, None, None, u'Condition',None, u'A standard conditional statement that specifies under which conditions the action should be triggered.',),
-(u'ControlCondition',u'Dialog_',u'N',None, None, u'Dialog',1,u'Identifier',None, u'A foreign key to the Dialog table, name of the dialog.',),
-(u'ControlCondition',u'Control_',u'N',None, None, u'Control',2,u'Identifier',None, u'A foreign key to the Control table, name of the control.',),
-(u'ControlEvent',u'Condition',u'Y',None, None, None, None, u'Condition',None, u'A standard conditional statement that specifies under which conditions an event should be triggered.',),
-(u'ControlEvent',u'Ordering',u'Y',0,2147483647,None, None, None, None, u'An integer used to order several events tied to the same control. Can be left blank.',),
-(u'ControlEvent',u'Argument',u'N',None, None, None, None, u'Formatted',None, u'A value to be used as a modifier when triggering a particular event.',),
-(u'ControlEvent',u'Dialog_',u'N',None, None, u'Dialog',1,u'Identifier',None, u'A foreign key to the Dialog table, name of the dialog.',),
-(u'ControlEvent',u'Control_',u'N',None, None, u'Control',2,u'Identifier',None, u'A foreign key to the Control table, name of the control',),
-(u'ControlEvent',u'Event',u'N',None, None, None, None, u'Formatted',None, u'An identifier that specifies the type of the event that should take place when the user interacts with control specified by the first two entries.',),
-(u'CreateFolder',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into the Component table.',),
-(u'CreateFolder',u'Directory_',u'N',None, None, u'Directory',1,u'Identifier',None, u'Primary key, could be foreign key into the Directory table.',),
-(u'CustomAction',u'Type',u'N',1,16383,None, None, None, None, u'The numeric custom action type, consisting of source location, code type, entry, option flags.',),
-(u'CustomAction',u'Action',u'N',None, None, None, None, u'Identifier',None, u'Primary key, name of action, normally appears in sequence table unless private use.',),
-(u'CustomAction',u'Source',u'Y',None, None, None, None, u'CustomSource',None, u'The table reference of the source of the code.',),
-(u'CustomAction',u'Target',u'Y',None, None, None, None, u'Formatted',None, u'Excecution parameter, depends on the type of custom action',),
-(u'DrLocator',u'Signature_',u'N',None, None, None, None, u'Identifier',None, u'The Signature_ represents a unique file signature and is also the foreign key in the Signature table.',),
-(u'DrLocator',u'Path',u'Y',None, None, None, None, u'AnyPath',None, u'The path on the user system. This is a either a subpath below the value of the Parent or a full path. The path may contain properties enclosed within [ ] that will be expanded.',),
-(u'DrLocator',u'Depth',u'Y',0,32767,None, None, None, None, u'The depth below the path to which the Signature_ is recursively searched. If absent, the depth is assumed to be 0.',),
-(u'DrLocator',u'Parent',u'Y',None, None, None, None, u'Identifier',None, u'The parent file signature. It is also a foreign key in the Signature table. If null and the Path column does not expand to a full path, then all the fixed drives of the user system are searched using the Path.',),
-(u'DuplicateFile',u'File_',u'N',None, None, u'File',1,u'Identifier',None, u'Foreign key referencing the source file to be duplicated.',),
-(u'DuplicateFile',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key referencing Component that controls the duplicate file.',),
-(u'DuplicateFile',u'DestFolder',u'Y',None, None, None, None, u'Identifier',None, u'Name of a property whose value is assumed to resolve to the full pathname to a destination folder.',),
-(u'DuplicateFile',u'DestName',u'Y',None, None, None, None, u'Filename',None, u'Filename to be given to the duplicate file.',),
-(u'DuplicateFile',u'FileKey',u'N',None, None, None, None, u'Identifier',None, u'Primary key used to identify a particular file entry',),
-(u'Environment',u'Name',u'N',None, None, None, None, u'Text',None, u'The name of the environmental value.',),
-(u'Environment',u'Value',u'Y',None, None, None, None, u'Formatted',None, u'The value to set in the environmental settings.',),
-(u'Environment',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into the Component table referencing component that controls the installing of the environmental value.',),
-(u'Environment',u'Environment',u'N',None, None, None, None, u'Identifier',None, u'Unique identifier for the environmental variable setting',),
-(u'Error',u'Error',u'N',0,32767,None, None, None, None, u'Integer error number, obtained from header file IError(...) macros.',),
-(u'Error',u'Message',u'Y',None, None, None, None, u'Template',None, u'Error formatting template, obtained from user ed. or localizers.',),
-(u'EventMapping',u'Dialog_',u'N',None, None, u'Dialog',1,u'Identifier',None, u'A foreign key to the Dialog table, name of the Dialog.',),
-(u'EventMapping',u'Control_',u'N',None, None, u'Control',2,u'Identifier',None, u'A foreign key to the Control table, name of the control.',),
-(u'EventMapping',u'Event',u'N',None, None, None, None, u'Identifier',None, u'An identifier that specifies the type of the event that the control subscribes to.',),
-(u'EventMapping',u'Attribute',u'N',None, None, None, None, u'Identifier',None, u'The name of the control attribute, that is set when this event is received.',),
-(u'Extension',u'Feature_',u'N',None, None, u'Feature',1,u'Identifier',None, u'Required foreign key into the Feature Table, specifying the feature to validate or install in order for the CLSID factory to be operational.',),
-(u'Extension',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Required foreign key into the Component Table, specifying the component for which to return a path when called through LocateComponent.',),
-(u'Extension',u'Extension',u'N',None, None, None, None, u'Text',None, u'The extension associated with the table row.',),
-(u'Extension',u'MIME_',u'Y',None, None, u'MIME',1,u'Text',None, u'Optional Context identifier, typically "type/format" associated with the extension',),
-(u'Extension',u'ProgId_',u'Y',None, None, u'ProgId',1,u'Text',None, u'Optional ProgId associated with this extension.',),
-(u'MIME',u'CLSID',u'Y',None, None, None, None, u'Guid',None, u'Optional associated CLSID.',),
-(u'MIME',u'ContentType',u'N',None, None, None, None, u'Text',None, u'Primary key. Context identifier, typically "type/format".',),
-(u'MIME',u'Extension_',u'N',None, None, u'Extension',1,u'Text',None, u'Optional associated extension (without dot)',),
-(u'FeatureComponents',u'Feature_',u'N',None, None, u'Feature',1,u'Identifier',None, u'Foreign key into Feature table.',),
-(u'FeatureComponents',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into Component table.',),
-(u'FileSFPCatalog',u'File_',u'N',None, None, u'File',1,u'Identifier',None, u'File associated with the catalog',),
-(u'FileSFPCatalog',u'SFPCatalog_',u'N',None, None, u'SFPCatalog',1,u'Filename',None, u'Catalog associated with the file',),
-(u'SFPCatalog',u'SFPCatalog',u'N',None, None, None, None, u'Filename',None, u'File name for the catalog.',),
-(u'SFPCatalog',u'Catalog',u'N',None, None, None, None, u'Binary',None, u'SFP Catalog',),
-(u'SFPCatalog',u'Dependency',u'Y',None, None, None, None, u'Formatted',None, u'Parent catalog - only used by SFP',),
-(u'Font',u'File_',u'N',None, None, u'File',1,u'Identifier',None, u'Primary key, foreign key into File table referencing font file.',),
-(u'Font',u'FontTitle',u'Y',None, None, None, None, u'Text',None, u'Font name.',),
-(u'IniFile',u'Action',u'N',None, None, None, None, None, u'0;1;3',u'The type of modification to be made, one of iifEnum',),
-(u'IniFile',u'Value',u'N',None, None, None, None, u'Formatted',None, u'The value to be written.',),
-(u'IniFile',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into the Component table referencing component that controls the installing of the .INI value.',),
-(u'IniFile',u'FileName',u'N',None, None, None, None, u'Filename',None, u'The .INI file name in which to write the information',),
-(u'IniFile',u'IniFile',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token.',),
-(u'IniFile',u'DirProperty',u'Y',None, None, None, None, u'Identifier',None, u'Foreign key into the Directory table denoting the directory where the .INI file is.',),
-(u'IniFile',u'Key',u'N',None, None, None, None, u'Formatted',None, u'The .INI file key below Section.',),
-(u'IniFile',u'Section',u'N',None, None, None, None, u'Formatted',None, u'The .INI file Section.',),
-(u'IniLocator',u'Type',u'Y',0,2,None, None, None, None, u'An integer value that determines if the .INI value read is a filename or a directory location or to be used as is w/o interpretation.',),
-(u'IniLocator',u'Signature_',u'N',None, None, None, None, u'Identifier',None, u'The table key. The Signature_ represents a unique file signature and is also the foreign key in the Signature table.',),
-(u'IniLocator',u'FileName',u'N',None, None, None, None, u'Filename',None, u'The .INI file name.',),
-(u'IniLocator',u'Key',u'N',None, None, None, None, u'Text',None, u'Key value (followed by an equals sign in INI file).',),
-(u'IniLocator',u'Section',u'N',None, None, None, None, u'Text',None, u'Section name within in file (within square brackets in INI file).',),
-(u'IniLocator',u'Field',u'Y',0,32767,None, None, None, None, u'The field in the .INI line. If Field is null or 0 the entire line is read.',),
-(u'InstallExecuteSequence',u'Action',u'N',None, None, None, None, u'Identifier',None, u'Name of action to invoke, either in the engine or the handler DLL.',),
-(u'InstallExecuteSequence',u'Condition',u'Y',None, None, None, None, u'Condition',None, u'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
-(u'InstallExecuteSequence',u'Sequence',u'Y',-4,32767,None, None, None, None, u'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
-(u'InstallUISequence',u'Action',u'N',None, None, None, None, u'Identifier',None, u'Name of action to invoke, either in the engine or the handler DLL.',),
-(u'InstallUISequence',u'Condition',u'Y',None, None, None, None, u'Condition',None, u'Optional expression which skips the action if evaluates to expFalse.If the expression syntax is invalid, the engine will terminate, returning iesBadActionData.',),
-(u'InstallUISequence',u'Sequence',u'Y',-4,32767,None, None, None, None, u'Number that determines the sort order in which the actions are to be executed. Leave blank to suppress action.',),
-(u'IsolatedComponent',u'Component_Application',u'N',None, None, u'Component',1,u'Identifier',None, u'Key to Component table item for application',),
-(u'IsolatedComponent',u'Component_Shared',u'N',None, None, u'Component',1,u'Identifier',None, u'Key to Component table item to be isolated',),
-(u'LaunchCondition',u'Description',u'N',None, None, None, None, u'Formatted',None, u'Localizable text to display when condition fails and install must abort.',),
-(u'LaunchCondition',u'Condition',u'N',None, None, None, None, u'Condition',None, u'Expression which must evaluate to TRUE in order for install to commence.',),
-(u'ListBox',u'Text',u'Y',None, None, None, None, u'Text',None, u'The visible text to be assigned to the item. Optional. If this entry or the entire column is missing, the text is the same as the value.',),
-(u'ListBox',u'Property',u'N',None, None, None, None, u'Identifier',None, u'A named property to be tied to this item. All the items tied to the same property become part of the same listbox.',),
-(u'ListBox',u'Value',u'N',None, None, None, None, u'Formatted',None, u'The value string associated with this item. Selecting the line will set the associated property to this value.',),
-(u'ListBox',u'Order',u'N',1,32767,None, None, None, None, u'A positive integer used to determine the ordering of the items within one list..The integers do not have to be consecutive.',),
-(u'ListView',u'Text',u'Y',None, None, None, None, u'Text',None, u'The visible text to be assigned to the item. Optional. If this entry or the entire column is missing, the text is the same as the value.',),
-(u'ListView',u'Property',u'N',None, None, None, None, u'Identifier',None, u'A named property to be tied to this item. All the items tied to the same property become part of the same listview.',),
-(u'ListView',u'Value',u'N',None, None, None, None, u'Identifier',None, u'The value string associated with this item. Selecting the line will set the associated property to this value.',),
-(u'ListView',u'Order',u'N',1,32767,None, None, None, None, u'A positive integer used to determine the ordering of the items within one list..The integers do not have to be consecutive.',),
-(u'ListView',u'Binary_',u'Y',None, None, u'Binary',1,u'Identifier',None, u'The name of the icon to be displayed with the icon. The binary information is looked up from the Binary Table.',),
-(u'LockPermissions',u'Table',u'N',None, None, None, None, u'Identifier',u'Directory;File;Registry',u'Reference to another table name',),
-(u'LockPermissions',u'Domain',u'Y',None, None, None, None, u'Formatted',None, u'Domain name for user whose permissions are being set. (usually a property)',),
-(u'LockPermissions',u'LockObject',u'N',None, None, None, None, u'Identifier',None, u'Foreign key into Registry or File table',),
-(u'LockPermissions',u'Permission',u'Y',-2147483647,2147483647,None, None, None, None, u'Permission Access mask. Full Control = 268435456 (GENERIC_ALL = 0x10000000)',),
-(u'LockPermissions',u'User',u'N',None, None, None, None, u'Formatted',None, u'User for permissions to be set. (usually a property)',),
-(u'Media',u'Source',u'Y',None, None, None, None, u'Property',None, u'The property defining the location of the cabinet file.',),
-(u'Media',u'Cabinet',u'Y',None, None, None, None, u'Cabinet',None, u'If some or all of the files stored on the media are compressed in a cabinet, the name of that cabinet.',),
-(u'Media',u'DiskId',u'N',1,32767,None, None, None, None, u'Primary key, integer to determine sort order for table.',),
-(u'Media',u'DiskPrompt',u'Y',None, None, None, None, u'Text',None, u'Disk name: the visible text actually printed on the disk. This will be used to prompt the user when this disk needs to be inserted.',),
-(u'Media',u'LastSequence',u'N',0,32767,None, None, None, None, u'File sequence number for the last file for this media.',),
-(u'Media',u'VolumeLabel',u'Y',None, None, None, None, u'Text',None, u'The label attributed to the volume.',),
-(u'ModuleComponents',u'Component',u'N',None, None, u'Component',1,u'Identifier',None, u'Component contained in the module.',),
-(u'ModuleComponents',u'Language',u'N',None, None, u'ModuleSignature',2,None, None, u'Default language ID for module (may be changed by transform).',),
-(u'ModuleComponents',u'ModuleID',u'N',None, None, u'ModuleSignature',1,u'Identifier',None, u'Module containing the component.',),
-(u'ModuleSignature',u'Language',u'N',None, None, None, None, None, None, u'Default decimal language of module.',),
-(u'ModuleSignature',u'Version',u'N',None, None, None, None, u'Version',None, u'Version of the module.',),
-(u'ModuleSignature',u'ModuleID',u'N',None, None, None, None, u'Identifier',None, u'Module identifier (String.GUID).',),
-(u'ModuleDependency',u'ModuleID',u'N',None, None, u'ModuleSignature',1,u'Identifier',None, u'Module requiring the dependency.',),
-(u'ModuleDependency',u'ModuleLanguage',u'N',None, None, u'ModuleSignature',2,None, None, u'Language of module requiring the dependency.',),
-(u'ModuleDependency',u'RequiredID',u'N',None, None, None, None, None, None, u'String.GUID of required module.',),
-(u'ModuleDependency',u'RequiredLanguage',u'N',None, None, None, None, None, None, u'LanguageID of the required module.',),
-(u'ModuleDependency',u'RequiredVersion',u'Y',None, None, None, None, u'Version',None, u'Version of the required version.',),
-(u'ModuleExclusion',u'ModuleID',u'N',None, None, u'ModuleSignature',1,u'Identifier',None, u'String.GUID of module with exclusion requirement.',),
-(u'ModuleExclusion',u'ModuleLanguage',u'N',None, None, u'ModuleSignature',2,None, None, u'LanguageID of module with exclusion requirement.',),
-(u'ModuleExclusion',u'ExcludedID',u'N',None, None, None, None, None, None, u'String.GUID of excluded module.',),
-(u'ModuleExclusion',u'ExcludedLanguage',u'N',None, None, None, None, None, None, u'Language of excluded module.',),
-(u'ModuleExclusion',u'ExcludedMaxVersion',u'Y',None, None, None, None, u'Version',None, u'Maximum version of excluded module.',),
-(u'ModuleExclusion',u'ExcludedMinVersion',u'Y',None, None, None, None, u'Version',None, u'Minimum version of excluded module.',),
-(u'MoveFile',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'If this component is not "selected" for installation or removal, no action will be taken on the associated MoveFile entry',),
-(u'MoveFile',u'DestFolder',u'N',None, None, None, None, u'Identifier',None, u'Name of a property whose value is assumed to resolve to the full path to the destination directory',),
-(u'MoveFile',u'DestName',u'Y',None, None, None, None, u'Filename',None, u'Name to be given to the original file after it is moved or copied. If blank, the destination file will be given the same name as the source file',),
-(u'MoveFile',u'FileKey',u'N',None, None, None, None, u'Identifier',None, u'Primary key that uniquely identifies a particular MoveFile record',),
-(u'MoveFile',u'Options',u'N',0,1,None, None, None, None, u'Integer value specifying the MoveFile operating mode, one of imfoEnum',),
-(u'MoveFile',u'SourceFolder',u'Y',None, None, None, None, u'Identifier',None, u'Name of a property whose value is assumed to resolve to the full path to the source directory',),
-(u'MoveFile',u'SourceName',u'Y',None, None, None, None, u'Text',None, u"Name of the source file(s) to be moved or copied. Can contain the '*' or '?' wildcards.",),
-(u'MsiAssembly',u'Attributes',u'Y',None, None, None, None, None, None, u'Assembly attributes',),
-(u'MsiAssembly',u'Feature_',u'N',None, None, u'Feature',1,u'Identifier',None, u'Foreign key into Feature table.',),
-(u'MsiAssembly',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into Component table.',),
-(u'MsiAssembly',u'File_Application',u'Y',None, None, u'File',1,u'Identifier',None, u'Foreign key into File table, denoting the application context for private assemblies. Null for global assemblies.',),
-(u'MsiAssembly',u'File_Manifest',u'Y',None, None, u'File',1,u'Identifier',None, u'Foreign key into the File table denoting the manifest file for the assembly.',),
-(u'MsiAssemblyName',u'Name',u'N',None, None, None, None, u'Text',None, u'The name part of the name-value pairs for the assembly name.',),
-(u'MsiAssemblyName',u'Value',u'N',None, None, None, None, u'Text',None, u'The value part of the name-value pairs for the assembly name.',),
-(u'MsiAssemblyName',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into Component table.',),
-(u'MsiDigitalCertificate',u'CertData',u'N',None, None, None, None, u'Binary',None, u'A certificate context blob for a signer certificate',),
-(u'MsiDigitalCertificate',u'DigitalCertificate',u'N',None, None, None, None, u'Identifier',None, u'A unique identifier for the row',),
-(u'MsiDigitalSignature',u'Table',u'N',None, None, None, None, None, u'Media',u'Reference to another table name (only Media table is supported)',),
-(u'MsiDigitalSignature',u'DigitalCertificate_',u'N',None, None, u'MsiDigitalCertificate',1,u'Identifier',None, u'Foreign key to MsiDigitalCertificate table identifying the signer certificate',),
-(u'MsiDigitalSignature',u'Hash',u'Y',None, None, None, None, u'Binary',None, u'The encoded hash blob from the digital signature',),
-(u'MsiDigitalSignature',u'SignObject',u'N',None, None, None, None, u'Text',None, u'Foreign key to Media table',),
-(u'MsiFileHash',u'File_',u'N',None, None, u'File',1,u'Identifier',None, u'Primary key, foreign key into File table referencing file with this hash',),
-(u'MsiFileHash',u'Options',u'N',0,32767,None, None, None, None, u'Various options and attributes for this hash.',),
-(u'MsiFileHash',u'HashPart1',u'N',None, None, None, None, None, None, u'Size of file in bytes (long integer).',),
-(u'MsiFileHash',u'HashPart2',u'N',None, None, None, None, None, None, u'Size of file in bytes (long integer).',),
-(u'MsiFileHash',u'HashPart3',u'N',None, None, None, None, None, None, u'Size of file in bytes (long integer).',),
-(u'MsiFileHash',u'HashPart4',u'N',None, None, None, None, None, None, u'Size of file in bytes (long integer).',),
-(u'MsiPatchHeaders',u'StreamRef',u'N',None, None, None, None, u'Identifier',None, u'Primary key. A unique identifier for the row.',),
-(u'MsiPatchHeaders',u'Header',u'N',None, None, None, None, u'Binary',None, u'Binary stream. The patch header, used for patch validation.',),
-(u'ODBCAttribute',u'Value',u'Y',None, None, None, None, u'Text',None, u'Value for ODBC driver attribute',),
-(u'ODBCAttribute',u'Attribute',u'N',None, None, None, None, u'Text',None, u'Name of ODBC driver attribute',),
-(u'ODBCAttribute',u'Driver_',u'N',None, None, u'ODBCDriver',1,u'Identifier',None, u'Reference to ODBC driver in ODBCDriver table',),
-(u'ODBCDriver',u'Description',u'N',None, None, None, None, u'Text',None, u'Text used as registered name for driver, non-localized',),
-(u'ODBCDriver',u'File_',u'N',None, None, u'File',1,u'Identifier',None, u'Reference to key driver file',),
-(u'ODBCDriver',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Reference to associated component',),
-(u'ODBCDriver',u'Driver',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized.internal token for driver',),
-(u'ODBCDriver',u'File_Setup',u'Y',None, None, u'File',1,u'Identifier',None, u'Optional reference to key driver setup DLL',),
-(u'ODBCDataSource',u'Description',u'N',None, None, None, None, u'Text',None, u'Text used as registered name for data source',),
-(u'ODBCDataSource',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Reference to associated component',),
-(u'ODBCDataSource',u'DataSource',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized.internal token for data source',),
-(u'ODBCDataSource',u'DriverDescription',u'N',None, None, None, None, u'Text',None, u'Reference to driver description, may be existing driver',),
-(u'ODBCDataSource',u'Registration',u'N',0,1,None, None, None, None, u'Registration option: 0=machine, 1=user, others t.b.d.',),
-(u'ODBCSourceAttribute',u'Value',u'Y',None, None, None, None, u'Text',None, u'Value for ODBC data source attribute',),
-(u'ODBCSourceAttribute',u'Attribute',u'N',None, None, None, None, u'Text',None, u'Name of ODBC data source attribute',),
-(u'ODBCSourceAttribute',u'DataSource_',u'N',None, None, u'ODBCDataSource',1,u'Identifier',None, u'Reference to ODBC data source in ODBCDataSource table',),
-(u'ODBCTranslator',u'Description',u'N',None, None, None, None, u'Text',None, u'Text used as registered name for translator',),
-(u'ODBCTranslator',u'File_',u'N',None, None, u'File',1,u'Identifier',None, u'Reference to key translator file',),
-(u'ODBCTranslator',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Reference to associated component',),
-(u'ODBCTranslator',u'File_Setup',u'Y',None, None, u'File',1,u'Identifier',None, u'Optional reference to key translator setup DLL',),
-(u'ODBCTranslator',u'Translator',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized.internal token for translator',),
-(u'Patch',u'Sequence',u'N',0,32767,None, None, None, None, u'Primary key, sequence with respect to the media images; order must track cabinet order.',),
-(u'Patch',u'Attributes',u'N',0,32767,None, None, None, None, u'Integer containing bit flags representing patch attributes',),
-(u'Patch',u'File_',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token, foreign key to File table, must match identifier in cabinet.',),
-(u'Patch',u'Header',u'Y',None, None, None, None, u'Binary',None, u'Binary stream. The patch header, used for patch validation.',),
-(u'Patch',u'PatchSize',u'N',0,2147483647,None, None, None, None, u'Size of patch in bytes (long integer).',),
-(u'Patch',u'StreamRef_',u'Y',None, None, None, None, u'Identifier',None, u'Identifier. Foreign key to the StreamRef column of the MsiPatchHeaders table.',),
-(u'PatchPackage',u'Media_',u'N',0,32767,None, None, None, None, u'Foreign key to DiskId column of Media table. Indicates the disk containing the patch package.',),
-(u'PatchPackage',u'PatchId',u'N',None, None, None, None, u'Guid',None, u'A unique string GUID representing this patch.',),
-(u'PublishComponent',u'Feature_',u'N',None, None, u'Feature',1,u'Identifier',None, u'Foreign key into the Feature table.',),
-(u'PublishComponent',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into the Component table.',),
-(u'PublishComponent',u'ComponentId',u'N',None, None, None, None, u'Guid',None, u'A string GUID that represents the component id that will be requested by the alien product.',),
-(u'PublishComponent',u'AppData',u'Y',None, None, None, None, u'Text',None, u'This is localisable Application specific data that can be associated with a Qualified Component.',),
-(u'PublishComponent',u'Qualifier',u'N',None, None, None, None, u'Text',None, u'This is defined only when the ComponentId column is an Qualified Component Id. This is the Qualifier for ProvideComponentIndirect.',),
-(u'RadioButton',u'Y',u'N',0,32767,None, None, None, None, u'The vertical coordinate of the upper left corner of the bounding rectangle of the radio button.',),
-(u'RadioButton',u'Text',u'Y',None, None, None, None, u'Text',None, u'The visible title to be assigned to the radio button.',),
-(u'RadioButton',u'Property',u'N',None, None, None, None, u'Identifier',None, u'A named property to be tied to this radio button. All the buttons tied to the same property become part of the same group.',),
-(u'RadioButton',u'Height',u'N',0,32767,None, None, None, None, u'The height of the button.',),
-(u'RadioButton',u'Width',u'N',0,32767,None, None, None, None, u'The width of the button.',),
-(u'RadioButton',u'X',u'N',0,32767,None, None, None, None, u'The horizontal coordinate of the upper left corner of the bounding rectangle of the radio button.',),
-(u'RadioButton',u'Value',u'N',None, None, None, None, u'Formatted',None, u'The value string associated with this button. Selecting the button will set the associated property to this value.',),
-(u'RadioButton',u'Order',u'N',1,32767,None, None, None, None, u'A positive integer used to determine the ordering of the items within one list..The integers do not have to be consecutive.',),
-(u'RadioButton',u'Help',u'Y',None, None, None, None, u'Text',None, u'The help strings used with the button. The text is optional.',),
-(u'Registry',u'Name',u'Y',None, None, None, None, u'Formatted',None, u'The registry value name.',),
-(u'Registry',u'Value',u'Y',None, None, None, None, u'Formatted',None, u'The registry value.',),
-(u'Registry',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into the Component table referencing component that controls the installing of the registry value.',),
-(u'Registry',u'Key',u'N',None, None, None, None, u'RegPath',None, u'The key for the registry value.',),
-(u'Registry',u'Registry',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token.',),
-(u'Registry',u'Root',u'N',-1,3,None, None, None, None, u'The predefined root key for the registry value, one of rrkEnum.',),
-(u'RegLocator',u'Name',u'Y',None, None, None, None, u'Formatted',None, u'The registry value name.',),
-(u'RegLocator',u'Type',u'Y',0,18,None, None, None, None, u'An integer value that determines if the registry value is a filename or a directory location or to be used as is w/o interpretation.',),
-(u'RegLocator',u'Signature_',u'N',None, None, None, None, u'Identifier',None, u'The table key. The Signature_ represents a unique file signature and is also the foreign key in the Signature table. If the type is 0, the registry values refers a directory, and _Signature is not a foreign key.',),
-(u'RegLocator',u'Key',u'N',None, None, None, None, u'RegPath',None, u'The key for the registry value.',),
-(u'RegLocator',u'Root',u'N',0,3,None, None, None, None, u'The predefined root key for the registry value, one of rrkEnum.',),
-(u'RemoveFile',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key referencing Component that controls the file to be removed.',),
-(u'RemoveFile',u'FileKey',u'N',None, None, None, None, u'Identifier',None, u'Primary key used to identify a particular file entry',),
-(u'RemoveFile',u'FileName',u'Y',None, None, None, None, u'WildCardFilename',None, u'Name of the file to be removed.',),
-(u'RemoveFile',u'DirProperty',u'N',None, None, None, None, u'Identifier',None, u'Name of a property whose value is assumed to resolve to the full pathname to the folder of the file to be removed.',),
-(u'RemoveFile',u'InstallMode',u'N',None, None, None, None, None, u'1;2;3',u'Installation option, one of iimEnum.',),
-(u'RemoveIniFile',u'Action',u'N',None, None, None, None, None, u'2;4',u'The type of modification to be made, one of iifEnum.',),
-(u'RemoveIniFile',u'Value',u'Y',None, None, None, None, u'Formatted',None, u'The value to be deleted. The value is required when Action is iifIniRemoveTag',),
-(u'RemoveIniFile',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into the Component table referencing component that controls the deletion of the .INI value.',),
-(u'RemoveIniFile',u'FileName',u'N',None, None, None, None, u'Filename',None, u'The .INI file name in which to delete the information',),
-(u'RemoveIniFile',u'DirProperty',u'Y',None, None, None, None, u'Identifier',None, u'Foreign key into the Directory table denoting the directory where the .INI file is.',),
-(u'RemoveIniFile',u'Key',u'N',None, None, None, None, u'Formatted',None, u'The .INI file key below Section.',),
-(u'RemoveIniFile',u'Section',u'N',None, None, None, None, u'Formatted',None, u'The .INI file Section.',),
-(u'RemoveIniFile',u'RemoveIniFile',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token.',),
-(u'RemoveRegistry',u'Name',u'Y',None, None, None, None, u'Formatted',None, u'The registry value name.',),
-(u'RemoveRegistry',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into the Component table referencing component that controls the deletion of the registry value.',),
-(u'RemoveRegistry',u'Key',u'N',None, None, None, None, u'RegPath',None, u'The key for the registry value.',),
-(u'RemoveRegistry',u'Root',u'N',-1,3,None, None, None, None, u'The predefined root key for the registry value, one of rrkEnum',),
-(u'RemoveRegistry',u'RemoveRegistry',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token.',),
-(u'ReserveCost',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Reserve a specified amount of space if this component is to be installed.',),
-(u'ReserveCost',u'ReserveFolder',u'Y',None, None, None, None, u'Identifier',None, u'Name of a property whose value is assumed to resolve to the full path to the destination directory',),
-(u'ReserveCost',u'ReserveKey',u'N',None, None, None, None, u'Identifier',None, u'Primary key that uniquely identifies a particular ReserveCost record',),
-(u'ReserveCost',u'ReserveLocal',u'N',0,2147483647,None, None, None, None, u'Disk space to reserve if linked component is installed locally.',),
-(u'ReserveCost',u'ReserveSource',u'N',0,2147483647,None, None, None, None, u'Disk space to reserve if linked component is installed to run from the source location.',),
-(u'SelfReg',u'File_',u'N',None, None, u'File',1,u'Identifier',None, u'Foreign key into the File table denoting the module that needs to be registered.',),
-(u'SelfReg',u'Cost',u'Y',0,32767,None, None, None, None, u'The cost of registering the module.',),
-(u'ServiceControl',u'Name',u'N',None, None, None, None, u'Formatted',None, u'Name of a service. /, \\, comma and space are invalid',),
-(u'ServiceControl',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Required foreign key into the Component Table that controls the startup of the service',),
-(u'ServiceControl',u'Event',u'N',0,187,None, None, None, None, u'Bit field: Install: 0x1 = Start, 0x2 = Stop, 0x8 = Delete, Uninstall: 0x10 = Start, 0x20 = Stop, 0x80 = Delete',),
-(u'ServiceControl',u'ServiceControl',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token.',),
-(u'ServiceControl',u'Arguments',u'Y',None, None, None, None, u'Formatted',None, u'Arguments for the service. Separate by [~].',),
-(u'ServiceControl',u'Wait',u'Y',0,1,None, None, None, None, u'Boolean for whether to wait for the service to fully start',),
-(u'ServiceInstall',u'Name',u'N',None, None, None, None, u'Formatted',None, u'Internal Name of the Service',),
-(u'ServiceInstall',u'Description',u'Y',None, None, None, None, u'Text',None, u'Description of service.',),
-(u'ServiceInstall',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Required foreign key into the Component Table that controls the startup of the service',),
-(u'ServiceInstall',u'Arguments',u'Y',None, None, None, None, u'Formatted',None, u'Arguments to include in every start of the service, passed to WinMain',),
-(u'ServiceInstall',u'ServiceInstall',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token.',),
-(u'ServiceInstall',u'Dependencies',u'Y',None, None, None, None, u'Formatted',None, u'Other services this depends on to start. Separate by [~], and end with [~][~]',),
-(u'ServiceInstall',u'DisplayName',u'Y',None, None, None, None, u'Formatted',None, u'External Name of the Service',),
-(u'ServiceInstall',u'ErrorControl',u'N',-2147483647,2147483647,None, None, None, None, u'Severity of error if service fails to start',),
-(u'ServiceInstall',u'LoadOrderGroup',u'Y',None, None, None, None, u'Formatted',None, u'LoadOrderGroup',),
-(u'ServiceInstall',u'Password',u'Y',None, None, None, None, u'Formatted',None, u'password to run service with. (with StartName)',),
-(u'ServiceInstall',u'ServiceType',u'N',-2147483647,2147483647,None, None, None, None, u'Type of the service',),
-(u'ServiceInstall',u'StartName',u'Y',None, None, None, None, u'Formatted',None, u'User or object name to run service as',),
-(u'ServiceInstall',u'StartType',u'N',0,4,None, None, None, None, u'Type of the service',),
-(u'Shortcut',u'Name',u'N',None, None, None, None, u'Filename',None, u'The name of the shortcut to be created.',),
-(u'Shortcut',u'Description',u'Y',None, None, None, None, u'Text',None, u'The description for the shortcut.',),
-(u'Shortcut',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Foreign key into the Component table denoting the component whose selection gates the the shortcut creation/deletion.',),
-(u'Shortcut',u'Icon_',u'Y',None, None, u'Icon',1,u'Identifier',None, u'Foreign key into the File table denoting the external icon file for the shortcut.',),
-(u'Shortcut',u'IconIndex',u'Y',-32767,32767,None, None, None, None, u'The icon index for the shortcut.',),
-(u'Shortcut',u'Directory_',u'N',None, None, u'Directory',1,u'Identifier',None, u'Foreign key into the Directory table denoting the directory where the shortcut file is created.',),
-(u'Shortcut',u'Target',u'N',None, None, None, None, u'Shortcut',None, u'The shortcut target. This is usually a property that is expanded to a file or a folder that the shortcut points to.',),
-(u'Shortcut',u'Arguments',u'Y',None, None, None, None, u'Formatted',None, u'The command-line arguments for the shortcut.',),
-(u'Shortcut',u'Shortcut',u'N',None, None, None, None, u'Identifier',None, u'Primary key, non-localized token.',),
-(u'Shortcut',u'Hotkey',u'Y',0,32767,None, None, None, None, u'The hotkey for the shortcut. It has the virtual-key code for the key in the low-order byte, and the modifier flags in the high-order byte. ',),
-(u'Shortcut',u'ShowCmd',u'Y',None, None, None, None, None, u'1;3;7',u'The show command for the application window.The following values may be used.',),
-(u'Shortcut',u'WkDir',u'Y',None, None, None, None, u'Identifier',None, u'Name of property defining location of working directory.',),
-(u'Signature',u'FileName',u'N',None, None, None, None, u'Filename',None, u'The name of the file. This may contain a "short name|long name" pair.',),
-(u'Signature',u'Signature',u'N',None, None, None, None, u'Identifier',None, u'The table key. The Signature represents a unique file signature.',),
-(u'Signature',u'Languages',u'Y',None, None, None, None, u'Language',None, u'The languages supported by the file.',),
-(u'Signature',u'MaxDate',u'Y',0,2147483647,None, None, None, None, u'The maximum creation date of the file.',),
-(u'Signature',u'MaxSize',u'Y',0,2147483647,None, None, None, None, u'The maximum size of the file. ',),
-(u'Signature',u'MaxVersion',u'Y',None, None, None, None, u'Text',None, u'The maximum version of the file.',),
-(u'Signature',u'MinDate',u'Y',0,2147483647,None, None, None, None, u'The minimum creation date of the file.',),
-(u'Signature',u'MinSize',u'Y',0,2147483647,None, None, None, None, u'The minimum size of the file.',),
-(u'Signature',u'MinVersion',u'Y',None, None, None, None, u'Text',None, u'The minimum version of the file.',),
-(u'TextStyle',u'TextStyle',u'N',None, None, None, None, u'Identifier',None, u'Name of the style. The primary key of this table. This name is embedded in the texts to indicate a style change.',),
-(u'TextStyle',u'Color',u'Y',0,16777215,None, None, None, None, u'A long integer indicating the color of the string in the RGB format (Red, Green, Blue each 0-255, RGB = R + 256*G + 256^2*B).',),
-(u'TextStyle',u'FaceName',u'N',None, None, None, None, u'Text',None, u'A string indicating the name of the font used. Required. The string must be at most 31 characters long.',),
-(u'TextStyle',u'Size',u'N',0,32767,None, None, None, None, u'The size of the font used. This size is given in our units (1/12 of the system font height). Assuming that the system font is set to 12 point size, this is equivalent to the point size.',),
-(u'TextStyle',u'StyleBits',u'Y',0,15,None, None, None, None, u'A combination of style bits.',),
-(u'TypeLib',u'Description',u'Y',None, None, None, None, u'Text',None, None, ),
-(u'TypeLib',u'Feature_',u'N',None, None, u'Feature',1,u'Identifier',None, u'Required foreign key into the Feature Table, specifying the feature to validate or install in order for the type library to be operational.',),
-(u'TypeLib',u'Component_',u'N',None, None, u'Component',1,u'Identifier',None, u'Required foreign key into the Component Table, specifying the component for which to return a path when called through LocateComponent.',),
-(u'TypeLib',u'Directory_',u'Y',None, None, u'Directory',1,u'Identifier',None, u'Optional. The foreign key into the Directory table denoting the path to the help file for the type library.',),
-(u'TypeLib',u'Language',u'N',0,32767,None, None, None, None, u'The language of the library.',),
-(u'TypeLib',u'Version',u'Y',0,16777215,None, None, None, None, u'The version of the library. The minor version is in the lower 8 bits of the integer. The major version is in the next 16 bits. ',),
-(u'TypeLib',u'Cost',u'Y',0,2147483647,None, None, None, None, u'The cost associated with the registration of the typelib. This column is currently optional.',),
-(u'TypeLib',u'LibID',u'N',None, None, None, None, u'Guid',None, u'The GUID that represents the library.',),
-(u'UIText',u'Text',u'Y',None, None, None, None, u'Text',None, u'The localized version of the string.',),
-(u'UIText',u'Key',u'N',None, None, None, None, u'Identifier',None, u'A unique key that identifies the particular string.',),
-(u'Upgrade',u'Attributes',u'N',0,2147483647,None, None, None, None, u'The attributes of this product set.',),
-(u'Upgrade',u'Language',u'Y',None, None, None, None, u'Language',None, u'A comma-separated list of languages for either products in this set or products not in this set.',),
-(u'Upgrade',u'ActionProperty',u'N',None, None, None, None, u'UpperCase',None, u'The property to set when a product in this set is found.',),
-(u'Upgrade',u'Remove',u'Y',None, None, None, None, u'Formatted',None, u'The list of features to remove when uninstalling a product from this set. The default is "ALL".',),
-(u'Upgrade',u'UpgradeCode',u'N',None, None, None, None, u'Guid',None, u'The UpgradeCode GUID belonging to the products in this set.',),
-(u'Upgrade',u'VersionMax',u'Y',None, None, None, None, u'Text',None, u'The maximum ProductVersion of the products in this set. The set may or may not include products with this particular version.',),
-(u'Upgrade',u'VersionMin',u'Y',None, None, None, None, u'Text',None, u'The minimum ProductVersion of the products in this set. The set may or may not include products with this particular version.',),
-(u'Verb',u'Sequence',u'Y',0,32767,None, None, None, None, u'Order within the verbs for a particular extension. Also used simply to specify the default verb.',),
-(u'Verb',u'Argument',u'Y',None, None, None, None, u'Formatted',None, u'Optional value for the command arguments.',),
-(u'Verb',u'Extension_',u'N',None, None, u'Extension',1,u'Text',None, u'The extension associated with the table row.',),
-(u'Verb',u'Verb',u'N',None, None, None, None, u'Text',None, u'The verb for the command.',),
-(u'Verb',u'Command',u'Y',None, None, None, None, u'Formatted',None, u'The command text.',),
-]
diff --git a/sys/lib/python/msilib/sequence.py b/sys/lib/python/msilib/sequence.py
deleted file mode 100644
index 1138f7a23..000000000
--- a/sys/lib/python/msilib/sequence.py
+++ /dev/null
@@ -1,126 +0,0 @@
-AdminExecuteSequence = [
-(u'InstallInitialize', None, 1500),
-(u'InstallFinalize', None, 6600),
-(u'InstallFiles', None, 4000),
-(u'InstallAdminPackage', None, 3900),
-(u'FileCost', None, 900),
-(u'CostInitialize', None, 800),
-(u'CostFinalize', None, 1000),
-(u'InstallValidate', None, 1400),
-]
-
-AdminUISequence = [
-(u'FileCost', None, 900),
-(u'CostInitialize', None, 800),
-(u'CostFinalize', None, 1000),
-(u'ExecuteAction', None, 1300),
-(u'ExitDialog', None, -1),
-(u'FatalError', None, -3),
-(u'UserExit', None, -2),
-]
-
-AdvtExecuteSequence = [
-(u'InstallInitialize', None, 1500),
-(u'InstallFinalize', None, 6600),
-(u'CostInitialize', None, 800),
-(u'CostFinalize', None, 1000),
-(u'InstallValidate', None, 1400),
-(u'CreateShortcuts', None, 4500),
-(u'MsiPublishAssemblies', None, 6250),
-(u'PublishComponents', None, 6200),
-(u'PublishFeatures', None, 6300),
-(u'PublishProduct', None, 6400),
-(u'RegisterClassInfo', None, 4600),
-(u'RegisterExtensionInfo', None, 4700),
-(u'RegisterMIMEInfo', None, 4900),
-(u'RegisterProgIdInfo', None, 4800),
-]
-
-InstallExecuteSequence = [
-(u'InstallInitialize', None, 1500),
-(u'InstallFinalize', None, 6600),
-(u'InstallFiles', None, 4000),
-(u'FileCost', None, 900),
-(u'CostInitialize', None, 800),
-(u'CostFinalize', None, 1000),
-(u'InstallValidate', None, 1400),
-(u'CreateShortcuts', None, 4500),
-(u'MsiPublishAssemblies', None, 6250),
-(u'PublishComponents', None, 6200),
-(u'PublishFeatures', None, 6300),
-(u'PublishProduct', None, 6400),
-(u'RegisterClassInfo', None, 4600),
-(u'RegisterExtensionInfo', None, 4700),
-(u'RegisterMIMEInfo', None, 4900),
-(u'RegisterProgIdInfo', None, 4800),
-(u'AllocateRegistrySpace', u'NOT Installed', 1550),
-(u'AppSearch', None, 400),
-(u'BindImage', None, 4300),
-(u'CCPSearch', u'NOT Installed', 500),
-(u'CreateFolders', None, 3700),
-(u'DeleteServices', u'VersionNT', 2000),
-(u'DuplicateFiles', None, 4210),
-(u'FindRelatedProducts', None, 200),
-(u'InstallODBC', None, 5400),
-(u'InstallServices', u'VersionNT', 5800),
-(u'IsolateComponents', None, 950),
-(u'LaunchConditions', None, 100),
-(u'MigrateFeatureStates', None, 1200),
-(u'MoveFiles', None, 3800),
-(u'PatchFiles', None, 4090),
-(u'ProcessComponents', None, 1600),
-(u'RegisterComPlus', None, 5700),
-(u'RegisterFonts', None, 5300),
-(u'RegisterProduct', None, 6100),
-(u'RegisterTypeLibraries', None, 5500),
-(u'RegisterUser', None, 6000),
-(u'RemoveDuplicateFiles', None, 3400),
-(u'RemoveEnvironmentStrings', None, 3300),
-(u'RemoveExistingProducts', None, 6700),
-(u'RemoveFiles', None, 3500),
-(u'RemoveFolders', None, 3600),
-(u'RemoveIniValues', None, 3100),
-(u'RemoveODBC', None, 2400),
-(u'RemoveRegistryValues', None, 2600),
-(u'RemoveShortcuts', None, 3200),
-(u'RMCCPSearch', u'NOT Installed', 600),
-(u'SelfRegModules', None, 5600),
-(u'SelfUnregModules', None, 2200),
-(u'SetODBCFolders', None, 1100),
-(u'StartServices', u'VersionNT', 5900),
-(u'StopServices', u'VersionNT', 1900),
-(u'MsiUnpublishAssemblies', None, 1750),
-(u'UnpublishComponents', None, 1700),
-(u'UnpublishFeatures', None, 1800),
-(u'UnregisterClassInfo', None, 2700),
-(u'UnregisterComPlus', None, 2100),
-(u'UnregisterExtensionInfo', None, 2800),
-(u'UnregisterFonts', None, 2500),
-(u'UnregisterMIMEInfo', None, 3000),
-(u'UnregisterProgIdInfo', None, 2900),
-(u'UnregisterTypeLibraries', None, 2300),
-(u'ValidateProductID', None, 700),
-(u'WriteEnvironmentStrings', None, 5200),
-(u'WriteIniValues', None, 5100),
-(u'WriteRegistryValues', None, 5000),
-]
-
-InstallUISequence = [
-(u'FileCost', None, 900),
-(u'CostInitialize', None, 800),
-(u'CostFinalize', None, 1000),
-(u'ExecuteAction', None, 1300),
-(u'ExitDialog', None, -1),
-(u'FatalError', None, -3),
-(u'UserExit', None, -2),
-(u'AppSearch', None, 400),
-(u'CCPSearch', u'NOT Installed', 500),
-(u'FindRelatedProducts', None, 200),
-(u'IsolateComponents', None, 950),
-(u'LaunchConditions', None, 100),
-(u'MigrateFeatureStates', None, 1200),
-(u'RMCCPSearch', u'NOT Installed', 600),
-(u'ValidateProductID', None, 700),
-]
-
-tables=['AdminExecuteSequence', 'AdminUISequence', 'AdvtExecuteSequence', 'InstallExecuteSequence', 'InstallUISequence']
diff --git a/sys/lib/python/msilib/text.py b/sys/lib/python/msilib/text.py
deleted file mode 100644
index 12fd2d7c6..000000000
--- a/sys/lib/python/msilib/text.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import msilib,os;dirname=os.path.dirname(__file__)
-
-ActionText = [
-(u'InstallValidate', u'Validating install', None),
-(u'InstallFiles', u'Copying new files', u'File: [1], Directory: [9], Size: [6]'),
-(u'InstallAdminPackage', u'Copying network install files', u'File: [1], Directory: [9], Size: [6]'),
-(u'FileCost', u'Computing space requirements', None),
-(u'CostInitialize', u'Computing space requirements', None),
-(u'CostFinalize', u'Computing space requirements', None),
-(u'CreateShortcuts', u'Creating shortcuts', u'Shortcut: [1]'),
-(u'PublishComponents', u'Publishing Qualified Components', u'Component ID: [1], Qualifier: [2]'),
-(u'PublishFeatures', u'Publishing Product Features', u'Feature: [1]'),
-(u'PublishProduct', u'Publishing product information', None),
-(u'RegisterClassInfo', u'Registering Class servers', u'Class Id: [1]'),
-(u'RegisterExtensionInfo', u'Registering extension servers', u'Extension: [1]'),
-(u'RegisterMIMEInfo', u'Registering MIME info', u'MIME Content Type: [1], Extension: [2]'),
-(u'RegisterProgIdInfo', u'Registering program identifiers', u'ProgId: [1]'),
-(u'AllocateRegistrySpace', u'Allocating registry space', u'Free space: [1]'),
-(u'AppSearch', u'Searching for installed applications', u'Property: [1], Signature: [2]'),
-(u'BindImage', u'Binding executables', u'File: [1]'),
-(u'CCPSearch', u'Searching for qualifying products', None),
-(u'CreateFolders', u'Creating folders', u'Folder: [1]'),
-(u'DeleteServices', u'Deleting services', u'Service: [1]'),
-(u'DuplicateFiles', u'Creating duplicate files', u'File: [1], Directory: [9], Size: [6]'),
-(u'FindRelatedProducts', u'Searching for related applications', u'Found application: [1]'),
-(u'InstallODBC', u'Installing ODBC components', None),
-(u'InstallServices', u'Installing new services', u'Service: [2]'),
-(u'LaunchConditions', u'Evaluating launch conditions', None),
-(u'MigrateFeatureStates', u'Migrating feature states from related applications', u'Application: [1]'),
-(u'MoveFiles', u'Moving files', u'File: [1], Directory: [9], Size: [6]'),
-(u'PatchFiles', u'Patching files', u'File: [1], Directory: [2], Size: [3]'),
-(u'ProcessComponents', u'Updating component registration', None),
-(u'RegisterComPlus', u'Registering COM+ Applications and Components', u'AppId: [1]{{, AppType: [2], Users: [3], RSN: [4]}}'),
-(u'RegisterFonts', u'Registering fonts', u'Font: [1]'),
-(u'RegisterProduct', u'Registering product', u'[1]'),
-(u'RegisterTypeLibraries', u'Registering type libraries', u'LibID: [1]'),
-(u'RegisterUser', u'Registering user', u'[1]'),
-(u'RemoveDuplicateFiles', u'Removing duplicated files', u'File: [1], Directory: [9]'),
-(u'RemoveEnvironmentStrings', u'Updating environment strings', u'Name: [1], Value: [2], Action [3]'),
-(u'RemoveExistingProducts', u'Removing applications', u'Application: [1], Command line: [2]'),
-(u'RemoveFiles', u'Removing files', u'File: [1], Directory: [9]'),
-(u'RemoveFolders', u'Removing folders', u'Folder: [1]'),
-(u'RemoveIniValues', u'Removing INI files entries', u'File: [1], Section: [2], Key: [3], Value: [4]'),
-(u'RemoveODBC', u'Removing ODBC components', None),
-(u'RemoveRegistryValues', u'Removing system registry values', u'Key: [1], Name: [2]'),
-(u'RemoveShortcuts', u'Removing shortcuts', u'Shortcut: [1]'),
-(u'RMCCPSearch', u'Searching for qualifying products', None),
-(u'SelfRegModules', u'Registering modules', u'File: [1], Folder: [2]'),
-(u'SelfUnregModules', u'Unregistering modules', u'File: [1], Folder: [2]'),
-(u'SetODBCFolders', u'Initializing ODBC directories', None),
-(u'StartServices', u'Starting services', u'Service: [1]'),
-(u'StopServices', u'Stopping services', u'Service: [1]'),
-(u'UnpublishComponents', u'Unpublishing Qualified Components', u'Component ID: [1], Qualifier: [2]'),
-(u'UnpublishFeatures', u'Unpublishing Product Features', u'Feature: [1]'),
-(u'UnregisterClassInfo', u'Unregister Class servers', u'Class Id: [1]'),
-(u'UnregisterComPlus', u'Unregistering COM+ Applications and Components', u'AppId: [1]{{, AppType: [2]}}'),
-(u'UnregisterExtensionInfo', u'Unregistering extension servers', u'Extension: [1]'),
-(u'UnregisterFonts', u'Unregistering fonts', u'Font: [1]'),
-(u'UnregisterMIMEInfo', u'Unregistering MIME info', u'MIME Content Type: [1], Extension: [2]'),
-(u'UnregisterProgIdInfo', u'Unregistering program identifiers', u'ProgId: [1]'),
-(u'UnregisterTypeLibraries', u'Unregistering type libraries', u'LibID: [1]'),
-(u'WriteEnvironmentStrings', u'Updating environment strings', u'Name: [1], Value: [2], Action [3]'),
-(u'WriteIniValues', u'Writing INI files values', u'File: [1], Section: [2], Key: [3], Value: [4]'),
-(u'WriteRegistryValues', u'Writing system registry values', u'Key: [1], Name: [2], Value: [3]'),
-(u'Advertise', u'Advertising application', None),
-(u'GenerateScript', u'Generating script operations for action:', u'[1]'),
-(u'InstallSFPCatalogFile', u'Installing system catalog', u'File: [1], Dependencies: [2]'),
-(u'MsiPublishAssemblies', u'Publishing assembly information', u'Application Context:[1], Assembly Name:[2]'),
-(u'MsiUnpublishAssemblies', u'Unpublishing assembly information', u'Application Context:[1], Assembly Name:[2]'),
-(u'Rollback', u'Rolling back action:', u'[1]'),
-(u'RollbackCleanup', u'Removing backup files', u'File: [1]'),
-(u'UnmoveFiles', u'Removing moved files', u'File: [1], Directory: [9]'),
-(u'UnpublishProduct', u'Unpublishing product information', None),
-]
-
-UIText = [
-(u'AbsentPath', None),
-(u'bytes', u'bytes'),
-(u'GB', u'GB'),
-(u'KB', u'KB'),
-(u'MB', u'MB'),
-(u'MenuAbsent', u'Entire feature will be unavailable'),
-(u'MenuAdvertise', u'Feature will be installed when required'),
-(u'MenuAllCD', u'Entire feature will be installed to run from CD'),
-(u'MenuAllLocal', u'Entire feature will be installed on local hard drive'),
-(u'MenuAllNetwork', u'Entire feature will be installed to run from network'),
-(u'MenuCD', u'Will be installed to run from CD'),
-(u'MenuLocal', u'Will be installed on local hard drive'),
-(u'MenuNetwork', u'Will be installed to run from network'),
-(u'ScriptInProgress', u'Gathering required information...'),
-(u'SelAbsentAbsent', u'This feature will remain uninstalled'),
-(u'SelAbsentAdvertise', u'This feature will be set to be installed when required'),
-(u'SelAbsentCD', u'This feature will be installed to run from CD'),
-(u'SelAbsentLocal', u'This feature will be installed on the local hard drive'),
-(u'SelAbsentNetwork', u'This feature will be installed to run from the network'),
-(u'SelAdvertiseAbsent', u'This feature will become unavailable'),
-(u'SelAdvertiseAdvertise', u'Will be installed when required'),
-(u'SelAdvertiseCD', u'This feature will be available to run from CD'),
-(u'SelAdvertiseLocal', u'This feature will be installed on your local hard drive'),
-(u'SelAdvertiseNetwork', u'This feature will be available to run from the network'),
-(u'SelCDAbsent', u"This feature will be uninstalled completely, you won't be able to run it from CD"),
-(u'SelCDAdvertise', u'This feature will change from run from CD state to set to be installed when required'),
-(u'SelCDCD', u'This feature will remain to be run from CD'),
-(u'SelCDLocal', u'This feature will change from run from CD state to be installed on the local hard drive'),
-(u'SelChildCostNeg', u'This feature frees up [1] on your hard drive.'),
-(u'SelChildCostPos', u'This feature requires [1] on your hard drive.'),
-(u'SelCostPending', u'Compiling cost for this feature...'),
-(u'SelLocalAbsent', u'This feature will be completely removed'),
-(u'SelLocalAdvertise', u'This feature will be removed from your local hard drive, but will be set to be installed when required'),
-(u'SelLocalCD', u'This feature will be removed from your local hard drive, but will be still available to run from CD'),
-(u'SelLocalLocal', u'This feature will remain on you local hard drive'),
-(u'SelLocalNetwork', u'This feature will be removed from your local hard drive, but will be still available to run from the network'),
-(u'SelNetworkAbsent', u"This feature will be uninstalled completely, you won't be able to run it from the network"),
-(u'SelNetworkAdvertise', u'This feature will change from run from network state to set to be installed when required'),
-(u'SelNetworkLocal', u'This feature will change from run from network state to be installed on the local hard drive'),
-(u'SelNetworkNetwork', u'This feature will remain to be run from the network'),
-(u'SelParentCostNegNeg', u'This feature frees up [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures free up [4] on your hard drive.'),
-(u'SelParentCostNegPos', u'This feature frees up [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures require [4] on your hard drive.'),
-(u'SelParentCostPosNeg', u'This feature requires [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures free up [4] on your hard drive.'),
-(u'SelParentCostPosPos', u'This feature requires [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures require [4] on your hard drive.'),
-(u'TimeRemaining', u'Time remaining: {[1] minutes }{[2] seconds}'),
-(u'VolumeCostAvailable', u'Available'),
-(u'VolumeCostDifference', u'Difference'),
-(u'VolumeCostRequired', u'Required'),
-(u'VolumeCostSize', u'Disk Size'),
-(u'VolumeCostVolume', u'Volume'),
-]
-
-tables=['ActionText', 'UIText']
diff --git a/sys/lib/python/multifile.py b/sys/lib/python/multifile.py
deleted file mode 100644
index e82a3fdea..000000000
--- a/sys/lib/python/multifile.py
+++ /dev/null
@@ -1,158 +0,0 @@
-"""A readline()-style interface to the parts of a multipart message.
-
-The MultiFile class makes each part of a multipart message "feel" like
-an ordinary file, as long as you use fp.readline(). Allows recursive
-use, for nested multipart messages. Probably best used together
-with module mimetools.
-
-Suggested use:
-
-real_fp = open(...)
-fp = MultiFile(real_fp)
-
-"read some lines from fp"
-fp.push(separator)
-while 1:
- "read lines from fp until it returns an empty string" (A)
- if not fp.next(): break
-fp.pop()
-"read remaining lines from fp until it returns an empty string"
-
-The latter sequence may be used recursively at (A).
-It is also allowed to use multiple push()...pop() sequences.
-
-If seekable is given as 0, the class code will not do the bookkeeping
-it normally attempts in order to make seeks relative to the beginning of the
-current file part. This may be useful when using MultiFile with a non-
-seekable stream object.
-"""
-
-__all__ = ["MultiFile","Error"]
-
-class Error(Exception):
- pass
-
-class MultiFile:
-
- seekable = 0
-
- def __init__(self, fp, seekable=1):
- self.fp = fp
- self.stack = []
- self.level = 0
- self.last = 0
- if seekable:
- self.seekable = 1
- self.start = self.fp.tell()
- self.posstack = []
-
- def tell(self):
- if self.level > 0:
- return self.lastpos
- return self.fp.tell() - self.start
-
- def seek(self, pos, whence=0):
- here = self.tell()
- if whence:
- if whence == 1:
- pos = pos + here
- elif whence == 2:
- if self.level > 0:
- pos = pos + self.lastpos
- else:
- raise Error, "can't use whence=2 yet"
- if not 0 <= pos <= here or \
- self.level > 0 and pos > self.lastpos:
- raise Error, 'bad MultiFile.seek() call'
- self.fp.seek(pos + self.start)
- self.level = 0
- self.last = 0
-
- def readline(self):
- if self.level > 0:
- return ''
- line = self.fp.readline()
- # Real EOF?
- if not line:
- self.level = len(self.stack)
- self.last = (self.level > 0)
- if self.last:
- raise Error, 'sudden EOF in MultiFile.readline()'
- return ''
- assert self.level == 0
- # Fast check to see if this is just data
- if self.is_data(line):
- return line
- else:
- # Ignore trailing whitespace on marker lines
- marker = line.rstrip()
- # No? OK, try to match a boundary.
- # Return the line (unstripped) if we don't.
- for i, sep in enumerate(reversed(self.stack)):
- if marker == self.section_divider(sep):
- self.last = 0
- break
- elif marker == self.end_marker(sep):
- self.last = 1
- break
- else:
- return line
- # We only get here if we see a section divider or EOM line
- if self.seekable:
- self.lastpos = self.tell() - len(line)
- self.level = i+1
- if self.level > 1:
- raise Error,'Missing endmarker in MultiFile.readline()'
- return ''
-
- def readlines(self):
- list = []
- while 1:
- line = self.readline()
- if not line: break
- list.append(line)
- return list
-
- def read(self): # Note: no size argument -- read until EOF only!
- return ''.join(self.readlines())
-
- def next(self):
- while self.readline(): pass
- if self.level > 1 or self.last:
- return 0
- self.level = 0
- self.last = 0
- if self.seekable:
- self.start = self.fp.tell()
- return 1
-
- def push(self, sep):
- if self.level > 0:
- raise Error, 'bad MultiFile.push() call'
- self.stack.append(sep)
- if self.seekable:
- self.posstack.append(self.start)
- self.start = self.fp.tell()
-
- def pop(self):
- if self.stack == []:
- raise Error, 'bad MultiFile.pop() call'
- if self.level <= 1:
- self.last = 0
- else:
- abslastpos = self.lastpos + self.start
- self.level = max(0, self.level - 1)
- self.stack.pop()
- if self.seekable:
- self.start = self.posstack.pop()
- if self.level > 0:
- self.lastpos = abslastpos - self.start
-
- def is_data(self, line):
- return line[:2] != '--'
-
- def section_divider(self, str):
- return "--" + str
-
- def end_marker(self, str):
- return "--" + str + "--"
diff --git a/sys/lib/python/mutex.py b/sys/lib/python/mutex.py
deleted file mode 100644
index 5d35bdf6a..000000000
--- a/sys/lib/python/mutex.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Mutual exclusion -- for use with module sched
-
-A mutex has two pieces of state -- a 'locked' bit and a queue.
-When the mutex is not locked, the queue is empty.
-Otherwise, the queue contains 0 or more (function, argument) pairs
-representing functions (or methods) waiting to acquire the lock.
-When the mutex is unlocked while the queue is not empty,
-the first queue entry is removed and its function(argument) pair called,
-implying it now has the lock.
-
-Of course, no multi-threading is implied -- hence the funny interface
-for lock, where a function is called once the lock is aquired.
-"""
-
-from collections import deque
-
-class mutex:
- def __init__(self):
- """Create a new mutex -- initially unlocked."""
- self.locked = 0
- self.queue = deque()
-
- def test(self):
- """Test the locked bit of the mutex."""
- return self.locked
-
- def testandset(self):
- """Atomic test-and-set -- grab the lock if it is not set,
- return True if it succeeded."""
- if not self.locked:
- self.locked = 1
- return True
- else:
- return False
-
- def lock(self, function, argument):
- """Lock a mutex, call the function with supplied argument
- when it is acquired. If the mutex is already locked, place
- function and argument in the queue."""
- if self.testandset():
- function(argument)
- else:
- self.queue.append((function, argument))
-
- def unlock(self):
- """Unlock a mutex. If the queue is not empty, call the next
- function with its argument."""
- if self.queue:
- function, argument = self.queue.popleft()
- function(argument)
- else:
- self.locked = 0
diff --git a/sys/lib/python/netrc.py b/sys/lib/python/netrc.py
deleted file mode 100644
index 5493d77d3..000000000
--- a/sys/lib/python/netrc.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""An object-oriented interface to .netrc files."""
-
-# Module and documentation by Eric S. Raymond, 21 Dec 1998
-
-import os, shlex
-
-__all__ = ["netrc", "NetrcParseError"]
-
-
-class NetrcParseError(Exception):
- """Exception raised on syntax errors in the .netrc file."""
- def __init__(self, msg, filename=None, lineno=None):
- self.filename = filename
- self.lineno = lineno
- self.msg = msg
- Exception.__init__(self, msg)
-
- def __str__(self):
- return "%s (%s, line %s)" % (self.msg, self.filename, self.lineno)
-
-
-class netrc:
- def __init__(self, file=None):
- if file is None:
- try:
- file = os.path.join(os.environ['HOME'], ".netrc")
- except KeyError:
- raise IOError("Could not find .netrc: $HOME is not set")
- fp = open(file)
- self.hosts = {}
- self.macros = {}
- lexer = shlex.shlex(fp)
- lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
- while 1:
- # Look for a machine, default, or macdef top-level keyword
- toplevel = tt = lexer.get_token()
- if not tt:
- break
- elif tt == 'machine':
- entryname = lexer.get_token()
- elif tt == 'default':
- entryname = 'default'
- elif tt == 'macdef': # Just skip to end of macdefs
- entryname = lexer.get_token()
- self.macros[entryname] = []
- lexer.whitespace = ' \t'
- while 1:
- line = lexer.instream.readline()
- if not line or line == '\012':
- lexer.whitespace = ' \t\r\n'
- break
- self.macros[entryname].append(line)
- continue
- else:
- raise NetrcParseError(
- "bad toplevel token %r" % tt, file, lexer.lineno)
-
- # We're looking at start of an entry for a named machine or default.
- login = ''
- account = password = None
- self.hosts[entryname] = {}
- while 1:
- tt = lexer.get_token()
- if (tt=='' or tt == 'machine' or
- tt == 'default' or tt =='macdef'):
- if password:
- self.hosts[entryname] = (login, account, password)
- lexer.push_token(tt)
- break
- else:
- raise NetrcParseError(
- "malformed %s entry %s terminated by %s"
- % (toplevel, entryname, repr(tt)),
- file, lexer.lineno)
- elif tt == 'login' or tt == 'user':
- login = lexer.get_token()
- elif tt == 'account':
- account = lexer.get_token()
- elif tt == 'password':
- password = lexer.get_token()
- else:
- raise NetrcParseError("bad follower token %r" % tt,
- file, lexer.lineno)
-
- def authenticators(self, host):
- """Return a (user, account, password) tuple for given host."""
- if host in self.hosts:
- return self.hosts[host]
- elif 'default' in self.hosts:
- return self.hosts['default']
- else:
- return None
-
- def __repr__(self):
- """Dump the class data in the format of a .netrc file."""
- rep = ""
- for host in self.hosts.keys():
- attrs = self.hosts[host]
- rep = rep + "machine "+ host + "\n\tlogin " + repr(attrs[0]) + "\n"
- if attrs[1]:
- rep = rep + "account " + repr(attrs[1])
- rep = rep + "\tpassword " + repr(attrs[2]) + "\n"
- for macro in self.macros.keys():
- rep = rep + "macdef " + macro + "\n"
- for line in self.macros[macro]:
- rep = rep + line
- rep = rep + "\n"
- return rep
-
-if __name__ == '__main__':
- print netrc()
diff --git a/sys/lib/python/new.py b/sys/lib/python/new.py
deleted file mode 100644
index 99a1c3fdc..000000000
--- a/sys/lib/python/new.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""Create new objects of various types. Deprecated.
-
-This module is no longer required except for backward compatibility.
-Objects of most types can now be created by calling the type object.
-"""
-
-from types import ClassType as classobj
-from types import FunctionType as function
-from types import InstanceType as instance
-from types import MethodType as instancemethod
-from types import ModuleType as module
-
-# CodeType is not accessible in restricted execution mode
-try:
- from types import CodeType as code
-except ImportError:
- pass
diff --git a/sys/lib/python/nntplib.py b/sys/lib/python/nntplib.py
deleted file mode 100644
index cc51d1da8..000000000
--- a/sys/lib/python/nntplib.py
+++ /dev/null
@@ -1,628 +0,0 @@
-"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
-
-Example:
-
->>> from nntplib import NNTP
->>> s = NNTP('news')
->>> resp, count, first, last, name = s.group('comp.lang.python')
->>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
-Group comp.lang.python has 51 articles, range 5770 to 5821
->>> resp, subs = s.xhdr('subject', first + '-' + last)
->>> resp = s.quit()
->>>
-
-Here 'resp' is the server response line.
-Error responses are turned into exceptions.
-
-To post an article from a file:
->>> f = open(filename, 'r') # file containing article, including header
->>> resp = s.post(f)
->>>
-
-For descriptions of all methods, read the comments in the code below.
-Note that all arguments and return values representing article numbers
-are strings, not numbers, since they are rarely used for calculations.
-"""
-
-# RFC 977 by Brian Kantor and Phil Lapsley.
-# xover, xgtitle, xpath, date methods by Kevan Heydon
-
-
-# Imports
-import re
-import socket
-
-__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
- "NNTPPermanentError","NNTPProtocolError","NNTPDataError",
- "error_reply","error_temp","error_perm","error_proto",
- "error_data",]
-
-# Exceptions raised when an error or invalid response is received
-class NNTPError(Exception):
- """Base class for all nntplib exceptions"""
- def __init__(self, *args):
- Exception.__init__(self, *args)
- try:
- self.response = args[0]
- except IndexError:
- self.response = 'No response given'
-
-class NNTPReplyError(NNTPError):
- """Unexpected [123]xx reply"""
- pass
-
-class NNTPTemporaryError(NNTPError):
- """4xx errors"""
- pass
-
-class NNTPPermanentError(NNTPError):
- """5xx errors"""
- pass
-
-class NNTPProtocolError(NNTPError):
- """Response does not begin with [1-5]"""
- pass
-
-class NNTPDataError(NNTPError):
- """Error in response data"""
- pass
-
-# for backwards compatibility
-error_reply = NNTPReplyError
-error_temp = NNTPTemporaryError
-error_perm = NNTPPermanentError
-error_proto = NNTPProtocolError
-error_data = NNTPDataError
-
-
-
-# Standard port used by NNTP servers
-NNTP_PORT = 119
-
-
-# Response numbers that are followed by additional text (e.g. article)
-LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
-
-
-# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
-CRLF = '\r\n'
-
-
-
-# The class itself
-class NNTP:
- def __init__(self, host, port=NNTP_PORT, user=None, password=None,
- readermode=None, usenetrc=True):
- """Initialize an instance. Arguments:
- - host: hostname to connect to
- - port: port to connect to (default the standard NNTP port)
- - user: username to authenticate with
- - password: password to use with username
- - readermode: if true, send 'mode reader' command after
- connecting.
-
- readermode is sometimes necessary if you are connecting to an
- NNTP server on the local machine and intend to call
- reader-specific comamnds, such as `group'. If you get
- unexpected NNTPPermanentErrors, you might need to set
- readermode.
- """
- self.host = host
- self.port = port
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.sock.connect((self.host, self.port))
- self.file = self.sock.makefile('rb')
- self.debugging = 0
- self.welcome = self.getresp()
-
- # 'mode reader' is sometimes necessary to enable 'reader' mode.
- # However, the order in which 'mode reader' and 'authinfo' need to
- # arrive differs between some NNTP servers. Try to send
- # 'mode reader', and if it fails with an authorization failed
- # error, try again after sending authinfo.
- readermode_afterauth = 0
- if readermode:
- try:
- self.welcome = self.shortcmd('mode reader')
- except NNTPPermanentError:
- # error 500, probably 'not implemented'
- pass
- except NNTPTemporaryError, e:
- if user and e.response[:3] == '480':
- # Need authorization before 'mode reader'
- readermode_afterauth = 1
- else:
- raise
- # If no login/password was specified, try to get them from ~/.netrc
- # Presume that if .netc has an entry, NNRP authentication is required.
- try:
- if usenetrc and not user:
- import netrc
- credentials = netrc.netrc()
- auth = credentials.authenticators(host)
- if auth:
- user = auth[0]
- password = auth[2]
- except IOError:
- pass
- # Perform NNRP authentication if needed.
- if user:
- resp = self.shortcmd('authinfo user '+user)
- if resp[:3] == '381':
- if not password:
- raise NNTPReplyError(resp)
- else:
- resp = self.shortcmd(
- 'authinfo pass '+password)
- if resp[:3] != '281':
- raise NNTPPermanentError(resp)
- if readermode_afterauth:
- try:
- self.welcome = self.shortcmd('mode reader')
- except NNTPPermanentError:
- # error 500, probably 'not implemented'
- pass
-
-
- # Get the welcome message from the server
- # (this is read and squirreled away by __init__()).
- # If the response code is 200, posting is allowed;
- # if it 201, posting is not allowed
-
- def getwelcome(self):
- """Get the welcome message from the server
- (this is read and squirreled away by __init__()).
- If the response code is 200, posting is allowed;
- if it 201, posting is not allowed."""
-
- if self.debugging: print '*welcome*', repr(self.welcome)
- return self.welcome
-
- def set_debuglevel(self, level):
- """Set the debugging level. Argument 'level' means:
- 0: no debugging output (default)
- 1: print commands and responses but not body text etc.
- 2: also print raw lines read and sent before stripping CR/LF"""
-
- self.debugging = level
- debug = set_debuglevel
-
- def putline(self, line):
- """Internal: send one line to the server, appending CRLF."""
- line = line + CRLF
- if self.debugging > 1: print '*put*', repr(line)
- self.sock.sendall(line)
-
- def putcmd(self, line):
- """Internal: send one command to the server (through putline())."""
- if self.debugging: print '*cmd*', repr(line)
- self.putline(line)
-
- def getline(self):
- """Internal: return one line from the server, stripping CRLF.
- Raise EOFError if the connection is closed."""
- line = self.file.readline()
- if self.debugging > 1:
- print '*get*', repr(line)
- if not line: raise EOFError
- if line[-2:] == CRLF: line = line[:-2]
- elif line[-1:] in CRLF: line = line[:-1]
- return line
-
- def getresp(self):
- """Internal: get a response from the server.
- Raise various errors if the response indicates an error."""
- resp = self.getline()
- if self.debugging: print '*resp*', repr(resp)
- c = resp[:1]
- if c == '4':
- raise NNTPTemporaryError(resp)
- if c == '5':
- raise NNTPPermanentError(resp)
- if c not in '123':
- raise NNTPProtocolError(resp)
- return resp
-
- def getlongresp(self, file=None):
- """Internal: get a response plus following text from the server.
- Raise various errors if the response indicates an error."""
-
- openedFile = None
- try:
- # If a string was passed then open a file with that name
- if isinstance(file, str):
- openedFile = file = open(file, "w")
-
- resp = self.getresp()
- if resp[:3] not in LONGRESP:
- raise NNTPReplyError(resp)
- list = []
- while 1:
- line = self.getline()
- if line == '.':
- break
- if line[:2] == '..':
- line = line[1:]
- if file:
- file.write(line + "\n")
- else:
- list.append(line)
- finally:
- # If this method created the file, then it must close it
- if openedFile:
- openedFile.close()
-
- return resp, list
-
- def shortcmd(self, line):
- """Internal: send a command and get the response."""
- self.putcmd(line)
- return self.getresp()
-
- def longcmd(self, line, file=None):
- """Internal: send a command and get the response plus following text."""
- self.putcmd(line)
- return self.getlongresp(file)
-
- def newgroups(self, date, time, file=None):
- """Process a NEWGROUPS command. Arguments:
- - date: string 'yymmdd' indicating the date
- - time: string 'hhmmss' indicating the time
- Return:
- - resp: server response if successful
- - list: list of newsgroup names"""
-
- return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
-
- def newnews(self, group, date, time, file=None):
- """Process a NEWNEWS command. Arguments:
- - group: group name or '*'
- - date: string 'yymmdd' indicating the date
- - time: string 'hhmmss' indicating the time
- Return:
- - resp: server response if successful
- - list: list of message ids"""
-
- cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
- return self.longcmd(cmd, file)
-
- def list(self, file=None):
- """Process a LIST command. Return:
- - resp: server response if successful
- - list: list of (group, last, first, flag) (strings)"""
-
- resp, list = self.longcmd('LIST', file)
- for i in range(len(list)):
- # Parse lines into "group last first flag"
- list[i] = tuple(list[i].split())
- return resp, list
-
- def description(self, group):
-
- """Get a description for a single group. If more than one
- group matches ('group' is a pattern), return the first. If no
- group matches, return an empty string.
-
- This elides the response code from the server, since it can
- only be '215' or '285' (for xgtitle) anyway. If the response
- code is needed, use the 'descriptions' method.
-
- NOTE: This neither checks for a wildcard in 'group' nor does
- it check whether the group actually exists."""
-
- resp, lines = self.descriptions(group)
- if len(lines) == 0:
- return ""
- else:
- return lines[0][1]
-
- def descriptions(self, group_pattern):
- """Get descriptions for a range of groups."""
- line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$")
- # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
- resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern)
- if resp[:3] != "215":
- # Now the deprecated XGTITLE. This either raises an error
- # or succeeds with the same output structure as LIST
- # NEWSGROUPS.
- resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern)
- lines = []
- for raw_line in raw_lines:
- match = line_pat.search(raw_line.strip())
- if match:
- lines.append(match.group(1, 2))
- return resp, lines
-
- def group(self, name):
- """Process a GROUP command. Argument:
- - group: the group name
- Returns:
- - resp: server response if successful
- - count: number of articles (string)
- - first: first article number (string)
- - last: last article number (string)
- - name: the group name"""
-
- resp = self.shortcmd('GROUP ' + name)
- if resp[:3] != '211':
- raise NNTPReplyError(resp)
- words = resp.split()
- count = first = last = 0
- n = len(words)
- if n > 1:
- count = words[1]
- if n > 2:
- first = words[2]
- if n > 3:
- last = words[3]
- if n > 4:
- name = words[4].lower()
- return resp, count, first, last, name
-
- def help(self, file=None):
- """Process a HELP command. Returns:
- - resp: server response if successful
- - list: list of strings"""
-
- return self.longcmd('HELP',file)
-
- def statparse(self, resp):
- """Internal: parse the response of a STAT, NEXT or LAST command."""
- if resp[:2] != '22':
- raise NNTPReplyError(resp)
- words = resp.split()
- nr = 0
- id = ''
- n = len(words)
- if n > 1:
- nr = words[1]
- if n > 2:
- id = words[2]
- return resp, nr, id
-
- def statcmd(self, line):
- """Internal: process a STAT, NEXT or LAST command."""
- resp = self.shortcmd(line)
- return self.statparse(resp)
-
- def stat(self, id):
- """Process a STAT command. Argument:
- - id: article number or message id
- Returns:
- - resp: server response if successful
- - nr: the article number
- - id: the message id"""
-
- return self.statcmd('STAT ' + id)
-
- def next(self):
- """Process a NEXT command. No arguments. Return as for STAT."""
- return self.statcmd('NEXT')
-
- def last(self):
- """Process a LAST command. No arguments. Return as for STAT."""
- return self.statcmd('LAST')
-
- def artcmd(self, line, file=None):
- """Internal: process a HEAD, BODY or ARTICLE command."""
- resp, list = self.longcmd(line, file)
- resp, nr, id = self.statparse(resp)
- return resp, nr, id, list
-
- def head(self, id):
- """Process a HEAD command. Argument:
- - id: article number or message id
- Returns:
- - resp: server response if successful
- - nr: article number
- - id: message id
- - list: the lines of the article's header"""
-
- return self.artcmd('HEAD ' + id)
-
- def body(self, id, file=None):
- """Process a BODY command. Argument:
- - id: article number or message id
- - file: Filename string or file object to store the article in
- Returns:
- - resp: server response if successful
- - nr: article number
- - id: message id
- - list: the lines of the article's body or an empty list
- if file was used"""
-
- return self.artcmd('BODY ' + id, file)
-
- def article(self, id):
- """Process an ARTICLE command. Argument:
- - id: article number or message id
- Returns:
- - resp: server response if successful
- - nr: article number
- - id: message id
- - list: the lines of the article"""
-
- return self.artcmd('ARTICLE ' + id)
-
- def slave(self):
- """Process a SLAVE command. Returns:
- - resp: server response if successful"""
-
- return self.shortcmd('SLAVE')
-
- def xhdr(self, hdr, str, file=None):
- """Process an XHDR command (optional server extension). Arguments:
- - hdr: the header type (e.g. 'subject')
- - str: an article nr, a message id, or a range nr1-nr2
- Returns:
- - resp: server response if successful
- - list: list of (nr, value) strings"""
-
- pat = re.compile('^([0-9]+) ?(.*)\n?')
- resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file)
- for i in range(len(lines)):
- line = lines[i]
- m = pat.match(line)
- if m:
- lines[i] = m.group(1, 2)
- return resp, lines
-
- def xover(self, start, end, file=None):
- """Process an XOVER command (optional server extension) Arguments:
- - start: start of range
- - end: end of range
- Returns:
- - resp: server response if successful
- - list: list of (art-nr, subject, poster, date,
- id, references, size, lines)"""
-
- resp, lines = self.longcmd('XOVER ' + start + '-' + end, file)
- xover_lines = []
- for line in lines:
- elem = line.split("\t")
- try:
- xover_lines.append((elem[0],
- elem[1],
- elem[2],
- elem[3],
- elem[4],
- elem[5].split(),
- elem[6],
- elem[7]))
- except IndexError:
- raise NNTPDataError(line)
- return resp,xover_lines
-
- def xgtitle(self, group, file=None):
- """Process an XGTITLE command (optional server extension) Arguments:
- - group: group name wildcard (i.e. news.*)
- Returns:
- - resp: server response if successful
- - list: list of (name,title) strings"""
-
- line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
- resp, raw_lines = self.longcmd('XGTITLE ' + group, file)
- lines = []
- for raw_line in raw_lines:
- match = line_pat.search(raw_line.strip())
- if match:
- lines.append(match.group(1, 2))
- return resp, lines
-
- def xpath(self,id):
- """Process an XPATH command (optional server extension) Arguments:
- - id: Message id of article
- Returns:
- resp: server response if successful
- path: directory path to article"""
-
- resp = self.shortcmd("XPATH " + id)
- if resp[:3] != '223':
- raise NNTPReplyError(resp)
- try:
- [resp_num, path] = resp.split()
- except ValueError:
- raise NNTPReplyError(resp)
- else:
- return resp, path
-
- def date (self):
- """Process the DATE command. Arguments:
- None
- Returns:
- resp: server response if successful
- date: Date suitable for newnews/newgroups commands etc.
- time: Time suitable for newnews/newgroups commands etc."""
-
- resp = self.shortcmd("DATE")
- if resp[:3] != '111':
- raise NNTPReplyError(resp)
- elem = resp.split()
- if len(elem) != 2:
- raise NNTPDataError(resp)
- date = elem[1][2:8]
- time = elem[1][-6:]
- if len(date) != 6 or len(time) != 6:
- raise NNTPDataError(resp)
- return resp, date, time
-
-
- def post(self, f):
- """Process a POST command. Arguments:
- - f: file containing the article
- Returns:
- - resp: server response if successful"""
-
- resp = self.shortcmd('POST')
- # Raises error_??? if posting is not allowed
- if resp[0] != '3':
- raise NNTPReplyError(resp)
- while 1:
- line = f.readline()
- if not line:
- break
- if line[-1] == '\n':
- line = line[:-1]
- if line[:1] == '.':
- line = '.' + line
- self.putline(line)
- self.putline('.')
- return self.getresp()
-
- def ihave(self, id, f):
- """Process an IHAVE command. Arguments:
- - id: message-id of the article
- - f: file containing the article
- Returns:
- - resp: server response if successful
- Note that if the server refuses the article an exception is raised."""
-
- resp = self.shortcmd('IHAVE ' + id)
- # Raises error_??? if the server already has it
- if resp[0] != '3':
- raise NNTPReplyError(resp)
- while 1:
- line = f.readline()
- if not line:
- break
- if line[-1] == '\n':
- line = line[:-1]
- if line[:1] == '.':
- line = '.' + line
- self.putline(line)
- self.putline('.')
- return self.getresp()
-
- def quit(self):
- """Process a QUIT command and close the socket. Returns:
- - resp: server response if successful"""
-
- resp = self.shortcmd('QUIT')
- self.file.close()
- self.sock.close()
- del self.file, self.sock
- return resp
-
-
-# Test retrieval when run as a script.
-# Assumption: if there's a local news server, it's called 'news'.
-# Assumption: if user queries a remote news server, it's named
-# in the environment variable NNTPSERVER (used by slrn and kin)
-# and we want readermode off.
-if __name__ == '__main__':
- import os
- newshost = 'news' and os.environ["NNTPSERVER"]
- if newshost.find('.') == -1:
- mode = 'readermode'
- else:
- mode = None
- s = NNTP(newshost, readermode=mode)
- resp, count, first, last, name = s.group('comp.lang.python')
- print resp
- print 'Group', name, 'has', count, 'articles, range', first, 'to', last
- resp, subs = s.xhdr('subject', first + '-' + last)
- print resp
- for item in subs:
- print "%7s %s" % item
- resp = s.quit()
- print resp
diff --git a/sys/lib/python/ntpath.py b/sys/lib/python/ntpath.py
deleted file mode 100644
index 7a79b5322..000000000
--- a/sys/lib/python/ntpath.py
+++ /dev/null
@@ -1,511 +0,0 @@
-# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
-"""Common pathname manipulations, WindowsNT/95 version.
-
-Instead of importing this module directly, import os and refer to this
-module as os.path.
-"""
-
-import os
-import stat
-import sys
-
-__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
- "basename","dirname","commonprefix","getsize","getmtime",
- "getatime","getctime", "islink","exists","lexists","isdir","isfile",
- "ismount","walk","expanduser","expandvars","normpath","abspath",
- "splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
- "extsep","devnull","realpath","supports_unicode_filenames"]
-
-# strings representing various path-related bits and pieces
-curdir = '.'
-pardir = '..'
-extsep = '.'
-sep = '\\'
-pathsep = ';'
-altsep = '/'
-defpath = '.;C:\\bin'
-if 'ce' in sys.builtin_module_names:
- defpath = '\\Windows'
-elif 'os2' in sys.builtin_module_names:
- # OS/2 w/ VACPP
- altsep = '/'
-devnull = 'nul'
-
-# Normalize the case of a pathname and map slashes to backslashes.
-# Other normalizations (such as optimizing '../' away) are not done
-# (this is done by normpath).
-
-def normcase(s):
- """Normalize case of pathname.
-
- Makes all characters lowercase and all slashes into backslashes."""
- return s.replace("/", "\\").lower()
-
-
-# Return whether a path is absolute.
-# Trivial in Posix, harder on the Mac or MS-DOS.
-# For DOS it is absolute if it starts with a slash or backslash (current
-# volume), or if a pathname after the volume letter and colon / UNC resource
-# starts with a slash or backslash.
-
-def isabs(s):
- """Test whether a path is absolute"""
- s = splitdrive(s)[1]
- return s != '' and s[:1] in '/\\'
-
-
-# Join two (or more) paths.
-
-def join(a, *p):
- """Join two or more pathname components, inserting "\\" as needed"""
- path = a
- for b in p:
- b_wins = 0 # set to 1 iff b makes path irrelevant
- if path == "":
- b_wins = 1
-
- elif isabs(b):
- # This probably wipes out path so far. However, it's more
- # complicated if path begins with a drive letter:
- # 1. join('c:', '/a') == 'c:/a'
- # 2. join('c:/', '/a') == 'c:/a'
- # But
- # 3. join('c:/a', '/b') == '/b'
- # 4. join('c:', 'd:/') = 'd:/'
- # 5. join('c:/', 'd:/') = 'd:/'
- if path[1:2] != ":" or b[1:2] == ":":
- # Path doesn't start with a drive letter, or cases 4 and 5.
- b_wins = 1
-
- # Else path has a drive letter, and b doesn't but is absolute.
- elif len(path) > 3 or (len(path) == 3 and
- path[-1] not in "/\\"):
- # case 3
- b_wins = 1
-
- if b_wins:
- path = b
- else:
- # Join, and ensure there's a separator.
- assert len(path) > 0
- if path[-1] in "/\\":
- if b and b[0] in "/\\":
- path += b[1:]
- else:
- path += b
- elif path[-1] == ":":
- path += b
- elif b:
- if b[0] in "/\\":
- path += b
- else:
- path += "\\" + b
- else:
- # path is not empty and does not end with a backslash,
- # but b is empty; since, e.g., split('a/') produces
- # ('a', ''), it's best if join() adds a backslash in
- # this case.
- path += '\\'
-
- return path
-
-
-# Split a path in a drive specification (a drive letter followed by a
-# colon) and the path specification.
-# It is always true that drivespec + pathspec == p
-def splitdrive(p):
- """Split a pathname into drive and path specifiers. Returns a 2-tuple
-"(drive,path)"; either part may be empty"""
- if p[1:2] == ':':
- return p[0:2], p[2:]
- return '', p
-
-
-# Parse UNC paths
-def splitunc(p):
- """Split a pathname into UNC mount point and relative path specifiers.
-
- Return a 2-tuple (unc, rest); either part may be empty.
- If unc is not empty, it has the form '//host/mount' (or similar
- using backslashes). unc+rest is always the input path.
- Paths containing drive letters never have an UNC part.
- """
- if p[1:2] == ':':
- return '', p # Drive letter present
- firstTwo = p[0:2]
- if firstTwo == '//' or firstTwo == '\\\\':
- # is a UNC path:
- # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
- # \\machine\mountpoint\directories...
- # directory ^^^^^^^^^^^^^^^
- normp = normcase(p)
- index = normp.find('\\', 2)
- if index == -1:
- ##raise RuntimeError, 'illegal UNC path: "' + p + '"'
- return ("", p)
- index = normp.find('\\', index + 1)
- if index == -1:
- index = len(p)
- return p[:index], p[index:]
- return '', p
-
-
-# Split a path in head (everything up to the last '/') and tail (the
-# rest). After the trailing '/' is stripped, the invariant
-# join(head, tail) == p holds.
-# The resulting head won't end in '/' unless it is the root.
-
-def split(p):
- """Split a pathname.
-
- Return tuple (head, tail) where tail is everything after the final slash.
- Either part may be empty."""
-
- d, p = splitdrive(p)
- # set i to index beyond p's last slash
- i = len(p)
- while i and p[i-1] not in '/\\':
- i = i - 1
- head, tail = p[:i], p[i:] # now tail has no slashes
- # remove trailing slashes from head, unless it's all slashes
- head2 = head
- while head2 and head2[-1] in '/\\':
- head2 = head2[:-1]
- head = head2 or head
- return d + head, tail
-
-
-# Split a path in root and extension.
-# The extension is everything starting at the last dot in the last
-# pathname component; the root is everything before that.
-# It is always true that root + ext == p.
-
-def splitext(p):
- """Split the extension from a pathname.
-
- Extension is everything from the last dot to the end.
- Return (root, ext), either part may be empty."""
-
- i = p.rfind('.')
- if i<=max(p.rfind('/'), p.rfind('\\')):
- return p, ''
- else:
- return p[:i], p[i:]
-
-
-# Return the tail (basename) part of a path.
-
-def basename(p):
- """Returns the final component of a pathname"""
- return split(p)[1]
-
-
-# Return the head (dirname) part of a path.
-
-def dirname(p):
- """Returns the directory component of a pathname"""
- return split(p)[0]
-
-
-# Return the longest prefix of all list elements.
-
-def commonprefix(m):
- "Given a list of pathnames, returns the longest common leading component"
- if not m: return ''
- s1 = min(m)
- s2 = max(m)
- n = min(len(s1), len(s2))
- for i in xrange(n):
- if s1[i] != s2[i]:
- return s1[:i]
- return s1[:n]
-
-
-# Get size, mtime, atime of files.
-
-def getsize(filename):
- """Return the size of a file, reported by os.stat()"""
- return os.stat(filename).st_size
-
-def getmtime(filename):
- """Return the last modification time of a file, reported by os.stat()"""
- return os.stat(filename).st_mtime
-
-def getatime(filename):
- """Return the last access time of a file, reported by os.stat()"""
- return os.stat(filename).st_atime
-
-def getctime(filename):
- """Return the creation time of a file, reported by os.stat()."""
- return os.stat(filename).st_ctime
-
-# Is a path a symbolic link?
-# This will always return false on systems where posix.lstat doesn't exist.
-
-def islink(path):
- """Test for symbolic link. On WindowsNT/95 always returns false"""
- return False
-
-
-# Does a path exist?
-
-def exists(path):
- """Test whether a path exists"""
- try:
- st = os.stat(path)
- except os.error:
- return False
- return True
-
-lexists = exists
-
-
-# Is a path a dos directory?
-# This follows symbolic links, so both islink() and isdir() can be true
-# for the same path.
-
-def isdir(path):
- """Test whether a path is a directory"""
- try:
- st = os.stat(path)
- except os.error:
- return False
- return stat.S_ISDIR(st.st_mode)
-
-
-# Is a path a regular file?
-# This follows symbolic links, so both islink() and isdir() can be true
-# for the same path.
-
-def isfile(path):
- """Test whether a path is a regular file"""
- try:
- st = os.stat(path)
- except os.error:
- return False
- return stat.S_ISREG(st.st_mode)
-
-
-# Is a path a mount point? Either a root (with or without drive letter)
-# or an UNC path with at most a / or \ after the mount point.
-
-def ismount(path):
- """Test whether a path is a mount point (defined as root of drive)"""
- unc, rest = splitunc(path)
- if unc:
- return rest in ("", "/", "\\")
- p = splitdrive(path)[1]
- return len(p) == 1 and p[0] in '/\\'
-
-
-# Directory tree walk.
-# For each directory under top (including top itself, but excluding
-# '.' and '..'), func(arg, dirname, filenames) is called, where
-# dirname is the name of the directory and filenames is the list
-# of files (and subdirectories etc.) in the directory.
-# The func may modify the filenames list, to implement a filter,
-# or to impose a different order of visiting.
-
-def walk(top, func, arg):
- """Directory tree walk with callback function.
-
- For each directory in the directory tree rooted at top (including top
- itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
- dirname is the name of the directory, and fnames a list of the names of
- the files and subdirectories in dirname (excluding '.' and '..'). func
- may modify the fnames list in-place (e.g. via del or slice assignment),
- and walk will only recurse into the subdirectories whose names remain in
- fnames; this can be used to implement a filter, or to impose a specific
- order of visiting. No semantics are defined for, or required of, arg,
- beyond that arg is always passed to func. It can be used, e.g., to pass
- a filename pattern, or a mutable object designed to accumulate
- statistics. Passing None for arg is common."""
-
- try:
- names = os.listdir(top)
- except os.error:
- return
- func(arg, top, names)
- exceptions = ('.', '..')
- for name in names:
- if name not in exceptions:
- name = join(top, name)
- if isdir(name):
- walk(name, func, arg)
-
-
-# Expand paths beginning with '~' or '~user'.
-# '~' means $HOME; '~user' means that user's home directory.
-# If the path doesn't begin with '~', or if the user or $HOME is unknown,
-# the path is returned unchanged (leaving error reporting to whatever
-# function is called with the expanded path as argument).
-# See also module 'glob' for expansion of *, ? and [...] in pathnames.
-# (A function should also be defined to do full *sh-style environment
-# variable expansion.)
-
-def expanduser(path):
- """Expand ~ and ~user constructs.
-
- If user or $HOME is unknown, do nothing."""
- if path[:1] != '~':
- return path
- i, n = 1, len(path)
- while i < n and path[i] not in '/\\':
- i = i + 1
- if i == 1:
- if 'HOME' in os.environ:
- userhome = os.environ['HOME']
- elif not 'HOMEPATH' in os.environ:
- return path
- else:
- try:
- drive = os.environ['HOMEDRIVE']
- except KeyError:
- drive = ''
- userhome = join(drive, os.environ['HOMEPATH'])
- else:
- return path
- return userhome + path[i:]
-
-
-# Expand paths containing shell variable substitutions.
-# The following rules apply:
-# - no expansion within single quotes
-# - no escape character, except for '$$' which is translated into '$'
-# - ${varname} is accepted.
-# - varnames can be made out of letters, digits and the character '_'
-# XXX With COMMAND.COM you can use any characters in a variable name,
-# XXX except '^|<>='.
-
-def expandvars(path):
- """Expand shell variables of form $var and ${var}.
-
- Unknown variables are left unchanged."""
- if '$' not in path:
- return path
- import string
- varchars = string.ascii_letters + string.digits + '_-'
- res = ''
- index = 0
- pathlen = len(path)
- while index < pathlen:
- c = path[index]
- if c == '\'': # no expansion within single quotes
- path = path[index + 1:]
- pathlen = len(path)
- try:
- index = path.index('\'')
- res = res + '\'' + path[:index + 1]
- except ValueError:
- res = res + path
- index = pathlen - 1
- elif c == '$': # variable or '$$'
- if path[index + 1:index + 2] == '$':
- res = res + c
- index = index + 1
- elif path[index + 1:index + 2] == '{':
- path = path[index+2:]
- pathlen = len(path)
- try:
- index = path.index('}')
- var = path[:index]
- if var in os.environ:
- res = res + os.environ[var]
- except ValueError:
- res = res + path
- index = pathlen - 1
- else:
- var = ''
- index = index + 1
- c = path[index:index + 1]
- while c != '' and c in varchars:
- var = var + c
- index = index + 1
- c = path[index:index + 1]
- if var in os.environ:
- res = res + os.environ[var]
- if c != '':
- res = res + c
- else:
- res = res + c
- index = index + 1
- return res
-
-
-# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
-# Previously, this function also truncated pathnames to 8+3 format,
-# but as this module is called "ntpath", that's obviously wrong!
-
-def normpath(path):
- """Normalize path, eliminating double slashes, etc."""
- path = path.replace("/", "\\")
- prefix, path = splitdrive(path)
- # We need to be careful here. If the prefix is empty, and the path starts
- # with a backslash, it could either be an absolute path on the current
- # drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
- # is therefore imperative NOT to collapse multiple backslashes blindly in
- # that case.
- # The code below preserves multiple backslashes when there is no drive
- # letter. This means that the invalid filename \\\a\b is preserved
- # unchanged, where a\\\b is normalised to a\b. It's not clear that there
- # is any better behaviour for such edge cases.
- if prefix == '':
- # No drive letter - preserve initial backslashes
- while path[:1] == "\\":
- prefix = prefix + "\\"
- path = path[1:]
- else:
- # We have a drive letter - collapse initial backslashes
- if path.startswith("\\"):
- prefix = prefix + "\\"
- path = path.lstrip("\\")
- comps = path.split("\\")
- i = 0
- while i < len(comps):
- if comps[i] in ('.', ''):
- del comps[i]
- elif comps[i] == '..':
- if i > 0 and comps[i-1] != '..':
- del comps[i-1:i+1]
- i -= 1
- elif i == 0 and prefix.endswith("\\"):
- del comps[i]
- else:
- i += 1
- else:
- i += 1
- # If the path is now empty, substitute '.'
- if not prefix and not comps:
- comps.append('.')
- return prefix + "\\".join(comps)
-
-
-# Return an absolute path.
-try:
- from nt import _getfullpathname
-
-except ImportError: # not running on Windows - mock up something sensible
- def abspath(path):
- """Return the absolute version of a path."""
- if not isabs(path):
- path = join(os.getcwd(), path)
- return normpath(path)
-
-else: # use native Windows method on Windows
- def abspath(path):
- """Return the absolute version of a path."""
-
- if path: # Empty path must return current working directory.
- try:
- path = _getfullpathname(path)
- except WindowsError:
- pass # Bad path - return unchanged.
- else:
- path = os.getcwd()
- return normpath(path)
-
-# realpath is a no-op on systems without islink support
-realpath = abspath
-# Win9x family and earlier have no Unicode filename support.
-supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
- sys.getwindowsversion()[3] >= 2)
diff --git a/sys/lib/python/nturl2path.py b/sys/lib/python/nturl2path.py
deleted file mode 100644
index 31064044e..000000000
--- a/sys/lib/python/nturl2path.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""Convert a NT pathname to a file URL and vice versa."""
-
-def url2pathname(url):
- """OS-specific conversion from a relative URL of the 'file' scheme
- to a file system path; not recommended for general use."""
- # e.g.
- # ///C|/foo/bar/spam.foo
- # becomes
- # C:\foo\bar\spam.foo
- import string, urllib
- # Windows itself uses ":" even in URLs.
- url = url.replace(':', '|')
- if not '|' in url:
- # No drive specifier, just convert slashes
- if url[:4] == '////':
- # path is something like ////host/path/on/remote/host
- # convert this to \\host\path\on\remote\host
- # (notice halving of slashes at the start of the path)
- url = url[2:]
- components = url.split('/')
- # make sure not to convert quoted slashes :-)
- return urllib.unquote('\\'.join(components))
- comp = url.split('|')
- if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
- error = 'Bad URL: ' + url
- raise IOError, error
- drive = comp[0][-1].upper()
- components = comp[1].split('/')
- path = drive + ':'
- for comp in components:
- if comp:
- path = path + '\\' + urllib.unquote(comp)
- return path
-
-def pathname2url(p):
- """OS-specific conversion from a file system path to a relative URL
- of the 'file' scheme; not recommended for general use."""
- # e.g.
- # C:\foo\bar\spam.foo
- # becomes
- # ///C|/foo/bar/spam.foo
- import urllib
- if not ':' in p:
- # No drive specifier, just convert slashes and quote the name
- if p[:2] == '\\\\':
- # path is something like \\host\path\on\remote\host
- # convert this to ////host/path/on/remote/host
- # (notice doubling of slashes at the start of the path)
- p = '\\\\' + p
- components = p.split('\\')
- return urllib.quote('/'.join(components))
- comp = p.split(':')
- if len(comp) != 2 or len(comp[0]) > 1:
- error = 'Bad path: ' + p
- raise IOError, error
-
- drive = urllib.quote(comp[0].upper())
- components = comp[1].split('\\')
- path = '///' + drive + '|'
- for comp in components:
- if comp:
- path = path + '/' + urllib.quote(comp)
- return path
diff --git a/sys/lib/python/opcode.py b/sys/lib/python/opcode.py
deleted file mode 100644
index 095ca42ec..000000000
--- a/sys/lib/python/opcode.py
+++ /dev/null
@@ -1,185 +0,0 @@
-
-"""
-opcode module - potentially shared between dis and other modules which
-operate on bytecodes (e.g. peephole optimizers).
-"""
-
-__all__ = ["cmp_op", "hasconst", "hasname", "hasjrel", "hasjabs",
- "haslocal", "hascompare", "hasfree", "opname", "opmap",
- "HAVE_ARGUMENT", "EXTENDED_ARG"]
-
-cmp_op = ('<', '<=', '==', '!=', '>', '>=', 'in', 'not in', 'is',
- 'is not', 'exception match', 'BAD')
-
-hasconst = []
-hasname = []
-hasjrel = []
-hasjabs = []
-haslocal = []
-hascompare = []
-hasfree = []
-
-opmap = {}
-opname = [''] * 256
-for op in range(256): opname[op] = '<%r>' % (op,)
-del op
-
-def def_op(name, op):
- opname[op] = name
- opmap[name] = op
-
-def name_op(name, op):
- def_op(name, op)
- hasname.append(op)
-
-def jrel_op(name, op):
- def_op(name, op)
- hasjrel.append(op)
-
-def jabs_op(name, op):
- def_op(name, op)
- hasjabs.append(op)
-
-# Instruction opcodes for compiled code
-# Blank lines correspond to available opcodes
-
-def_op('STOP_CODE', 0)
-def_op('POP_TOP', 1)
-def_op('ROT_TWO', 2)
-def_op('ROT_THREE', 3)
-def_op('DUP_TOP', 4)
-def_op('ROT_FOUR', 5)
-
-def_op('NOP', 9)
-def_op('UNARY_POSITIVE', 10)
-def_op('UNARY_NEGATIVE', 11)
-def_op('UNARY_NOT', 12)
-def_op('UNARY_CONVERT', 13)
-
-def_op('UNARY_INVERT', 15)
-
-def_op('LIST_APPEND', 18)
-def_op('BINARY_POWER', 19)
-def_op('BINARY_MULTIPLY', 20)
-def_op('BINARY_DIVIDE', 21)
-def_op('BINARY_MODULO', 22)
-def_op('BINARY_ADD', 23)
-def_op('BINARY_SUBTRACT', 24)
-def_op('BINARY_SUBSCR', 25)
-def_op('BINARY_FLOOR_DIVIDE', 26)
-def_op('BINARY_TRUE_DIVIDE', 27)
-def_op('INPLACE_FLOOR_DIVIDE', 28)
-def_op('INPLACE_TRUE_DIVIDE', 29)
-def_op('SLICE+0', 30)
-def_op('SLICE+1', 31)
-def_op('SLICE+2', 32)
-def_op('SLICE+3', 33)
-
-def_op('STORE_SLICE+0', 40)
-def_op('STORE_SLICE+1', 41)
-def_op('STORE_SLICE+2', 42)
-def_op('STORE_SLICE+3', 43)
-
-def_op('DELETE_SLICE+0', 50)
-def_op('DELETE_SLICE+1', 51)
-def_op('DELETE_SLICE+2', 52)
-def_op('DELETE_SLICE+3', 53)
-
-def_op('INPLACE_ADD', 55)
-def_op('INPLACE_SUBTRACT', 56)
-def_op('INPLACE_MULTIPLY', 57)
-def_op('INPLACE_DIVIDE', 58)
-def_op('INPLACE_MODULO', 59)
-def_op('STORE_SUBSCR', 60)
-def_op('DELETE_SUBSCR', 61)
-def_op('BINARY_LSHIFT', 62)
-def_op('BINARY_RSHIFT', 63)
-def_op('BINARY_AND', 64)
-def_op('BINARY_XOR', 65)
-def_op('BINARY_OR', 66)
-def_op('INPLACE_POWER', 67)
-def_op('GET_ITER', 68)
-
-def_op('PRINT_EXPR', 70)
-def_op('PRINT_ITEM', 71)
-def_op('PRINT_NEWLINE', 72)
-def_op('PRINT_ITEM_TO', 73)
-def_op('PRINT_NEWLINE_TO', 74)
-def_op('INPLACE_LSHIFT', 75)
-def_op('INPLACE_RSHIFT', 76)
-def_op('INPLACE_AND', 77)
-def_op('INPLACE_XOR', 78)
-def_op('INPLACE_OR', 79)
-def_op('BREAK_LOOP', 80)
-def_op('WITH_CLEANUP', 81)
-def_op('LOAD_LOCALS', 82)
-def_op('RETURN_VALUE', 83)
-def_op('IMPORT_STAR', 84)
-def_op('EXEC_STMT', 85)
-def_op('YIELD_VALUE', 86)
-def_op('POP_BLOCK', 87)
-def_op('END_FINALLY', 88)
-def_op('BUILD_CLASS', 89)
-
-HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
-
-name_op('STORE_NAME', 90) # Index in name list
-name_op('DELETE_NAME', 91) # ""
-def_op('UNPACK_SEQUENCE', 92) # Number of tuple items
-jrel_op('FOR_ITER', 93)
-
-name_op('STORE_ATTR', 95) # Index in name list
-name_op('DELETE_ATTR', 96) # ""
-name_op('STORE_GLOBAL', 97) # ""
-name_op('DELETE_GLOBAL', 98) # ""
-def_op('DUP_TOPX', 99) # number of items to duplicate
-def_op('LOAD_CONST', 100) # Index in const list
-hasconst.append(100)
-name_op('LOAD_NAME', 101) # Index in name list
-def_op('BUILD_TUPLE', 102) # Number of tuple items
-def_op('BUILD_LIST', 103) # Number of list items
-def_op('BUILD_MAP', 104) # Always zero for now
-name_op('LOAD_ATTR', 105) # Index in name list
-def_op('COMPARE_OP', 106) # Comparison operator
-hascompare.append(106)
-name_op('IMPORT_NAME', 107) # Index in name list
-name_op('IMPORT_FROM', 108) # Index in name list
-
-jrel_op('JUMP_FORWARD', 110) # Number of bytes to skip
-jrel_op('JUMP_IF_FALSE', 111) # ""
-jrel_op('JUMP_IF_TRUE', 112) # ""
-jabs_op('JUMP_ABSOLUTE', 113) # Target byte offset from beginning of code
-
-name_op('LOAD_GLOBAL', 116) # Index in name list
-
-jabs_op('CONTINUE_LOOP', 119) # Target address
-jrel_op('SETUP_LOOP', 120) # Distance to target address
-jrel_op('SETUP_EXCEPT', 121) # ""
-jrel_op('SETUP_FINALLY', 122) # ""
-
-def_op('LOAD_FAST', 124) # Local variable number
-haslocal.append(124)
-def_op('STORE_FAST', 125) # Local variable number
-haslocal.append(125)
-def_op('DELETE_FAST', 126) # Local variable number
-haslocal.append(126)
-
-def_op('RAISE_VARARGS', 130) # Number of raise arguments (1, 2, or 3)
-def_op('CALL_FUNCTION', 131) # #args + (#kwargs << 8)
-def_op('MAKE_FUNCTION', 132) # Number of args with default values
-def_op('BUILD_SLICE', 133) # Number of items
-def_op('MAKE_CLOSURE', 134)
-def_op('LOAD_CLOSURE', 135)
-hasfree.append(135)
-def_op('LOAD_DEREF', 136)
-hasfree.append(136)
-def_op('STORE_DEREF', 137)
-hasfree.append(137)
-
-def_op('CALL_FUNCTION_VAR', 140) # #args + (#kwargs << 8)
-def_op('CALL_FUNCTION_KW', 141) # #args + (#kwargs << 8)
-def_op('CALL_FUNCTION_VAR_KW', 142) # #args + (#kwargs << 8)
-def_op('EXTENDED_ARG', 143)
-EXTENDED_ARG = 143
-
-del def_op, name_op, jrel_op, jabs_op
diff --git a/sys/lib/python/optparse.py b/sys/lib/python/optparse.py
deleted file mode 100644
index 62d2f7e03..000000000
--- a/sys/lib/python/optparse.py
+++ /dev/null
@@ -1,1682 +0,0 @@
-"""optparse - a powerful, extensible, and easy-to-use option parser.
-
-By Greg Ward <gward@python.net>
-
-Originally distributed as Optik; see http://optik.sourceforge.net/ .
-
-If you have problems with this module, please do not file bugs,
-patches, or feature requests with Python; instead, use Optik's
-SourceForge project page:
- http://sourceforge.net/projects/optik
-
-For support, use the optik-users@lists.sourceforge.net mailing list
-(http://lists.sourceforge.net/lists/listinfo/optik-users).
-"""
-
-# Python developers: please do not make changes to this file, since
-# it is automatically generated from the Optik source code.
-
-__version__ = "1.5.3"
-
-__all__ = ['Option',
- 'SUPPRESS_HELP',
- 'SUPPRESS_USAGE',
- 'Values',
- 'OptionContainer',
- 'OptionGroup',
- 'OptionParser',
- 'HelpFormatter',
- 'IndentedHelpFormatter',
- 'TitledHelpFormatter',
- 'OptParseError',
- 'OptionError',
- 'OptionConflictError',
- 'OptionValueError',
- 'BadOptionError']
-
-__copyright__ = """
-Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
-Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the name of the author nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
-TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""
-
-import sys, os
-import types
-import textwrap
-
-def _repr(self):
- return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
-
-
-# This file was generated from:
-# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
-# Id: option.py 522 2006-06-11 16:22:03Z gward
-# Id: help.py 527 2006-07-23 15:21:30Z greg
-# Id: errors.py 509 2006-04-20 00:58:24Z gward
-
-try:
- from gettext import gettext
-except ImportError:
- def gettext(message):
- return message
-_ = gettext
-
-
-class OptParseError (Exception):
- def __init__(self, msg):
- self.msg = msg
-
- def __str__(self):
- return self.msg
-
-
-class OptionError (OptParseError):
- """
- Raised if an Option instance is created with invalid or
- inconsistent arguments.
- """
-
- def __init__(self, msg, option):
- self.msg = msg
- self.option_id = str(option)
-
- def __str__(self):
- if self.option_id:
- return "option %s: %s" % (self.option_id, self.msg)
- else:
- return self.msg
-
-class OptionConflictError (OptionError):
- """
- Raised if conflicting options are added to an OptionParser.
- """
-
-class OptionValueError (OptParseError):
- """
- Raised if an invalid option value is encountered on the command
- line.
- """
-
-class BadOptionError (OptParseError):
- """
- Raised if an invalid option is seen on the command line.
- """
- def __init__(self, opt_str):
- self.opt_str = opt_str
-
- def __str__(self):
- return _("no such option: %s") % self.opt_str
-
-class AmbiguousOptionError (BadOptionError):
- """
- Raised if an ambiguous option is seen on the command line.
- """
- def __init__(self, opt_str, possibilities):
- BadOptionError.__init__(self, opt_str)
- self.possibilities = possibilities
-
- def __str__(self):
- return (_("ambiguous option: %s (%s?)")
- % (self.opt_str, ", ".join(self.possibilities)))
-
-
-class HelpFormatter:
-
- """
- Abstract base class for formatting option help. OptionParser
- instances should use one of the HelpFormatter subclasses for
- formatting help; by default IndentedHelpFormatter is used.
-
- Instance attributes:
- parser : OptionParser
- the controlling OptionParser instance
- indent_increment : int
- the number of columns to indent per nesting level
- max_help_position : int
- the maximum starting column for option help text
- help_position : int
- the calculated starting column for option help text;
- initially the same as the maximum
- width : int
- total number of columns for output (pass None to constructor for
- this value to be taken from the $COLUMNS environment variable)
- level : int
- current indentation level
- current_indent : int
- current indentation level (in columns)
- help_width : int
- number of columns available for option help text (calculated)
- default_tag : str
- text to replace with each option's default value, "%default"
- by default. Set to false value to disable default value expansion.
- option_strings : { Option : str }
- maps Option instances to the snippet of help text explaining
- the syntax of that option, e.g. "-h, --help" or
- "-fFILE, --file=FILE"
- _short_opt_fmt : str
- format string controlling how short options with values are
- printed in help text. Must be either "%s%s" ("-fFILE") or
- "%s %s" ("-f FILE"), because those are the two syntaxes that
- Optik supports.
- _long_opt_fmt : str
- similar but for long options; must be either "%s %s" ("--file FILE")
- or "%s=%s" ("--file=FILE").
- """
-
- NO_DEFAULT_VALUE = "none"
-
- def __init__(self,
- indent_increment,
- max_help_position,
- width,
- short_first):
- self.parser = None
- self.indent_increment = indent_increment
- self.help_position = self.max_help_position = max_help_position
- if width is None:
- try:
- width = int(os.environ['COLUMNS'])
- except (KeyError, ValueError):
- width = 80
- width -= 2
- self.width = width
- self.current_indent = 0
- self.level = 0
- self.help_width = None # computed later
- self.short_first = short_first
- self.default_tag = "%default"
- self.option_strings = {}
- self._short_opt_fmt = "%s %s"
- self._long_opt_fmt = "%s=%s"
-
- def set_parser(self, parser):
- self.parser = parser
-
- def set_short_opt_delimiter(self, delim):
- if delim not in ("", " "):
- raise ValueError(
- "invalid metavar delimiter for short options: %r" % delim)
- self._short_opt_fmt = "%s" + delim + "%s"
-
- def set_long_opt_delimiter(self, delim):
- if delim not in ("=", " "):
- raise ValueError(
- "invalid metavar delimiter for long options: %r" % delim)
- self._long_opt_fmt = "%s" + delim + "%s"
-
- def indent(self):
- self.current_indent += self.indent_increment
- self.level += 1
-
- def dedent(self):
- self.current_indent -= self.indent_increment
- assert self.current_indent >= 0, "Indent decreased below 0."
- self.level -= 1
-
- def format_usage(self, usage):
- raise NotImplementedError, "subclasses must implement"
-
- def format_heading(self, heading):
- raise NotImplementedError, "subclasses must implement"
-
- def _format_text(self, text):
- """
- Format a paragraph of free-form text for inclusion in the
- help output at the current indentation level.
- """
- text_width = self.width - self.current_indent
- indent = " "*self.current_indent
- return textwrap.fill(text,
- text_width,
- initial_indent=indent,
- subsequent_indent=indent)
-
- def format_description(self, description):
- if description:
- return self._format_text(description) + "\n"
- else:
- return ""
-
- def format_epilog(self, epilog):
- if epilog:
- return "\n" + self._format_text(epilog) + "\n"
- else:
- return ""
-
-
- def expand_default(self, option):
- if self.parser is None or not self.default_tag:
- return option.help
-
- default_value = self.parser.defaults.get(option.dest)
- if default_value is NO_DEFAULT or default_value is None:
- default_value = self.NO_DEFAULT_VALUE
-
- return option.help.replace(self.default_tag, str(default_value))
-
def format_option(self, option):
    """Return the complete, wrapped help block for one option:
    the option-strings column plus the (possibly multi-line) help text."""
    # The help for each option consists of two parts:
    #   * the opt strings and metavars
    #     eg. ("-x", or "-fFILENAME, --file=FILENAME")
    #   * the user-supplied help string
    #     eg. ("turn on expert mode", "read data from FILENAME")
    #
    # If possible, we write both of these on the same line:
    #   -x      turn on expert mode
    #
    # But if the opt string list is too long, we put the help
    # string on a second line, indented to the same column it would
    # start in if it fit on the first line.
    #   -fFILENAME, --file=FILENAME
    #           read data from FILENAME
    result = []
    # self.option_strings is precomputed by store_option_strings().
    opts = self.option_strings[option]
    opt_width = self.help_position - self.current_indent - 2
    if len(opts) > opt_width:
        # opt strings too wide: put help on the following line
        opts = "%*s%s\n" % (self.current_indent, "", opts)
        indent_first = self.help_position
    else:                       # start help on same line as opts
        opts = "%*s%-*s  " % (self.current_indent, "", opt_width, opts)
        indent_first = 0
    result.append(opts)
    if option.help:
        # expand %default before wrapping to the help column width
        help_text = self.expand_default(option)
        help_lines = textwrap.wrap(help_text, self.help_width)
        result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
        result.extend(["%*s%s\n" % (self.help_position, "", line)
                       for line in help_lines[1:]])
    elif opts[-1] != "\n":
        # no help text: make sure the opts line is newline-terminated
        result.append("\n")
    return "".join(result)
-
def store_option_strings(self, parser):
    """Precompute and cache the formatted option-strings column for
    every option in 'parser', then derive self.help_position and
    self.help_width from the widest entry."""
    self.indent()
    max_len = 0
    for opt in parser.option_list:
        strings = self.format_option_strings(opt)
        self.option_strings[opt] = strings
        max_len = max(max_len, len(strings) + self.current_indent)
    # grouped options render one indent level deeper than top-level
    # options, so measure them at that deeper indent
    self.indent()
    for group in parser.option_groups:
        for opt in group.option_list:
            strings = self.format_option_strings(opt)
            self.option_strings[opt] = strings
            max_len = max(max_len, len(strings) + self.current_indent)
    self.dedent()
    self.dedent()
    # help text starts two columns past the widest option string, but
    # never further right than max_help_position
    self.help_position = min(max_len + 2, self.max_help_position)
    self.help_width = self.width - self.help_position
-
def format_option_strings(self, option):
    """Return a comma-separated list of option strings & metavariables."""
    if option.takes_value():
        metavar = option.metavar or option.dest.upper()
        shorts = [self._short_opt_fmt % (sopt, metavar)
                  for sopt in option._short_opts]
        longs = [self._long_opt_fmt % (lopt, metavar)
                 for lopt in option._long_opts]
    else:
        shorts = list(option._short_opts)
        longs = list(option._long_opts)

    # self.short_first decides which flavour leads the listing.
    pieces = shorts + longs if self.short_first else longs + shorts
    return ", ".join(pieces)
-
class IndentedHelpFormatter(HelpFormatter):
    """Format help with indented section bodies (the default formatter)."""

    def __init__(self, indent_increment=2, max_help_position=24,
                 width=None, short_first=1):
        HelpFormatter.__init__(self, indent_increment, max_help_position,
                               width, short_first)

    def format_usage(self, usage):
        return _("Usage: %s\n") % usage

    def format_heading(self, heading):
        # Heading indented to the current level, terminated by a colon.
        return "%*s%s:\n" % (self.current_indent, "", heading)
-
-
class TitledHelpFormatter(HelpFormatter):
    """Format help with underlined section headers."""

    def __init__(self, indent_increment=0, max_help_position=24,
                 width=None, short_first=0):
        HelpFormatter.__init__(self, indent_increment, max_help_position,
                               width, short_first)

    def format_usage(self, usage):
        return "%s %s\n" % (self.format_heading(_("Usage")), usage)

    def format_heading(self, heading):
        # Level-0 headings are underlined with "=", level-1 with "-".
        return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
-
-
-def _parse_num(val, type):
- if val[:2].lower() == "0x": # hexadecimal
- radix = 16
- elif val[:2].lower() == "0b": # binary
- radix = 2
- val = val[2:] or "0" # have to remove "0b" prefix
- elif val[:1] == "0": # octal
- radix = 8
- else: # decimal
- radix = 10
-
- return type(val, radix)
-
def _parse_int(val):
    # int conversion with _parse_num()'s radix-prefix handling.
    return _parse_num(val, int)
-
def _parse_long(val):
    # Python 2 'long' counterpart of _parse_int().
    return _parse_num(val, long)
-
# Maps an Option 'type' name to a (converter, translated-type-name)
# pair; the name is interpolated into check_builtin()'s error message.
_builtin_cvt = { "int" : (_parse_int, _("integer")),
                 "long" : (_parse_long, _("long integer")),
                 "float" : (float, _("floating-point")),
                 "complex" : (complex, _("complex")) }
-
def check_builtin(option, opt, value):
    """Convert 'value' with the converter registered in _builtin_cvt for
    option.type; raise OptionValueError on a malformed argument."""
    (cvt, what) = _builtin_cvt[option.type]
    try:
        return cvt(value)
    except ValueError:
        raise OptionValueError(
            _("option %s: invalid %s value: %r") % (opt, what, value))
-
def check_choice(option, opt, value):
    """Validate 'value' against option.choices; return it unchanged, or
    raise OptionValueError listing the permitted choices."""
    if value not in option.choices:
        choices = ", ".join(map(repr, option.choices))
        raise OptionValueError(
            _("option %s: invalid choice: %r (choose from %s)")
            % (opt, value, choices))
    return value
-
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.  Code tests for it with
# "is"/"is not" (identity), so even an equal user-supplied tuple is
# still distinguishable from the sentinel.
NO_DEFAULT = ("NO", "DEFAULT")
-
-
class Option:
    """
    A single command-line option: holds its option strings, the action
    taken when it is seen, and how its argument (if any) is validated,
    converted and stored.

    Instance attributes:
      _short_opts : [string]
      _long_opts : [string]

      action : string
      type : string
      dest : string
      default : any
      nargs : int
      const : any
      choices : [string]
      callback : function
      callback_args : (any*)
      callback_kwargs : { string : any }
      help : string
      metavar : string
    """

    # The list of instance attributes that may be set through
    # keyword args to the constructor.
    ATTRS = ['action',
             'type',
             'dest',
             'default',
             'nargs',
             'const',
             'choices',
             'callback',
             'callback_args',
             'callback_kwargs',
             'help',
             'metavar']

    # The set of actions allowed by option parsers.  Explicitly listed
    # here so the constructor can validate its arguments.
    ACTIONS = ("store",
               "store_const",
               "store_true",
               "store_false",
               "append",
               "append_const",
               "count",
               "callback",
               "help",
               "version")

    # The set of actions that involve storing a value somewhere;
    # also listed just for constructor argument validation.  (If
    # the action is one of these, there must be a destination.)
    STORE_ACTIONS = ("store",
                     "store_const",
                     "store_true",
                     "store_false",
                     "append",
                     "append_const",
                     "count")

    # The set of actions for which it makes sense to supply a value
    # type, ie. which may consume an argument from the command line.
    TYPED_ACTIONS = ("store",
                     "append",
                     "callback")

    # The set of actions which *require* a value type, ie. that
    # always consume an argument from the command line.
    ALWAYS_TYPED_ACTIONS = ("store",
                            "append")

    # The set of actions which take a 'const' attribute.
    CONST_ACTIONS = ("store_const",
                     "append_const")

    # The set of known types for option parsers.  Again, listed here for
    # constructor argument validation.
    TYPES = ("string", "int", "long", "float", "complex", "choice")

    # Dictionary of argument checking functions, which convert and
    # validate option arguments according to the option type.
    #
    # Signature of checking functions is:
    #     check(option : Option, opt : string, value : string) -> any
    # where
    #     option is the Option instance calling the checker
    #     opt is the actual option seen on the command-line
    #     (eg. "-a", "--file")
    #     value is the option argument seen on the command-line
    #
    # The return value should be in the appropriate Python type
    # for option.type -- eg. an integer if option.type == "int".
    #
    # If no checker is defined for a type, arguments will be
    # unchecked and remain strings.
    TYPE_CHECKER = { "int" : check_builtin,
                     "long" : check_builtin,
                     "float" : check_builtin,
                     "complex": check_builtin,
                     "choice" : check_choice,
                   }


    # CHECK_METHODS is a list of unbound method objects; they are called
    # by the constructor, in order, after all attributes are
    # initialized.  The list is created and filled in later, after all
    # the methods are actually defined.  (I just put it here because I
    # like to define and document all class attributes in the same
    # place.)  Subclasses that add another _check_*() method should
    # define their own CHECK_METHODS list that adds their check method
    # to those from this class.
    CHECK_METHODS = None


    # -- Constructor/initialization methods ----------------------------

    def __init__(self, *opts, **attrs):
        """Build an Option from option strings ("-f", "--file", ...) and
        keyword attributes drawn from ATTRS; raises OptionError (via the
        CHECK_METHODS) if the combination is inconsistent."""
        # Set _short_opts, _long_opts attrs from 'opts' tuple.
        # Have to be set now, in case no option strings are supplied.
        self._short_opts = []
        self._long_opts = []
        opts = self._check_opt_strings(opts)
        self._set_opt_strings(opts)

        # Set all other attrs (action, type, etc.) from 'attrs' dict
        self._set_attrs(attrs)

        # Check all the attributes we just set.  There are lots of
        # complicated interdependencies, but luckily they can be farmed
        # out to the _check_*() methods listed in CHECK_METHODS -- which
        # could be handy for subclasses!  The one thing these all share
        # is that they raise OptionError if they discover a problem.
        for checker in self.CHECK_METHODS:
            checker(self)

    def _check_opt_strings(self, opts):
        """Drop None entries and require at least one option string."""
        # Filter out None because early versions of Optik had exactly
        # one short option and one long option, either of which
        # could be None.
        opts = filter(None, opts)
        if not opts:
            raise TypeError("at least one option string must be supplied")
        return opts

    def _set_opt_strings(self, opts):
        """Sort each option string into _short_opts ("-x") or _long_opts
        ("--xxx"), validating its form along the way."""
        for opt in opts:
            if len(opt) < 2:
                raise OptionError(
                    "invalid option string %r: "
                    "must be at least two characters long" % opt, self)
            elif len(opt) == 2:
                if not (opt[0] == "-" and opt[1] != "-"):
                    raise OptionError(
                        "invalid short option string %r: "
                        "must be of the form -x, (x any non-dash char)" % opt,
                        self)
                self._short_opts.append(opt)
            else:
                if not (opt[0:2] == "--" and opt[2] != "-"):
                    raise OptionError(
                        "invalid long option string %r: "
                        "must start with --, followed by non-dash" % opt,
                        self)
                self._long_opts.append(opt)

    def _set_attrs(self, attrs):
        """Copy recognized keyword arguments onto self, default the rest
        of ATTRS (NO_DEFAULT for 'default', None otherwise), and raise
        OptionError on any leftover (unrecognized) keywords."""
        for attr in self.ATTRS:
            if attrs.has_key(attr):
                setattr(self, attr, attrs[attr])
                del attrs[attr]
            else:
                if attr == 'default':
                    setattr(self, attr, NO_DEFAULT)
                else:
                    setattr(self, attr, None)
        if attrs:
            attrs = attrs.keys()
            attrs.sort()
            raise OptionError(
                "invalid keyword arguments: %s" % ", ".join(attrs),
                self)


    # -- Constructor validation methods --------------------------------

    def _check_action(self):
        # Default the action to "store"; reject unknown actions.
        if self.action is None:
            self.action = "store"
        elif self.action not in self.ACTIONS:
            raise OptionError("invalid action: %r" % self.action, self)

    def _check_type(self):
        # Infer a missing type for value-taking actions, normalize type
        # objects/"str" to canonical names, and validate the result.
        if self.type is None:
            if self.action in self.ALWAYS_TYPED_ACTIONS:
                if self.choices is not None:
                    # The "choices" attribute implies "choice" type.
                    self.type = "choice"
                else:
                    # No type given?  "string" is the most sensible default.
                    self.type = "string"
        else:
            # Allow type objects or builtin type conversion functions
            # (int, str, etc.) as an alternative to their names.  (The
            # complicated check of __builtin__ is only necessary for
            # Python 2.1 and earlier, and is short-circuited by the
            # first check on modern Pythons.)
            import __builtin__
            if ( type(self.type) is types.TypeType or
                 (hasattr(self.type, "__name__") and
                  getattr(__builtin__, self.type.__name__, None) is self.type) ):
                self.type = self.type.__name__

            if self.type == "str":
                self.type = "string"

            if self.type not in self.TYPES:
                raise OptionError("invalid option type: %r" % self.type, self)
            if self.action not in self.TYPED_ACTIONS:
                raise OptionError(
                    "must not supply a type for action %r" % self.action, self)

    def _check_choice(self):
        # "choice" type requires a list/tuple of choices; any other type
        # must not supply one.
        if self.type == "choice":
            if self.choices is None:
                raise OptionError(
                    "must supply a list of choices for type 'choice'", self)
            elif type(self.choices) not in (types.TupleType, types.ListType):
                raise OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise OptionError(
                "must not supply choices for type %r" % self.type, self)

    def _check_dest(self):
        # No destination given, and we need one for this action.  The
        # self.type check is for callbacks that take a value.
        takes_value = (self.action in self.STORE_ACTIONS or
                       self.type is not None)
        if self.dest is None and takes_value:

            # Glean a destination from the first long option string,
            # or from the first short option string if no long options.
            if self._long_opts:
                # eg. "--foo-bar" -> "foo_bar"
                self.dest = self._long_opts[0][2:].replace('-', '_')
            else:
                self.dest = self._short_opts[0][1]

    def _check_const(self):
        # 'const' only makes sense for store_const/append_const.
        if self.action not in self.CONST_ACTIONS and self.const is not None:
            raise OptionError(
                "'const' must not be supplied for action %r" % self.action,
                self)

    def _check_nargs(self):
        # Typed actions consume one argument unless told otherwise;
        # untyped actions must not supply 'nargs' at all.
        if self.action in self.TYPED_ACTIONS:
            if self.nargs is None:
                self.nargs = 1
        elif self.nargs is not None:
            raise OptionError(
                "'nargs' must not be supplied for action %r" % self.action,
                self)

    def _check_callback(self):
        # Callback attributes are all-or-nothing: required (and
        # type-checked) for action="callback", forbidden otherwise.
        if self.action == "callback":
            if not callable(self.callback):
                raise OptionError(
                    "callback not callable: %r" % self.callback, self)
            if (self.callback_args is not None and
                type(self.callback_args) is not types.TupleType):
                raise OptionError(
                    "callback_args, if supplied, must be a tuple: not %r"
                    % self.callback_args, self)
            if (self.callback_kwargs is not None and
                type(self.callback_kwargs) is not types.DictType):
                raise OptionError(
                    "callback_kwargs, if supplied, must be a dict: not %r"
                    % self.callback_kwargs, self)
        else:
            if self.callback is not None:
                raise OptionError(
                    "callback supplied (%r) for non-callback option"
                    % self.callback, self)
            if self.callback_args is not None:
                raise OptionError(
                    "callback_args supplied for non-callback option", self)
            if self.callback_kwargs is not None:
                raise OptionError(
                    "callback_kwargs supplied for non-callback option", self)


    CHECK_METHODS = [_check_action,
                     _check_type,
                     _check_choice,
                     _check_dest,
                     _check_const,
                     _check_nargs,
                     _check_callback]


    # -- Miscellaneous methods -----------------------------------------

    def __str__(self):
        # eg. "-f/--file"
        return "/".join(self._short_opts + self._long_opts)

    __repr__ = _repr

    def takes_value(self):
        """True if this option consumes an argument from the command line."""
        return self.type is not None

    def get_opt_string(self):
        """Return this option's canonical option string: the first long
        option if any, else the first short option."""
        if self._long_opts:
            return self._long_opts[0]
        else:
            return self._short_opts[0]


    # -- Processing methods --------------------------------------------

    def check_value(self, opt, value):
        """Run 'value' through the TYPE_CHECKER for this option's type;
        a no-op (value returned unchanged) for unchecked types."""
        checker = self.TYPE_CHECKER.get(self.type)
        if checker is None:
            return value
        else:
            return checker(self, opt, value)

    def convert_value(self, opt, value):
        """Check/convert 'value': a single string when nargs == 1, else
        a sequence converted to a tuple.  None passes straight through."""
        if value is not None:
            if self.nargs == 1:
                return self.check_value(opt, value)
            else:
                return tuple([self.check_value(opt, v) for v in value])

    def process(self, opt, value, values, parser):
        """Convert 'value' and then apply this option's action to the
        'values' object (or invoke the callback)."""
        # First, convert the value(s) to the right type.  Howl if any
        # value(s) are bogus.
        value = self.convert_value(opt, value)

        # And then take whatever action is expected of us.
        # This is a separate method to make life easier for
        # subclasses to add new actions.
        return self.take_action(
            self.action, self.dest, opt, value, values, parser)

    def take_action(self, action, dest, opt, value, values, parser):
        """Dispatch on 'action': store/append/count on 'values', invoke
        the callback, or print help/version and exit the parser."""
        if action == "store":
            setattr(values, dest, value)
        elif action == "store_const":
            setattr(values, dest, self.const)
        elif action == "store_true":
            setattr(values, dest, True)
        elif action == "store_false":
            setattr(values, dest, False)
        elif action == "append":
            values.ensure_value(dest, []).append(value)
        elif action == "append_const":
            values.ensure_value(dest, []).append(self.const)
        elif action == "count":
            setattr(values, dest, values.ensure_value(dest, 0) + 1)
        elif action == "callback":
            args = self.callback_args or ()
            kwargs = self.callback_kwargs or {}
            self.callback(self, opt, value, parser, *args, **kwargs)
        elif action == "help":
            parser.print_help()
            parser.exit()
        elif action == "version":
            parser.print_version()
            parser.exit()
        else:
            raise RuntimeError, "unknown action %r" % self.action

        return 1

# class Option
-
-
# Sentinel strings: an option whose help is SUPPRESS_HELP is omitted
# from help output; a usage of SUPPRESS_USAGE disables the usage line.
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"

# For compatibility with Python 2.2 (which had no bool builtins).
try:
    True, False
except NameError:
    (True, False) = (1, 0)
-
def isbasestring(x):
    """True if 'x' is a plain or Unicode string (pre-'basestring' test)."""
    return isinstance(x, (types.StringType, types.UnicodeType))
-
class Values:
    """Plain attribute container for parsed option values; also knows
    how to absorb values from dicts, modules and executed files."""

    def __init__(self, defaults=None):
        # Seed instance attributes from an optional {dest : value} map.
        if defaults:
            for (attr, val) in defaults.items():
                setattr(self, attr, val)

    def __str__(self):
        return str(self.__dict__)

    __repr__ = _repr

    def __cmp__(self, other):
        # Compare by attribute dict; anything that is neither a Values
        # nor a dict always sorts after self.
        if isinstance(other, Values):
            return cmp(self.__dict__, other.__dict__)
        elif isinstance(other, types.DictType):
            return cmp(self.__dict__, other)
        else:
            return -1

    def _update_careful(self, dict):
        """
        Update the option values from an arbitrary dictionary, but only
        use keys from dict that already have a corresponding attribute
        in self.  Any keys in dict without a corresponding attribute
        are silently ignored.
        """
        for attr in dir(self):
            if dict.has_key(attr):
                dval = dict[attr]
                # None values never overwrite an existing attribute.
                if dval is not None:
                    setattr(self, attr, dval)

    def _update_loose(self, dict):
        """
        Update the option values from an arbitrary dictionary,
        using all keys from the dictionary regardless of whether
        they have a corresponding attribute in self or not.
        """
        self.__dict__.update(dict)

    def _update(self, dict, mode):
        # Dispatch to the "careful" or "loose" update strategy.
        if mode == "careful":
            self._update_careful(dict)
        elif mode == "loose":
            self._update_loose(dict)
        else:
            raise ValueError, "invalid update mode: %r" % mode

    def read_module(self, modname, mode="careful"):
        """Import 'modname' and absorb its module-level names."""
        __import__(modname)
        mod = sys.modules[modname]
        self._update(vars(mod), mode)

    def read_file(self, filename, mode="careful"):
        """Execute the Python file 'filename' and absorb the names it
        defines.  NOTE: runs arbitrary code; only use on trusted files."""
        vars = {}
        execfile(filename, vars)
        self._update(vars, mode)

    def ensure_value(self, attr, value):
        """Return getattr(self, attr), first setting it to 'value' if the
        attribute is currently missing or None."""
        if not hasattr(self, attr) or getattr(self, attr) is None:
            setattr(self, attr, value)
        return getattr(self, attr)
-
-
class OptionContainer:

    """
    Abstract base class of OptionParser and OptionGroup: anything that
    owns a list of Options and the machinery to add, find, remove and
    help-format them.

    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).

    Instance attributes:
      option_list : [Option]
        the list of Option objects contained by this OptionContainer
      _short_opt : { string : Option }
        dictionary mapping short option strings, eg. "-f" or "-X",
        to the Option instances that implement them.  If an Option
        has multiple short option strings, it will appears in this
        dictionary multiple times. [1]
      _long_opt : { string : Option }
        dictionary mapping long option strings, eg. "--file" or
        "--exclude", to the Option instances that implement them.
        Again, a given Option can occur multiple times in this
        dictionary. [1]
      defaults : { string : any }
        dictionary mapping option destination names to default
        values for each destination [1]

    [1] These mappings are common to (shared by) all components of the
        controlling OptionParser, where they are initially created.

    """

    def __init__(self, option_class, conflict_handler, description):
        # Initialize the option list and related data structures.
        # This method must be provided by subclasses, and it must
        # initialize at least the following instance attributes:
        # option_list, _short_opt, _long_opt, defaults.
        self._create_option_list()

        self.option_class = option_class
        self.set_conflict_handler(conflict_handler)
        self.set_description(description)

    def _create_option_mappings(self):
        # For use by OptionParser constructor -- create the master
        # option mappings used by this OptionParser and all
        # OptionGroups that it owns.
        self._short_opt = {}            # single letter -> Option instance
        self._long_opt = {}             # long option -> Option instance
        self.defaults = {}              # maps option dest -> default value


    def _share_option_mappings(self, parser):
        # For use by OptionGroup constructor -- use shared option
        # mappings from the OptionParser that owns this OptionGroup.
        self._short_opt = parser._short_opt
        self._long_opt = parser._long_opt
        self.defaults = parser.defaults

    def set_conflict_handler(self, handler):
        # "error" raises on clashing option strings; "resolve" quietly
        # removes the older option's clashing strings.
        if handler not in ("error", "resolve"):
            raise ValueError, "invalid conflict_resolution value %r" % handler
        self.conflict_handler = handler

    def set_description(self, description):
        self.description = description

    def get_description(self):
        return self.description


    def destroy(self):
        """see OptionParser.destroy()."""
        del self._short_opt
        del self._long_opt
        del self.defaults


    # -- Option-adding methods -----------------------------------------

    def _check_conflict(self, option):
        """Detect option-string clashes between 'option' and options
        already registered, handling them per self.conflict_handler."""
        conflict_opts = []
        for opt in option._short_opts:
            if self._short_opt.has_key(opt):
                conflict_opts.append((opt, self._short_opt[opt]))
        for opt in option._long_opts:
            if self._long_opt.has_key(opt):
                conflict_opts.append((opt, self._long_opt[opt]))

        if conflict_opts:
            handler = self.conflict_handler
            if handler == "error":
                raise OptionConflictError(
                    "conflicting option string(s): %s"
                    % ", ".join([co[0] for co in conflict_opts]),
                    option)
            elif handler == "resolve":
                # Strip the clashing strings from the previously added
                # option; drop it entirely once it has none left.
                for (opt, c_option) in conflict_opts:
                    if opt.startswith("--"):
                        c_option._long_opts.remove(opt)
                        del self._long_opt[opt]
                    else:
                        c_option._short_opts.remove(opt)
                        del self._short_opt[opt]
                    if not (c_option._short_opts or c_option._long_opts):
                        c_option.container.option_list.remove(c_option)

    def add_option(self, *args, **kwargs):
        """add_option(Option)
        add_option(opt_str, ..., kwarg=val, ...)
        """
        # Either construct an Option from strings/keywords, or accept a
        # ready-made Option instance.
        if type(args[0]) is types.StringType:
            option = self.option_class(*args, **kwargs)
        elif len(args) == 1 and not kwargs:
            option = args[0]
            if not isinstance(option, Option):
                raise TypeError, "not an Option instance: %r" % option
        else:
            raise TypeError, "invalid arguments"

        self._check_conflict(option)

        self.option_list.append(option)
        option.container = self
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

        if option.dest is not None:     # option has a dest, we need a default
            if option.default is not NO_DEFAULT:
                self.defaults[option.dest] = option.default
            elif not self.defaults.has_key(option.dest):
                self.defaults[option.dest] = None

        return option

    def add_options(self, option_list):
        """Add a sequence of Option objects one at a time."""
        for option in option_list:
            self.add_option(option)

    # -- Option query/removal methods ----------------------------------

    def get_option(self, opt_str):
        """Return the Option matching 'opt_str' (short or long), or None."""
        return (self._short_opt.get(opt_str) or
                self._long_opt.get(opt_str))

    def has_option(self, opt_str):
        """True if 'opt_str' (short or long) is a known option string."""
        return (self._short_opt.has_key(opt_str) or
                self._long_opt.has_key(opt_str))

    def remove_option(self, opt_str):
        """Completely remove the option matching 'opt_str': all of its
        option strings plus its entry in its container's option list."""
        option = self._short_opt.get(opt_str)
        if option is None:
            option = self._long_opt.get(opt_str)
        if option is None:
            raise ValueError("no such option %r" % opt_str)

        for opt in option._short_opts:
            del self._short_opt[opt]
        for opt in option._long_opts:
            del self._long_opt[opt]
        option.container.option_list.remove(option)


    # -- Help-formatting methods ---------------------------------------

    def format_option_help(self, formatter):
        """Concatenate formatted help for every non-suppressed option."""
        if not self.option_list:
            return ""
        result = []
        for option in self.option_list:
            if not option.help is SUPPRESS_HELP:
                result.append(formatter.format_option(option))
        return "".join(result)

    def format_description(self, formatter):
        return formatter.format_description(self.get_description())

    def format_help(self, formatter):
        """Format this container's description plus its option help."""
        result = []
        if self.description:
            result.append(self.format_description(formatter))
        if self.option_list:
            result.append(self.format_option_help(formatter))
        return "\n".join(result)
-
-
class OptionGroup (OptionContainer):
    """A titled group of options inside an OptionParser.  Groups share
    the parser's option/default mappings and only affect help layout."""

    def __init__(self, parser, title, description=None):
        self.parser = parser
        OptionContainer.__init__(self, parser.option_class,
                                 parser.conflict_handler, description)
        self.title = title

    def _create_option_list(self):
        # Own option_list, but the short/long/default mappings are the
        # owning parser's.
        self.option_list = []
        self._share_option_mappings(self.parser)

    def set_title(self, title):
        self.title = title

    def destroy(self):
        """see OptionParser.destroy()."""
        OptionContainer.destroy(self)
        del self.option_list

    # -- Help-formatting methods ---------------------------------------

    def format_help(self, formatter):
        """Format the group heading plus its (indented) option help."""
        pieces = [formatter.format_heading(self.title)]
        formatter.indent()
        pieces.append(OptionContainer.format_help(self, formatter))
        formatter.dedent()
        return "".join(pieces)
-
-
-class OptionParser (OptionContainer):
-
- """
- Class attributes:
- standard_option_list : [Option]
- list of standard options that will be accepted by all instances
- of this parser class (intended to be overridden by subclasses).
-
- Instance attributes:
- usage : string
- a usage string for your program. Before it is displayed
- to the user, "%prog" will be expanded to the name of
- your program (self.prog or os.path.basename(sys.argv[0])).
- prog : string
- the name of the current program (to override
- os.path.basename(sys.argv[0])).
- epilog : string
- paragraph of help text to print after option help
-
- option_groups : [OptionGroup]
- list of option groups in this parser (option groups are
- irrelevant for parsing the command-line, but very useful
- for generating help)
-
- allow_interspersed_args : bool = true
- if true, positional arguments may be interspersed with options.
- Assuming -a and -b each take a single argument, the command-line
- -ablah foo bar -bboo baz
- will be interpreted the same as
- -ablah -bboo -- foo bar baz
- If this flag were false, that command line would be interpreted as
- -ablah -- foo bar -bboo baz
- -- ie. we stop processing options as soon as we see the first
- non-option argument. (This is the tradition followed by
- Python's getopt module, Perl's Getopt::Std, and other argument-
- parsing libraries, but it is generally annoying to users.)
-
- process_default_values : bool = true
- if true, option default values are processed similarly to option
- values from the command line: that is, they are passed to the
- type-checking function for the option's type (as long as the
- default value is a string). (This really only matters if you
- have defined custom types; see SF bug #955889.) Set it to false
- to restore the behaviour of Optik 1.4.1 and earlier.
-
- rargs : [string]
- the argument list currently being parsed. Only set when
- parse_args() is active, and continually trimmed down as
- we consume arguments. Mainly there for the benefit of
- callback options.
- largs : [string]
- the list of leftover arguments that we have skipped while
- parsing options. If allow_interspersed_args is false, this
- list is always empty.
- values : Values
- the set of option values currently being accumulated. Only
- set when parse_args() is active. Also mainly for callbacks.
-
- Because of the 'rargs', 'largs', and 'values' attributes,
- OptionParser is not thread-safe. If, for some perverse reason, you
- need to parse command-line arguments simultaneously in different
- threads, use different OptionParser instances.
-
- """
-
- standard_option_list = []
-
def __init__(self,
             usage=None,
             option_list=None,
             option_class=Option,
             version=None,
             conflict_handler="error",
             description=None,
             formatter=None,
             add_help_option=True,
             prog=None,
             epilog=None):
    """Set up an option parser.  All arguments are optional; by default
    you get an IndentedHelpFormatter, an automatic --help option, and
    (only when 'version' is supplied) an automatic --version option."""
    OptionContainer.__init__(
        self, option_class, conflict_handler, description)
    self.set_usage(usage)
    self.prog = prog
    self.version = version
    self.allow_interspersed_args = True
    self.process_default_values = True
    if formatter is None:
        formatter = IndentedHelpFormatter()
    self.formatter = formatter
    self.formatter.set_parser(self)
    self.epilog = epilog

    # Populate the option list; initial sources are the
    # standard_option_list class attribute, the 'option_list'
    # argument, and (if applicable) the _add_version_option() and
    # _add_help_option() methods.
    self._populate_option_list(option_list,
                               add_help=add_help_option)

    self._init_parsing_state()
-
-
def destroy(self):
    """
    Declare that you are done with this OptionParser.  This cleans up
    reference cycles so the OptionParser (and all objects referenced by
    it) can be garbage-collected promptly.  After calling destroy(), the
    OptionParser is unusable.
    """
    OptionContainer.destroy(self)
    # Groups hold a back-reference to this parser; break those first.
    for group in self.option_groups:
        group.destroy()
    del self.option_list
    del self.option_groups
    del self.formatter
-
-
- # -- Private methods -----------------------------------------------
- # (used by our or OptionContainer's constructor)
-
def _create_option_list(self):
    # A parser owns the master option mappings that its groups share.
    self.option_list = []
    self.option_groups = []
    self._create_option_mappings()
-
def _add_help_option(self):
    # Automatic -h/--help (action="help" prints help and exits).
    self.add_option("-h", "--help",
                    action="help",
                    help=_("show this help message and exit"))
-
def _add_version_option(self):
    # Automatic --version (action="version" prints version and exits).
    self.add_option("--version",
                    action="version",
                    help=_("show program's version number and exit"))
-
def _populate_option_list(self, option_list, add_help=True):
    """Install options from (in order) standard_option_list, the
    'option_list' argument, then the automatic --version and --help
    options; the order determines their position in help output."""
    if self.standard_option_list:
        self.add_options(self.standard_option_list)
    if option_list:
        self.add_options(option_list)
    if self.version:
        self._add_version_option()
    if add_help:
        self._add_help_option()
-
def _init_parsing_state(self):
    # These are set in parse_args() for the convenience of callbacks.
    self.rargs = None
    self.largs = None
    self.values = None
-
-
- # -- Simple modifier methods ---------------------------------------
-
def set_usage(self, usage):
    """Record the usage string: None selects the default, SUPPRESS_USAGE
    disables usage output, and a leading "usage: " is stripped."""
    if usage is None:
        self.usage = _("%prog [options]")
        return
    if usage is SUPPRESS_USAGE:
        self.usage = None
        return
    # For backwards compatibility with Optik 1.3 and earlier.
    if usage.lower().startswith("usage: "):
        self.usage = usage[7:]
        return
    self.usage = usage
-
def enable_interspersed_args(self):
    # Allow positionals to be mixed with options (the default).
    self.allow_interspersed_args = True
-
def disable_interspersed_args(self):
    # Stop option processing at the first positional argument.
    self.allow_interspersed_args = False
-
def set_process_default_values(self, process):
    # Toggle type-checking of string defaults in get_default_values().
    self.process_default_values = process
-
def set_default(self, dest, value):
    """Set the default value for a single option destination."""
    self.defaults[dest] = value
-
def set_defaults(self, **kwargs):
    """Set default values for several option destinations at once."""
    self.defaults.update(kwargs)
-
- def _get_all_options(self):
- options = self.option_list[:]
- for group in self.option_groups:
- options.extend(group.option_list)
- return options
-
def get_default_values(self):
    """Return a Values object seeded with every option's default;
    string defaults are run through the option's type checker when
    process_default_values is enabled."""
    if not self.process_default_values:
        # Old, pre-Optik 1.5 behaviour.
        return Values(self.defaults)

    defaults = self.defaults.copy()
    for option in self._get_all_options():
        default = defaults.get(option.dest)
        if isbasestring(default):
            opt_str = option.get_opt_string()
            defaults[option.dest] = option.check_value(opt_str, default)

    return Values(defaults)
-
-
- # -- OptionGroup methods -------------------------------------------
-
def add_option_group(self, *args, **kwargs):
    """add_option_group(OptionGroup)
    add_option_group(title, ..., kwarg=val, ...)

    Register an OptionGroup (or build one from a title string) on this
    parser and return it."""
    # XXX lots of overlap with OptionContainer.add_option()
    if type(args[0]) is types.StringType:
        group = OptionGroup(self, *args, **kwargs)
    elif len(args) == 1 and not kwargs:
        group = args[0]
        if not isinstance(group, OptionGroup):
            raise TypeError, "not an OptionGroup instance: %r" % group
        if group.parser is not self:
            raise ValueError, "invalid OptionGroup (wrong parser)"
    else:
        raise TypeError, "invalid arguments"

    self.option_groups.append(group)
    return group
-
def get_option_group(self, opt_str):
    """Return the OptionGroup containing the option named by 'opt_str',
    or None when the option is unknown or lives directly on the parser."""
    option = self._short_opt.get(opt_str)
    if option is None:
        option = self._long_opt.get(opt_str)
    if option and option.container is not self:
        return option.container
    return None
-
-
- # -- Option-parsing methods ----------------------------------------
-
- def _get_args(self, args):
- if args is None:
- return sys.argv[1:]
- else:
- return args[:] # don't modify caller's list
-
def parse_args(self, args=None, values=None):
    """
    parse_args(args : [string] = sys.argv[1:],
               values : Values = None)
    -> (values : Values, args : [string])

    Parse the command-line options found in 'args' (default:
    sys.argv[1:]).  Any errors result in a call to 'error()', which
    by default prints the usage message to stderr and calls
    sys.exit() with an error message.  On success returns a pair
    (values, args) where 'values' is an Values instance (with all
    your option values) and 'args' is the list of arguments left
    over after parsing options.
    """
    rargs = self._get_args(args)
    if values is None:
        values = self.get_default_values()

    # Store the halves of the argument list as attributes for the
    # convenience of callbacks:
    #   rargs
    #     the rest of the command-line (the "r" stands for
    #     "remaining" or "right-hand")
    #   largs
    #     the leftover arguments -- ie. what's left after removing
    #     options and their arguments (the "l" stands for "leftover"
    #     or "left-hand")
    self.rargs = rargs
    self.largs = largs = []
    self.values = values

    try:
        # (the return value of _process_args is not used here)
        stop = self._process_args(largs, rargs, values)
    except (BadOptionError, OptionValueError), err:
        self.error(str(err))

    args = largs + rargs
    return self.check_values(values, args)
-
def check_values(self, values, args):
    """
    check_values(values : Values, args : [string])
    -> (values : Values, args : [string])

    Hook for subclasses: validate (and possibly rewrite) the parsed
    option values and leftover arguments.  The base implementation
    simply hands both back unchanged.
    """
    return values, args
-
- def _process_args(self, largs, rargs, values):
- """_process_args(largs : [string],
- rargs : [string],
- values : Values)
-
- Process command-line arguments and populate 'values', consuming
- options and arguments from 'rargs'. If 'allow_interspersed_args' is
- false, stop at the first non-option argument. If true, accumulate any
- interspersed non-option arguments in 'largs'.
- """
- while rargs:
- arg = rargs[0]
- # We handle bare "--" explicitly, and bare "-" is handled by the
- # standard arg handler since the short arg case ensures that the
- # len of the opt string is greater than 1.
- if arg == "--":
- del rargs[0]
- return
- elif arg[0:2] == "--":
- # process a single long option (possibly with value(s))
- self._process_long_opt(rargs, values)
- elif arg[:1] == "-" and len(arg) > 1:
- # process a cluster of short options (possibly with
- # value(s) for the last one only)
- self._process_short_opts(rargs, values)
- elif self.allow_interspersed_args:
- largs.append(arg)
- del rargs[0]
- else:
- return # stop now, leave this arg in rargs
-
- # Say this is the original argument list:
- # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
- # ^
- # (we are about to process arg(i)).
- #
- # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
- # [arg0, ..., arg(i-1)] (any options and their arguments will have
- # been removed from largs).
- #
- # The while loop will usually consume 1 or more arguments per pass.
- # If it consumes 1 (eg. arg is an option that takes no arguments),
- # then after _process_arg() is done the situation is:
- #
- # largs = subset of [arg0, ..., arg(i)]
- # rargs = [arg(i+1), ..., arg(N-1)]
- #
- # If allow_interspersed_args is false, largs will always be
- # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
- # not a very interesting subset!
-
- def _match_long_opt(self, opt):
- """_match_long_opt(opt : string) -> string
-
- Determine which long option string 'opt' matches, ie. which one
- it is an unambiguous abbrevation for. Raises BadOptionError if
- 'opt' doesn't unambiguously match any long option string.
- """
- return _match_abbrev(opt, self._long_opt)
-
- def _process_long_opt(self, rargs, values):
- arg = rargs.pop(0)
-
- # Value explicitly attached to arg? Pretend it's the next
- # argument.
- if "=" in arg:
- (opt, next_arg) = arg.split("=", 1)
- rargs.insert(0, next_arg)
- had_explicit_value = True
- else:
- opt = arg
- had_explicit_value = False
-
- opt = self._match_long_opt(opt)
- option = self._long_opt[opt]
- if option.takes_value():
- nargs = option.nargs
- if len(rargs) < nargs:
- if nargs == 1:
- self.error(_("%s option requires an argument") % opt)
- else:
- self.error(_("%s option requires %d arguments")
- % (opt, nargs))
- elif nargs == 1:
- value = rargs.pop(0)
- else:
- value = tuple(rargs[0:nargs])
- del rargs[0:nargs]
-
- elif had_explicit_value:
- self.error(_("%s option does not take a value") % opt)
-
- else:
- value = None
-
- option.process(opt, value, values, self)
-
- def _process_short_opts(self, rargs, values):
- arg = rargs.pop(0)
- stop = False
- i = 1
- for ch in arg[1:]:
- opt = "-" + ch
- option = self._short_opt.get(opt)
- i += 1 # we have consumed a character
-
- if not option:
- raise BadOptionError(opt)
- if option.takes_value():
- # Any characters left in arg? Pretend they're the
- # next arg, and stop consuming characters of arg.
- if i < len(arg):
- rargs.insert(0, arg[i:])
- stop = True
-
- nargs = option.nargs
- if len(rargs) < nargs:
- if nargs == 1:
- self.error(_("%s option requires an argument") % opt)
- else:
- self.error(_("%s option requires %d arguments")
- % (opt, nargs))
- elif nargs == 1:
- value = rargs.pop(0)
- else:
- value = tuple(rargs[0:nargs])
- del rargs[0:nargs]
-
- else: # option doesn't take a value
- value = None
-
- option.process(opt, value, values, self)
-
- if stop:
- break
-
-
- # -- Feedback methods ----------------------------------------------
-
- def get_prog_name(self):
- if self.prog is None:
- return os.path.basename(sys.argv[0])
- else:
- return self.prog
-
- def expand_prog_name(self, s):
- return s.replace("%prog", self.get_prog_name())
-
- def get_description(self):
- return self.expand_prog_name(self.description)
-
- def exit(self, status=0, msg=None):
- if msg:
- sys.stderr.write(msg)
- sys.exit(status)
-
- def error(self, msg):
- """error(msg : string)
-
- Print a usage message incorporating 'msg' to stderr and exit.
- If you override this in a subclass, it should not return -- it
- should either exit or raise an exception.
- """
- self.print_usage(sys.stderr)
- self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
-
- def get_usage(self):
- if self.usage:
- return self.formatter.format_usage(
- self.expand_prog_name(self.usage))
- else:
- return ""
-
- def print_usage(self, file=None):
- """print_usage(file : file = stdout)
-
- Print the usage message for the current program (self.usage) to
- 'file' (default stdout). Any occurence of the string "%prog" in
- self.usage is replaced with the name of the current program
- (basename of sys.argv[0]). Does nothing if self.usage is empty
- or not defined.
- """
- if self.usage:
- print >>file, self.get_usage()
-
- def get_version(self):
- if self.version:
- return self.expand_prog_name(self.version)
- else:
- return ""
-
- def print_version(self, file=None):
- """print_version(file : file = stdout)
-
- Print the version message for this program (self.version) to
- 'file' (default stdout). As with print_usage(), any occurence
- of "%prog" in self.version is replaced by the current program's
- name. Does nothing if self.version is empty or undefined.
- """
- if self.version:
- print >>file, self.get_version()
-
- def format_option_help(self, formatter=None):
- if formatter is None:
- formatter = self.formatter
- formatter.store_option_strings(self)
- result = []
- result.append(formatter.format_heading(_("Options")))
- formatter.indent()
- if self.option_list:
- result.append(OptionContainer.format_option_help(self, formatter))
- result.append("\n")
- for group in self.option_groups:
- result.append(group.format_help(formatter))
- result.append("\n")
- formatter.dedent()
- # Drop the last "\n", or the header if no options or option groups:
- return "".join(result[:-1])
-
- def format_epilog(self, formatter):
- return formatter.format_epilog(self.epilog)
-
- def format_help(self, formatter=None):
- if formatter is None:
- formatter = self.formatter
- result = []
- if self.usage:
- result.append(self.get_usage() + "\n")
- if self.description:
- result.append(self.format_description(formatter) + "\n")
- result.append(self.format_option_help(formatter))
- result.append(self.format_epilog(formatter))
- return "".join(result)
-
- # used by test suite
- def _get_encoding(self, file):
- encoding = getattr(file, "encoding", None)
- if not encoding:
- encoding = sys.getdefaultencoding()
- return encoding
-
- def print_help(self, file=None):
- """print_help(file : file = stdout)
-
- Print an extended help message, listing all options and any
- help text provided with them, to 'file' (default stdout).
- """
- if file is None:
- file = sys.stdout
- encoding = self._get_encoding(file)
- file.write(self.format_help().encode(encoding, "replace"))
-
-# class OptionParser
-
-
-def _match_abbrev(s, wordmap):
- """_match_abbrev(s : string, wordmap : {string : Option}) -> string
-
- Return the string key in 'wordmap' for which 's' is an unambiguous
- abbreviation. If 's' is found to be ambiguous or doesn't match any of
- 'words', raise BadOptionError.
- """
- # Is there an exact match?
- if wordmap.has_key(s):
- return s
- else:
- # Isolate all words with s as a prefix.
- possibilities = [word for word in wordmap.keys()
- if word.startswith(s)]
- # No exact match, so there had better be just one possibility.
- if len(possibilities) == 1:
- return possibilities[0]
- elif not possibilities:
- raise BadOptionError(s)
- else:
- # More than one possible completion: ambiguous prefix.
- possibilities.sort()
- raise AmbiguousOptionError(s, possibilities)
-
-
-# Some day, there might be many Option classes. As of Optik 1.3, the
-# preferred way to instantiate Options is indirectly, via make_option(),
-# which will become a factory function when there are many Option
-# classes.
-make_option = Option
diff --git a/sys/lib/python/os.py b/sys/lib/python/os.py
deleted file mode 100644
index 21e402054..000000000
--- a/sys/lib/python/os.py
+++ /dev/null
@@ -1,738 +0,0 @@
-r"""OS routines for Mac, NT, or Posix depending on what system we're on.
-
-This exports:
- - all functions from posix, nt, os2, mac, or ce, e.g. unlink, stat, etc.
- - os.path is one of the modules posixpath, ntpath, or macpath
- - os.name is 'posix', 'nt', 'os2', 'mac', 'ce' or 'riscos'
- - os.curdir is a string representing the current directory ('.' or ':')
- - os.pardir is a string representing the parent directory ('..' or '::')
- - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- - os.extsep is the extension separator ('.' or '/')
- - os.altsep is the alternate pathname separator (None or '/')
- - os.pathsep is the component separator used in $PATH etc
- - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- - os.defpath is the default search path for executables
- - os.devnull is the file path of the null device ('/dev/null', etc.)
-
-Programs that import and use 'os' stand a better chance of being
-portable between different platforms. Of course, they must then
-only use functions that are defined by all platforms (e.g., unlink
-and opendir), and leave all pathname manipulation to os.path
-(e.g., split and join).
-"""
-
-#'
-
-import sys
-
-_names = sys.builtin_module_names
-
-# Note: more names are added to __all__ later.
-__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
- "defpath", "name", "path", "devnull",
- "SEEK_SET", "SEEK_CUR", "SEEK_END"]
-
-def _get_exports_list(module):
- try:
- return list(module.__all__)
- except AttributeError:
- return [n for n in dir(module) if n[0] != '_']
-
-if 'posix' in _names:
- name = 'posix'
- linesep = '\n'
- from posix import *
- try:
- from posix import _exit
- except ImportError:
- pass
- import posixpath as path
-
- import posix
- __all__.extend(_get_exports_list(posix))
- del posix
-
-elif 'nt' in _names:
- name = 'nt'
- linesep = '\r\n'
- from nt import *
- try:
- from nt import _exit
- except ImportError:
- pass
- import ntpath as path
-
- import nt
- __all__.extend(_get_exports_list(nt))
- del nt
-
-elif 'os2' in _names:
- name = 'os2'
- linesep = '\r\n'
- from os2 import *
- try:
- from os2 import _exit
- except ImportError:
- pass
- if sys.version.find('EMX GCC') == -1:
- import ntpath as path
- else:
- import os2emxpath as path
- from _emx_link import link
-
- import os2
- __all__.extend(_get_exports_list(os2))
- del os2
-
-elif 'mac' in _names:
- name = 'mac'
- linesep = '\r'
- from mac import *
- try:
- from mac import _exit
- except ImportError:
- pass
- import macpath as path
-
- import mac
- __all__.extend(_get_exports_list(mac))
- del mac
-
-elif 'ce' in _names:
- name = 'ce'
- linesep = '\r\n'
- from ce import *
- try:
- from ce import _exit
- except ImportError:
- pass
- # We can use the standard Windows path.
- import ntpath as path
-
- import ce
- __all__.extend(_get_exports_list(ce))
- del ce
-
-elif 'riscos' in _names:
- name = 'riscos'
- linesep = '\n'
- from riscos import *
- try:
- from riscos import _exit
- except ImportError:
- pass
- import riscospath as path
-
- import riscos
- __all__.extend(_get_exports_list(riscos))
- del riscos
-
-else:
- raise ImportError, 'no os specific module found'
-
-sys.modules['os.path'] = path
-from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
- devnull)
-
-del _names
-
-# Python uses fixed values for the SEEK_ constants; they are mapped
-# to native constants if necessary in posixmodule.c
-SEEK_SET = 0
-SEEK_CUR = 1
-SEEK_END = 2
-
-#'
-
-# Super directory utilities.
-# (Inspired by Eric Raymond; the doc strings are mostly his)
-
-def makedirs(name, mode=0777):
- """makedirs(path [, mode=0777])
-
- Super-mkdir; create a leaf directory and all intermediate ones.
- Works like mkdir, except that any intermediate path segment (not
- just the rightmost) will be created if it does not exist. This is
- recursive.
-
- """
- from errno import EEXIST
- head, tail = path.split(name)
- if not tail:
- head, tail = path.split(head)
- if head and tail and not path.exists(head):
- try:
- makedirs(head, mode)
- except OSError, e:
- # be happy if someone already created the path
- if e.errno != EEXIST:
- raise
- if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
- return
- mkdir(name, mode)
-
-def removedirs(name):
- """removedirs(path)
-
- Super-rmdir; remove a leaf directory and all empty intermediate
- ones. Works like rmdir except that, if the leaf directory is
- successfully removed, directories corresponding to rightmost path
- segments will be pruned away until either the whole path is
- consumed or an error occurs. Errors during this latter phase are
- ignored -- they generally mean that a directory was not empty.
-
- """
- rmdir(name)
- head, tail = path.split(name)
- if not tail:
- head, tail = path.split(head)
- while head and tail:
- try:
- rmdir(head)
- except error:
- break
- head, tail = path.split(head)
-
-def renames(old, new):
- """renames(old, new)
-
- Super-rename; create directories as necessary and delete any left
- empty. Works like rename, except creation of any intermediate
- directories needed to make the new pathname good is attempted
- first. After the rename, directories corresponding to rightmost
- path segments of the old name will be pruned way until either the
- whole path is consumed or a nonempty directory is found.
-
- Note: this function can fail with the new directory structure made
- if you lack permissions needed to unlink the leaf directory or
- file.
-
- """
- head, tail = path.split(new)
- if head and tail and not path.exists(head):
- makedirs(head)
- rename(old, new)
- head, tail = path.split(old)
- if head and tail:
- try:
- removedirs(head)
- except error:
- pass
-
-__all__.extend(["makedirs", "removedirs", "renames"])
-
-def walk(top, topdown=True, onerror=None):
- """Directory tree generator.
-
- For each directory in the directory tree rooted at top (including top
- itself, but excluding '.' and '..'), yields a 3-tuple
-
- dirpath, dirnames, filenames
-
- dirpath is a string, the path to the directory. dirnames is a list of
- the names of the subdirectories in dirpath (excluding '.' and '..').
- filenames is a list of the names of the non-directory files in dirpath.
- Note that the names in the lists are just names, with no path components.
- To get a full path (which begins with top) to a file or directory in
- dirpath, do os.path.join(dirpath, name).
-
- If optional arg 'topdown' is true or not specified, the triple for a
- directory is generated before the triples for any of its subdirectories
- (directories are generated top down). If topdown is false, the triple
- for a directory is generated after the triples for all of its
- subdirectories (directories are generated bottom up).
-
- When topdown is true, the caller can modify the dirnames list in-place
- (e.g., via del or slice assignment), and walk will only recurse into the
- subdirectories whose names remain in dirnames; this can be used to prune
- the search, or to impose a specific order of visiting. Modifying
- dirnames when topdown is false is ineffective, since the directories in
- dirnames have already been generated by the time dirnames itself is
- generated.
-
- By default errors from the os.listdir() call are ignored. If
- optional arg 'onerror' is specified, it should be a function; it
- will be called with one argument, an os.error instance. It can
- report the error to continue with the walk, or raise the exception
- to abort the walk. Note that the filename is available as the
- filename attribute of the exception object.
-
- Caution: if you pass a relative pathname for top, don't change the
- current working directory between resumptions of walk. walk never
- changes the current directory, and assumes that the client doesn't
- either.
-
- Example:
-
- from os.path import join, getsize
- for root, dirs, files in walk('python/Lib/email'):
- print root, "consumes",
- print sum([getsize(join(root, name)) for name in files]),
- print "bytes in", len(files), "non-directory files"
- if 'CVS' in dirs:
- dirs.remove('CVS') # don't visit CVS directories
- """
-
- from os.path import join, isdir, islink
-
- # We may not have read permission for top, in which case we can't
- # get a list of the files the directory contains. os.path.walk
- # always suppressed the exception then, rather than blow up for a
- # minor reason when (say) a thousand readable directories are still
- # left to visit. That logic is copied here.
- try:
- # Note that listdir and error are globals in this module due
- # to earlier import-*.
- names = listdir(top)
- except error, err:
- if onerror is not None:
- onerror(err)
- return
-
- dirs, nondirs = [], []
- for name in names:
- if isdir(join(top, name)):
- dirs.append(name)
- else:
- nondirs.append(name)
-
- if topdown:
- yield top, dirs, nondirs
- for name in dirs:
- path = join(top, name)
- if not islink(path):
- for x in walk(path, topdown, onerror):
- yield x
- if not topdown:
- yield top, dirs, nondirs
-
-__all__.append("walk")
-
-# Make sure os.environ exists, at least
-try:
- environ
-except NameError:
- environ = {}
-
-def execl(file, *args):
- """execl(file, *args)
-
- Execute the executable file with argument list args, replacing the
- current process. """
- execv(file, args)
-
-def execle(file, *args):
- """execle(file, *args, env)
-
- Execute the executable file with argument list args and
- environment env, replacing the current process. """
- env = args[-1]
- execve(file, args[:-1], env)
-
-def execlp(file, *args):
- """execlp(file, *args)
-
- Execute the executable file (which is searched for along $PATH)
- with argument list args, replacing the current process. """
- execvp(file, args)
-
-def execlpe(file, *args):
- """execlpe(file, *args, env)
-
- Execute the executable file (which is searched for along $PATH)
- with argument list args and environment env, replacing the current
- process. """
- env = args[-1]
- execvpe(file, args[:-1], env)
-
-def execvp(file, args):
- """execp(file, args)
-
- Execute the executable file (which is searched for along $PATH)
- with argument list args, replacing the current process.
- args may be a list or tuple of strings. """
- _execvpe(file, args)
-
-def execvpe(file, args, env):
- """execvpe(file, args, env)
-
- Execute the executable file (which is searched for along $PATH)
- with argument list args and environment env , replacing the
- current process.
- args may be a list or tuple of strings. """
- _execvpe(file, args, env)
-
-__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
-
-def _execvpe(file, args, env=None):
- from errno import ENOENT, ENOTDIR
-
- if env is not None:
- func = execve
- argrest = (args, env)
- else:
- func = execv
- argrest = (args,)
- env = environ
-
- head, tail = path.split(file)
- if head:
- func(file, *argrest)
- return
- if 'PATH' in env:
- envpath = env['PATH']
- else:
- envpath = defpath
- PATH = envpath.split(pathsep)
- saved_exc = None
- saved_tb = None
- for dir in PATH:
- fullname = path.join(dir, file)
- try:
- func(fullname, *argrest)
- except error, e:
- tb = sys.exc_info()[2]
- if (e.errno != ENOENT and e.errno != ENOTDIR
- and saved_exc is None):
- saved_exc = e
- saved_tb = tb
- if saved_exc:
- raise error, saved_exc, saved_tb
- raise error, e, tb
-
-# Change environ to automatically call putenv() if it exists
-try:
- # This will fail if there's no putenv
- putenv
-except NameError:
- pass
-else:
- import UserDict
-
- # Fake unsetenv() for Windows
- # not sure about os2 here but
- # I'm guessing they are the same.
-
- if name in ('os2', 'nt'):
- def unsetenv(key):
- putenv(key, "")
-
- if name == "riscos":
- # On RISC OS, all env access goes through getenv and putenv
- from riscosenviron import _Environ
- elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
- # But we store them as upper case
- class _Environ(UserDict.IterableUserDict):
- def __init__(self, environ):
- UserDict.UserDict.__init__(self)
- data = self.data
- for k, v in environ.items():
- data[k.upper()] = v
- def __setitem__(self, key, item):
- putenv(key, item)
- self.data[key.upper()] = item
- def __getitem__(self, key):
- return self.data[key.upper()]
- try:
- unsetenv
- except NameError:
- def __delitem__(self, key):
- del self.data[key.upper()]
- else:
- def __delitem__(self, key):
- unsetenv(key)
- del self.data[key.upper()]
- def has_key(self, key):
- return key.upper() in self.data
- def __contains__(self, key):
- return key.upper() in self.data
- def get(self, key, failobj=None):
- return self.data.get(key.upper(), failobj)
- def update(self, dict=None, **kwargs):
- if dict:
- try:
- keys = dict.keys()
- except AttributeError:
- # List of (key, value)
- for k, v in dict:
- self[k] = v
- else:
- # got keys
- # cannot use items(), since mappings
- # may not have them.
- for k in keys:
- self[k] = dict[k]
- if kwargs:
- self.update(kwargs)
- def copy(self):
- return dict(self)
-
- else: # Where Env Var Names Can Be Mixed Case
- class _Environ(UserDict.IterableUserDict):
- def __init__(self, environ):
- UserDict.UserDict.__init__(self)
- self.data = environ
- def __setitem__(self, key, item):
- putenv(key, item)
- self.data[key] = item
- def update(self, dict=None, **kwargs):
- if dict:
- try:
- keys = dict.keys()
- except AttributeError:
- # List of (key, value)
- for k, v in dict:
- self[k] = v
- else:
- # got keys
- # cannot use items(), since mappings
- # may not have them.
- for k in keys:
- self[k] = dict[k]
- if kwargs:
- self.update(kwargs)
- try:
- unsetenv
- except NameError:
- pass
- else:
- def __delitem__(self, key):
- unsetenv(key)
- del self.data[key]
- def copy(self):
- return dict(self)
-
-
- environ = _Environ(environ)
-
-def getenv(key, default=None):
- """Get an environment variable, return None if it doesn't exist.
- The optional second argument can specify an alternate default."""
- return environ.get(key, default)
-__all__.append("getenv")
-
-def _exists(name):
- try:
- eval(name)
- return True
- except NameError:
- return False
-
-# Supply spawn*() (probably only for Unix)
-if _exists("fork") and not _exists("spawnv") and _exists("execv"):
-
- P_WAIT = 0
- P_NOWAIT = P_NOWAITO = 1
-
- # XXX Should we support P_DETACH? I suppose it could fork()**2
- # and close the std I/O streams. Also, P_OVERLAY is the same
- # as execv*()?
-
- def _spawnvef(mode, file, args, env, func):
- # Internal helper; func is the exec*() function to use
- pid = fork()
- if not pid:
- # Child
- try:
- if env is None:
- func(file, args)
- else:
- func(file, args, env)
- except:
- _exit(127)
- else:
- # Parent
- if mode == P_NOWAIT:
- return pid # Caller is responsible for waiting!
- while 1:
- wpid, sts = waitpid(pid, 0)
- if WIFSTOPPED(sts):
- continue
- elif WIFSIGNALED(sts):
- return -WTERMSIG(sts)
- elif WIFEXITED(sts):
- return WEXITSTATUS(sts)
- else:
- raise error, "Not stopped, signaled or exited???"
-
- def spawnv(mode, file, args):
- """spawnv(mode, file, args) -> integer
-
-Execute file with arguments from args in a subprocess.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
- return _spawnvef(mode, file, args, None, execv)
-
- def spawnve(mode, file, args, env):
- """spawnve(mode, file, args, env) -> integer
-
-Execute file with arguments from args in a subprocess with the
-specified environment.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
- return _spawnvef(mode, file, args, env, execve)
-
- # Note: spawnvp[e] is't currently supported on Windows
-
- def spawnvp(mode, file, args):
- """spawnvp(mode, file, args) -> integer
-
-Execute file (which is looked for along $PATH) with arguments from
-args in a subprocess.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
- return _spawnvef(mode, file, args, None, execvp)
-
- def spawnvpe(mode, file, args, env):
- """spawnvpe(mode, file, args, env) -> integer
-
-Execute file (which is looked for along $PATH) with arguments from
-args in a subprocess with the supplied environment.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
- return _spawnvef(mode, file, args, env, execvpe)
-
-if _exists("spawnv"):
- # These aren't supplied by the basic Windows code
- # but can be easily implemented in Python
-
- def spawnl(mode, file, *args):
- """spawnl(mode, file, *args) -> integer
-
-Execute file with arguments from args in a subprocess.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
- return spawnv(mode, file, args)
-
- def spawnle(mode, file, *args):
- """spawnle(mode, file, *args, env) -> integer
-
-Execute file with arguments from args in a subprocess with the
-supplied environment.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
- env = args[-1]
- return spawnve(mode, file, args[:-1], env)
-
-
- __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
-
-
-if _exists("spawnvp"):
- # At the moment, Windows doesn't implement spawnvp[e],
- # so it won't have spawnlp[e] either.
- def spawnlp(mode, file, *args):
- """spawnlp(mode, file, *args) -> integer
-
-Execute file (which is looked for along $PATH) with arguments from
-args in a subprocess with the supplied environment.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
- return spawnvp(mode, file, args)
-
- def spawnlpe(mode, file, *args):
- """spawnlpe(mode, file, *args, env) -> integer
-
-Execute file (which is looked for along $PATH) with arguments from
-args in a subprocess with the supplied environment.
-If mode == P_NOWAIT return the pid of the process.
-If mode == P_WAIT return the process's exit code if it exits normally;
-otherwise return -SIG, where SIG is the signal that killed it. """
- env = args[-1]
- return spawnvpe(mode, file, args[:-1], env)
-
-
- __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
-
-
-# Supply popen2 etc. (for Unix)
-if _exists("fork"):
- if not _exists("popen2"):
- def popen2(cmd, mode="t", bufsize=-1):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
- may be a sequence, in which case arguments will be passed directly to
- the program without shell intervention (as with os.spawnv()). If 'cmd'
- is a string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdin, child_stdout) are returned."""
- import popen2
- stdout, stdin = popen2.popen2(cmd, bufsize)
- return stdin, stdout
- __all__.append("popen2")
-
- if not _exists("popen3"):
- def popen3(cmd, mode="t", bufsize=-1):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
- may be a sequence, in which case arguments will be passed directly to
- the program without shell intervention (as with os.spawnv()). If 'cmd'
- is a string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdin, child_stdout, child_stderr) are returned."""
- import popen2
- stdout, stdin, stderr = popen2.popen3(cmd, bufsize)
- return stdin, stdout, stderr
- __all__.append("popen3")
-
- if not _exists("popen4"):
- def popen4(cmd, mode="t", bufsize=-1):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
- may be a sequence, in which case arguments will be passed directly to
- the program without shell intervention (as with os.spawnv()). If 'cmd'
- is a string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdin, child_stdout_stderr) are returned."""
- import popen2
- stdout, stdin = popen2.popen4(cmd, bufsize)
- return stdin, stdout
- __all__.append("popen4")
-
-import copy_reg as _copy_reg
-
-def _make_stat_result(tup, dict):
- return stat_result(tup, dict)
-
-def _pickle_stat_result(sr):
- (type, args) = sr.__reduce__()
- return (_make_stat_result, args)
-
-try:
- _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
-except NameError: # stat_result may not exist
- pass
-
-def _make_statvfs_result(tup, dict):
- return statvfs_result(tup, dict)
-
-def _pickle_statvfs_result(sr):
- (type, args) = sr.__reduce__()
- return (_make_statvfs_result, args)
-
-try:
- _copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
- _make_statvfs_result)
-except NameError: # statvfs_result may not exist
- pass
-
-if not _exists("urandom"):
- def urandom(n):
- """urandom(n) -> str
-
- Return a string of n random bytes suitable for cryptographic use.
-
- """
- try:
- _urandomfd = open("/dev/random", O_RDONLY)
- except (OSError, IOError):
- raise NotImplementedError("/dev/urandom (or equivalent) not found")
- bytes = ""
- while len(bytes) < n:
- bytes += read(_urandomfd, n - len(bytes))
- close(_urandomfd)
- return bytes
diff --git a/sys/lib/python/os2emxpath.py b/sys/lib/python/os2emxpath.py
deleted file mode 100644
index a84142240..000000000
--- a/sys/lib/python/os2emxpath.py
+++ /dev/null
@@ -1,423 +0,0 @@
-# Module 'os2emxpath' -- common operations on OS/2 pathnames
-"""Common pathname manipulations, OS/2 EMX version.
-
-Instead of importing this module directly, import os and refer to this
-module as os.path.
-"""
-
-import os
-import stat
-
-__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
- "basename","dirname","commonprefix","getsize","getmtime",
- "getatime","getctime", "islink","exists","lexists","isdir","isfile",
- "ismount","walk","expanduser","expandvars","normpath","abspath",
- "splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
- "extsep","devnull","realpath","supports_unicode_filenames"]
-
-# strings representing various path-related bits and pieces
-curdir = '.'
-pardir = '..'
-extsep = '.'
-sep = '/'
-altsep = '\\'
-pathsep = ';'
-defpath = '.;C:\\bin'
-devnull = 'nul'
-
-# Normalize the case of a pathname and map slashes to backslashes.
-# Other normalizations (such as optimizing '../' away) are not done
-# (this is done by normpath).
-
-def normcase(s):
- """Normalize case of pathname.
-
- Makes all characters lowercase and all altseps into seps."""
- return s.replace('\\', '/').lower()
-
-
-# Return whether a path is absolute.
-# Trivial in Posix, harder on the Mac or MS-DOS.
-# For DOS it is absolute if it starts with a slash or backslash (current
-# volume), or if a pathname after the volume letter and colon / UNC resource
-# starts with a slash or backslash.
-
-def isabs(s):
- """Test whether a path is absolute"""
- s = splitdrive(s)[1]
- return s != '' and s[:1] in '/\\'
-
-
-# Join two (or more) paths.
-
-def join(a, *p):
- """Join two or more pathname components, inserting sep as needed"""
- path = a
- for b in p:
- if isabs(b):
- path = b
- elif path == '' or path[-1:] in '/\\:':
- path = path + b
- else:
- path = path + '/' + b
- return path
-
-
-# Split a path in a drive specification (a drive letter followed by a
-# colon) and the path specification.
-# It is always true that drivespec + pathspec == p
-def splitdrive(p):
- """Split a pathname into drive and path specifiers. Returns a 2-tuple
-"(drive,path)"; either part may be empty"""
- if p[1:2] == ':':
- return p[0:2], p[2:]
- return '', p
-
-
-# Parse UNC paths
-def splitunc(p):
- """Split a pathname into UNC mount point and relative path specifiers.
-
- Return a 2-tuple (unc, rest); either part may be empty.
- If unc is not empty, it has the form '//host/mount' (or similar
- using backslashes). unc+rest is always the input path.
- Paths containing drive letters never have an UNC part.
- """
- if p[1:2] == ':':
- return '', p # Drive letter present
- firstTwo = p[0:2]
- if firstTwo == '/' * 2 or firstTwo == '\\' * 2:
- # is a UNC path:
- # vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
- # \\machine\mountpoint\directories...
- # directory ^^^^^^^^^^^^^^^
- normp = normcase(p)
- index = normp.find('/', 2)
- if index == -1:
- ##raise RuntimeError, 'illegal UNC path: "' + p + '"'
- return ("", p)
- index = normp.find('/', index + 1)
- if index == -1:
- index = len(p)
- return p[:index], p[index:]
- return '', p
-
-
-# Split a path in head (everything up to the last '/') and tail (the
-# rest). After the trailing '/' is stripped, the invariant
-# join(head, tail) == p holds.
-# The resulting head won't end in '/' unless it is the root.
-
-def split(p):
- """Split a pathname.
-
- Return tuple (head, tail) where tail is everything after the final slash.
- Either part may be empty."""
-
- d, p = splitdrive(p)
- # set i to index beyond p's last slash
- i = len(p)
- while i and p[i-1] not in '/\\':
- i = i - 1
- head, tail = p[:i], p[i:] # now tail has no slashes
- # remove trailing slashes from head, unless it's all slashes
- head2 = head
- while head2 and head2[-1] in '/\\':
- head2 = head2[:-1]
- head = head2 or head
- return d + head, tail
-
-
-# Split a path in root and extension.
-# The extension is everything starting at the last dot in the last
-# pathname component; the root is everything before that.
-# It is always true that root + ext == p.
-
-def splitext(p):
- """Split the extension from a pathname.
-
- Extension is everything from the last dot to the end.
- Return (root, ext), either part may be empty."""
- root, ext = '', ''
- for c in p:
- if c in ['/','\\']:
- root, ext = root + ext + c, ''
- elif c == '.':
- if ext:
- root, ext = root + ext, c
- else:
- ext = c
- elif ext:
- ext = ext + c
- else:
- root = root + c
- return root, ext
-
-
-# Return the tail (basename) part of a path.
-
-def basename(p):
- """Returns the final component of a pathname"""
- return split(p)[1]
-
-
-# Return the head (dirname) part of a path.
-
-def dirname(p):
- """Returns the directory component of a pathname"""
- return split(p)[0]
-
-
-# Return the longest prefix of all list elements.
-
-def commonprefix(m):
- "Given a list of pathnames, returns the longest common leading component"
- if not m: return ''
- s1 = min(m)
- s2 = max(m)
- n = min(len(s1), len(s2))
- for i in xrange(n):
- if s1[i] != s2[i]:
- return s1[:i]
- return s1[:n]
-
-
-# Get size, mtime, atime of files.
-
-def getsize(filename):
- """Return the size of a file, reported by os.stat()"""
- return os.stat(filename).st_size
-
-def getmtime(filename):
- """Return the last modification time of a file, reported by os.stat()"""
- return os.stat(filename).st_mtime
-
-def getatime(filename):
- """Return the last access time of a file, reported by os.stat()"""
- return os.stat(filename).st_atime
-
-def getctime(filename):
- """Return the creation time of a file, reported by os.stat()."""
- return os.stat(filename).st_ctime
-
-# Is a path a symbolic link?
-# This will always return false on systems where posix.lstat doesn't exist.
-
-def islink(path):
- """Test for symbolic link. On OS/2 always returns false"""
- return False
-
-
-# Does a path exist?
-# This is false for dangling symbolic links.
-
-def exists(path):
- """Test whether a path exists"""
- try:
- st = os.stat(path)
- except os.error:
- return False
- return True
-
-lexists = exists
-
-
-# Is a path a directory?
-
-def isdir(path):
- """Test whether a path is a directory"""
- try:
- st = os.stat(path)
- except os.error:
- return False
- return stat.S_ISDIR(st.st_mode)
-
-
-# Is a path a regular file?
-# This follows symbolic links, so both islink() and isdir() can be true
-# for the same path.
-
-def isfile(path):
- """Test whether a path is a regular file"""
- try:
- st = os.stat(path)
- except os.error:
- return False
- return stat.S_ISREG(st.st_mode)
-
-
-# Is a path a mount point? Either a root (with or without drive letter)
-# or an UNC path with at most a / or \ after the mount point.
-
-def ismount(path):
- """Test whether a path is a mount point (defined as root of drive)"""
- unc, rest = splitunc(path)
- if unc:
- return rest in ("", "/", "\\")
- p = splitdrive(path)[1]
- return len(p) == 1 and p[0] in '/\\'
-
-
-# Directory tree walk.
-# For each directory under top (including top itself, but excluding
-# '.' and '..'), func(arg, dirname, filenames) is called, where
-# dirname is the name of the directory and filenames is the list
-# of files (and subdirectories etc.) in the directory.
-# The func may modify the filenames list, to implement a filter,
-# or to impose a different order of visiting.
-
-def walk(top, func, arg):
- """Directory tree walk whth callback function.
-
- walk(top, func, arg) calls func(arg, d, files) for each directory d
- in the tree rooted at top (including top itself); files is a list
- of all the files and subdirs in directory d."""
- try:
- names = os.listdir(top)
- except os.error:
- return
- func(arg, top, names)
- exceptions = ('.', '..')
- for name in names:
- if name not in exceptions:
- name = join(top, name)
- if isdir(name):
- walk(name, func, arg)
-
-
-# Expand paths beginning with '~' or '~user'.
-# '~' means $HOME; '~user' means that user's home directory.
-# If the path doesn't begin with '~', or if the user or $HOME is unknown,
-# the path is returned unchanged (leaving error reporting to whatever
-# function is called with the expanded path as argument).
-# See also module 'glob' for expansion of *, ? and [...] in pathnames.
-# (A function should also be defined to do full *sh-style environment
-# variable expansion.)
-
-def expanduser(path):
- """Expand ~ and ~user constructs.
-
- If user or $HOME is unknown, do nothing."""
- if path[:1] != '~':
- return path
- i, n = 1, len(path)
- while i < n and path[i] not in '/\\':
- i = i + 1
- if i == 1:
- if 'HOME' in os.environ:
- userhome = os.environ['HOME']
- elif not 'HOMEPATH' in os.environ:
- return path
- else:
- try:
- drive = os.environ['HOMEDRIVE']
- except KeyError:
- drive = ''
- userhome = join(drive, os.environ['HOMEPATH'])
- else:
- return path
- return userhome + path[i:]
-
-
-# Expand paths containing shell variable substitutions.
-# The following rules apply:
-# - no expansion within single quotes
-# - no escape character, except for '$$' which is translated into '$'
-# - ${varname} is accepted.
-# - varnames can be made out of letters, digits and the character '_'
-# XXX With COMMAND.COM you can use any characters in a variable name,
-# XXX except '^|<>='.
-
-def expandvars(path):
- """Expand shell variables of form $var and ${var}.
-
- Unknown variables are left unchanged."""
- if '$' not in path:
- return path
- import string
- varchars = string.letters + string.digits + '_-'
- res = ''
- index = 0
- pathlen = len(path)
- while index < pathlen:
- c = path[index]
- if c == '\'': # no expansion within single quotes
- path = path[index + 1:]
- pathlen = len(path)
- try:
- index = path.index('\'')
- res = res + '\'' + path[:index + 1]
- except ValueError:
- res = res + path
- index = pathlen - 1
- elif c == '$': # variable or '$$'
- if path[index + 1:index + 2] == '$':
- res = res + c
- index = index + 1
- elif path[index + 1:index + 2] == '{':
- path = path[index+2:]
- pathlen = len(path)
- try:
- index = path.index('}')
- var = path[:index]
- if var in os.environ:
- res = res + os.environ[var]
- except ValueError:
- res = res + path
- index = pathlen - 1
- else:
- var = ''
- index = index + 1
- c = path[index:index + 1]
- while c != '' and c in varchars:
- var = var + c
- index = index + 1
- c = path[index:index + 1]
- if var in os.environ:
- res = res + os.environ[var]
- if c != '':
- res = res + c
- else:
- res = res + c
- index = index + 1
- return res
-
-
-# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
-
-def normpath(path):
- """Normalize path, eliminating double slashes, etc."""
- path = path.replace('\\', '/')
- prefix, path = splitdrive(path)
- while path[:1] == '/':
- prefix = prefix + '/'
- path = path[1:]
- comps = path.split('/')
- i = 0
- while i < len(comps):
- if comps[i] == '.':
- del comps[i]
- elif comps[i] == '..' and i > 0 and comps[i-1] not in ('', '..'):
- del comps[i-1:i+1]
- i = i - 1
- elif comps[i] == '' and i > 0 and comps[i-1] != '':
- del comps[i]
- else:
- i = i + 1
- # If the path is now empty, substitute '.'
- if not prefix and not comps:
- comps.append('.')
- return prefix + '/'.join(comps)
-
-
-# Return an absolute path.
-def abspath(path):
- """Return the absolute version of a path"""
- if not isabs(path):
- path = join(os.getcwd(), path)
- return normpath(path)
-
-# realpath is a no-op on systems without islink support
-realpath = abspath
-
-supports_unicode_filenames = False
diff --git a/sys/lib/python/pdb.doc b/sys/lib/python/pdb.doc
deleted file mode 100644
index 81df3237f..000000000
--- a/sys/lib/python/pdb.doc
+++ /dev/null
@@ -1,192 +0,0 @@
-The Python Debugger Pdb
-=======================
-
-To use the debugger in its simplest form:
-
- >>> import pdb
- >>> pdb.run('<a statement>')
-
-The debugger's prompt is '(Pdb) '. This will stop in the first
-function call in <a statement>.
-
-Alternatively, if a statement terminated with an unhandled exception,
-you can use pdb's post-mortem facility to inspect the contents of the
-traceback:
-
- >>> <a statement>
- <exception traceback>
- >>> import pdb
- >>> pdb.pm()
-
-The commands recognized by the debugger are listed in the next
-section. Most can be abbreviated as indicated; e.g., h(elp) means
-that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel',
-nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in
-square brackets.
-
-A blank line repeats the previous command literally, except for
-'list', where it lists the next 11 lines.
-
-Commands that the debugger doesn't recognize are assumed to be Python
-statements and are executed in the context of the program being
-debugged. Python statements can also be prefixed with an exclamation
-point ('!'). This is a powerful way to inspect the program being
-debugged; it is even possible to change variables. When an exception
-occurs in such a statement, the exception name is printed but the
-debugger's state is not changed.
-
-The debugger supports aliases, which can save typing. And aliases can
-have parameters (see the alias help entry) which allows one a certain
-level of adaptability to the context under examination.
-
-Multiple commands may be entered on a single line, separated by the
-pair ';;'. No intelligence is applied to separating the commands; the
-input is split at the first ';;', even if it is in the middle of a
-quoted string.
-
-If a file ".pdbrc" exists in your home directory or in the current
-directory, it is read in and executed as if it had been typed at the
-debugger prompt. This is particularly useful for aliases. If both
-files exist, the one in the home directory is read first and aliases
-defined there can be overriden by the local file.
-
-Aside from aliases, the debugger is not directly programmable; but it
-is implemented as a class from which you can derive your own debugger
-class, which you can make as fancy as you like.
-
-
-Debugger commands
-=================
-
-h(elp)
- Without argument, print the list of available commands. With
- a command name as argument, print help about that command
- (this is currently not implemented).
-
-w(here)
- Print a stack trace, with the most recent frame at the bottom.
- An arrow indicates the "current frame", which determines the
- context of most commands.
-
-d(own)
- Move the current frame one level down in the stack trace
- (to a newer frame).
-
-u(p)
- Move the current frame one level up in the stack trace
- (to an older frame).
-
-b(reak) [ ([filename:]lineno | function) [, condition] ]
- With a filename:line number argument, set a break there. If
- filename is omitted, use the current file. With a function
- name, set a break at the first executable line of that
- function. Without argument, list all breaks. Each breakpoint
- is assigned a number to which all the other breakpoint
- commands refer.
-
- The condition argument, if present, is a string which must
- evaluate to true in order for the breakpoint to be honored.
-
-tbreak [ ([filename:]lineno | function) [, condition] ]
- Temporary breakpoint, which is removed automatically when it
- is first hit. The arguments are the same as break.
-
-cl(ear) [bpnumber [bpnumber ...] ]
- With a space separated list of breakpoint numbers, clear those
- breakpoints. Without argument, clear all breaks (but first
- ask confirmation).
-
-disable bpnumber [bpnumber ...]
- Disables the breakpoints given as a space separated list of
- breakpoint numbers. Disabling a breakpoint means it cannot
- cause the program to stop execution, but unlike clearing a
- breakpoint, it remains in the list of breakpoints and can be
- (re-)enabled.
-
-enable bpnumber [bpnumber ...]
- Enables the breakpoints specified.
-
-ignore bpnumber count
- Sets the ignore count for the given breakpoint number. If
- count is omitted, the ignore count is set to 0. A breakpoint
- becomes active when the ignore count is zero. When non-zero,
- the count is decremented each time the breakpoint is reached
- and the breakpoint is not disabled and any associated
- condition evaluates to true.
-
-condition bpnumber condition
- condition is an expression which must evaluate to true before
- the breakpoint is honored. If condition is absent, any
- existing condition is removed; i.e., the breakpoint is made
- unconditional.
-
-s(tep)
- Execute the current line, stop at the first possible occasion
- (either in a function that is called or in the current function).
-
-n(ext)
- Continue execution until the next line in the current function
- is reached or it returns.
-
-r(eturn)
- Continue execution until the current function returns.
-
-c(ont(inue))
- Continue execution, only stop when a breakpoint is encountered.
-
-l(ist) [first [,last]]
- List source code for the current file.
- Without arguments, list 11 lines around the current line
- or continue the previous listing.
- With one argument, list 11 lines starting at that line.
- With two arguments, list the given range;
- if the second argument is less than the first, it is a count.
-
-a(rgs)
- Print the argument list of the current function.
-
-p expression
- Print the value of the expression.
-
-(!) statement
- Execute the (one-line) statement in the context of the current
- stack frame. The exclamation point can be omitted unless the
- first word of the statement resembles a debugger command. To
- assign to a global variable you must always prefix the command
- with a 'global' command, e.g.:
- (Pdb) global list_options; list_options = ['-l']
- (Pdb)
-
-
-whatis arg
- Prints the type of the argument.
-
-alias [name [command]]
- Creates an alias called 'name' that executes 'command'. The
- command must *not* be enclosed in quotes. Replaceable
- parameters can be indicated by %1, %2, and so on, while %* is
- replaced by all the parameters. If no command is given, the
- current alias for name is shown. If no name is given, all
- aliases are listed.
-
- Aliases may be nested and can contain anything that can be
- legally typed at the pdb prompt. Note! You *can* override
- internal pdb commands with aliases! Those internal commands
- are then hidden until the alias is removed. Aliasing is
- recursively applied to the first word of the command line; all
- other words in the line are left alone.
-
- As an example, here are two useful aliases (especially when
- placed in the .pdbrc file):
-
- #Print instance variables (usage "pi classInst")
- alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
- #Print instance variables in self
- alias ps pi self
-
-unalias name
- Deletes the specified alias.
-
-q(uit)
- Quit from the debugger.
- The program being executed is aborted.
diff --git a/sys/lib/python/pdb.py b/sys/lib/python/pdb.py
deleted file mode 100755
index f355d45e6..000000000
--- a/sys/lib/python/pdb.py
+++ /dev/null
@@ -1,1234 +0,0 @@
-#! /usr/bin/env python
-
-"""A Python debugger."""
-
-# (See pdb.doc for documentation.)
-
-import sys
-import linecache
-import cmd
-import bdb
-from repr import Repr
-import os
-import re
-import pprint
-import traceback
-# Create a custom safe Repr instance and increase its maxstring.
-# The default of 30 truncates error messages too easily.
-_repr = Repr()
-_repr.maxstring = 200
-_saferepr = _repr.repr
-
-__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
- "post_mortem", "help"]
-
-def find_function(funcname, filename):
- cre = re.compile(r'def\s+%s\s*[(]' % funcname)
- try:
- fp = open(filename)
- except IOError:
- return None
- # consumer of this info expects the first line to be 1
- lineno = 1
- answer = None
- while 1:
- line = fp.readline()
- if line == '':
- break
- if cre.match(line):
- answer = funcname, filename, lineno
- break
- lineno = lineno + 1
- fp.close()
- return answer
-
-
-# Interaction prompt line will separate file and call info from code
-# text using value of line_prefix string. A newline and arrow may
-# be to your liking. You can set it once pdb is imported using the
-# command "pdb.line_prefix = '\n% '".
-# line_prefix = ': ' # Use this to get the old situation back
-line_prefix = '\n-> ' # Probably a better default
-
-class Pdb(bdb.Bdb, cmd.Cmd):
-
- def __init__(self, completekey='tab', stdin=None, stdout=None):
- bdb.Bdb.__init__(self)
- cmd.Cmd.__init__(self, completekey, stdin, stdout)
- if stdout:
- self.use_rawinput = 0
- self.prompt = '(Pdb) '
- self.aliases = {}
- self.mainpyfile = ''
- self._wait_for_mainpyfile = 0
- # Try to load readline if it exists
- try:
- import readline
- except ImportError:
- pass
-
- # Read $HOME/.pdbrc and ./.pdbrc
- self.rcLines = []
- if 'HOME' in os.environ:
- envHome = os.environ['HOME']
- try:
- rcFile = open(os.path.join(envHome, ".pdbrc"))
- except IOError:
- pass
- else:
- for line in rcFile.readlines():
- self.rcLines.append(line)
- rcFile.close()
- try:
- rcFile = open(".pdbrc")
- except IOError:
- pass
- else:
- for line in rcFile.readlines():
- self.rcLines.append(line)
- rcFile.close()
-
- self.commands = {} # associates a command list to breakpoint numbers
- self.commands_doprompt = {} # for each bp num, tells if the prompt must be disp. after execing the cmd list
- self.commands_silent = {} # for each bp num, tells if the stack trace must be disp. after execing the cmd list
- self.commands_defining = False # True while in the process of defining a command list
- self.commands_bnum = None # The breakpoint number for which we are defining a list
-
- def reset(self):
- bdb.Bdb.reset(self)
- self.forget()
-
- def forget(self):
- self.lineno = None
- self.stack = []
- self.curindex = 0
- self.curframe = None
-
- def setup(self, f, t):
- self.forget()
- self.stack, self.curindex = self.get_stack(f, t)
- self.curframe = self.stack[self.curindex][0]
- self.execRcLines()
-
- # Can be executed earlier than 'setup' if desired
- def execRcLines(self):
- if self.rcLines:
- # Make local copy because of recursion
- rcLines = self.rcLines
- # executed only once
- self.rcLines = []
- for line in rcLines:
- line = line[:-1]
- if len(line) > 0 and line[0] != '#':
- self.onecmd(line)
-
- # Override Bdb methods
-
- def user_call(self, frame, argument_list):
- """This method is called when there is the remote possibility
- that we ever need to stop in this function."""
- if self._wait_for_mainpyfile:
- return
- if self.stop_here(frame):
- print >>self.stdout, '--Call--'
- self.interaction(frame, None)
-
- def user_line(self, frame):
- """This function is called when we stop or break at this line."""
- if self._wait_for_mainpyfile:
- if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
- or frame.f_lineno<= 0):
- return
- self._wait_for_mainpyfile = 0
- if self.bp_commands(frame):
- self.interaction(frame, None)
-
- def bp_commands(self,frame):
- """ Call every command that was set for the current active breakpoint (if there is one)
- Returns True if the normal interaction function must be called, False otherwise """
- #self.currentbp is set in bdb.py in bdb.break_here if a breakpoint was hit
- if getattr(self,"currentbp",False) and self.currentbp in self.commands:
- currentbp = self.currentbp
- self.currentbp = 0
- lastcmd_back = self.lastcmd
- self.setup(frame, None)
- for line in self.commands[currentbp]:
- self.onecmd(line)
- self.lastcmd = lastcmd_back
- if not self.commands_silent[currentbp]:
- self.print_stack_entry(self.stack[self.curindex])
- if self.commands_doprompt[currentbp]:
- self.cmdloop()
- self.forget()
- return
- return 1
-
- def user_return(self, frame, return_value):
- """This function is called when a return trap is set here."""
- frame.f_locals['__return__'] = return_value
- print >>self.stdout, '--Return--'
- self.interaction(frame, None)
-
- def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
- """This function is called if an exception occurs,
- but only if we are to stop at or just below this level."""
- frame.f_locals['__exception__'] = exc_type, exc_value
- if type(exc_type) == type(''):
- exc_type_name = exc_type
- else: exc_type_name = exc_type.__name__
- print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
- self.interaction(frame, exc_traceback)
-
- # General interaction function
-
- def interaction(self, frame, traceback):
- self.setup(frame, traceback)
- self.print_stack_entry(self.stack[self.curindex])
- self.cmdloop()
- self.forget()
-
- def default(self, line):
- if line[:1] == '!': line = line[1:]
- locals = self.curframe.f_locals
- globals = self.curframe.f_globals
- try:
- code = compile(line + '\n', '<stdin>', 'single')
- exec code in globals, locals
- except:
- t, v = sys.exc_info()[:2]
- if type(t) == type(''):
- exc_type_name = t
- else: exc_type_name = t.__name__
- print >>self.stdout, '***', exc_type_name + ':', v
-
- def precmd(self, line):
- """Handle alias expansion and ';;' separator."""
- if not line.strip():
- return line
- args = line.split()
- while args[0] in self.aliases:
- line = self.aliases[args[0]]
- ii = 1
- for tmpArg in args[1:]:
- line = line.replace("%" + str(ii),
- tmpArg)
- ii = ii + 1
- line = line.replace("%*", ' '.join(args[1:]))
- args = line.split()
- # split into ';;' separated commands
- # unless it's an alias command
- if args[0] != 'alias':
- marker = line.find(';;')
- if marker >= 0:
- # queue up everything after marker
- next = line[marker+2:].lstrip()
- self.cmdqueue.append(next)
- line = line[:marker].rstrip()
- return line
-
- def onecmd(self, line):
- """Interpret the argument as though it had been typed in response
- to the prompt.
-
- Checks whether this line is typed at the normal prompt or in
- a breakpoint command list definition.
- """
- if not self.commands_defining:
- return cmd.Cmd.onecmd(self, line)
- else:
- return self.handle_command_def(line)
-
- def handle_command_def(self,line):
- """ Handles one command line during command list definition. """
- cmd, arg, line = self.parseline(line)
- if cmd == 'silent':
- self.commands_silent[self.commands_bnum] = True
- return # continue to handle other cmd def in the cmd list
- elif cmd == 'end':
- self.cmdqueue = []
- return 1 # end of cmd list
- cmdlist = self.commands[self.commands_bnum]
- if (arg):
- cmdlist.append(cmd+' '+arg)
- else:
- cmdlist.append(cmd)
- # Determine if we must stop
- try:
- func = getattr(self, 'do_' + cmd)
- except AttributeError:
- func = self.default
- if func.func_name in self.commands_resuming : # one of the resuming commands.
- self.commands_doprompt[self.commands_bnum] = False
- self.cmdqueue = []
- return 1
- return
-
- # Command definitions, called by cmdloop()
- # The argument is the remaining string on the command line
- # Return true to exit from the command loop
-
- do_h = cmd.Cmd.do_help
-
- def do_commands(self, arg):
- """Defines a list of commands associated to a breakpoint
- Those commands will be executed whenever the breakpoint causes the program to stop execution."""
- if not arg:
- bnum = len(bdb.Breakpoint.bpbynumber)-1
- else:
- try:
- bnum = int(arg)
- except:
- print >>self.stdout, "Usage : commands [bnum]\n ...\n end"
- return
- self.commands_bnum = bnum
- self.commands[bnum] = []
- self.commands_doprompt[bnum] = True
- self.commands_silent[bnum] = False
- prompt_back = self.prompt
- self.prompt = '(com) '
- self.commands_defining = True
- self.cmdloop()
- self.commands_defining = False
- self.prompt = prompt_back
-
- def do_break(self, arg, temporary = 0):
- # break [ ([filename:]lineno | function) [, "condition"] ]
- if not arg:
- if self.breaks: # There's at least one
- print >>self.stdout, "Num Type Disp Enb Where"
- for bp in bdb.Breakpoint.bpbynumber:
- if bp:
- bp.bpprint(self.stdout)
- return
- # parse arguments; comma has lowest precedence
- # and cannot occur in filename
- filename = None
- lineno = None
- cond = None
- comma = arg.find(',')
- if comma > 0:
- # parse stuff after comma: "condition"
- cond = arg[comma+1:].lstrip()
- arg = arg[:comma].rstrip()
- # parse stuff before comma: [filename:]lineno | function
- colon = arg.rfind(':')
- funcname = None
- if colon >= 0:
- filename = arg[:colon].rstrip()
- f = self.lookupmodule(filename)
- if not f:
- print >>self.stdout, '*** ', repr(filename),
- print >>self.stdout, 'not found from sys.path'
- return
- else:
- filename = f
- arg = arg[colon+1:].lstrip()
- try:
- lineno = int(arg)
- except ValueError, msg:
- print >>self.stdout, '*** Bad lineno:', arg
- return
- else:
- # no colon; can be lineno or function
- try:
- lineno = int(arg)
- except ValueError:
- try:
- func = eval(arg,
- self.curframe.f_globals,
- self.curframe.f_locals)
- except:
- func = arg
- try:
- if hasattr(func, 'im_func'):
- func = func.im_func
- code = func.func_code
- #use co_name to identify the bkpt (function names
- #could be aliased, but co_name is invariant)
- funcname = code.co_name
- lineno = code.co_firstlineno
- filename = code.co_filename
- except:
- # last thing to try
- (ok, filename, ln) = self.lineinfo(arg)
- if not ok:
- print >>self.stdout, '*** The specified object',
- print >>self.stdout, repr(arg),
- print >>self.stdout, 'is not a function'
- print >>self.stdout, 'or was not found along sys.path.'
- return
- funcname = ok # ok contains a function name
- lineno = int(ln)
- if not filename:
- filename = self.defaultFile()
- # Check for reasonable breakpoint
- line = self.checkline(filename, lineno)
- if line:
- # now set the break point
- err = self.set_break(filename, line, temporary, cond, funcname)
- if err: print >>self.stdout, '***', err
- else:
- bp = self.get_breaks(filename, line)[-1]
- print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
- bp.file,
- bp.line)
-
- # To be overridden in derived debuggers
- def defaultFile(self):
- """Produce a reasonable default."""
- filename = self.curframe.f_code.co_filename
- if filename == '<string>' and self.mainpyfile:
- filename = self.mainpyfile
- return filename
-
- do_b = do_break
-
- def do_tbreak(self, arg):
- self.do_break(arg, 1)
-
- def lineinfo(self, identifier):
- failed = (None, None, None)
- # Input is identifier, may be in single quotes
- idstring = identifier.split("'")
- if len(idstring) == 1:
- # not in single quotes
- id = idstring[0].strip()
- elif len(idstring) == 3:
- # quoted
- id = idstring[1].strip()
- else:
- return failed
- if id == '': return failed
- parts = id.split('.')
- # Protection for derived debuggers
- if parts[0] == 'self':
- del parts[0]
- if len(parts) == 0:
- return failed
- # Best first guess at file to look at
- fname = self.defaultFile()
- if len(parts) == 1:
- item = parts[0]
- else:
- # More than one part.
- # First is module, second is method/class
- f = self.lookupmodule(parts[0])
- if f:
- fname = f
- item = parts[1]
- answer = find_function(item, fname)
- return answer or failed
-
- def checkline(self, filename, lineno):
- """Check whether specified line seems to be executable.
-
- Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
- line or EOF). Warning: testing is not comprehensive.
- """
- line = linecache.getline(filename, lineno)
- if not line:
- print >>self.stdout, 'End of file'
- return 0
- line = line.strip()
- # Don't allow setting breakpoint at a blank line
- if (not line or (line[0] == '#') or
- (line[:3] == '"""') or line[:3] == "'''"):
- print >>self.stdout, '*** Blank or comment'
- return 0
- return lineno
-
- def do_enable(self, arg):
- args = arg.split()
- for i in args:
- try:
- i = int(i)
- except ValueError:
- print >>self.stdout, 'Breakpoint index %r is not a number' % i
- continue
-
- if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
- print >>self.stdout, 'No breakpoint numbered', i
- continue
-
- bp = bdb.Breakpoint.bpbynumber[i]
- if bp:
- bp.enable()
-
- def do_disable(self, arg):
- args = arg.split()
- for i in args:
- try:
- i = int(i)
- except ValueError:
- print >>self.stdout, 'Breakpoint index %r is not a number' % i
- continue
-
- if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
- print >>self.stdout, 'No breakpoint numbered', i
- continue
-
- bp = bdb.Breakpoint.bpbynumber[i]
- if bp:
- bp.disable()
-
- def do_condition(self, arg):
- # arg is breakpoint number and condition
- args = arg.split(' ', 1)
- try:
- bpnum = int(args[0].strip())
- except ValueError:
- # something went wrong
- print >>self.stdout, \
- 'Breakpoint index %r is not a number' % args[0]
- return
- try:
- cond = args[1]
- except:
- cond = None
- try:
- bp = bdb.Breakpoint.bpbynumber[bpnum]
- except IndexError:
- print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
- return
- if bp:
- bp.cond = cond
- if not cond:
- print >>self.stdout, 'Breakpoint', bpnum,
- print >>self.stdout, 'is now unconditional.'
-
- def do_ignore(self,arg):
- """arg is bp number followed by ignore count."""
- args = arg.split()
- try:
- bpnum = int(args[0].strip())
- except ValueError:
- # something went wrong
- print >>self.stdout, \
- 'Breakpoint index %r is not a number' % args[0]
- return
- try:
- count = int(args[1].strip())
- except:
- count = 0
- try:
- bp = bdb.Breakpoint.bpbynumber[bpnum]
- except IndexError:
- print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
- return
- if bp:
- bp.ignore = count
- if count > 0:
- reply = 'Will ignore next '
- if count > 1:
- reply = reply + '%d crossings' % count
- else:
- reply = reply + '1 crossing'
- print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
- else:
- print >>self.stdout, 'Will stop next time breakpoint',
- print >>self.stdout, bpnum, 'is reached.'
-
- def do_clear(self, arg):
- """Three possibilities, tried in this order:
- clear -> clear all breaks, ask for confirmation
- clear file:lineno -> clear all breaks at file:lineno
- clear bpno bpno ... -> clear breakpoints by number"""
- if not arg:
- try:
- reply = raw_input('Clear all breaks? ')
- except EOFError:
- reply = 'no'
- reply = reply.strip().lower()
- if reply in ('y', 'yes'):
- self.clear_all_breaks()
- return
- if ':' in arg:
- # Make sure it works for "clear C:\foo\bar.py:12"
- i = arg.rfind(':')
- filename = arg[:i]
- arg = arg[i+1:]
- try:
- lineno = int(arg)
- except ValueError:
- err = "Invalid line number (%s)" % arg
- else:
- err = self.clear_break(filename, lineno)
- if err: print >>self.stdout, '***', err
- return
- numberlist = arg.split()
- for i in numberlist:
- try:
- i = int(i)
- except ValueError:
- print >>self.stdout, 'Breakpoint index %r is not a number' % i
- continue
-
- if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
- print >>self.stdout, 'No breakpoint numbered', i
- continue
- err = self.clear_bpbynumber(i)
- if err:
- print >>self.stdout, '***', err
- else:
- print >>self.stdout, 'Deleted breakpoint', i
- do_cl = do_clear # 'c' is already an abbreviation for 'continue'
-
- def do_where(self, arg):
- self.print_stack_trace()
- do_w = do_where
- do_bt = do_where
-
- def do_up(self, arg):
- if self.curindex == 0:
- print >>self.stdout, '*** Oldest frame'
- else:
- self.curindex = self.curindex - 1
- self.curframe = self.stack[self.curindex][0]
- self.print_stack_entry(self.stack[self.curindex])
- self.lineno = None
- do_u = do_up
-
- def do_down(self, arg):
- if self.curindex + 1 == len(self.stack):
- print >>self.stdout, '*** Newest frame'
- else:
- self.curindex = self.curindex + 1
- self.curframe = self.stack[self.curindex][0]
- self.print_stack_entry(self.stack[self.curindex])
- self.lineno = None
- do_d = do_down
-
- def do_step(self, arg):
- self.set_step()
- return 1
- do_s = do_step
-
- def do_next(self, arg):
- self.set_next(self.curframe)
- return 1
- do_n = do_next
-
- def do_return(self, arg):
- self.set_return(self.curframe)
- return 1
- do_r = do_return
-
- def do_continue(self, arg):
- self.set_continue()
- return 1
- do_c = do_cont = do_continue
-
- def do_jump(self, arg):
- if self.curindex + 1 != len(self.stack):
- print >>self.stdout, "*** You can only jump within the bottom frame"
- return
- try:
- arg = int(arg)
- except ValueError:
- print >>self.stdout, "*** The 'jump' command requires a line number."
- else:
- try:
- # Do the jump, fix up our copy of the stack, and display the
- # new position
- self.curframe.f_lineno = arg
- self.stack[self.curindex] = self.stack[self.curindex][0], arg
- self.print_stack_entry(self.stack[self.curindex])
- except ValueError, e:
- print >>self.stdout, '*** Jump failed:', e
- do_j = do_jump
-
- def do_debug(self, arg):
- sys.settrace(None)
- globals = self.curframe.f_globals
- locals = self.curframe.f_locals
- p = Pdb()
- p.prompt = "(%s) " % self.prompt.strip()
- print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
- sys.call_tracing(p.run, (arg, globals, locals))
- print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
- sys.settrace(self.trace_dispatch)
- self.lastcmd = p.lastcmd
-
- def do_quit(self, arg):
- self._user_requested_quit = 1
- self.set_quit()
- return 1
-
- do_q = do_quit
- do_exit = do_quit
-
- def do_EOF(self, arg):
- print >>self.stdout
- self._user_requested_quit = 1
- self.set_quit()
- return 1
-
- def do_args(self, arg):
- f = self.curframe
- co = f.f_code
- dict = f.f_locals
- n = co.co_argcount
- if co.co_flags & 4: n = n+1
- if co.co_flags & 8: n = n+1
- for i in range(n):
- name = co.co_varnames[i]
- print >>self.stdout, name, '=',
- if name in dict: print >>self.stdout, dict[name]
- else: print >>self.stdout, "*** undefined ***"
- do_a = do_args
-
- def do_retval(self, arg):
- if '__return__' in self.curframe.f_locals:
- print >>self.stdout, self.curframe.f_locals['__return__']
- else:
- print >>self.stdout, '*** Not yet returned!'
- do_rv = do_retval
-
- def _getval(self, arg):
- try:
- return eval(arg, self.curframe.f_globals,
- self.curframe.f_locals)
- except:
- t, v = sys.exc_info()[:2]
- if isinstance(t, str):
- exc_type_name = t
- else: exc_type_name = t.__name__
- print >>self.stdout, '***', exc_type_name + ':', repr(v)
- raise
-
- def do_p(self, arg):
- try:
- print >>self.stdout, repr(self._getval(arg))
- except:
- pass
-
- def do_pp(self, arg):
- try:
- pprint.pprint(self._getval(arg), self.stdout)
- except:
- pass
-
- def do_list(self, arg):
- self.lastcmd = 'list'
- last = None
- if arg:
- try:
- x = eval(arg, {}, {})
- if type(x) == type(()):
- first, last = x
- first = int(first)
- last = int(last)
- if last < first:
- # Assume it's a count
- last = first + last
- else:
- first = max(1, int(x) - 5)
- except:
- print >>self.stdout, '*** Error in argument:', repr(arg)
- return
- elif self.lineno is None:
- first = max(1, self.curframe.f_lineno - 5)
- else:
- first = self.lineno + 1
- if last is None:
- last = first + 10
- filename = self.curframe.f_code.co_filename
- breaklist = self.get_file_breaks(filename)
- try:
- for lineno in range(first, last+1):
- line = linecache.getline(filename, lineno)
- if not line:
- print >>self.stdout, '[EOF]'
- break
- else:
- s = repr(lineno).rjust(3)
- if len(s) < 4: s = s + ' '
- if lineno in breaklist: s = s + 'B'
- else: s = s + ' '
- if lineno == self.curframe.f_lineno:
- s = s + '->'
- print >>self.stdout, s + '\t' + line,
- self.lineno = lineno
- except KeyboardInterrupt:
- pass
- do_l = do_list
-
- def do_whatis(self, arg):
- try:
- value = eval(arg, self.curframe.f_globals,
- self.curframe.f_locals)
- except:
- t, v = sys.exc_info()[:2]
- if type(t) == type(''):
- exc_type_name = t
- else: exc_type_name = t.__name__
- print >>self.stdout, '***', exc_type_name + ':', repr(v)
- return
- code = None
- # Is it a function?
- try: code = value.func_code
- except: pass
- if code:
- print >>self.stdout, 'Function', code.co_name
- return
- # Is it an instance method?
- try: code = value.im_func.func_code
- except: pass
- if code:
- print >>self.stdout, 'Method', code.co_name
- return
- # None of the above...
- print >>self.stdout, type(value)
-
- def do_alias(self, arg):
- args = arg.split()
- if len(args) == 0:
- keys = self.aliases.keys()
- keys.sort()
- for alias in keys:
- print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
- return
- if args[0] in self.aliases and len(args) == 1:
- print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
- else:
- self.aliases[args[0]] = ' '.join(args[1:])
-
- def do_unalias(self, arg):
- args = arg.split()
- if len(args) == 0: return
- if args[0] in self.aliases:
- del self.aliases[args[0]]
-
- #list of all the commands making the program resume execution.
- commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
- 'do_quit', 'do_jump']
-
- # Print a traceback starting at the top stack frame.
- # The most recently entered frame is printed last;
- # this is different from dbx and gdb, but consistent with
- # the Python interpreter's stack trace.
- # It is also consistent with the up/down commands (which are
- # compatible with dbx and gdb: up moves towards 'main()'
- # and down moves towards the most recent stack frame).
-
- def print_stack_trace(self):
- try:
- for frame_lineno in self.stack:
- self.print_stack_entry(frame_lineno)
- except KeyboardInterrupt:
- pass
-
- def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
- frame, lineno = frame_lineno
- if frame is self.curframe:
- print >>self.stdout, '>',
- else:
- print >>self.stdout, ' ',
- print >>self.stdout, self.format_stack_entry(frame_lineno,
- prompt_prefix)
-
-
- # Help methods (derived from pdb.doc)
-
- def help_help(self):
- self.help_h()
-
- def help_h(self):
- print >>self.stdout, """h(elp)
-Without argument, print the list of available commands.
-With a command name as argument, print help about that command
-"help pdb" pipes the full documentation file to the $PAGER
-"help exec" gives help on the ! command"""
-
- def help_where(self):
- self.help_w()
-
- def help_w(self):
- print >>self.stdout, """w(here)
-Print a stack trace, with the most recent frame at the bottom.
-An arrow indicates the "current frame", which determines the
-context of most commands. 'bt' is an alias for this command."""
-
- help_bt = help_w
-
- def help_down(self):
- self.help_d()
-
- def help_d(self):
- print >>self.stdout, """d(own)
-Move the current frame one level down in the stack trace
-(to a newer frame)."""
-
- def help_up(self):
- self.help_u()
-
- def help_u(self):
- print >>self.stdout, """u(p)
-Move the current frame one level up in the stack trace
-(to an older frame)."""
-
- def help_break(self):
- self.help_b()
-
- def help_b(self):
- print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
-With a line number argument, set a break there in the current
-file. With a function name, set a break at first executable line
-of that function. Without argument, list all breaks. If a second
-argument is present, it is a string specifying an expression
-which must evaluate to true before the breakpoint is honored.
-
-The line number may be prefixed with a filename and a colon,
-to specify a breakpoint in another file (probably one that
-hasn't been loaded yet). The file is searched for on sys.path;
-the .py suffix may be omitted."""
-
- def help_clear(self):
- self.help_cl()
-
- def help_cl(self):
- print >>self.stdout, "cl(ear) filename:lineno"
- print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
-With a space separated list of breakpoint numbers, clear
-those breakpoints. Without argument, clear all breaks (but
-first ask confirmation). With a filename:lineno argument,
-clear all breaks at that line in that file.
-
-Note that the argument is different from previous versions of
-the debugger (in python distributions 1.5.1 and before) where
-a linenumber was used instead of either filename:lineno or
-breakpoint numbers."""
-
- def help_tbreak(self):
- print >>self.stdout, """tbreak same arguments as break, but breakpoint is
-removed when first hit."""
-
- def help_enable(self):
- print >>self.stdout, """enable bpnumber [bpnumber ...]
-Enables the breakpoints given as a space separated list of
-bp numbers."""
-
- def help_disable(self):
- print >>self.stdout, """disable bpnumber [bpnumber ...]
-Disables the breakpoints given as a space separated list of
-bp numbers."""
-
- def help_ignore(self):
- print >>self.stdout, """ignore bpnumber count
-Sets the ignore count for the given breakpoint number. A breakpoint
-becomes active when the ignore count is zero. When non-zero, the
-count is decremented each time the breakpoint is reached and the
-breakpoint is not disabled and any associated condition evaluates
-to true."""
-
- def help_condition(self):
- print >>self.stdout, """condition bpnumber str_condition
-str_condition is a string specifying an expression which
-must evaluate to true before the breakpoint is honored.
-If str_condition is absent, any existing condition is removed;
-i.e., the breakpoint is made unconditional."""
-
- def help_step(self):
- self.help_s()
-
- def help_s(self):
- print >>self.stdout, """s(tep)
-Execute the current line, stop at the first possible occasion
-(either in a function that is called or in the current function)."""
-
- def help_next(self):
- self.help_n()
-
- def help_n(self):
- print >>self.stdout, """n(ext)
-Continue execution until the next line in the current function
-is reached or it returns."""
-
- def help_return(self):
- self.help_r()
-
- def help_r(self):
- print >>self.stdout, """r(eturn)
-Continue execution until the current function returns."""
-
- def help_continue(self):
- self.help_c()
-
- def help_cont(self):
- self.help_c()
-
- def help_c(self):
- print >>self.stdout, """c(ont(inue))
-Continue execution, only stop when a breakpoint is encountered."""
-
- def help_jump(self):
- self.help_j()
-
- def help_j(self):
- print >>self.stdout, """j(ump) lineno
-Set the next line that will be executed."""
-
- def help_debug(self):
- print >>self.stdout, """debug code
-Enter a recursive debugger that steps through the code argument
-(which is an arbitrary expression or statement to be executed
-in the current environment)."""
-
- def help_list(self):
- self.help_l()
-
- def help_l(self):
- print >>self.stdout, """l(ist) [first [,last]]
-List source code for the current file.
-Without arguments, list 11 lines around the current line
-or continue the previous listing.
-With one argument, list 11 lines starting at that line.
-With two arguments, list the given range;
-if the second argument is less than the first, it is a count."""
-
- def help_args(self):
- self.help_a()
-
- def help_a(self):
- print >>self.stdout, """a(rgs)
-Print the arguments of the current function."""
-
- def help_p(self):
- print >>self.stdout, """p expression
-Print the value of the expression."""
-
- def help_pp(self):
- print >>self.stdout, """pp expression
-Pretty-print the value of the expression."""
-
- def help_exec(self):
- print >>self.stdout, """(!) statement
-Execute the (one-line) statement in the context of
-the current stack frame.
-The exclamation point can be omitted unless the first word
-of the statement resembles a debugger command.
-To assign to a global variable you must always prefix the
-command with a 'global' command, e.g.:
-(Pdb) global list_options; list_options = ['-l']
-(Pdb)"""
-
- def help_quit(self):
- self.help_q()
-
- def help_q(self):
- print >>self.stdout, """q(uit) or exit - Quit from the debugger.
-The program being executed is aborted."""
-
- help_exit = help_q
-
- def help_whatis(self):
- print >>self.stdout, """whatis arg
-Prints the type of the argument."""
-
- def help_EOF(self):
- print >>self.stdout, """EOF
-Handles the receipt of EOF as a command."""
-
- def help_alias(self):
- print >>self.stdout, """alias [name [command [parameter parameter ...] ]]
-Creates an alias called 'name' the executes 'command'. The command
-must *not* be enclosed in quotes. Replaceable parameters are
-indicated by %1, %2, and so on, while %* is replaced by all the
-parameters. If no command is given, the current alias for name
-is shown. If no name is given, all aliases are listed.
-
-Aliases may be nested and can contain anything that can be
-legally typed at the pdb prompt. Note! You *can* override
-internal pdb commands with aliases! Those internal commands
-are then hidden until the alias is removed. Aliasing is recursively
-applied to the first word of the command line; all other words
-in the line are left alone.
-
-Some useful aliases (especially when placed in the .pdbrc file) are:
-
-#Print instance variables (usage "pi classInst")
-alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
-
-#Print instance variables in self
-alias ps pi self
-"""
-
- def help_unalias(self):
- print >>self.stdout, """unalias name
-Deletes the specified alias."""
-
- def help_commands(self):
- print >>self.stdout, """commands [bpnumber]
-(com) ...
-(com) end
-(Pdb)
-
-Specify a list of commands for breakpoint number bpnumber. The
-commands themselves appear on the following lines. Type a line
-containing just 'end' to terminate the commands.
-
-To remove all commands from a breakpoint, type commands and
-follow it immediately with end; that is, give no commands.
-
-With no bpnumber argument, commands refers to the last
-breakpoint set.
-
-You can use breakpoint commands to start your program up again.
-Simply use the continue command, or step, or any other
-command that resumes execution.
-
-Specifying any command resuming execution (currently continue,
-step, next, return, jump, quit and their abbreviations) terminates
-the command list (as if that command was immediately followed by end).
-This is because any time you resume execution
-(even with a simple next or step), you may encounter
-another breakpoint--which could have its own command list, leading to
-ambiguities about which list to execute.
-
- If you use the 'silent' command in the command list, the
-usual message about stopping at a breakpoint is not printed. This may
-be desirable for breakpoints that are to print a specific message and
-then continue. If none of the other commands print anything, you
-see no sign that the breakpoint was reached.
-"""
-
- def help_pdb(self):
- help()
-
- def lookupmodule(self, filename):
- """Helper function for break/clear parsing -- may be overridden.
-
- lookupmodule() translates (possibly incomplete) file or module name
- into an absolute file name.
- """
- if os.path.isabs(filename) and os.path.exists(filename):
- return filename
- f = os.path.join(sys.path[0], filename)
- if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
- return f
- root, ext = os.path.splitext(filename)
- if ext == '':
- filename = filename + '.py'
- if os.path.isabs(filename):
- return filename
- for dirname in sys.path:
- while os.path.islink(dirname):
- dirname = os.readlink(dirname)
- fullname = os.path.join(dirname, filename)
- if os.path.exists(fullname):
- return fullname
- return None
-
- def _runscript(self, filename):
- # Start with fresh empty copy of globals and locals and tell the script
- # that it's being run as __main__ to avoid scripts being able to access
- # the pdb.py namespace.
- globals_ = {"__name__" : "__main__"}
- locals_ = globals_
-
- # When bdb sets tracing, a number of call and line events happens
- # BEFORE debugger even reaches user's code (and the exact sequence of
- # events depends on python version). So we take special measures to
- # avoid stopping before we reach the main script (see user_line and
- # user_call for details).
- self._wait_for_mainpyfile = 1
- self.mainpyfile = self.canonic(filename)
- self._user_requested_quit = 0
- statement = 'execfile( "%s")' % filename
- self.run(statement, globals=globals_, locals=locals_)
-
-# Simplified interface
-
-def run(statement, globals=None, locals=None):
- Pdb().run(statement, globals, locals)
-
-def runeval(expression, globals=None, locals=None):
- return Pdb().runeval(expression, globals, locals)
-
-def runctx(statement, globals, locals):
- # B/W compatibility
- run(statement, globals, locals)
-
-def runcall(*args, **kwds):
- return Pdb().runcall(*args, **kwds)
-
-def set_trace():
- Pdb().set_trace(sys._getframe().f_back)
-
-# Post-Mortem interface
-
-def post_mortem(t):
- p = Pdb()
- p.reset()
- while t.tb_next is not None:
- t = t.tb_next
- p.interaction(t.tb_frame, t)
-
-def pm():
- post_mortem(sys.last_traceback)
-
-
-# Main program for testing
-
-TESTCMD = 'import x; x.main()'
-
-def test():
- run(TESTCMD)
-
-# print help
-def help():
- for dirname in sys.path:
- fullname = os.path.join(dirname, 'pdb.doc')
- if os.path.exists(fullname):
- sts = os.system('${PAGER-more} '+fullname)
- if sts: print '*** Pager exit status:', sts
- break
- else:
- print 'Sorry, can\'t find the help file "pdb.doc"',
- print 'along the Python search path'
-
-def main():
- if not sys.argv[1:]:
- print "usage: pdb.py scriptfile [arg] ..."
- sys.exit(2)
-
- mainpyfile = sys.argv[1] # Get script filename
- if not os.path.exists(mainpyfile):
- print 'Error:', mainpyfile, 'does not exist'
- sys.exit(1)
-
- del sys.argv[0] # Hide "pdb.py" from argument list
-
- # Replace pdb's dir with script's dir in front of module search path.
- sys.path[0] = os.path.dirname(mainpyfile)
-
- # Note on saving/restoring sys.argv: it's a good idea when sys.argv was
- # modified by the script being debugged. It's a bad idea when it was
- # changed by the user from the command line. The best approach would be to
- # have a "restart" command which would allow explicit specification of
- # command line arguments.
- pdb = Pdb()
- while 1:
- try:
- pdb._runscript(mainpyfile)
- if pdb._user_requested_quit:
- break
- print "The program finished and will be restarted"
- except SystemExit:
- # In most cases SystemExit does not warrant a post-mortem session.
- print "The program exited via sys.exit(). Exit status: ",
- print sys.exc_info()[1]
- except:
- traceback.print_exc()
- print "Uncaught exception. Entering post mortem debugging"
- print "Running 'cont' or 'step' will restart the program"
- t = sys.exc_info()[2]
- while t.tb_next is not None:
- t = t.tb_next
- pdb.interaction(t.tb_frame,t)
- print "Post mortem debugger finished. The "+mainpyfile+" will be restarted"
-
-
-# When invoked as main program, invoke the debugger on a script
-if __name__=='__main__':
- main()
diff --git a/sys/lib/python/pickle.py b/sys/lib/python/pickle.py
deleted file mode 100644
index ecc0b2153..000000000
--- a/sys/lib/python/pickle.py
+++ /dev/null
@@ -1,1383 +0,0 @@
-"""Create portable serialized representations of Python objects.
-
-See module cPickle for a (much) faster implementation.
-See module copy_reg for a mechanism for registering custom picklers.
-See module pickletools source for extensive comments.
-
-Classes:
-
- Pickler
- Unpickler
-
-Functions:
-
- dump(object, file)
- dumps(object) -> string
- load(file) -> object
- loads(string) -> object
-
-Misc variables:
-
- __version__
- format_version
- compatible_formats
-
-"""
-
-__version__ = "$Revision: 38432 $" # Code version
-
-from types import *
-from copy_reg import dispatch_table
-from copy_reg import _extension_registry, _inverted_registry, _extension_cache
-import marshal
-import sys
-import struct
-import re
-
-__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
- "Unpickler", "dump", "dumps", "load", "loads"]
-
-# These are purely informational; no code uses these.
-format_version = "2.0" # File format version we write
-compatible_formats = ["1.0", # Original protocol 0
- "1.1", # Protocol 0 with INST added
- "1.2", # Original protocol 1
- "1.3", # Protocol 1 with BINFLOAT added
- "2.0", # Protocol 2
- ] # Old format versions we can read
-
-# Keep in synch with cPickle. This is the highest protocol number we
-# know how to read.
-HIGHEST_PROTOCOL = 2
-
-# Why use struct.pack() for pickling but marshal.loads() for
-# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
-# marshal.loads() is twice as fast as struct.unpack()!
-mloads = marshal.loads
-
-class PickleError(Exception):
- """A common base class for the other pickling exceptions."""
- pass
-
-class PicklingError(PickleError):
- """This exception is raised when an unpicklable object is passed to the
- dump() method.
-
- """
- pass
-
-class UnpicklingError(PickleError):
- """This exception is raised when there is a problem unpickling an object,
- such as a security violation.
-
- Note that other exceptions may also be raised during unpickling, including
- (but not necessarily limited to) AttributeError, EOFError, ImportError,
- and IndexError.
-
- """
- pass
-
-# An instance of _Stop is raised by Unpickler.load_stop() in response to
-# the STOP opcode, passing the object that is the result of unpickling.
-class _Stop(Exception):
- def __init__(self, value):
- self.value = value
-
-# Jython has PyStringMap; it's a dict subclass with string keys
-try:
- from org.python.core import PyStringMap
-except ImportError:
- PyStringMap = None
-
-# UnicodeType may or may not be exported (normally imported from types)
-try:
- UnicodeType
-except NameError:
- UnicodeType = None
-
-# Pickle opcodes. See pickletools.py for extensive docs. The listing
-# here is in kind-of alphabetical order of 1-character pickle code.
-# pickletools groups them by purpose.
-
-MARK = '(' # push special markobject on stack
-STOP = '.' # every pickle ends with STOP
-POP = '0' # discard topmost stack item
-POP_MARK = '1' # discard stack top through topmost markobject
-DUP = '2' # duplicate top stack item
-FLOAT = 'F' # push float object; decimal string argument
-INT = 'I' # push integer or bool; decimal string argument
-BININT = 'J' # push four-byte signed int
-BININT1 = 'K' # push 1-byte unsigned int
-LONG = 'L' # push long; decimal string argument
-BININT2 = 'M' # push 2-byte unsigned int
-NONE = 'N' # push None
-PERSID = 'P' # push persistent object; id is taken from string arg
-BINPERSID = 'Q' # " " " ; " " " " stack
-REDUCE = 'R' # apply callable to argtuple, both on stack
-STRING = 'S' # push string; NL-terminated string argument
-BINSTRING = 'T' # push string; counted binary string argument
-SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
-UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
-BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
-APPEND = 'a' # append stack top to list below it
-BUILD = 'b' # call __setstate__ or __dict__.update()
-GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
-DICT = 'd' # build a dict from stack items
-EMPTY_DICT = '}' # push empty dict
-APPENDS = 'e' # extend list on stack by topmost stack slice
-GET = 'g' # push item from memo on stack; index is string arg
-BINGET = 'h' # " " " " " " ; " " 1-byte arg
-INST = 'i' # build & push class instance
-LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
-LIST = 'l' # build list from topmost stack items
-EMPTY_LIST = ']' # push empty list
-OBJ = 'o' # build & push class instance
-PUT = 'p' # store stack top in memo; index is string arg
-BINPUT = 'q' # " " " " " ; " " 1-byte arg
-LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
-SETITEM = 's' # add key+value pair to dict
-TUPLE = 't' # build tuple from topmost stack items
-EMPTY_TUPLE = ')' # push empty tuple
-SETITEMS = 'u' # modify dict by adding topmost key+value pairs
-BINFLOAT = 'G' # push float; arg is 8-byte float encoding
-
-TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
-FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
-
-# Protocol 2
-
-PROTO = '\x80' # identify pickle protocol
-NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
-EXT1 = '\x82' # push object from extension registry; 1-byte index
-EXT2 = '\x83' # ditto, but 2-byte index
-EXT4 = '\x84' # ditto, but 4-byte index
-TUPLE1 = '\x85' # build 1-tuple from stack top
-TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
-TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
-NEWTRUE = '\x88' # push True
-NEWFALSE = '\x89' # push False
-LONG1 = '\x8a' # push long from < 256 bytes
-LONG4 = '\x8b' # push really big long
-
-_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
-
-
-__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
-del x
-
-
-# Pickling machinery
-
-class Pickler:
-
- def __init__(self, file, protocol=None):
- """This takes a file-like object for writing a pickle data stream.
-
- The optional protocol argument tells the pickler to use the
- given protocol; supported protocols are 0, 1, 2. The default
- protocol is 0, to be backwards compatible. (Protocol 0 is the
- only protocol that can be written to a file opened in text
- mode and read back successfully. When using a protocol higher
- than 0, make sure the file is opened in binary mode, both when
- pickling and unpickling.)
-
- Protocol 1 is more efficient than protocol 0; protocol 2 is
- more efficient than protocol 1.
-
- Specifying a negative protocol version selects the highest
- protocol version supported. The higher the protocol used, the
- more recent the version of Python needed to read the pickle
- produced.
-
- The file parameter must have a write() method that accepts a single
- string argument. It can thus be an open file object, a StringIO
- object, or any other custom object that meets this interface.
-
- """
- if protocol is None:
- protocol = 0
- if protocol < 0:
- protocol = HIGHEST_PROTOCOL
- elif not 0 <= protocol <= HIGHEST_PROTOCOL:
- raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
- self.write = file.write
- self.memo = {}
- self.proto = int(protocol)
- self.bin = protocol >= 1
- self.fast = 0
-
- def clear_memo(self):
- """Clears the pickler's "memo".
-
- The memo is the data structure that remembers which objects the
- pickler has already seen, so that shared or recursive objects are
- pickled by reference and not by value. This method is useful when
- re-using picklers.
-
- """
- self.memo.clear()
-
- def dump(self, obj):
- """Write a pickled representation of obj to the open file."""
- if self.proto >= 2:
- self.write(PROTO + chr(self.proto))
- self.save(obj)
- self.write(STOP)
-
- def memoize(self, obj):
- """Store an object in the memo."""
-
- # The Pickler memo is a dictionary mapping object ids to 2-tuples
- # that contain the Unpickler memo key and the object being memoized.
- # The memo key is written to the pickle and will become
- # the key in the Unpickler's memo. The object is stored in the
- # Pickler memo so that transient objects are kept alive during
- # pickling.
-
- # The use of the Unpickler memo length as the memo key is just a
- # convention. The only requirement is that the memo values be unique.
- # But there appears no advantage to any other scheme, and this
- # scheme allows the Unpickler memo to be implemented as a plain (but
- # growable) array, indexed by memo key.
- if self.fast:
- return
- assert id(obj) not in self.memo
- memo_len = len(self.memo)
- self.write(self.put(memo_len))
- self.memo[id(obj)] = memo_len, obj
-
- # Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
- def put(self, i, pack=struct.pack):
- if self.bin:
- if i < 256:
- return BINPUT + chr(i)
- else:
- return LONG_BINPUT + pack("<i", i)
-
- return PUT + repr(i) + '\n'
-
- # Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
- def get(self, i, pack=struct.pack):
- if self.bin:
- if i < 256:
- return BINGET + chr(i)
- else:
- return LONG_BINGET + pack("<i", i)
-
- return GET + repr(i) + '\n'
-
- def save(self, obj):
- # Check for persistent id (defined by a subclass)
- pid = self.persistent_id(obj)
- if pid:
- self.save_pers(pid)
- return
-
- # Check the memo
- x = self.memo.get(id(obj))
- if x:
- self.write(self.get(x[0]))
- return
-
- # Check the type dispatch table
- t = type(obj)
- f = self.dispatch.get(t)
- if f:
- f(self, obj) # Call unbound method with explicit self
- return
-
- # Check for a class with a custom metaclass; treat as regular class
- try:
- issc = issubclass(t, TypeType)
- except TypeError: # t is not a class (old Boost; see SF #502085)
- issc = 0
- if issc:
- self.save_global(obj)
- return
-
- # Check copy_reg.dispatch_table
- reduce = dispatch_table.get(t)
- if reduce:
- rv = reduce(obj)
- else:
- # Check for a __reduce_ex__ method, fall back to __reduce__
- reduce = getattr(obj, "__reduce_ex__", None)
- if reduce:
- rv = reduce(self.proto)
- else:
- reduce = getattr(obj, "__reduce__", None)
- if reduce:
- rv = reduce()
- else:
- raise PicklingError("Can't pickle %r object: %r" %
- (t.__name__, obj))
-
- # Check for string returned by reduce(), meaning "save as global"
- if type(rv) is StringType:
- self.save_global(obj, rv)
- return
-
- # Assert that reduce() returned a tuple
- if type(rv) is not TupleType:
- raise PicklingError("%s must return string or tuple" % reduce)
-
- # Assert that it returned an appropriately sized tuple
- l = len(rv)
- if not (2 <= l <= 5):
- raise PicklingError("Tuple returned by %s must have "
- "two to five elements" % reduce)
-
- # Save the reduce() output and finally memoize the object
- self.save_reduce(obj=obj, *rv)
-
- def persistent_id(self, obj):
- # This exists so a subclass can override it
- return None
-
- def save_pers(self, pid):
- # Save a persistent id reference
- if self.bin:
- self.save(pid)
- self.write(BINPERSID)
- else:
- self.write(PERSID + str(pid) + '\n')
-
- def save_reduce(self, func, args, state=None,
- listitems=None, dictitems=None, obj=None):
- # This API is called by some subclasses
-
- # Assert that args is a tuple or None
- if not isinstance(args, TupleType):
- raise PicklingError("args from reduce() should be a tuple")
-
- # Assert that func is callable
- if not callable(func):
- raise PicklingError("func from reduce should be callable")
-
- save = self.save
- write = self.write
-
- # Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
- if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
- # A __reduce__ implementation can direct protocol 2 to
- # use the more efficient NEWOBJ opcode, while still
- # allowing protocol 0 and 1 to work normally. For this to
- # work, the function returned by __reduce__ should be
- # called __newobj__, and its first argument should be a
- # new-style class. The implementation for __newobj__
- # should be as follows, although pickle has no way to
- # verify this:
- #
- # def __newobj__(cls, *args):
- # return cls.__new__(cls, *args)
- #
- # Protocols 0 and 1 will pickle a reference to __newobj__,
- # while protocol 2 (and above) will pickle a reference to
- # cls, the remaining args tuple, and the NEWOBJ code,
- # which calls cls.__new__(cls, *args) at unpickling time
- # (see load_newobj below). If __reduce__ returns a
- # three-tuple, the state from the third tuple item will be
- # pickled regardless of the protocol, calling __setstate__
- # at unpickling time (see load_build below).
- #
- # Note that no standard __newobj__ implementation exists;
- # you have to provide your own. This is to enforce
- # compatibility with Python 2.2 (pickles written using
- # protocol 0 or 1 in Python 2.3 should be unpicklable by
- # Python 2.2).
- cls = args[0]
- if not hasattr(cls, "__new__"):
- raise PicklingError(
- "args[0] from __newobj__ args has no __new__")
- if obj is not None and cls is not obj.__class__:
- raise PicklingError(
- "args[0] from __newobj__ args has the wrong class")
- args = args[1:]
- save(cls)
- save(args)
- write(NEWOBJ)
- else:
- save(func)
- save(args)
- write(REDUCE)
-
- if obj is not None:
- self.memoize(obj)
-
- # More new special cases (that work with older protocols as
- # well): when __reduce__ returns a tuple with 4 or 5 items,
- # the 4th and 5th item should be iterators that provide list
- # items and dict items (as (key, value) tuples), or None.
-
- if listitems is not None:
- self._batch_appends(listitems)
-
- if dictitems is not None:
- self._batch_setitems(dictitems)
-
- if state is not None:
- save(state)
- write(BUILD)
-
- # Methods below this point are dispatched through the dispatch table
-
- dispatch = {}
-
- def save_none(self, obj):
- self.write(NONE)
- dispatch[NoneType] = save_none
-
- def save_bool(self, obj):
- if self.proto >= 2:
- self.write(obj and NEWTRUE or NEWFALSE)
- else:
- self.write(obj and TRUE or FALSE)
- dispatch[bool] = save_bool
-
- def save_int(self, obj, pack=struct.pack):
- if self.bin:
- # If the int is small enough to fit in a signed 4-byte 2's-comp
- # format, we can store it more efficiently than the general
- # case.
- # First one- and two-byte unsigned ints:
- if obj >= 0:
- if obj <= 0xff:
- self.write(BININT1 + chr(obj))
- return
- if obj <= 0xffff:
- self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
- return
- # Next check for 4-byte signed ints:
- high_bits = obj >> 31 # note that Python shift sign-extends
- if high_bits == 0 or high_bits == -1:
- # All high bits are copies of bit 2**31, so the value
- # fits in a 4-byte signed int.
- self.write(BININT + pack("<i", obj))
- return
- # Text pickle, or int too big to fit in signed 4-byte format.
- self.write(INT + repr(obj) + '\n')
- dispatch[IntType] = save_int
-
- def save_long(self, obj, pack=struct.pack):
- if self.proto >= 2:
- bytes = encode_long(obj)
- n = len(bytes)
- if n < 256:
- self.write(LONG1 + chr(n) + bytes)
- else:
- self.write(LONG4 + pack("<i", n) + bytes)
- return
- self.write(LONG + repr(obj) + '\n')
- dispatch[LongType] = save_long
-
- def save_float(self, obj, pack=struct.pack):
- if self.bin:
- self.write(BINFLOAT + pack('>d', obj))
- else:
- self.write(FLOAT + repr(obj) + '\n')
- dispatch[FloatType] = save_float
-
- def save_string(self, obj, pack=struct.pack):
- if self.bin:
- n = len(obj)
- if n < 256:
- self.write(SHORT_BINSTRING + chr(n) + obj)
- else:
- self.write(BINSTRING + pack("<i", n) + obj)
- else:
- self.write(STRING + repr(obj) + '\n')
- self.memoize(obj)
- dispatch[StringType] = save_string
-
- def save_unicode(self, obj, pack=struct.pack):
- if self.bin:
- encoding = obj.encode('utf-8')
- n = len(encoding)
- self.write(BINUNICODE + pack("<i", n) + encoding)
- else:
- obj = obj.replace("\\", "\\u005c")
- obj = obj.replace("\n", "\\u000a")
- self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
- self.memoize(obj)
- dispatch[UnicodeType] = save_unicode
-
- if StringType == UnicodeType:
- # This is true for Jython
- def save_string(self, obj, pack=struct.pack):
- unicode = obj.isunicode()
-
- if self.bin:
- if unicode:
- obj = obj.encode("utf-8")
- l = len(obj)
- if l < 256 and not unicode:
- self.write(SHORT_BINSTRING + chr(l) + obj)
- else:
- s = pack("<i", l)
- if unicode:
- self.write(BINUNICODE + s + obj)
- else:
- self.write(BINSTRING + s + obj)
- else:
- if unicode:
- obj = obj.replace("\\", "\\u005c")
- obj = obj.replace("\n", "\\u000a")
- obj = obj.encode('raw-unicode-escape')
- self.write(UNICODE + obj + '\n')
- else:
- self.write(STRING + repr(obj) + '\n')
- self.memoize(obj)
- dispatch[StringType] = save_string
-
- def save_tuple(self, obj):
- write = self.write
- proto = self.proto
-
- n = len(obj)
- if n == 0:
- if proto:
- write(EMPTY_TUPLE)
- else:
- write(MARK + TUPLE)
- return
-
- save = self.save
- memo = self.memo
- if n <= 3 and proto >= 2:
- for element in obj:
- save(element)
- # Subtle. Same as in the big comment below.
- if id(obj) in memo:
- get = self.get(memo[id(obj)][0])
- write(POP * n + get)
- else:
- write(_tuplesize2code[n])
- self.memoize(obj)
- return
-
- # proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
- # has more than 3 elements.
- write(MARK)
- for element in obj:
- save(element)
-
- if id(obj) in memo:
- # Subtle. d was not in memo when we entered save_tuple(), so
- # the process of saving the tuple's elements must have saved
- # the tuple itself: the tuple is recursive. The proper action
- # now is to throw away everything we put on the stack, and
- # simply GET the tuple (it's already constructed). This check
- # could have been done in the "for element" loop instead, but
- # recursive tuples are a rare thing.
- get = self.get(memo[id(obj)][0])
- if proto:
- write(POP_MARK + get)
- else: # proto 0 -- POP_MARK not available
- write(POP * (n+1) + get)
- return
-
- # No recursion.
- self.write(TUPLE)
- self.memoize(obj)
-
- dispatch[TupleType] = save_tuple
-
- # save_empty_tuple() isn't used by anything in Python 2.3. However, I
- # found a Pickler subclass in Zope3 that calls it, so it's not harmless
- # to remove it.
- def save_empty_tuple(self, obj):
- self.write(EMPTY_TUPLE)
-
- def save_list(self, obj):
- write = self.write
-
- if self.bin:
- write(EMPTY_LIST)
- else: # proto 0 -- can't use EMPTY_LIST
- write(MARK + LIST)
-
- self.memoize(obj)
- self._batch_appends(iter(obj))
-
- dispatch[ListType] = save_list
-
- # Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
- # out of synch, though.
- _BATCHSIZE = 1000
-
- def _batch_appends(self, items):
- # Helper to batch up APPENDS sequences
- save = self.save
- write = self.write
-
- if not self.bin:
- for x in items:
- save(x)
- write(APPEND)
- return
-
- r = xrange(self._BATCHSIZE)
- while items is not None:
- tmp = []
- for i in r:
- try:
- x = items.next()
- tmp.append(x)
- except StopIteration:
- items = None
- break
- n = len(tmp)
- if n > 1:
- write(MARK)
- for x in tmp:
- save(x)
- write(APPENDS)
- elif n:
- save(tmp[0])
- write(APPEND)
- # else tmp is empty, and we're done
-
- def save_dict(self, obj):
- write = self.write
-
- if self.bin:
- write(EMPTY_DICT)
- else: # proto 0 -- can't use EMPTY_DICT
- write(MARK + DICT)
-
- self.memoize(obj)
- self._batch_setitems(obj.iteritems())
-
- dispatch[DictionaryType] = save_dict
- if not PyStringMap is None:
- dispatch[PyStringMap] = save_dict
-
- def _batch_setitems(self, items):
- # Helper to batch up SETITEMS sequences; proto >= 1 only
- save = self.save
- write = self.write
-
- if not self.bin:
- for k, v in items:
- save(k)
- save(v)
- write(SETITEM)
- return
-
- r = xrange(self._BATCHSIZE)
- while items is not None:
- tmp = []
- for i in r:
- try:
- tmp.append(items.next())
- except StopIteration:
- items = None
- break
- n = len(tmp)
- if n > 1:
- write(MARK)
- for k, v in tmp:
- save(k)
- save(v)
- write(SETITEMS)
- elif n:
- k, v = tmp[0]
- save(k)
- save(v)
- write(SETITEM)
- # else tmp is empty, and we're done
-
- def save_inst(self, obj):
- cls = obj.__class__
-
- memo = self.memo
- write = self.write
- save = self.save
-
- if hasattr(obj, '__getinitargs__'):
- args = obj.__getinitargs__()
- len(args) # XXX Assert it's a sequence
- _keep_alive(args, memo)
- else:
- args = ()
-
- write(MARK)
-
- if self.bin:
- save(cls)
- for arg in args:
- save(arg)
- write(OBJ)
- else:
- for arg in args:
- save(arg)
- write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
-
- self.memoize(obj)
-
- try:
- getstate = obj.__getstate__
- except AttributeError:
- stuff = obj.__dict__
- else:
- stuff = getstate()
- _keep_alive(stuff, memo)
- save(stuff)
- write(BUILD)
-
- dispatch[InstanceType] = save_inst
-
- def save_global(self, obj, name=None, pack=struct.pack):
- write = self.write
- memo = self.memo
-
- if name is None:
- name = obj.__name__
-
- module = getattr(obj, "__module__", None)
- if module is None:
- module = whichmodule(obj, name)
-
- try:
- __import__(module)
- mod = sys.modules[module]
- klass = getattr(mod, name)
- except (ImportError, KeyError, AttributeError):
- raise PicklingError(
- "Can't pickle %r: it's not found as %s.%s" %
- (obj, module, name))
- else:
- if klass is not obj:
- raise PicklingError(
- "Can't pickle %r: it's not the same object as %s.%s" %
- (obj, module, name))
-
- if self.proto >= 2:
- code = _extension_registry.get((module, name))
- if code:
- assert code > 0
- if code <= 0xff:
- write(EXT1 + chr(code))
- elif code <= 0xffff:
- write("%c%c%c" % (EXT2, code&0xff, code>>8))
- else:
- write(EXT4 + pack("<i", code))
- return
-
- write(GLOBAL + module + '\n' + name + '\n')
- self.memoize(obj)
-
- dispatch[ClassType] = save_global
- dispatch[FunctionType] = save_global
- dispatch[BuiltinFunctionType] = save_global
- dispatch[TypeType] = save_global
-
-# Pickling helpers
-
-def _keep_alive(x, memo):
- """Keeps a reference to the object x in the memo.
-
- Because we remember objects by their id, we have
- to assure that possibly temporary objects are kept
- alive by referencing them.
- We store a reference at the id of the memo, which should
- normally not be used unless someone tries to deepcopy
- the memo itself...
- """
- try:
- memo[id(memo)].append(x)
- except KeyError:
- # aha, this is the first one :-)
- memo[id(memo)]=[x]
-
-
-# A cache for whichmodule(), mapping a function object to the name of
-# the module in which the function was found.
-
-classmap = {} # called classmap for backwards compatibility
-
-def whichmodule(func, funcname):
- """Figure out the module in which a function occurs.
-
- Search sys.modules for the module.
- Cache in classmap.
- Return a module name.
- If the function cannot be found, return "__main__".
- """
- # Python functions should always get an __module__ from their globals.
- mod = getattr(func, "__module__", None)
- if mod is not None:
- return mod
- if func in classmap:
- return classmap[func]
-
- for name, module in sys.modules.items():
- if module is None:
- continue # skip dummy package entries
- if name != '__main__' and getattr(module, funcname, None) is func:
- break
- else:
- name = '__main__'
- classmap[func] = name
- return name
-
-
-# Unpickling machinery
-
-class Unpickler:
-
- def __init__(self, file):
- """This takes a file-like object for reading a pickle data stream.
-
- The protocol version of the pickle is detected automatically, so no
- proto argument is needed.
-
- The file-like object must have two methods, a read() method that
- takes an integer argument, and a readline() method that requires no
- arguments. Both methods should return a string. Thus file-like
- object can be a file object opened for reading, a StringIO object,
- or any other custom object that meets this interface.
- """
- self.readline = file.readline
- self.read = file.read
- self.memo = {}
-
- def load(self):
- """Read a pickled object representation from the open file.
-
- Return the reconstituted object hierarchy specified in the file.
- """
- self.mark = object() # any new unique object
- self.stack = []
- self.append = self.stack.append
- read = self.read
- dispatch = self.dispatch
- try:
- while 1:
- key = read(1)
- dispatch[key](self)
- except _Stop, stopinst:
- return stopinst.value
-
- # Return largest index k such that self.stack[k] is self.mark.
- # If the stack doesn't contain a mark, eventually raises IndexError.
- # This could be sped by maintaining another stack, of indices at which
- # the mark appears. For that matter, the latter stack would suffice,
- # and we wouldn't need to push mark objects on self.stack at all.
- # Doing so is probably a good thing, though, since if the pickle is
- # corrupt (or hostile) we may get a clue from finding self.mark embedded
- # in unpickled objects.
- def marker(self):
- stack = self.stack
- mark = self.mark
- k = len(stack)-1
- while stack[k] is not mark: k = k-1
- return k
-
- dispatch = {}
-
- def load_eof(self):
- raise EOFError
- dispatch[''] = load_eof
-
- def load_proto(self):
- proto = ord(self.read(1))
- if not 0 <= proto <= 2:
- raise ValueError, "unsupported pickle protocol: %d" % proto
- dispatch[PROTO] = load_proto
-
- def load_persid(self):
- pid = self.readline()[:-1]
- self.append(self.persistent_load(pid))
- dispatch[PERSID] = load_persid
-
- def load_binpersid(self):
- pid = self.stack.pop()
- self.append(self.persistent_load(pid))
- dispatch[BINPERSID] = load_binpersid
-
- def load_none(self):
- self.append(None)
- dispatch[NONE] = load_none
-
- def load_false(self):
- self.append(False)
- dispatch[NEWFALSE] = load_false
-
- def load_true(self):
- self.append(True)
- dispatch[NEWTRUE] = load_true
-
- def load_int(self):
- data = self.readline()
- if data == FALSE[1:]:
- val = False
- elif data == TRUE[1:]:
- val = True
- else:
- try:
- val = int(data)
- except ValueError:
- val = long(data)
- self.append(val)
- dispatch[INT] = load_int
-
- def load_binint(self):
- self.append(mloads('i' + self.read(4)))
- dispatch[BININT] = load_binint
-
- def load_binint1(self):
- self.append(ord(self.read(1)))
- dispatch[BININT1] = load_binint1
-
- def load_binint2(self):
- self.append(mloads('i' + self.read(2) + '\000\000'))
- dispatch[BININT2] = load_binint2
-
- def load_long(self):
- self.append(long(self.readline()[:-1], 0))
- dispatch[LONG] = load_long
-
- def load_long1(self):
- n = ord(self.read(1))
- bytes = self.read(n)
- self.append(decode_long(bytes))
- dispatch[LONG1] = load_long1
-
- def load_long4(self):
- n = mloads('i' + self.read(4))
- bytes = self.read(n)
- self.append(decode_long(bytes))
- dispatch[LONG4] = load_long4
-
- def load_float(self):
- self.append(float(self.readline()[:-1]))
- dispatch[FLOAT] = load_float
-
- def load_binfloat(self, unpack=struct.unpack):
- self.append(unpack('>d', self.read(8))[0])
- dispatch[BINFLOAT] = load_binfloat
-
- def load_string(self):
- rep = self.readline()[:-1]
- for q in "\"'": # double or single quote
- if rep.startswith(q):
- if not rep.endswith(q):
- raise ValueError, "insecure string pickle"
- rep = rep[len(q):-len(q)]
- break
- else:
- raise ValueError, "insecure string pickle"
- self.append(rep.decode("string-escape"))
- dispatch[STRING] = load_string
-
- def load_binstring(self):
- len = mloads('i' + self.read(4))
- self.append(self.read(len))
- dispatch[BINSTRING] = load_binstring
-
- def load_unicode(self):
- self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
- dispatch[UNICODE] = load_unicode
-
- def load_binunicode(self):
- len = mloads('i' + self.read(4))
- self.append(unicode(self.read(len),'utf-8'))
- dispatch[BINUNICODE] = load_binunicode
-
- def load_short_binstring(self):
- len = ord(self.read(1))
- self.append(self.read(len))
- dispatch[SHORT_BINSTRING] = load_short_binstring
-
- def load_tuple(self):
- k = self.marker()
- self.stack[k:] = [tuple(self.stack[k+1:])]
- dispatch[TUPLE] = load_tuple
-
- def load_empty_tuple(self):
- self.stack.append(())
- dispatch[EMPTY_TUPLE] = load_empty_tuple
-
- def load_tuple1(self):
- self.stack[-1] = (self.stack[-1],)
- dispatch[TUPLE1] = load_tuple1
-
- def load_tuple2(self):
- self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
- dispatch[TUPLE2] = load_tuple2
-
- def load_tuple3(self):
- self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
- dispatch[TUPLE3] = load_tuple3
-
- def load_empty_list(self):
- self.stack.append([])
- dispatch[EMPTY_LIST] = load_empty_list
-
- def load_empty_dictionary(self):
- self.stack.append({})
- dispatch[EMPTY_DICT] = load_empty_dictionary
-
- def load_list(self):
- k = self.marker()
- self.stack[k:] = [self.stack[k+1:]]
- dispatch[LIST] = load_list
-
- def load_dict(self):
- k = self.marker()
- d = {}
- items = self.stack[k+1:]
- for i in range(0, len(items), 2):
- key = items[i]
- value = items[i+1]
- d[key] = value
- self.stack[k:] = [d]
- dispatch[DICT] = load_dict
-
- # INST and OBJ differ only in how they get a class object. It's not
- # only sensible to do the rest in a common routine, the two routines
- # previously diverged and grew different bugs.
- # klass is the class to instantiate, and k points to the topmost mark
- # object, following which are the arguments for klass.__init__.
- def _instantiate(self, klass, k):
- args = tuple(self.stack[k+1:])
- del self.stack[k:]
- instantiated = 0
- if (not args and
- type(klass) is ClassType and
- not hasattr(klass, "__getinitargs__")):
- try:
- value = _EmptyClass()
- value.__class__ = klass
- instantiated = 1
- except RuntimeError:
- # In restricted execution, assignment to inst.__class__ is
- # prohibited
- pass
- if not instantiated:
- try:
- value = klass(*args)
- except TypeError, err:
- raise TypeError, "in constructor for %s: %s" % (
- klass.__name__, str(err)), sys.exc_info()[2]
- self.append(value)
-
- def load_inst(self):
- module = self.readline()[:-1]
- name = self.readline()[:-1]
- klass = self.find_class(module, name)
- self._instantiate(klass, self.marker())
- dispatch[INST] = load_inst
-
- def load_obj(self):
- # Stack is ... markobject classobject arg1 arg2 ...
- k = self.marker()
- klass = self.stack.pop(k+1)
- self._instantiate(klass, k)
- dispatch[OBJ] = load_obj
-
- def load_newobj(self):
- args = self.stack.pop()
- cls = self.stack[-1]
- obj = cls.__new__(cls, *args)
- self.stack[-1] = obj
- dispatch[NEWOBJ] = load_newobj
-
- def load_global(self):
- module = self.readline()[:-1]
- name = self.readline()[:-1]
- klass = self.find_class(module, name)
- self.append(klass)
- dispatch[GLOBAL] = load_global
-
- def load_ext1(self):
- code = ord(self.read(1))
- self.get_extension(code)
- dispatch[EXT1] = load_ext1
-
- def load_ext2(self):
- code = mloads('i' + self.read(2) + '\000\000')
- self.get_extension(code)
- dispatch[EXT2] = load_ext2
-
- def load_ext4(self):
- code = mloads('i' + self.read(4))
- self.get_extension(code)
- dispatch[EXT4] = load_ext4
-
- def get_extension(self, code):
- nil = []
- obj = _extension_cache.get(code, nil)
- if obj is not nil:
- self.append(obj)
- return
- key = _inverted_registry.get(code)
- if not key:
- raise ValueError("unregistered extension code %d" % code)
- obj = self.find_class(*key)
- _extension_cache[code] = obj
- self.append(obj)
-
- def find_class(self, module, name):
- # Subclasses may override this
- __import__(module)
- mod = sys.modules[module]
- klass = getattr(mod, name)
- return klass
-
- def load_reduce(self):
- stack = self.stack
- args = stack.pop()
- func = stack[-1]
- value = func(*args)
- stack[-1] = value
- dispatch[REDUCE] = load_reduce
-
- def load_pop(self):
- del self.stack[-1]
- dispatch[POP] = load_pop
-
- def load_pop_mark(self):
- k = self.marker()
- del self.stack[k:]
- dispatch[POP_MARK] = load_pop_mark
-
- def load_dup(self):
- self.append(self.stack[-1])
- dispatch[DUP] = load_dup
-
- def load_get(self):
- self.append(self.memo[self.readline()[:-1]])
- dispatch[GET] = load_get
-
- def load_binget(self):
- i = ord(self.read(1))
- self.append(self.memo[repr(i)])
- dispatch[BINGET] = load_binget
-
- def load_long_binget(self):
- i = mloads('i' + self.read(4))
- self.append(self.memo[repr(i)])
- dispatch[LONG_BINGET] = load_long_binget
-
- def load_put(self):
- self.memo[self.readline()[:-1]] = self.stack[-1]
- dispatch[PUT] = load_put
-
- def load_binput(self):
- i = ord(self.read(1))
- self.memo[repr(i)] = self.stack[-1]
- dispatch[BINPUT] = load_binput
-
- def load_long_binput(self):
- i = mloads('i' + self.read(4))
- self.memo[repr(i)] = self.stack[-1]
- dispatch[LONG_BINPUT] = load_long_binput
-
- def load_append(self):
- stack = self.stack
- value = stack.pop()
- list = stack[-1]
- list.append(value)
- dispatch[APPEND] = load_append
-
- def load_appends(self):
- stack = self.stack
- mark = self.marker()
- list = stack[mark - 1]
- list.extend(stack[mark + 1:])
- del stack[mark:]
- dispatch[APPENDS] = load_appends
-
- def load_setitem(self):
- stack = self.stack
- value = stack.pop()
- key = stack.pop()
- dict = stack[-1]
- dict[key] = value
- dispatch[SETITEM] = load_setitem
-
- def load_setitems(self):
- stack = self.stack
- mark = self.marker()
- dict = stack[mark - 1]
- for i in range(mark + 1, len(stack), 2):
- dict[stack[i]] = stack[i + 1]
-
- del stack[mark:]
- dispatch[SETITEMS] = load_setitems
-
- def load_build(self):
- stack = self.stack
- state = stack.pop()
- inst = stack[-1]
- setstate = getattr(inst, "__setstate__", None)
- if setstate:
- setstate(state)
- return
- slotstate = None
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if state:
- try:
- inst.__dict__.update(state)
- except RuntimeError:
- # XXX In restricted execution, the instance's __dict__
- # is not accessible. Use the old way of unpickling
- # the instance variables. This is a semantic
- # difference when unpickling in restricted
- # vs. unrestricted modes.
- # Note, however, that cPickle has never tried to do the
- # .update() business, and always uses
- # PyObject_SetItem(inst.__dict__, key, value) in a
- # loop over state.items().
- for k, v in state.items():
- setattr(inst, k, v)
- if slotstate:
- for k, v in slotstate.items():
- setattr(inst, k, v)
- dispatch[BUILD] = load_build
-
- def load_mark(self):
- self.append(self.mark)
- dispatch[MARK] = load_mark
-
- def load_stop(self):
- value = self.stack.pop()
- raise _Stop(value)
- dispatch[STOP] = load_stop
-
-# Helper class for load_inst/load_obj
-
-class _EmptyClass:
- pass
-
-# Encode/decode longs in linear time.
-
-import binascii as _binascii
-
-def encode_long(x):
- r"""Encode a long to a two's complement little-endian binary string.
- Note that 0L is a special case, returning an empty string, to save a
- byte in the LONG1 pickling context.
-
- >>> encode_long(0L)
- ''
- >>> encode_long(255L)
- '\xff\x00'
- >>> encode_long(32767L)
- '\xff\x7f'
- >>> encode_long(-256L)
- '\x00\xff'
- >>> encode_long(-32768L)
- '\x00\x80'
- >>> encode_long(-128L)
- '\x80'
- >>> encode_long(127L)
- '\x7f'
- >>>
- """
-
- if x == 0:
- return ''
- if x > 0:
- ashex = hex(x)
- assert ashex.startswith("0x")
- njunkchars = 2 + ashex.endswith('L')
- nibbles = len(ashex) - njunkchars
- if nibbles & 1:
- # need an even # of nibbles for unhexlify
- ashex = "0x0" + ashex[2:]
- elif int(ashex[2], 16) >= 8:
- # "looks negative", so need a byte of sign bits
- ashex = "0x00" + ashex[2:]
- else:
- # Build the 256's-complement: (1L << nbytes) + x. The trick is
- # to find the number of bytes in linear time (although that should
- # really be a constant-time task).
- ashex = hex(-x)
- assert ashex.startswith("0x")
- njunkchars = 2 + ashex.endswith('L')
- nibbles = len(ashex) - njunkchars
- if nibbles & 1:
- # Extend to a full byte.
- nibbles += 1
- nbits = nibbles * 4
- x += 1L << nbits
- assert x > 0
- ashex = hex(x)
- njunkchars = 2 + ashex.endswith('L')
- newnibbles = len(ashex) - njunkchars
- if newnibbles < nibbles:
- ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
- if int(ashex[2], 16) < 8:
- # "looks positive", so need a byte of sign bits
- ashex = "0xff" + ashex[2:]
-
- if ashex.endswith('L'):
- ashex = ashex[2:-1]
- else:
- ashex = ashex[2:]
- assert len(ashex) & 1 == 0, (x, ashex)
- binary = _binascii.unhexlify(ashex)
- return binary[::-1]
-
-def decode_long(data):
- r"""Decode a long from a two's complement little-endian binary string.
-
- >>> decode_long('')
- 0L
- >>> decode_long("\xff\x00")
- 255L
- >>> decode_long("\xff\x7f")
- 32767L
- >>> decode_long("\x00\xff")
- -256L
- >>> decode_long("\x00\x80")
- -32768L
- >>> decode_long("\x80")
- -128L
- >>> decode_long("\x7f")
- 127L
- """
-
- nbytes = len(data)
- if nbytes == 0:
- return 0L
- ashex = _binascii.hexlify(data[::-1])
- n = long(ashex, 16) # quadratic time before Python 2.3; linear now
- if data[-1] >= '\x80':
- n -= 1L << (nbytes * 8)
- return n
-
-# Shorthands
-
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-def dump(obj, file, protocol=None):
- Pickler(file, protocol).dump(obj)
-
-def dumps(obj, protocol=None):
- file = StringIO()
- Pickler(file, protocol).dump(obj)
- return file.getvalue()
-
-def load(file):
- return Unpickler(file).load()
-
-def loads(str):
- file = StringIO(str)
- return Unpickler(file).load()
-
-# Doctest
-
-def _test():
- import doctest
- return doctest.testmod()
-
-if __name__ == "__main__":
- _test()
diff --git a/sys/lib/python/pickletools.py b/sys/lib/python/pickletools.py
deleted file mode 100644
index 98f80f1ce..000000000
--- a/sys/lib/python/pickletools.py
+++ /dev/null
@@ -1,2246 +0,0 @@
-'''"Executable documentation" for the pickle module.
-
-Extensive comments about the pickle protocols and pickle-machine opcodes
-can be found here. Some functions meant for external use:
-
-genops(pickle)
- Generate all the opcodes in a pickle, as (opcode, arg, position) triples.
-
-dis(pickle, out=None, memo=None, indentlevel=4)
- Print a symbolic disassembly of a pickle.
-'''
-
-__all__ = ['dis',
- 'genops',
- ]
-
-# Other ideas:
-#
-# - A pickle verifier: read a pickle and check it exhaustively for
-# well-formedness. dis() does a lot of this already.
-#
-# - A protocol identifier: examine a pickle and return its protocol number
-# (== the highest .proto attr value among all the opcodes in the pickle).
-# dis() already prints this info at the end.
-#
-# - A pickle optimizer: for example, tuple-building code is sometimes more
-# elaborate than necessary, catering for the possibility that the tuple
-# is recursive. Or lots of times a PUT is generated that's never accessed
-# by a later GET.
-
-
-"""
-"A pickle" is a program for a virtual pickle machine (PM, but more accurately
-called an unpickling machine). It's a sequence of opcodes, interpreted by the
-PM, building an arbitrarily complex Python object.
-
-For the most part, the PM is very simple: there are no looping, testing, or
-conditional instructions, no arithmetic and no function calls. Opcodes are
-executed once each, from first to last, until a STOP opcode is reached.
-
-The PM has two data areas, "the stack" and "the memo".
-
-Many opcodes push Python objects onto the stack; e.g., INT pushes a Python
-integer object on the stack, whose value is gotten from a decimal string
-literal immediately following the INT opcode in the pickle bytestream. Other
-opcodes take Python objects off the stack. The result of unpickling is
-whatever object is left on the stack when the final STOP opcode is executed.
-
-The memo is simply an array of objects, or it can be implemented as a dict
-mapping little integers to objects. The memo serves as the PM's "long term
-memory", and the little integers indexing the memo are akin to variable
-names. Some opcodes pop a stack object into the memo at a given index,
-and others push a memo object at a given index onto the stack again.
-
-At heart, that's all the PM has. Subtleties arise for these reasons:
-
-+ Object identity. Objects can be arbitrarily complex, and subobjects
- may be shared (for example, the list [a, a] refers to the same object a
- twice). It can be vital that unpickling recreate an isomorphic object
- graph, faithfully reproducing sharing.
-
-+ Recursive objects. For example, after "L = []; L.append(L)", L is a
- list, and L[0] is the same list. This is related to the object identity
- point, and some sequences of pickle opcodes are subtle in order to
- get the right result in all cases.
-
-+ Things pickle doesn't know everything about. Examples of things pickle
- does know everything about are Python's builtin scalar and container
- types, like ints and tuples. They generally have opcodes dedicated to
- them. For things like module references and instances of user-defined
- classes, pickle's knowledge is limited. Historically, many enhancements
- have been made to the pickle protocol in order to do a better (faster,
- and/or more compact) job on those.
-
-+ Backward compatibility and micro-optimization. As explained below,
- pickle opcodes never go away, not even when better ways to do a thing
- get invented. The repertoire of the PM just keeps growing over time.
- For example, protocol 0 had two opcodes for building Python integers (INT
- and LONG), protocol 1 added three more for more-efficient pickling of short
- integers, and protocol 2 added two more for more-efficient pickling of
- long integers (before protocol 2, the only ways to pickle a Python long
- took time quadratic in the number of digits, for both pickling and
- unpickling). "Opcode bloat" isn't so much a subtlety as a source of
- wearying complication.
-
-
-Pickle protocols:
-
-For compatibility, the meaning of a pickle opcode never changes. Instead new
-pickle opcodes get added, and each version's unpickler can handle all the
-pickle opcodes in all protocol versions to date. So old pickles continue to
-be readable forever. The pickler can generally be told to restrict itself to
-the subset of opcodes available under previous protocol versions too, so that
-users can create pickles under the current version readable by older
-versions. However, a pickle does not contain its version number embedded
-within it. If an older unpickler tries to read a pickle using a later
-protocol, the result is most likely an exception due to seeing an unknown (in
-the older unpickler) opcode.
-
-The original pickle used what's now called "protocol 0", and what was called
-"text mode" before Python 2.3. The entire pickle bytestream is made up of
-printable 7-bit ASCII characters, plus the newline character, in protocol 0.
-That's why it was called text mode. Protocol 0 is small and elegant, but
-sometimes painfully inefficient.
-
-The second major set of additions is now called "protocol 1", and was called
-"binary mode" before Python 2.3. This added many opcodes with arguments
-consisting of arbitrary bytes, including NUL bytes and unprintable "high bit"
-bytes. Binary mode pickles can be substantially smaller than equivalent
-text mode pickles, and sometimes faster too; e.g., BININT represents a 4-byte
-int as 4 bytes following the opcode, which is cheaper to unpickle than the
-(perhaps) 11-character decimal string attached to INT. Protocol 1 also added
-a number of opcodes that operate on many stack elements at once (like APPENDS
-and SETITEMS), and "shortcut" opcodes (like EMPTY_DICT and EMPTY_TUPLE).
-
-The third major set of additions came in Python 2.3, and is called "protocol
-2". This added:
-
-- A better way to pickle instances of new-style classes (NEWOBJ).
-
-- A way for a pickle to identify its protocol (PROTO).
-
-- Time- and space- efficient pickling of long ints (LONG{1,4}).
-
-- Shortcuts for small tuples (TUPLE{1,2,3}}.
-
-- Dedicated opcodes for bools (NEWTRUE, NEWFALSE).
-
-- The "extension registry", a vector of popular objects that can be pushed
- efficiently by index (EXT{1,2,4}). This is akin to the memo and GET, but
- the registry contents are predefined (there's nothing akin to the memo's
- PUT).
-
-Another independent change with Python 2.3 is the abandonment of any
-pretense that it might be safe to load pickles received from untrusted
-parties -- no sufficient security analysis has been done to guarantee
-this and there isn't a use case that warrants the expense of such an
-analysis.
-
-To this end, all tests for __safe_for_unpickling__ or for
-copy_reg.safe_constructors are removed from the unpickling code.
-References to these variables in the descriptions below are to be seen
-as describing unpickling in Python 2.2 and before.
-"""
-
-# Meta-rule: Descriptions are stored in instances of descriptor objects,
-# with plain constructors. No meta-language is defined from which
-# descriptors could be constructed. If you want, e.g., XML, write a little
-# program to generate XML from the objects.
-
-##############################################################################
-# Some pickle opcodes have an argument, following the opcode in the
-# bytestream. An argument is of a specific type, described by an instance
-# of ArgumentDescriptor. These are not to be confused with arguments taken
-# off the stack -- ArgumentDescriptor applies only to arguments embedded in
-# the opcode stream, immediately following an opcode.
-
-# Represents the number of bytes consumed by an argument delimited by the
-# next newline character.
-UP_TO_NEWLINE = -1
-
-# Represents the number of bytes consumed by a two-argument opcode where
-# the first argument gives the number of bytes in the second argument.
-TAKEN_FROM_ARGUMENT1 = -2 # num bytes is 1-byte unsigned int
-TAKEN_FROM_ARGUMENT4 = -3 # num bytes is 4-byte signed little-endian int
-
-class ArgumentDescriptor(object):
- __slots__ = (
- # name of descriptor record, also a module global name; a string
- 'name',
-
- # length of argument, in bytes; an int; UP_TO_NEWLINE and
- # TAKEN_FROM_ARGUMENT{1,4} are negative values for variable-length
- # cases
- 'n',
-
- # a function taking a file-like object, reading this kind of argument
- # from the object at the current position, advancing the current
- # position by n bytes, and returning the value of the argument
- 'reader',
-
- # human-readable docs for this arg descriptor; a string
- 'doc',
- )
-
- def __init__(self, name, n, reader, doc):
- assert isinstance(name, str)
- self.name = name
-
- assert isinstance(n, int) and (n >= 0 or
- n in (UP_TO_NEWLINE,
- TAKEN_FROM_ARGUMENT1,
- TAKEN_FROM_ARGUMENT4))
- self.n = n
-
- self.reader = reader
-
- assert isinstance(doc, str)
- self.doc = doc
-
-from struct import unpack as _unpack
-
-def read_uint1(f):
- r"""
- >>> import StringIO
- >>> read_uint1(StringIO.StringIO('\xff'))
- 255
- """
-
- data = f.read(1)
- if data:
- return ord(data)
- raise ValueError("not enough data in stream to read uint1")
-
-uint1 = ArgumentDescriptor(
- name='uint1',
- n=1,
- reader=read_uint1,
- doc="One-byte unsigned integer.")
-
-
-def read_uint2(f):
- r"""
- >>> import StringIO
- >>> read_uint2(StringIO.StringIO('\xff\x00'))
- 255
- >>> read_uint2(StringIO.StringIO('\xff\xff'))
- 65535
- """
-
- data = f.read(2)
- if len(data) == 2:
- return _unpack("<H", data)[0]
- raise ValueError("not enough data in stream to read uint2")
-
-uint2 = ArgumentDescriptor(
- name='uint2',
- n=2,
- reader=read_uint2,
- doc="Two-byte unsigned integer, little-endian.")
-
-
-def read_int4(f):
- r"""
- >>> import StringIO
- >>> read_int4(StringIO.StringIO('\xff\x00\x00\x00'))
- 255
- >>> read_int4(StringIO.StringIO('\x00\x00\x00\x80')) == -(2**31)
- True
- """
-
- data = f.read(4)
- if len(data) == 4:
- return _unpack("<i", data)[0]
- raise ValueError("not enough data in stream to read int4")
-
-int4 = ArgumentDescriptor(
- name='int4',
- n=4,
- reader=read_int4,
- doc="Four-byte signed integer, little-endian, 2's complement.")
-
-
-def read_stringnl(f, decode=True, stripquotes=True):
- r"""
- >>> import StringIO
- >>> read_stringnl(StringIO.StringIO("'abcd'\nefg\n"))
- 'abcd'
-
- >>> read_stringnl(StringIO.StringIO("\n"))
- Traceback (most recent call last):
- ...
- ValueError: no string quotes around ''
-
- >>> read_stringnl(StringIO.StringIO("\n"), stripquotes=False)
- ''
-
- >>> read_stringnl(StringIO.StringIO("''\n"))
- ''
-
- >>> read_stringnl(StringIO.StringIO('"abcd"'))
- Traceback (most recent call last):
- ...
- ValueError: no newline found when trying to read stringnl
-
- Embedded escapes are undone in the result.
- >>> read_stringnl(StringIO.StringIO(r"'a\n\\b\x00c\td'" + "\n'e'"))
- 'a\n\\b\x00c\td'
- """
-
- data = f.readline()
- if not data.endswith('\n'):
- raise ValueError("no newline found when trying to read stringnl")
- data = data[:-1] # lose the newline
-
- if stripquotes:
- for q in "'\"":
- if data.startswith(q):
- if not data.endswith(q):
- raise ValueError("strinq quote %r not found at both "
- "ends of %r" % (q, data))
- data = data[1:-1]
- break
- else:
- raise ValueError("no string quotes around %r" % data)
-
- # I'm not sure when 'string_escape' was added to the std codecs; it's
- # crazy not to use it if it's there.
- if decode:
- data = data.decode('string_escape')
- return data
-
-stringnl = ArgumentDescriptor(
- name='stringnl',
- n=UP_TO_NEWLINE,
- reader=read_stringnl,
- doc="""A newline-terminated string.
-
- This is a repr-style string, with embedded escapes, and
- bracketing quotes.
- """)
-
-def read_stringnl_noescape(f):
- return read_stringnl(f, decode=False, stripquotes=False)
-
-stringnl_noescape = ArgumentDescriptor(
- name='stringnl_noescape',
- n=UP_TO_NEWLINE,
- reader=read_stringnl_noescape,
- doc="""A newline-terminated string.
-
- This is a str-style string, without embedded escapes,
- or bracketing quotes. It should consist solely of
- printable ASCII characters.
- """)
-
-def read_stringnl_noescape_pair(f):
- r"""
- >>> import StringIO
- >>> read_stringnl_noescape_pair(StringIO.StringIO("Queue\nEmpty\njunk"))
- 'Queue Empty'
- """
-
- return "%s %s" % (read_stringnl_noescape(f), read_stringnl_noescape(f))
-
-stringnl_noescape_pair = ArgumentDescriptor(
- name='stringnl_noescape_pair',
- n=UP_TO_NEWLINE,
- reader=read_stringnl_noescape_pair,
- doc="""A pair of newline-terminated strings.
-
- These are str-style strings, without embedded
- escapes, or bracketing quotes. They should
- consist solely of printable ASCII characters.
- The pair is returned as a single string, with
- a single blank separating the two strings.
- """)
-
-def read_string4(f):
- r"""
- >>> import StringIO
- >>> read_string4(StringIO.StringIO("\x00\x00\x00\x00abc"))
- ''
- >>> read_string4(StringIO.StringIO("\x03\x00\x00\x00abcdef"))
- 'abc'
- >>> read_string4(StringIO.StringIO("\x00\x00\x00\x03abcdef"))
- Traceback (most recent call last):
- ...
- ValueError: expected 50331648 bytes in a string4, but only 6 remain
- """
-
- n = read_int4(f)
- if n < 0:
- raise ValueError("string4 byte count < 0: %d" % n)
- data = f.read(n)
- if len(data) == n:
- return data
- raise ValueError("expected %d bytes in a string4, but only %d remain" %
- (n, len(data)))
-
-string4 = ArgumentDescriptor(
- name="string4",
- n=TAKEN_FROM_ARGUMENT4,
- reader=read_string4,
- doc="""A counted string.
-
- The first argument is a 4-byte little-endian signed int giving
- the number of bytes in the string, and the second argument is
- that many bytes.
- """)
-
-
-def read_string1(f):
- r"""
- >>> import StringIO
- >>> read_string1(StringIO.StringIO("\x00"))
- ''
- >>> read_string1(StringIO.StringIO("\x03abcdef"))
- 'abc'
- """
-
- n = read_uint1(f)
- assert n >= 0
- data = f.read(n)
- if len(data) == n:
- return data
- raise ValueError("expected %d bytes in a string1, but only %d remain" %
- (n, len(data)))
-
-string1 = ArgumentDescriptor(
- name="string1",
- n=TAKEN_FROM_ARGUMENT1,
- reader=read_string1,
- doc="""A counted string.
-
- The first argument is a 1-byte unsigned int giving the number
- of bytes in the string, and the second argument is that many
- bytes.
- """)
-
-
-def read_unicodestringnl(f):
- r"""
- >>> import StringIO
- >>> read_unicodestringnl(StringIO.StringIO("abc\uabcd\njunk"))
- u'abc\uabcd'
- """
-
- data = f.readline()
- if not data.endswith('\n'):
- raise ValueError("no newline found when trying to read "
- "unicodestringnl")
- data = data[:-1] # lose the newline
- return unicode(data, 'raw-unicode-escape')
-
-unicodestringnl = ArgumentDescriptor(
- name='unicodestringnl',
- n=UP_TO_NEWLINE,
- reader=read_unicodestringnl,
- doc="""A newline-terminated Unicode string.
-
- This is raw-unicode-escape encoded, so consists of
- printable ASCII characters, and may contain embedded
- escape sequences.
- """)
-
-def read_unicodestring4(f):
- r"""
- >>> import StringIO
- >>> s = u'abcd\uabcd'
- >>> enc = s.encode('utf-8')
- >>> enc
- 'abcd\xea\xaf\x8d'
- >>> n = chr(len(enc)) + chr(0) * 3 # little-endian 4-byte length
- >>> t = read_unicodestring4(StringIO.StringIO(n + enc + 'junk'))
- >>> s == t
- True
-
- >>> read_unicodestring4(StringIO.StringIO(n + enc[:-1]))
- Traceback (most recent call last):
- ...
- ValueError: expected 7 bytes in a unicodestring4, but only 6 remain
- """
-
- n = read_int4(f)
- if n < 0:
- raise ValueError("unicodestring4 byte count < 0: %d" % n)
- data = f.read(n)
- if len(data) == n:
- return unicode(data, 'utf-8')
- raise ValueError("expected %d bytes in a unicodestring4, but only %d "
- "remain" % (n, len(data)))
-
-unicodestring4 = ArgumentDescriptor(
- name="unicodestring4",
- n=TAKEN_FROM_ARGUMENT4,
- reader=read_unicodestring4,
- doc="""A counted Unicode string.
-
- The first argument is a 4-byte little-endian signed int
- giving the number of bytes in the string, and the second
- argument-- the UTF-8 encoding of the Unicode string --
- contains that many bytes.
- """)
-
-
-def read_decimalnl_short(f):
- r"""
- >>> import StringIO
- >>> read_decimalnl_short(StringIO.StringIO("1234\n56"))
- 1234
-
- >>> read_decimalnl_short(StringIO.StringIO("1234L\n56"))
- Traceback (most recent call last):
- ...
- ValueError: trailing 'L' not allowed in '1234L'
- """
-
- s = read_stringnl(f, decode=False, stripquotes=False)
- if s.endswith("L"):
- raise ValueError("trailing 'L' not allowed in %r" % s)
-
- # It's not necessarily true that the result fits in a Python short int:
- # the pickle may have been written on a 64-bit box. There's also a hack
- # for True and False here.
- if s == "00":
- return False
- elif s == "01":
- return True
-
- try:
- return int(s)
- except OverflowError:
- return long(s)
-
-def read_decimalnl_long(f):
- r"""
- >>> import StringIO
-
- >>> read_decimalnl_long(StringIO.StringIO("1234\n56"))
- Traceback (most recent call last):
- ...
- ValueError: trailing 'L' required in '1234'
-
- Someday the trailing 'L' will probably go away from this output.
-
- >>> read_decimalnl_long(StringIO.StringIO("1234L\n56"))
- 1234L
-
- >>> read_decimalnl_long(StringIO.StringIO("123456789012345678901234L\n6"))
- 123456789012345678901234L
- """
-
- s = read_stringnl(f, decode=False, stripquotes=False)
- if not s.endswith("L"):
- raise ValueError("trailing 'L' required in %r" % s)
- return long(s)
-
-
-decimalnl_short = ArgumentDescriptor(
- name='decimalnl_short',
- n=UP_TO_NEWLINE,
- reader=read_decimalnl_short,
- doc="""A newline-terminated decimal integer literal.
-
- This never has a trailing 'L', and the integer fit
- in a short Python int on the box where the pickle
- was written -- but there's no guarantee it will fit
- in a short Python int on the box where the pickle
- is read.
- """)
-
-decimalnl_long = ArgumentDescriptor(
- name='decimalnl_long',
- n=UP_TO_NEWLINE,
- reader=read_decimalnl_long,
- doc="""A newline-terminated decimal integer literal.
-
- This has a trailing 'L', and can represent integers
- of any size.
- """)
-
-
-def read_floatnl(f):
- r"""
- >>> import StringIO
- >>> read_floatnl(StringIO.StringIO("-1.25\n6"))
- -1.25
- """
- s = read_stringnl(f, decode=False, stripquotes=False)
- return float(s)
-
-floatnl = ArgumentDescriptor(
- name='floatnl',
- n=UP_TO_NEWLINE,
- reader=read_floatnl,
- doc="""A newline-terminated decimal floating literal.
-
- In general this requires 17 significant digits for roundtrip
- identity, and pickling then unpickling infinities, NaNs, and
- minus zero doesn't work across boxes, or on some boxes even
- on itself (e.g., Windows can't read the strings it produces
- for infinities or NaNs).
- """)
-
-def read_float8(f):
- r"""
- >>> import StringIO, struct
- >>> raw = struct.pack(">d", -1.25)
- >>> raw
- '\xbf\xf4\x00\x00\x00\x00\x00\x00'
- >>> read_float8(StringIO.StringIO(raw + "\n"))
- -1.25
- """
-
- data = f.read(8)
- if len(data) == 8:
- return _unpack(">d", data)[0]
- raise ValueError("not enough data in stream to read float8")
-
-
-float8 = ArgumentDescriptor(
- name='float8',
- n=8,
- reader=read_float8,
- doc="""An 8-byte binary representation of a float, big-endian.
-
- The format is unique to Python, and shared with the struct
- module (format string '>d') "in theory" (the struct and cPickle
- implementations don't share the code -- they should). It's
- strongly related to the IEEE-754 double format, and, in normal
- cases, is in fact identical to the big-endian 754 double format.
- On other boxes the dynamic range is limited to that of a 754
- double, and "add a half and chop" rounding is used to reduce
- the precision to 53 bits. However, even on a 754 box,
- infinities, NaNs, and minus zero may not be handled correctly
- (may not survive roundtrip pickling intact).
- """)
-
-# Protocol 2 formats
-
-from pickle import decode_long
-
-def read_long1(f):
- r"""
- >>> import StringIO
- >>> read_long1(StringIO.StringIO("\x00"))
- 0L
- >>> read_long1(StringIO.StringIO("\x02\xff\x00"))
- 255L
- >>> read_long1(StringIO.StringIO("\x02\xff\x7f"))
- 32767L
- >>> read_long1(StringIO.StringIO("\x02\x00\xff"))
- -256L
- >>> read_long1(StringIO.StringIO("\x02\x00\x80"))
- -32768L
- """
-
- n = read_uint1(f)
- data = f.read(n)
- if len(data) != n:
- raise ValueError("not enough data in stream to read long1")
- return decode_long(data)
-
-long1 = ArgumentDescriptor(
- name="long1",
- n=TAKEN_FROM_ARGUMENT1,
- reader=read_long1,
- doc="""A binary long, little-endian, using 1-byte size.
-
- This first reads one byte as an unsigned size, then reads that
- many bytes and interprets them as a little-endian 2's-complement long.
- If the size is 0, that's taken as a shortcut for the long 0L.
- """)
-
-def read_long4(f):
- r"""
- >>> import StringIO
- >>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x00"))
- 255L
- >>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\xff\x7f"))
- 32767L
- >>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\xff"))
- -256L
- >>> read_long4(StringIO.StringIO("\x02\x00\x00\x00\x00\x80"))
- -32768L
- >>> read_long1(StringIO.StringIO("\x00\x00\x00\x00"))
- 0L
- """
-
- n = read_int4(f)
- if n < 0:
- raise ValueError("long4 byte count < 0: %d" % n)
- data = f.read(n)
- if len(data) != n:
- raise ValueError("not enough data in stream to read long4")
- return decode_long(data)
-
-long4 = ArgumentDescriptor(
- name="long4",
- n=TAKEN_FROM_ARGUMENT4,
- reader=read_long4,
- doc="""A binary representation of a long, little-endian.
-
- This first reads four bytes as a signed size (but requires the
- size to be >= 0), then reads that many bytes and interprets them
- as a little-endian 2's-complement long. If the size is 0, that's taken
- as a shortcut for the long 0L, although LONG1 should really be used
- then instead (and in any case where # of bytes < 256).
- """)
-
-
-##############################################################################
-# Object descriptors. The stack used by the pickle machine holds objects,
-# and in the stack_before and stack_after attributes of OpcodeInfo
-# descriptors we need names to describe the various types of objects that can
-# appear on the stack.
-
-class StackObject(object):
- __slots__ = (
- # name of descriptor record, for info only
- 'name',
-
- # type of object, or tuple of type objects (meaning the object can
- # be of any type in the tuple)
- 'obtype',
-
- # human-readable docs for this kind of stack object; a string
- 'doc',
- )
-
- def __init__(self, name, obtype, doc):
- assert isinstance(name, str)
- self.name = name
-
- assert isinstance(obtype, type) or isinstance(obtype, tuple)
- if isinstance(obtype, tuple):
- for contained in obtype:
- assert isinstance(contained, type)
- self.obtype = obtype
-
- assert isinstance(doc, str)
- self.doc = doc
-
- def __repr__(self):
- return self.name
-
-
-pyint = StackObject(
- name='int',
- obtype=int,
- doc="A short (as opposed to long) Python integer object.")
-
-pylong = StackObject(
- name='long',
- obtype=long,
- doc="A long (as opposed to short) Python integer object.")
-
-pyinteger_or_bool = StackObject(
- name='int_or_bool',
- obtype=(int, long, bool),
- doc="A Python integer object (short or long), or "
- "a Python bool.")
-
-pybool = StackObject(
- name='bool',
- obtype=(bool,),
- doc="A Python bool object.")
-
-pyfloat = StackObject(
- name='float',
- obtype=float,
- doc="A Python float object.")
-
-pystring = StackObject(
- name='str',
- obtype=str,
- doc="A Python string object.")
-
-pyunicode = StackObject(
- name='unicode',
- obtype=unicode,
- doc="A Python Unicode string object.")
-
-pynone = StackObject(
- name="None",
- obtype=type(None),
- doc="The Python None object.")
-
-pytuple = StackObject(
- name="tuple",
- obtype=tuple,
- doc="A Python tuple object.")
-
-pylist = StackObject(
- name="list",
- obtype=list,
- doc="A Python list object.")
-
-pydict = StackObject(
- name="dict",
- obtype=dict,
- doc="A Python dict object.")
-
-anyobject = StackObject(
- name='any',
- obtype=object,
- doc="Any kind of object whatsoever.")
-
-markobject = StackObject(
- name="mark",
- obtype=StackObject,
- doc="""'The mark' is a unique object.
-
- Opcodes that operate on a variable number of objects
- generally don't embed the count of objects in the opcode,
- or pull it off the stack. Instead the MARK opcode is used
- to push a special marker object on the stack, and then
- some other opcodes grab all the objects from the top of
- the stack down to (but not including) the topmost marker
- object.
- """)
-
-stackslice = StackObject(
- name="stackslice",
- obtype=StackObject,
- doc="""An object representing a contiguous slice of the stack.
-
- This is used in conjuction with markobject, to represent all
- of the stack following the topmost markobject. For example,
- the POP_MARK opcode changes the stack from
-
- [..., markobject, stackslice]
- to
- [...]
-
- No matter how many object are on the stack after the topmost
- markobject, POP_MARK gets rid of all of them (including the
- topmost markobject too).
- """)
-
-##############################################################################
-# Descriptors for pickle opcodes.
-
-class OpcodeInfo(object):
-
- __slots__ = (
- # symbolic name of opcode; a string
- 'name',
-
- # the code used in a bytestream to represent the opcode; a
- # one-character string
- 'code',
-
- # If the opcode has an argument embedded in the byte string, an
- # instance of ArgumentDescriptor specifying its type. Note that
- # arg.reader(s) can be used to read and decode the argument from
- # the bytestream s, and arg.doc documents the format of the raw
- # argument bytes. If the opcode doesn't have an argument embedded
- # in the bytestream, arg should be None.
- 'arg',
-
- # what the stack looks like before this opcode runs; a list
- 'stack_before',
-
- # what the stack looks like after this opcode runs; a list
- 'stack_after',
-
- # the protocol number in which this opcode was introduced; an int
- 'proto',
-
- # human-readable docs for this opcode; a string
- 'doc',
- )
-
- def __init__(self, name, code, arg,
- stack_before, stack_after, proto, doc):
- assert isinstance(name, str)
- self.name = name
-
- assert isinstance(code, str)
- assert len(code) == 1
- self.code = code
-
- assert arg is None or isinstance(arg, ArgumentDescriptor)
- self.arg = arg
-
- assert isinstance(stack_before, list)
- for x in stack_before:
- assert isinstance(x, StackObject)
- self.stack_before = stack_before
-
- assert isinstance(stack_after, list)
- for x in stack_after:
- assert isinstance(x, StackObject)
- self.stack_after = stack_after
-
- assert isinstance(proto, int) and 0 <= proto <= 2
- self.proto = proto
-
- assert isinstance(doc, str)
- self.doc = doc
-
-I = OpcodeInfo
-opcodes = [
-
- # Ways to spell integers.
-
- I(name='INT',
- code='I',
- arg=decimalnl_short,
- stack_before=[],
- stack_after=[pyinteger_or_bool],
- proto=0,
- doc="""Push an integer or bool.
-
- The argument is a newline-terminated decimal literal string.
-
- The intent may have been that this always fit in a short Python int,
- but INT can be generated in pickles written on a 64-bit box that
- require a Python long on a 32-bit box. The difference between this
- and LONG then is that INT skips a trailing 'L', and produces a short
- int whenever possible.
-
- Another difference is due to that, when bool was introduced as a
- distinct type in 2.3, builtin names True and False were also added to
- 2.2.2, mapping to ints 1 and 0. For compatibility in both directions,
- True gets pickled as INT + "I01\\n", and False as INT + "I00\\n".
- Leading zeroes are never produced for a genuine integer. The 2.3
- (and later) unpicklers special-case these and return bool instead;
- earlier unpicklers ignore the leading "0" and return the int.
- """),
-
- I(name='BININT',
- code='J',
- arg=int4,
- stack_before=[],
- stack_after=[pyint],
- proto=1,
- doc="""Push a four-byte signed integer.
-
- This handles the full range of Python (short) integers on a 32-bit
- box, directly as binary bytes (1 for the opcode and 4 for the integer).
- If the integer is non-negative and fits in 1 or 2 bytes, pickling via
- BININT1 or BININT2 saves space.
- """),
-
- I(name='BININT1',
- code='K',
- arg=uint1,
- stack_before=[],
- stack_after=[pyint],
- proto=1,
- doc="""Push a one-byte unsigned integer.
-
- This is a space optimization for pickling very small non-negative ints,
- in range(256).
- """),
-
- I(name='BININT2',
- code='M',
- arg=uint2,
- stack_before=[],
- stack_after=[pyint],
- proto=1,
- doc="""Push a two-byte unsigned integer.
-
- This is a space optimization for pickling small positive ints, in
- range(256, 2**16). Integers in range(256) can also be pickled via
- BININT2, but BININT1 instead saves a byte.
- """),
-
- I(name='LONG',
- code='L',
- arg=decimalnl_long,
- stack_before=[],
- stack_after=[pylong],
- proto=0,
- doc="""Push a long integer.
-
- The same as INT, except that the literal ends with 'L', and always
- unpickles to a Python long. There doesn't seem a real purpose to the
- trailing 'L'.
-
- Note that LONG takes time quadratic in the number of digits when
- unpickling (this is simply due to the nature of decimal->binary
- conversion). Proto 2 added linear-time (in C; still quadratic-time
- in Python) LONG1 and LONG4 opcodes.
- """),
-
- I(name="LONG1",
- code='\x8a',
- arg=long1,
- stack_before=[],
- stack_after=[pylong],
- proto=2,
- doc="""Long integer using one-byte length.
-
- A more efficient encoding of a Python long; the long1 encoding
- says it all."""),
-
- I(name="LONG4",
- code='\x8b',
- arg=long4,
- stack_before=[],
- stack_after=[pylong],
- proto=2,
- doc="""Long integer using found-byte length.
-
- A more efficient encoding of a Python long; the long4 encoding
- says it all."""),
-
- # Ways to spell strings (8-bit, not Unicode).
-
- I(name='STRING',
- code='S',
- arg=stringnl,
- stack_before=[],
- stack_after=[pystring],
- proto=0,
- doc="""Push a Python string object.
-
- The argument is a repr-style string, with bracketing quote characters,
- and perhaps embedded escapes. The argument extends until the next
- newline character.
- """),
-
- I(name='BINSTRING',
- code='T',
- arg=string4,
- stack_before=[],
- stack_after=[pystring],
- proto=1,
- doc="""Push a Python string object.
-
- There are two arguments: the first is a 4-byte little-endian signed int
- giving the number of bytes in the string, and the second is that many
- bytes, which are taken literally as the string content.
- """),
-
- I(name='SHORT_BINSTRING',
- code='U',
- arg=string1,
- stack_before=[],
- stack_after=[pystring],
- proto=1,
- doc="""Push a Python string object.
-
- There are two arguments: the first is a 1-byte unsigned int giving
- the number of bytes in the string, and the second is that many bytes,
- which are taken literally as the string content.
- """),
-
- # Ways to spell None.
-
- I(name='NONE',
- code='N',
- arg=None,
- stack_before=[],
- stack_after=[pynone],
- proto=0,
- doc="Push None on the stack."),
-
- # Ways to spell bools, starting with proto 2. See INT for how this was
- # done before proto 2.
-
- I(name='NEWTRUE',
- code='\x88',
- arg=None,
- stack_before=[],
- stack_after=[pybool],
- proto=2,
- doc="""True.
-
- Push True onto the stack."""),
-
- I(name='NEWFALSE',
- code='\x89',
- arg=None,
- stack_before=[],
- stack_after=[pybool],
- proto=2,
- doc="""True.
-
- Push False onto the stack."""),
-
- # Ways to spell Unicode strings.
-
- I(name='UNICODE',
- code='V',
- arg=unicodestringnl,
- stack_before=[],
- stack_after=[pyunicode],
- proto=0, # this may be pure-text, but it's a later addition
- doc="""Push a Python Unicode string object.
-
- The argument is a raw-unicode-escape encoding of a Unicode string,
- and so may contain embedded escape sequences. The argument extends
- until the next newline character.
- """),
-
- I(name='BINUNICODE',
- code='X',
- arg=unicodestring4,
- stack_before=[],
- stack_after=[pyunicode],
- proto=1,
- doc="""Push a Python Unicode string object.
-
- There are two arguments: the first is a 4-byte little-endian signed int
- giving the number of bytes in the string. The second is that many
- bytes, and is the UTF-8 encoding of the Unicode string.
- """),
-
- # Ways to spell floats.
-
- I(name='FLOAT',
- code='F',
- arg=floatnl,
- stack_before=[],
- stack_after=[pyfloat],
- proto=0,
- doc="""Newline-terminated decimal float literal.
-
- The argument is repr(a_float), and in general requires 17 significant
- digits for roundtrip conversion to be an identity (this is so for
- IEEE-754 double precision values, which is what Python float maps to
- on most boxes).
-
- In general, FLOAT cannot be used to transport infinities, NaNs, or
- minus zero across boxes (or even on a single box, if the platform C
- library can't read the strings it produces for such things -- Windows
- is like that), but may do less damage than BINFLOAT on boxes with
- greater precision or dynamic range than IEEE-754 double.
- """),
-
- I(name='BINFLOAT',
- code='G',
- arg=float8,
- stack_before=[],
- stack_after=[pyfloat],
- proto=1,
- doc="""Float stored in binary form, with 8 bytes of data.
-
- This generally requires less than half the space of FLOAT encoding.
- In general, BINFLOAT cannot be used to transport infinities, NaNs, or
- minus zero, raises an exception if the exponent exceeds the range of
- an IEEE-754 double, and retains no more than 53 bits of precision (if
- there are more than that, "add a half and chop" rounding is used to
- cut it back to 53 significant bits).
- """),
-
- # Ways to build lists.
-
- I(name='EMPTY_LIST',
- code=']',
- arg=None,
- stack_before=[],
- stack_after=[pylist],
- proto=1,
- doc="Push an empty list."),
-
- I(name='APPEND',
- code='a',
- arg=None,
- stack_before=[pylist, anyobject],
- stack_after=[pylist],
- proto=0,
- doc="""Append an object to a list.
-
- Stack before: ... pylist anyobject
- Stack after: ... pylist+[anyobject]
-
- although pylist is really extended in-place.
- """),
-
- I(name='APPENDS',
- code='e',
- arg=None,
- stack_before=[pylist, markobject, stackslice],
- stack_after=[pylist],
- proto=1,
- doc="""Extend a list by a slice of stack objects.
-
- Stack before: ... pylist markobject stackslice
- Stack after: ... pylist+stackslice
-
- although pylist is really extended in-place.
- """),
-
- I(name='LIST',
- code='l',
- arg=None,
- stack_before=[markobject, stackslice],
- stack_after=[pylist],
- proto=0,
- doc="""Build a list out of the topmost stack slice, after markobject.
-
- All the stack entries following the topmost markobject are placed into
- a single Python list, which single list object replaces all of the
- stack from the topmost markobject onward. For example,
-
- Stack before: ... markobject 1 2 3 'abc'
- Stack after: ... [1, 2, 3, 'abc']
- """),
-
- # Ways to build tuples.
-
- I(name='EMPTY_TUPLE',
- code=')',
- arg=None,
- stack_before=[],
- stack_after=[pytuple],
- proto=1,
- doc="Push an empty tuple."),
-
- I(name='TUPLE',
- code='t',
- arg=None,
- stack_before=[markobject, stackslice],
- stack_after=[pytuple],
- proto=0,
- doc="""Build a tuple out of the topmost stack slice, after markobject.
-
- All the stack entries following the topmost markobject are placed into
- a single Python tuple, which single tuple object replaces all of the
- stack from the topmost markobject onward. For example,
-
- Stack before: ... markobject 1 2 3 'abc'
- Stack after: ... (1, 2, 3, 'abc')
- """),
-
- I(name='TUPLE1',
- code='\x85',
- arg=None,
- stack_before=[anyobject],
- stack_after=[pytuple],
- proto=2,
- doc="""One-tuple.
-
- This code pops one value off the stack and pushes a tuple of
- length 1 whose one item is that value back onto it. IOW:
-
- stack[-1] = tuple(stack[-1:])
- """),
-
- I(name='TUPLE2',
- code='\x86',
- arg=None,
- stack_before=[anyobject, anyobject],
- stack_after=[pytuple],
- proto=2,
- doc="""One-tuple.
-
- This code pops two values off the stack and pushes a tuple
- of length 2 whose items are those values back onto it. IOW:
-
- stack[-2:] = [tuple(stack[-2:])]
- """),
-
- I(name='TUPLE3',
- code='\x87',
- arg=None,
- stack_before=[anyobject, anyobject, anyobject],
- stack_after=[pytuple],
- proto=2,
- doc="""One-tuple.
-
- This code pops three values off the stack and pushes a tuple
- of length 3 whose items are those values back onto it. IOW:
-
- stack[-3:] = [tuple(stack[-3:])]
- """),
-
- # Ways to build dicts.
-
- I(name='EMPTY_DICT',
- code='}',
- arg=None,
- stack_before=[],
- stack_after=[pydict],
- proto=1,
- doc="Push an empty dict."),
-
- I(name='DICT',
- code='d',
- arg=None,
- stack_before=[markobject, stackslice],
- stack_after=[pydict],
- proto=0,
- doc="""Build a dict out of the topmost stack slice, after markobject.
-
- All the stack entries following the topmost markobject are placed into
- a single Python dict, which single dict object replaces all of the
- stack from the topmost markobject onward. The stack slice alternates
- key, value, key, value, .... For example,
-
- Stack before: ... markobject 1 2 3 'abc'
- Stack after: ... {1: 2, 3: 'abc'}
- """),
-
- I(name='SETITEM',
- code='s',
- arg=None,
- stack_before=[pydict, anyobject, anyobject],
- stack_after=[pydict],
- proto=0,
- doc="""Add a key+value pair to an existing dict.
-
- Stack before: ... pydict key value
- Stack after: ... pydict
-
- where pydict has been modified via pydict[key] = value.
- """),
-
- I(name='SETITEMS',
- code='u',
- arg=None,
- stack_before=[pydict, markobject, stackslice],
- stack_after=[pydict],
- proto=1,
- doc="""Add an arbitrary number of key+value pairs to an existing dict.
-
- The slice of the stack following the topmost markobject is taken as
- an alternating sequence of keys and values, added to the dict
- immediately under the topmost markobject. Everything at and after the
- topmost markobject is popped, leaving the mutated dict at the top
- of the stack.
-
- Stack before: ... pydict markobject key_1 value_1 ... key_n value_n
- Stack after: ... pydict
-
- where pydict has been modified via pydict[key_i] = value_i for i in
- 1, 2, ..., n, and in that order.
- """),
-
- # Stack manipulation.
-
- I(name='POP',
- code='0',
- arg=None,
- stack_before=[anyobject],
- stack_after=[],
- proto=0,
- doc="Discard the top stack item, shrinking the stack by one item."),
-
- I(name='DUP',
- code='2',
- arg=None,
- stack_before=[anyobject],
- stack_after=[anyobject, anyobject],
- proto=0,
- doc="Push the top stack item onto the stack again, duplicating it."),
-
- I(name='MARK',
- code='(',
- arg=None,
- stack_before=[],
- stack_after=[markobject],
- proto=0,
- doc="""Push markobject onto the stack.
-
- markobject is a unique object, used by other opcodes to identify a
- region of the stack containing a variable number of objects for them
- to work on. See markobject.doc for more detail.
- """),
-
- I(name='POP_MARK',
- code='1',
- arg=None,
- stack_before=[markobject, stackslice],
- stack_after=[],
- proto=0,
- doc="""Pop all the stack objects at and above the topmost markobject.
-
- When an opcode using a variable number of stack objects is done,
- POP_MARK is used to remove those objects, and to remove the markobject
- that delimited their starting position on the stack.
- """),
-
- # Memo manipulation. There are really only two operations (get and put),
- # each in all-text, "short binary", and "long binary" flavors.
-
- I(name='GET',
- code='g',
- arg=decimalnl_short,
- stack_before=[],
- stack_after=[anyobject],
- proto=0,
- doc="""Read an object from the memo and push it on the stack.
-
- The index of the memo object to push is given by the newline-teriminated
- decimal string following. BINGET and LONG_BINGET are space-optimized
- versions.
- """),
-
- I(name='BINGET',
- code='h',
- arg=uint1,
- stack_before=[],
- stack_after=[anyobject],
- proto=1,
- doc="""Read an object from the memo and push it on the stack.
-
- The index of the memo object to push is given by the 1-byte unsigned
- integer following.
- """),
-
- I(name='LONG_BINGET',
- code='j',
- arg=int4,
- stack_before=[],
- stack_after=[anyobject],
- proto=1,
- doc="""Read an object from the memo and push it on the stack.
-
- The index of the memo object to push is given by the 4-byte signed
- little-endian integer following.
- """),
-
- I(name='PUT',
- code='p',
- arg=decimalnl_short,
- stack_before=[],
- stack_after=[],
- proto=0,
- doc="""Store the stack top into the memo. The stack is not popped.
-
- The index of the memo location to write into is given by the newline-
- terminated decimal string following. BINPUT and LONG_BINPUT are
- space-optimized versions.
- """),
-
- I(name='BINPUT',
- code='q',
- arg=uint1,
- stack_before=[],
- stack_after=[],
- proto=1,
- doc="""Store the stack top into the memo. The stack is not popped.
-
- The index of the memo location to write into is given by the 1-byte
- unsigned integer following.
- """),
-
- I(name='LONG_BINPUT',
- code='r',
- arg=int4,
- stack_before=[],
- stack_after=[],
- proto=1,
- doc="""Store the stack top into the memo. The stack is not popped.
-
- The index of the memo location to write into is given by the 4-byte
- signed little-endian integer following.
- """),
-
- # Access the extension registry (predefined objects). Akin to the GET
- # family.
-
- I(name='EXT1',
- code='\x82',
- arg=uint1,
- stack_before=[],
- stack_after=[anyobject],
- proto=2,
- doc="""Extension code.
-
- This code and the similar EXT2 and EXT4 allow using a registry
- of popular objects that are pickled by name, typically classes.
- It is envisioned that through a global negotiation and
- registration process, third parties can set up a mapping between
- ints and object names.
-
- In order to guarantee pickle interchangeability, the extension
- code registry ought to be global, although a range of codes may
- be reserved for private use.
-
- EXT1 has a 1-byte integer argument. This is used to index into the
- extension registry, and the object at that index is pushed on the stack.
- """),
-
- I(name='EXT2',
- code='\x83',
- arg=uint2,
- stack_before=[],
- stack_after=[anyobject],
- proto=2,
- doc="""Extension code.
-
- See EXT1. EXT2 has a two-byte integer argument.
- """),
-
- I(name='EXT4',
- code='\x84',
- arg=int4,
- stack_before=[],
- stack_after=[anyobject],
- proto=2,
- doc="""Extension code.
-
- See EXT1. EXT4 has a four-byte integer argument.
- """),
-
- # Push a class object, or module function, on the stack, via its module
- # and name.
-
- I(name='GLOBAL',
- code='c',
- arg=stringnl_noescape_pair,
- stack_before=[],
- stack_after=[anyobject],
- proto=0,
- doc="""Push a global object (module.attr) on the stack.
-
- Two newline-terminated strings follow the GLOBAL opcode. The first is
- taken as a module name, and the second as a class name. The class
- object module.class is pushed on the stack. More accurately, the
- object returned by self.find_class(module, class) is pushed on the
- stack, so unpickling subclasses can override this form of lookup.
- """),
-
- # Ways to build objects of classes pickle doesn't know about directly
- # (user-defined classes). I despair of documenting this accurately
- # and comprehensibly -- you really have to read the pickle code to
- # find all the special cases.
-
- I(name='REDUCE',
- code='R',
- arg=None,
- stack_before=[anyobject, anyobject],
- stack_after=[anyobject],
- proto=0,
- doc="""Push an object built from a callable and an argument tuple.
-
- The opcode is named to remind of the __reduce__() method.
-
- Stack before: ... callable pytuple
- Stack after: ... callable(*pytuple)
-
- The callable and the argument tuple are the first two items returned
- by a __reduce__ method. Applying the callable to the argtuple is
- supposed to reproduce the original object, or at least get it started.
- If the __reduce__ method returns a 3-tuple, the last component is an
- argument to be passed to the object's __setstate__, and then the REDUCE
- opcode is followed by code to create setstate's argument, and then a
- BUILD opcode to apply __setstate__ to that argument.
-
- If type(callable) is not ClassType, REDUCE complains unless the
- callable has been registered with the copy_reg module's
- safe_constructors dict, or the callable has a magic
- '__safe_for_unpickling__' attribute with a true value. I'm not sure
- why it does this, but I've sure seen this complaint often enough when
- I didn't want to <wink>.
- """),
-
- I(name='BUILD',
- code='b',
- arg=None,
- stack_before=[anyobject, anyobject],
- stack_after=[anyobject],
- proto=0,
- doc="""Finish building an object, via __setstate__ or dict update.
-
- Stack before: ... anyobject argument
- Stack after: ... anyobject
-
- where anyobject may have been mutated, as follows:
-
- If the object has a __setstate__ method,
-
- anyobject.__setstate__(argument)
-
- is called.
-
- Else the argument must be a dict, the object must have a __dict__, and
- the object is updated via
-
- anyobject.__dict__.update(argument)
-
- This may raise RuntimeError in restricted execution mode (which
- disallows access to __dict__ directly); in that case, the object
- is updated instead via
-
- for k, v in argument.items():
- anyobject[k] = v
- """),
-
- I(name='INST',
- code='i',
- arg=stringnl_noescape_pair,
- stack_before=[markobject, stackslice],
- stack_after=[anyobject],
- proto=0,
- doc="""Build a class instance.
-
- This is the protocol 0 version of protocol 1's OBJ opcode.
- INST is followed by two newline-terminated strings, giving a
- module and class name, just as for the GLOBAL opcode (and see
- GLOBAL for more details about that). self.find_class(module, name)
- is used to get a class object.
-
- In addition, all the objects on the stack following the topmost
- markobject are gathered into a tuple and popped (along with the
- topmost markobject), just as for the TUPLE opcode.
-
- Now it gets complicated. If all of these are true:
-
- + The argtuple is empty (markobject was at the top of the stack
- at the start).
-
- + It's an old-style class object (the type of the class object is
- ClassType).
-
- + The class object does not have a __getinitargs__ attribute.
-
- then we want to create an old-style class instance without invoking
- its __init__() method (pickle has waffled on this over the years; not
- calling __init__() is current wisdom). In this case, an instance of
- an old-style dummy class is created, and then we try to rebind its
- __class__ attribute to the desired class object. If this succeeds,
- the new instance object is pushed on the stack, and we're done. In
- restricted execution mode it can fail (assignment to __class__ is
- disallowed), and I'm not really sure what happens then -- it looks
- like the code ends up calling the class object's __init__ anyway,
- via falling into the next case.
-
- Else (the argtuple is not empty, it's not an old-style class object,
- or the class object does have a __getinitargs__ attribute), the code
- first insists that the class object have a __safe_for_unpickling__
- attribute. Unlike as for the __safe_for_unpickling__ check in REDUCE,
- it doesn't matter whether this attribute has a true or false value, it
- only matters whether it exists (XXX this is a bug; cPickle
- requires the attribute to be true). If __safe_for_unpickling__
- doesn't exist, UnpicklingError is raised.
-
- Else (the class object does have a __safe_for_unpickling__ attr),
- the class object obtained from INST's arguments is applied to the
- argtuple obtained from the stack, and the resulting instance object
- is pushed on the stack.
-
- NOTE: checks for __safe_for_unpickling__ went away in Python 2.3.
- """),
-
- I(name='OBJ',
- code='o',
- arg=None,
- stack_before=[markobject, anyobject, stackslice],
- stack_after=[anyobject],
- proto=1,
- doc="""Build a class instance.
-
- This is the protocol 1 version of protocol 0's INST opcode, and is
- very much like it. The major difference is that the class object
- is taken off the stack, allowing it to be retrieved from the memo
- repeatedly if several instances of the same class are created. This
- can be much more efficient (in both time and space) than repeatedly
- embedding the module and class names in INST opcodes.
-
- Unlike INST, OBJ takes no arguments from the opcode stream. Instead
- the class object is taken off the stack, immediately above the
- topmost markobject:
-
- Stack before: ... markobject classobject stackslice
- Stack after: ... new_instance_object
-
- As for INST, the remainder of the stack above the markobject is
- gathered into an argument tuple, and then the logic seems identical,
- except that no __safe_for_unpickling__ check is done (XXX this is
- a bug; cPickle does test __safe_for_unpickling__). See INST for
- the gory details.
-
- NOTE: In Python 2.3, INST and OBJ are identical except for how they
- get the class object. That was always the intent; the implementations
- had diverged for accidental reasons.
- """),
-
- I(name='NEWOBJ',
- code='\x81',
- arg=None,
- stack_before=[anyobject, anyobject],
- stack_after=[anyobject],
- proto=2,
- doc="""Build an object instance.
-
- The stack before should be thought of as containing a class
- object followed by an argument tuple (the tuple being the stack
- top). Call these cls and args. They are popped off the stack,
- and the value returned by cls.__new__(cls, *args) is pushed back
- onto the stack.
- """),
-
- # Machine control.
-
- I(name='PROTO',
- code='\x80',
- arg=uint1,
- stack_before=[],
- stack_after=[],
- proto=2,
- doc="""Protocol version indicator.
-
- For protocol 2 and above, a pickle must start with this opcode.
- The argument is the protocol version, an int in range(2, 256).
- """),
-
- I(name='STOP',
- code='.',
- arg=None,
- stack_before=[anyobject],
- stack_after=[],
- proto=0,
- doc="""Stop the unpickling machine.
-
- Every pickle ends with this opcode. The object at the top of the stack
- is popped, and that's the result of unpickling. The stack should be
- empty then.
- """),
-
- # Ways to deal with persistent IDs.
-
- I(name='PERSID',
- code='P',
- arg=stringnl_noescape,
- stack_before=[],
- stack_after=[anyobject],
- proto=0,
- doc="""Push an object identified by a persistent ID.
-
- The pickle module doesn't define what a persistent ID means. PERSID's
- argument is a newline-terminated str-style (no embedded escapes, no
- bracketing quote characters) string, which *is* "the persistent ID".
- The unpickler passes this string to self.persistent_load(). Whatever
- object that returns is pushed on the stack. There is no implementation
- of persistent_load() in Python's unpickler: it must be supplied by an
- unpickler subclass.
- """),
-
- I(name='BINPERSID',
- code='Q',
- arg=None,
- stack_before=[anyobject],
- stack_after=[anyobject],
- proto=1,
- doc="""Push an object identified by a persistent ID.
-
- Like PERSID, except the persistent ID is popped off the stack (instead
- of being a string embedded in the opcode bytestream). The persistent
- ID is passed to self.persistent_load(), and whatever object that
- returns is pushed on the stack. See PERSID for more detail.
- """),
-]
-del I
-
-# Verify uniqueness of .name and .code members.
-name2i = {}
-code2i = {}
-
-for i, d in enumerate(opcodes):
- if d.name in name2i:
- raise ValueError("repeated name %r at indices %d and %d" %
- (d.name, name2i[d.name], i))
- if d.code in code2i:
- raise ValueError("repeated code %r at indices %d and %d" %
- (d.code, code2i[d.code], i))
-
- name2i[d.name] = i
- code2i[d.code] = i
-
-del name2i, code2i, i, d
-
-##############################################################################
-# Build a code2op dict, mapping opcode characters to OpcodeInfo records.
-# Also ensure we've got the same stuff as pickle.py, although the
-# introspection here is dicey.
-
-code2op = {}
-for d in opcodes:
- code2op[d.code] = d
-del d
-
-def assure_pickle_consistency(verbose=False):
- import pickle, re
-
- copy = code2op.copy()
- for name in pickle.__all__:
- if not re.match("[A-Z][A-Z0-9_]+$", name):
- if verbose:
- print "skipping %r: it doesn't look like an opcode name" % name
- continue
- picklecode = getattr(pickle, name)
- if not isinstance(picklecode, str) or len(picklecode) != 1:
- if verbose:
- print ("skipping %r: value %r doesn't look like a pickle "
- "code" % (name, picklecode))
- continue
- if picklecode in copy:
- if verbose:
- print "checking name %r w/ code %r for consistency" % (
- name, picklecode)
- d = copy[picklecode]
- if d.name != name:
- raise ValueError("for pickle code %r, pickle.py uses name %r "
- "but we're using name %r" % (picklecode,
- name,
- d.name))
- # Forget this one. Any left over in copy at the end are a problem
- # of a different kind.
- del copy[picklecode]
- else:
- raise ValueError("pickle.py appears to have a pickle opcode with "
- "name %r and code %r, but we don't" %
- (name, picklecode))
- if copy:
- msg = ["we appear to have pickle opcodes that pickle.py doesn't have:"]
- for code, d in copy.items():
- msg.append(" name %r with code %r" % (d.name, code))
- raise ValueError("\n".join(msg))
-
-assure_pickle_consistency()
-del assure_pickle_consistency
-
-##############################################################################
-# A pickle opcode generator.
-
-def genops(pickle):
- """Generate all the opcodes in a pickle.
-
- 'pickle' is a file-like object, or string, containing the pickle.
-
- Each opcode in the pickle is generated, from the current pickle position,
- stopping after a STOP opcode is delivered. A triple is generated for
- each opcode:
-
- opcode, arg, pos
-
- opcode is an OpcodeInfo record, describing the current opcode.
-
- If the opcode has an argument embedded in the pickle, arg is its decoded
- value, as a Python object. If the opcode doesn't have an argument, arg
- is None.
-
- If the pickle has a tell() method, pos was the value of pickle.tell()
- before reading the current opcode. If the pickle is a string object,
- it's wrapped in a StringIO object, and the latter's tell() result is
- used. Else (the pickle doesn't have a tell(), and it's not obvious how
- to query its current position) pos is None.
- """
-
- import cStringIO as StringIO
-
- if isinstance(pickle, str):
- pickle = StringIO.StringIO(pickle)
-
- if hasattr(pickle, "tell"):
- getpos = pickle.tell
- else:
- getpos = lambda: None
-
- while True:
- pos = getpos()
- code = pickle.read(1)
- opcode = code2op.get(code)
- if opcode is None:
- if code == "":
- raise ValueError("pickle exhausted before seeing STOP")
- else:
- raise ValueError("at position %s, opcode %r unknown" % (
- pos is None and "<unknown>" or pos,
- code))
- if opcode.arg is None:
- arg = None
- else:
- arg = opcode.arg.reader(pickle)
- yield opcode, arg, pos
- if code == '.':
- assert opcode.name == 'STOP'
- break
-
-##############################################################################
-# A symbolic pickle disassembler.
-
-def dis(pickle, out=None, memo=None, indentlevel=4):
- """Produce a symbolic disassembly of a pickle.
-
- 'pickle' is a file-like object, or string, containing a (at least one)
- pickle. The pickle is disassembled from the current position, through
- the first STOP opcode encountered.
-
- Optional arg 'out' is a file-like object to which the disassembly is
- printed. It defaults to sys.stdout.
-
- Optional arg 'memo' is a Python dict, used as the pickle's memo. It
- may be mutated by dis(), if the pickle contains PUT or BINPUT opcodes.
- Passing the same memo object to another dis() call then allows disassembly
- to proceed across multiple pickles that were all created by the same
- pickler with the same memo. Ordinarily you don't need to worry about this.
-
- Optional arg indentlevel is the number of blanks by which to indent
- a new MARK level. It defaults to 4.
-
- In addition to printing the disassembly, some sanity checks are made:
-
- + All embedded opcode arguments "make sense".
-
- + Explicit and implicit pop operations have enough items on the stack.
-
- + When an opcode implicitly refers to a markobject, a markobject is
- actually on the stack.
-
- + A memo entry isn't referenced before it's defined.
-
- + The markobject isn't stored in the memo.
-
- + A memo entry isn't redefined.
- """
-
- # Most of the hair here is for sanity checks, but most of it is needed
- # anyway to detect when a protocol 0 POP takes a MARK off the stack
- # (which in turn is needed to indent MARK blocks correctly).
-
- stack = [] # crude emulation of unpickler stack
- if memo is None:
- memo = {} # crude emulation of unpicker memo
- maxproto = -1 # max protocol number seen
- markstack = [] # bytecode positions of MARK opcodes
- indentchunk = ' ' * indentlevel
- errormsg = None
- for opcode, arg, pos in genops(pickle):
- if pos is not None:
- print >> out, "%5d:" % pos,
-
- line = "%-4s %s%s" % (repr(opcode.code)[1:-1],
- indentchunk * len(markstack),
- opcode.name)
-
- maxproto = max(maxproto, opcode.proto)
- before = opcode.stack_before # don't mutate
- after = opcode.stack_after # don't mutate
- numtopop = len(before)
-
- # See whether a MARK should be popped.
- markmsg = None
- if markobject in before or (opcode.name == "POP" and
- stack and
- stack[-1] is markobject):
- assert markobject not in after
- if __debug__:
- if markobject in before:
- assert before[-1] is stackslice
- if markstack:
- markpos = markstack.pop()
- if markpos is None:
- markmsg = "(MARK at unknown opcode offset)"
- else:
- markmsg = "(MARK at %d)" % markpos
- # Pop everything at and after the topmost markobject.
- while stack[-1] is not markobject:
- stack.pop()
- stack.pop()
- # Stop later code from popping too much.
- try:
- numtopop = before.index(markobject)
- except ValueError:
- assert opcode.name == "POP"
- numtopop = 0
- else:
- errormsg = markmsg = "no MARK exists on stack"
-
- # Check for correct memo usage.
- if opcode.name in ("PUT", "BINPUT", "LONG_BINPUT"):
- assert arg is not None
- if arg in memo:
- errormsg = "memo key %r already defined" % arg
- elif not stack:
- errormsg = "stack is empty -- can't store into memo"
- elif stack[-1] is markobject:
- errormsg = "can't store markobject in the memo"
- else:
- memo[arg] = stack[-1]
-
- elif opcode.name in ("GET", "BINGET", "LONG_BINGET"):
- if arg in memo:
- assert len(after) == 1
- after = [memo[arg]] # for better stack emulation
- else:
- errormsg = "memo key %r has never been stored into" % arg
-
- if arg is not None or markmsg:
- # make a mild effort to align arguments
- line += ' ' * (10 - len(opcode.name))
- if arg is not None:
- line += ' ' + repr(arg)
- if markmsg:
- line += ' ' + markmsg
- print >> out, line
-
- if errormsg:
- # Note that we delayed complaining until the offending opcode
- # was printed.
- raise ValueError(errormsg)
-
- # Emulate the stack effects.
- if len(stack) < numtopop:
- raise ValueError("tries to pop %d items from stack with "
- "only %d items" % (numtopop, len(stack)))
- if numtopop:
- del stack[-numtopop:]
- if markobject in after:
- assert markobject not in before
- markstack.append(pos)
-
- stack.extend(after)
-
- print >> out, "highest protocol among opcodes =", maxproto
- if stack:
- raise ValueError("stack not empty after STOP: %r" % stack)
-
-# For use in the doctest, simply as an example of a class to pickle.
-class _Example:
- def __init__(self, value):
- self.value = value
-
-_dis_test = r"""
->>> import pickle
->>> x = [1, 2, (3, 4), {'abc': u"def"}]
->>> pkl = pickle.dumps(x, 0)
->>> dis(pkl)
- 0: ( MARK
- 1: l LIST (MARK at 0)
- 2: p PUT 0
- 5: I INT 1
- 8: a APPEND
- 9: I INT 2
- 12: a APPEND
- 13: ( MARK
- 14: I INT 3
- 17: I INT 4
- 20: t TUPLE (MARK at 13)
- 21: p PUT 1
- 24: a APPEND
- 25: ( MARK
- 26: d DICT (MARK at 25)
- 27: p PUT 2
- 30: S STRING 'abc'
- 37: p PUT 3
- 40: V UNICODE u'def'
- 45: p PUT 4
- 48: s SETITEM
- 49: a APPEND
- 50: . STOP
-highest protocol among opcodes = 0
-
-Try again with a "binary" pickle.
-
->>> pkl = pickle.dumps(x, 1)
->>> dis(pkl)
- 0: ] EMPTY_LIST
- 1: q BINPUT 0
- 3: ( MARK
- 4: K BININT1 1
- 6: K BININT1 2
- 8: ( MARK
- 9: K BININT1 3
- 11: K BININT1 4
- 13: t TUPLE (MARK at 8)
- 14: q BINPUT 1
- 16: } EMPTY_DICT
- 17: q BINPUT 2
- 19: U SHORT_BINSTRING 'abc'
- 24: q BINPUT 3
- 26: X BINUNICODE u'def'
- 34: q BINPUT 4
- 36: s SETITEM
- 37: e APPENDS (MARK at 3)
- 38: . STOP
-highest protocol among opcodes = 1
-
-Exercise the INST/OBJ/BUILD family.
-
->>> import random
->>> dis(pickle.dumps(random.random, 0))
- 0: c GLOBAL 'random random'
- 15: p PUT 0
- 18: . STOP
-highest protocol among opcodes = 0
-
->>> from pickletools import _Example
->>> x = [_Example(42)] * 2
->>> dis(pickle.dumps(x, 0))
- 0: ( MARK
- 1: l LIST (MARK at 0)
- 2: p PUT 0
- 5: ( MARK
- 6: i INST 'pickletools _Example' (MARK at 5)
- 28: p PUT 1
- 31: ( MARK
- 32: d DICT (MARK at 31)
- 33: p PUT 2
- 36: S STRING 'value'
- 45: p PUT 3
- 48: I INT 42
- 52: s SETITEM
- 53: b BUILD
- 54: a APPEND
- 55: g GET 1
- 58: a APPEND
- 59: . STOP
-highest protocol among opcodes = 0
-
->>> dis(pickle.dumps(x, 1))
- 0: ] EMPTY_LIST
- 1: q BINPUT 0
- 3: ( MARK
- 4: ( MARK
- 5: c GLOBAL 'pickletools _Example'
- 27: q BINPUT 1
- 29: o OBJ (MARK at 4)
- 30: q BINPUT 2
- 32: } EMPTY_DICT
- 33: q BINPUT 3
- 35: U SHORT_BINSTRING 'value'
- 42: q BINPUT 4
- 44: K BININT1 42
- 46: s SETITEM
- 47: b BUILD
- 48: h BINGET 2
- 50: e APPENDS (MARK at 3)
- 51: . STOP
-highest protocol among opcodes = 1
-
-Try "the canonical" recursive-object test.
-
->>> L = []
->>> T = L,
->>> L.append(T)
->>> L[0] is T
-True
->>> T[0] is L
-True
->>> L[0][0] is L
-True
->>> T[0][0] is T
-True
->>> dis(pickle.dumps(L, 0))
- 0: ( MARK
- 1: l LIST (MARK at 0)
- 2: p PUT 0
- 5: ( MARK
- 6: g GET 0
- 9: t TUPLE (MARK at 5)
- 10: p PUT 1
- 13: a APPEND
- 14: . STOP
-highest protocol among opcodes = 0
-
->>> dis(pickle.dumps(L, 1))
- 0: ] EMPTY_LIST
- 1: q BINPUT 0
- 3: ( MARK
- 4: h BINGET 0
- 6: t TUPLE (MARK at 3)
- 7: q BINPUT 1
- 9: a APPEND
- 10: . STOP
-highest protocol among opcodes = 1
-
-Note that, in the protocol 0 pickle of the recursive tuple, the disassembler
-has to emulate the stack in order to realize that the POP opcode at 16 gets
-rid of the MARK at 0.
-
->>> dis(pickle.dumps(T, 0))
- 0: ( MARK
- 1: ( MARK
- 2: l LIST (MARK at 1)
- 3: p PUT 0
- 6: ( MARK
- 7: g GET 0
- 10: t TUPLE (MARK at 6)
- 11: p PUT 1
- 14: a APPEND
- 15: 0 POP
- 16: 0 POP (MARK at 0)
- 17: g GET 1
- 20: . STOP
-highest protocol among opcodes = 0
-
->>> dis(pickle.dumps(T, 1))
- 0: ( MARK
- 1: ] EMPTY_LIST
- 2: q BINPUT 0
- 4: ( MARK
- 5: h BINGET 0
- 7: t TUPLE (MARK at 4)
- 8: q BINPUT 1
- 10: a APPEND
- 11: 1 POP_MARK (MARK at 0)
- 12: h BINGET 1
- 14: . STOP
-highest protocol among opcodes = 1
-
-Try protocol 2.
-
->>> dis(pickle.dumps(L, 2))
- 0: \x80 PROTO 2
- 2: ] EMPTY_LIST
- 3: q BINPUT 0
- 5: h BINGET 0
- 7: \x85 TUPLE1
- 8: q BINPUT 1
- 10: a APPEND
- 11: . STOP
-highest protocol among opcodes = 2
-
->>> dis(pickle.dumps(T, 2))
- 0: \x80 PROTO 2
- 2: ] EMPTY_LIST
- 3: q BINPUT 0
- 5: h BINGET 0
- 7: \x85 TUPLE1
- 8: q BINPUT 1
- 10: a APPEND
- 11: 0 POP
- 12: h BINGET 1
- 14: . STOP
-highest protocol among opcodes = 2
-"""
-
-_memo_test = r"""
->>> import pickle
->>> from StringIO import StringIO
->>> f = StringIO()
->>> p = pickle.Pickler(f, 2)
->>> x = [1, 2, 3]
->>> p.dump(x)
->>> p.dump(x)
->>> f.seek(0)
->>> memo = {}
->>> dis(f, memo=memo)
- 0: \x80 PROTO 2
- 2: ] EMPTY_LIST
- 3: q BINPUT 0
- 5: ( MARK
- 6: K BININT1 1
- 8: K BININT1 2
- 10: K BININT1 3
- 12: e APPENDS (MARK at 5)
- 13: . STOP
-highest protocol among opcodes = 2
->>> dis(f, memo=memo)
- 14: \x80 PROTO 2
- 16: h BINGET 0
- 18: . STOP
-highest protocol among opcodes = 2
-"""
-
-__test__ = {'disassembler_test': _dis_test,
- 'disassembler_memo_test': _memo_test,
- }
-
-def _test():
- import doctest
- return doctest.testmod()
-
-if __name__ == "__main__":
- _test()
diff --git a/sys/lib/python/pipes.py b/sys/lib/python/pipes.py
deleted file mode 100644
index 295d9c88b..000000000
--- a/sys/lib/python/pipes.py
+++ /dev/null
@@ -1,298 +0,0 @@
-"""Conversion pipeline templates.
-
-The problem:
-------------
-
-Suppose you have some data that you want to convert to another format,
-such as from GIF image format to PPM image format. Maybe the
-conversion involves several steps (e.g. piping it through compress or
-uuencode). Some of the conversion steps may require that their input
-is a disk file, others may be able to read standard input; similar for
-their output. The input to the entire conversion may also be read
-from a disk file or from an open file, and similar for its output.
-
-The module lets you construct a pipeline template by sticking one or
-more conversion steps together. It will take care of creating and
-removing temporary files if they are necessary to hold intermediate
-data. You can then use the template to do conversions from many
-different sources to many different destinations. The temporary
-file names used are different each time the template is used.
-
-The templates are objects so you can create templates for many
-different conversion steps and store them in a dictionary, for
-instance.
-
-
-Directions:
------------
-
-To create a template:
- t = Template()
-
-To add a conversion step to a template:
- t.append(command, kind)
-where kind is a string of two characters: the first is '-' if the
-command reads its standard input or 'f' if it requires a file; the
-second likewise for the output. The command must be valid /bin/sh
-syntax. If input or output files are required, they are passed as
-$IN and $OUT; otherwise, it must be possible to use the command in
-a pipeline.
-
-To add a conversion step at the beginning:
- t.prepend(command, kind)
-
-To convert a file to another file using a template:
- sts = t.copy(infile, outfile)
-If infile or outfile are the empty string, standard input is read or
-standard output is written, respectively. The return value is the
-exit status of the conversion pipeline.
-
-To open a file for reading or writing through a conversion pipeline:
- fp = t.open(file, mode)
-where mode is 'r' to read the file, or 'w' to write it -- just like
-for the built-in function open() or for os.popen().
-
-To create a new template object initialized to a given one:
- t2 = t.clone()
-
-For an example, see the function test() at the end of the file.
-""" # '
-
-
-import re
-
-import os
-import tempfile
-import string
-
-__all__ = ["Template"]
-
-# Conversion step kinds
-
-FILEIN_FILEOUT = 'ff' # Must read & write real files
-STDIN_FILEOUT = '-f' # Must write a real file
-FILEIN_STDOUT = 'f-' # Must read a real file
-STDIN_STDOUT = '--' # Normal pipeline element
-SOURCE = '.-' # Must be first, writes stdout
-SINK = '-.' # Must be last, reads stdin
-
-stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
- SOURCE, SINK]
-
-
-class Template:
- """Class representing a pipeline template."""
-
- def __init__(self):
- """Template() returns a fresh pipeline template."""
- self.debugging = 0
- self.reset()
-
- def __repr__(self):
- """t.__repr__() implements repr(t)."""
- return '<Template instance, steps=%r>' % (self.steps,)
-
- def reset(self):
- """t.reset() restores a pipeline template to its initial state."""
- self.steps = []
-
- def clone(self):
- """t.clone() returns a new pipeline template with identical
- initial state as the current one."""
- t = Template()
- t.steps = self.steps[:]
- t.debugging = self.debugging
- return t
-
- def debug(self, flag):
- """t.debug(flag) turns debugging on or off."""
- self.debugging = flag
-
- def append(self, cmd, kind):
- """t.append(cmd, kind) adds a new step at the end."""
- if type(cmd) is not type(''):
- raise TypeError, \
- 'Template.append: cmd must be a string'
- if kind not in stepkinds:
- raise ValueError, \
- 'Template.append: bad kind %r' % (kind,)
- if kind == SOURCE:
- raise ValueError, \
- 'Template.append: SOURCE can only be prepended'
- if self.steps and self.steps[-1][1] == SINK:
- raise ValueError, \
- 'Template.append: already ends with SINK'
- if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
- raise ValueError, \
- 'Template.append: missing $IN in cmd'
- if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
- raise ValueError, \
- 'Template.append: missing $OUT in cmd'
- self.steps.append((cmd, kind))
-
- def prepend(self, cmd, kind):
- """t.prepend(cmd, kind) adds a new step at the front."""
- if type(cmd) is not type(''):
- raise TypeError, \
- 'Template.prepend: cmd must be a string'
- if kind not in stepkinds:
- raise ValueError, \
- 'Template.prepend: bad kind %r' % (kind,)
- if kind == SINK:
- raise ValueError, \
- 'Template.prepend: SINK can only be appended'
- if self.steps and self.steps[0][1] == SOURCE:
- raise ValueError, \
- 'Template.prepend: already begins with SOURCE'
- if kind[0] == 'f' and not re.search(r'\$IN\b', cmd):
- raise ValueError, \
- 'Template.prepend: missing $IN in cmd'
- if kind[1] == 'f' and not re.search(r'\$OUT\b', cmd):
- raise ValueError, \
- 'Template.prepend: missing $OUT in cmd'
- self.steps.insert(0, (cmd, kind))
-
- def open(self, file, rw):
- """t.open(file, rw) returns a pipe or file object open for
- reading or writing; the file is the other end of the pipeline."""
- if rw == 'r':
- return self.open_r(file)
- if rw == 'w':
- return self.open_w(file)
- raise ValueError, \
- 'Template.open: rw must be \'r\' or \'w\', not %r' % (rw,)
-
- def open_r(self, file):
- """t.open_r(file) and t.open_w(file) implement
- t.open(file, 'r') and t.open(file, 'w') respectively."""
- if not self.steps:
- return open(file, 'r')
- if self.steps[-1][1] == SINK:
- raise ValueError, \
- 'Template.open_r: pipeline ends width SINK'
- cmd = self.makepipeline(file, '')
- return os.popen(cmd, 'r')
-
- def open_w(self, file):
- if not self.steps:
- return open(file, 'w')
- if self.steps[0][1] == SOURCE:
- raise ValueError, \
- 'Template.open_w: pipeline begins with SOURCE'
- cmd = self.makepipeline('', file)
- return os.popen(cmd, 'w')
-
- def copy(self, infile, outfile):
- return os.system(self.makepipeline(infile, outfile))
-
- def makepipeline(self, infile, outfile):
- cmd = makepipeline(infile, self.steps, outfile)
- if self.debugging:
- print cmd
- cmd = 'set -x; ' + cmd
- return cmd
-
-
-def makepipeline(infile, steps, outfile):
- # Build a list with for each command:
- # [input filename or '', command string, kind, output filename or '']
-
- list = []
- for cmd, kind in steps:
- list.append(['', cmd, kind, ''])
- #
- # Make sure there is at least one step
- #
- if not list:
- list.append(['', 'cat', '--', ''])
- #
- # Take care of the input and output ends
- #
- [cmd, kind] = list[0][1:3]
- if kind[0] == 'f' and not infile:
- list.insert(0, ['', 'cat', '--', ''])
- list[0][0] = infile
- #
- [cmd, kind] = list[-1][1:3]
- if kind[1] == 'f' and not outfile:
- list.append(['', 'cat', '--', ''])
- list[-1][-1] = outfile
- #
- # Invent temporary files to connect stages that need files
- #
- garbage = []
- for i in range(1, len(list)):
- lkind = list[i-1][2]
- rkind = list[i][2]
- if lkind[1] == 'f' or rkind[0] == 'f':
- (fd, temp) = tempfile.mkstemp()
- os.close(fd)
- garbage.append(temp)
- list[i-1][-1] = list[i][0] = temp
- #
- for item in list:
- [inf, cmd, kind, outf] = item
- if kind[1] == 'f':
- cmd = 'OUT=' + quote(outf) + '; ' + cmd
- if kind[0] == 'f':
- cmd = 'IN=' + quote(inf) + '; ' + cmd
- if kind[0] == '-' and inf:
- cmd = cmd + ' <' + quote(inf)
- if kind[1] == '-' and outf:
- cmd = cmd + ' >' + quote(outf)
- item[1] = cmd
- #
- cmdlist = list[0][1]
- for item in list[1:]:
- [cmd, kind] = item[1:3]
- if item[0] == '':
- if 'f' in kind:
- cmd = '{ ' + cmd + '; }'
- cmdlist = cmdlist + ' |\n' + cmd
- else:
- cmdlist = cmdlist + '\n' + cmd
- #
- if garbage:
- rmcmd = 'rm -f'
- for file in garbage:
- rmcmd = rmcmd + ' ' + quote(file)
- trapcmd = 'trap ' + quote(rmcmd + '; exit') + ' 1 2 3 13 14 15'
- cmdlist = trapcmd + '\n' + cmdlist + '\n' + rmcmd
- #
- return cmdlist
-
-
-# Reliably quote a string as a single argument for /bin/sh
-
-_safechars = string.ascii_letters + string.digits + '!@%_-+=:,./' # Safe unquoted
-_funnychars = '"`$\\' # Unsafe inside "double quotes"
-
-def quote(file):
- for c in file:
- if c not in _safechars:
- break
- else:
- return file
- if '\'' not in file:
- return '\'' + file + '\''
- res = ''
- for c in file:
- if c in _funnychars:
- c = '\\' + c
- res = res + c
- return '"' + res + '"'
-
-
-# Small test program and example
-
-def test():
- print 'Testing...'
- t = Template()
- t.append('togif $IN $OUT', 'ff')
- t.append('giftoppm', '--')
- t.append('ppmtogif >$OUT', '-f')
- t.append('fromgif $IN $OUT', 'ff')
- t.debug(1)
- FILE = '/usr/local/images/rgb/rogues/guido.rgb'
- t.copy(FILE, '@temp')
- print 'Done.'
diff --git a/sys/lib/python/pkgutil.py b/sys/lib/python/pkgutil.py
deleted file mode 100644
index 37738e4a7..000000000
--- a/sys/lib/python/pkgutil.py
+++ /dev/null
@@ -1,546 +0,0 @@
-"""Utilities to support packages."""
-
-# NOTE: This module must remain compatible with Python 2.3, as it is shared
-# by setuptools for distribution with Python 2.3 and up.
-
-import os
-import sys
-import imp
-import os.path
-from types import ModuleType
-
-__all__ = [
- 'get_importer', 'iter_importers', 'get_loader', 'find_loader',
- 'walk_packages', 'iter_modules',
- 'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
-]
-
-def read_code(stream):
- # This helper is needed in order for the PEP 302 emulation to
- # correctly handle compiled files
- import marshal
-
- magic = stream.read(4)
- if magic != imp.get_magic():
- return None
-
- stream.read(4) # Skip timestamp
- return marshal.load(stream)
-
-
-def simplegeneric(func):
- """Make a trivial single-dispatch generic function"""
- registry = {}
- def wrapper(*args, **kw):
- ob = args[0]
- try:
- cls = ob.__class__
- except AttributeError:
- cls = type(ob)
- try:
- mro = cls.__mro__
- except AttributeError:
- try:
- class cls(cls, object):
- pass
- mro = cls.__mro__[1:]
- except TypeError:
- mro = object, # must be an ExtensionClass or some such :(
- for t in mro:
- if t in registry:
- return registry[t](*args, **kw)
- else:
- return func(*args, **kw)
- try:
- wrapper.__name__ = func.__name__
- except (TypeError, AttributeError):
- pass # Python 2.3 doesn't allow functions to be renamed
-
- def register(typ, func=None):
- if func is None:
- return lambda f: register(typ, f)
- registry[typ] = func
- return func
-
- wrapper.__dict__ = func.__dict__
- wrapper.__doc__ = func.__doc__
- wrapper.register = register
- return wrapper
-
-
-def walk_packages(path=None, prefix='', onerror=None):
- """Yields (module_loader, name, ispkg) for all modules recursively
- on path, or, if path is None, all accessible modules.
-
- 'path' should be either None or a list of paths to look for
- modules in.
-
- 'prefix' is a string to output on the front of every module name
- on output.
-
- Note that this function must import all *packages* (NOT all
- modules!) on the given path, in order to access the __path__
- attribute to find submodules.
-
- 'onerror' is a function which gets called with one argument (the
- name of the package which was being imported) if any exception
- occurs while trying to import a package. If no onerror function is
- supplied, ImportErrors are caught and ignored, while all other
- exceptions are propagated, terminating the search.
-
- Examples:
-
- # list all modules python can access
- walk_packages()
-
- # list all submodules of ctypes
- walk_packages(ctypes.__path__, ctypes.__name__+'.')
- """
-
- def seen(p, m={}):
- if p in m:
- return True
- m[p] = True
-
- for importer, name, ispkg in iter_modules(path, prefix):
- yield importer, name, ispkg
-
- if ispkg:
- try:
- __import__(name)
- except ImportError:
- if onerror is not None:
- onerror(name)
- except Exception:
- if onerror is not None:
- onerror(name)
- else:
- raise
- else:
- path = getattr(sys.modules[name], '__path__', None) or []
-
- # don't traverse path items we've seen before
- path = [p for p in path if not seen(p)]
-
- for item in walk_packages(path, name+'.', onerror):
- yield item
-
-
-def iter_modules(path=None, prefix=''):
- """Yields (module_loader, name, ispkg) for all submodules on path,
- or, if path is None, all top-level modules on sys.path.
-
- 'path' should be either None or a list of paths to look for
- modules in.
-
- 'prefix' is a string to output on the front of every module name
- on output.
- """
-
- if path is None:
- importers = iter_importers()
- else:
- importers = map(get_importer, path)
-
- yielded = {}
- for i in importers:
- for name, ispkg in iter_importer_modules(i, prefix):
- if name not in yielded:
- yielded[name] = 1
- yield i, name, ispkg
-
-
-#@simplegeneric
-def iter_importer_modules(importer, prefix=''):
- if not hasattr(importer, 'iter_modules'):
- return []
- return importer.iter_modules(prefix)
-
-iter_importer_modules = simplegeneric(iter_importer_modules)
-
-
-class ImpImporter:
- """PEP 302 Importer that wraps Python's "classic" import algorithm
-
- ImpImporter(dirname) produces a PEP 302 importer that searches that
- directory. ImpImporter(None) produces a PEP 302 importer that searches
- the current sys.path, plus any modules that are frozen or built-in.
-
- Note that ImpImporter does not currently support being used by placement
- on sys.meta_path.
- """
-
- def __init__(self, path=None):
- self.path = path
-
- def find_module(self, fullname, path=None):
- # Note: we ignore 'path' argument since it is only used via meta_path
- subname = fullname.split(".")[-1]
- if subname != fullname and self.path is None:
- return None
- if self.path is None:
- path = None
- else:
- path = [os.path.realpath(self.path)]
- try:
- file, filename, etc = imp.find_module(subname, path)
- except ImportError:
- return None
- return ImpLoader(fullname, file, filename, etc)
-
- def iter_modules(self, prefix=''):
- if self.path is None or not os.path.isdir(self.path):
- return
-
- yielded = {}
- import inspect
-
- filenames = os.listdir(self.path)
- filenames.sort() # handle packages before same-named modules
-
- for fn in filenames:
- modname = inspect.getmodulename(fn)
- if modname=='__init__' or modname in yielded:
- continue
-
- path = os.path.join(self.path, fn)
- ispkg = False
-
- if not modname and os.path.isdir(path) and '.' not in fn:
- modname = fn
- for fn in os.listdir(path):
- subname = inspect.getmodulename(fn)
- if subname=='__init__':
- ispkg = True
- break
- else:
- continue # not a package
-
- if modname and '.' not in modname:
- yielded[modname] = 1
- yield prefix + modname, ispkg
-
-
-class ImpLoader:
- """PEP 302 Loader that wraps Python's "classic" import algorithm
- """
- code = source = None
-
- def __init__(self, fullname, file, filename, etc):
- self.file = file
- self.filename = filename
- self.fullname = fullname
- self.etc = etc
-
- def load_module(self, fullname):
- self._reopen()
- try:
- mod = imp.load_module(fullname, self.file, self.filename, self.etc)
- finally:
- if self.file:
- self.file.close()
- # Note: we don't set __loader__ because we want the module to look
- # normal; i.e. this is just a wrapper for standard import machinery
- return mod
-
- def get_data(self, pathname):
- return open(pathname, "rb").read()
-
- def _reopen(self):
- if self.file and self.file.closed:
- mod_type = self.etc[2]
- if mod_type==imp.PY_SOURCE:
- self.file = open(self.filename, 'rU')
- elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
- self.file = open(self.filename, 'rb')
-
- def _fix_name(self, fullname):
- if fullname is None:
- fullname = self.fullname
- elif fullname != self.fullname:
- raise ImportError("Loader for module %s cannot handle "
- "module %s" % (self.fullname, fullname))
- return fullname
-
- def is_package(self, fullname):
- fullname = self._fix_name(fullname)
- return self.etc[2]==imp.PKG_DIRECTORY
-
- def get_code(self, fullname=None):
- fullname = self._fix_name(fullname)
- if self.code is None:
- mod_type = self.etc[2]
- if mod_type==imp.PY_SOURCE:
- source = self.get_source(fullname)
- self.code = compile(source, self.filename, 'exec')
- elif mod_type==imp.PY_COMPILED:
- self._reopen()
- try:
- self.code = read_code(self.file)
- finally:
- self.file.close()
- elif mod_type==imp.PKG_DIRECTORY:
- self.code = self._get_delegate().get_code()
- return self.code
-
- def get_source(self, fullname=None):
- fullname = self._fix_name(fullname)
- if self.source is None:
- mod_type = self.etc[2]
- if mod_type==imp.PY_SOURCE:
- self._reopen()
- try:
- self.source = self.file.read()
- finally:
- self.file.close()
- elif mod_type==imp.PY_COMPILED:
- if os.path.exists(self.filename[:-1]):
- f = open(self.filename[:-1], 'rU')
- self.source = f.read()
- f.close()
- elif mod_type==imp.PKG_DIRECTORY:
- self.source = self._get_delegate().get_source()
- return self.source
-
-
- def _get_delegate(self):
- return ImpImporter(self.filename).find_module('__init__')
-
- def get_filename(self, fullname=None):
- fullname = self._fix_name(fullname)
- mod_type = self.etc[2]
- if self.etc[2]==imp.PKG_DIRECTORY:
- return self._get_delegate().get_filename()
- elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
- return self.filename
- return None
-
-
-try:
- import zipimport
- from zipimport import zipimporter
-
- def iter_zipimport_modules(importer, prefix=''):
- dirlist = zipimport._zip_directory_cache[importer.archive].keys()
- dirlist.sort()
- _prefix = importer.prefix
- plen = len(_prefix)
- yielded = {}
- import inspect
- for fn in dirlist:
- if not fn.startswith(_prefix):
- continue
-
- fn = fn[plen:].split(os.sep)
-
- if len(fn)==2 and fn[1].startswith('__init__.py'):
- if fn[0] not in yielded:
- yielded[fn[0]] = 1
- yield fn[0], True
-
- if len(fn)!=1:
- continue
-
- modname = inspect.getmodulename(fn[0])
- if modname=='__init__':
- continue
-
- if modname and '.' not in modname and modname not in yielded:
- yielded[modname] = 1
- yield prefix + modname, False
-
- iter_importer_modules.register(zipimporter, iter_zipimport_modules)
-
-except ImportError:
- pass
-
-
-def get_importer(path_item):
- """Retrieve a PEP 302 importer for the given path item
-
- The returned importer is cached in sys.path_importer_cache
- if it was newly created by a path hook.
-
- If there is no importer, a wrapper around the basic import
- machinery is returned. This wrapper is never inserted into
- the importer cache (None is inserted instead).
-
- The cache (or part of it) can be cleared manually if a
- rescan of sys.path_hooks is necessary.
- """
- try:
- importer = sys.path_importer_cache[path_item]
- except KeyError:
- for path_hook in sys.path_hooks:
- try:
- importer = path_hook(path_item)
- break
- except ImportError:
- pass
- else:
- importer = None
- sys.path_importer_cache.setdefault(path_item, importer)
-
- if importer is None:
- try:
- importer = ImpImporter(path_item)
- except ImportError:
- importer = None
- return importer
-
-
-def iter_importers(fullname=""):
- """Yield PEP 302 importers for the given module name
-
- If fullname contains a '.', the importers will be for the package
- containing fullname, otherwise they will be importers for sys.meta_path,
- sys.path, and Python's "classic" import machinery, in that order. If
- the named module is in a package, that package is imported as a side
- effect of invoking this function.
-
- Non PEP 302 mechanisms (e.g. the Windows registry) used by the
- standard import machinery to find files in alternative locations
- are partially supported, but are searched AFTER sys.path. Normally,
- these locations are searched BEFORE sys.path, preventing sys.path
- entries from shadowing them.
-
- For this to cause a visible difference in behaviour, there must
- be a module or package name that is accessible via both sys.path
- and one of the non PEP 302 file system mechanisms. In this case,
- the emulation will find the former version, while the builtin
- import mechanism will find the latter.
-
- Items of the following types can be affected by this discrepancy:
- imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
- """
- if fullname.startswith('.'):
- raise ImportError("Relative module names not supported")
- if '.' in fullname:
- # Get the containing package's __path__
- pkg = '.'.join(fullname.split('.')[:-1])
- if pkg not in sys.modules:
- __import__(pkg)
- path = getattr(sys.modules[pkg], '__path__', None) or []
- else:
- for importer in sys.meta_path:
- yield importer
- path = sys.path
- for item in path:
- yield get_importer(item)
- if '.' not in fullname:
- yield ImpImporter()
-
-def get_loader(module_or_name):
- """Get a PEP 302 "loader" object for module_or_name
-
- If the module or package is accessible via the normal import
- mechanism, a wrapper around the relevant part of that machinery
- is returned. Returns None if the module cannot be found or imported.
- If the named module is not already imported, its containing package
- (if any) is imported, in order to establish the package __path__.
-
- This function uses iter_importers(), and is thus subject to the same
- limitations regarding platform-specific special import locations such
- as the Windows registry.
- """
- if module_or_name in sys.modules:
- module_or_name = sys.modules[module_or_name]
- if isinstance(module_or_name, ModuleType):
- module = module_or_name
- loader = getattr(module, '__loader__', None)
- if loader is not None:
- return loader
- fullname = module.__name__
- else:
- fullname = module_or_name
- return find_loader(fullname)
-
-def find_loader(fullname):
- """Find a PEP 302 "loader" object for fullname
-
- If fullname contains dots, path must be the containing package's __path__.
- Returns None if the module cannot be found or imported. This function uses
- iter_importers(), and is thus subject to the same limitations regarding
- platform-specific special import locations such as the Windows registry.
- """
- for importer in iter_importers(fullname):
- loader = importer.find_module(fullname)
- if loader is not None:
- return loader
-
- return None
-
-
-def extend_path(path, name):
- """Extend a package's path.
-
- Intended use is to place the following code in a package's __init__.py:
-
- from pkgutil import extend_path
- __path__ = extend_path(__path__, __name__)
-
- This will add to the package's __path__ all subdirectories of
- directories on sys.path named after the package. This is useful
- if one wants to distribute different parts of a single logical
- package as multiple directories.
-
- It also looks for *.pkg files beginning where * matches the name
- argument. This feature is similar to *.pth files (see site.py),
- except that it doesn't special-case lines starting with 'import'.
- A *.pkg file is trusted at face value: apart from checking for
- duplicates, all entries found in a *.pkg file are added to the
- path, regardless of whether they are exist the filesystem. (This
- is a feature.)
-
- If the input path is not a list (as is the case for frozen
- packages) it is returned unchanged. The input path is not
- modified; an extended copy is returned. Items are only appended
- to the copy at the end.
-
- It is assumed that sys.path is a sequence. Items of sys.path that
- are not (unicode or 8-bit) strings referring to existing
- directories are ignored. Unicode items of sys.path that cause
- errors when used as filenames may cause this function to raise an
- exception (in line with os.path.isdir() behavior).
- """
-
- if not isinstance(path, list):
- # This could happen e.g. when this is called from inside a
- # frozen package. Return the path unchanged in that case.
- return path
-
- pname = os.path.join(*name.split('.')) # Reconstitute as relative path
- # Just in case os.extsep != '.'
- sname = os.extsep.join(name.split('.'))
- sname_pkg = sname + os.extsep + "pkg"
- init_py = "__init__" + os.extsep + "py"
-
- path = path[:] # Start with a copy of the existing path
-
- for dir in sys.path:
- if not isinstance(dir, basestring) or not os.path.isdir(dir):
- continue
- subdir = os.path.join(dir, pname)
- # XXX This may still add duplicate entries to path on
- # case-insensitive filesystems
- initfile = os.path.join(subdir, init_py)
- if subdir not in path and os.path.isfile(initfile):
- path.append(subdir)
- # XXX Is this the right thing for subpackages like zope.app?
- # It looks for a file named "zope.app.pkg"
- pkgfile = os.path.join(dir, sname_pkg)
- if os.path.isfile(pkgfile):
- try:
- f = open(pkgfile)
- except IOError, msg:
- sys.stderr.write("Can't open %s: %s\n" %
- (pkgfile, msg))
- else:
- for line in f:
- line = line.rstrip('\n')
- if not line or line.startswith('#'):
- continue
- path.append(line) # Don't check for existence!
- f.close()
-
- return path
diff --git a/sys/lib/python/plat-aix3/IN.py b/sys/lib/python/plat-aix3/IN.py
deleted file mode 100644
index 2c57362fc..000000000
--- a/sys/lib/python/plat-aix3/IN.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from net/nh.h
-
-# Included from sys/machine.h
-LITTLE_ENDIAN = 1234
-BIG_ENDIAN = 4321
-PDP_ENDIAN = 3412
-BYTE_ORDER = BIG_ENDIAN
-DEFAULT_GPR = 0xDEADBEEF
-MSR_EE = 0x8000
-MSR_PR = 0x4000
-MSR_FP = 0x2000
-MSR_ME = 0x1000
-MSR_FE = 0x0800
-MSR_FE0 = 0x0800
-MSR_SE = 0x0400
-MSR_BE = 0x0200
-MSR_IE = 0x0100
-MSR_FE1 = 0x0100
-MSR_AL = 0x0080
-MSR_IP = 0x0040
-MSR_IR = 0x0020
-MSR_DR = 0x0010
-MSR_PM = 0x0004
-DEFAULT_MSR = (MSR_EE | MSR_ME | MSR_AL | MSR_IR | MSR_DR)
-DEFAULT_USER_MSR = (DEFAULT_MSR | MSR_PR)
-CR_LT = 0x80000000
-CR_GT = 0x40000000
-CR_EQ = 0x20000000
-CR_SO = 0x10000000
-CR_FX = 0x08000000
-CR_FEX = 0x04000000
-CR_VX = 0x02000000
-CR_OX = 0x01000000
-XER_SO = 0x80000000
-XER_OV = 0x40000000
-XER_CA = 0x20000000
-def XER_COMP_BYTE(xer): return ((xer >> 8) & 0x000000FF)
-
-def XER_LENGTH(xer): return (xer & 0x0000007F)
-
-DSISR_IO = 0x80000000
-DSISR_PFT = 0x40000000
-DSISR_LOCK = 0x20000000
-DSISR_FPIO = 0x10000000
-DSISR_PROT = 0x08000000
-DSISR_LOOP = 0x04000000
-DSISR_DRST = 0x04000000
-DSISR_ST = 0x02000000
-DSISR_SEGB = 0x01000000
-DSISR_DABR = 0x00400000
-DSISR_EAR = 0x00100000
-SRR_IS_PFT = 0x40000000
-SRR_IS_ISPEC = 0x20000000
-SRR_IS_IIO = 0x10000000
-SRR_IS_PROT = 0x08000000
-SRR_IS_LOOP = 0x04000000
-SRR_PR_FPEN = 0x00100000
-SRR_PR_INVAL = 0x00080000
-SRR_PR_PRIV = 0x00040000
-SRR_PR_TRAP = 0x00020000
-SRR_PR_IMPRE = 0x00010000
-def ntohl(x): return (x)
-
-def ntohs(x): return (x)
-
-def htonl(x): return (x)
-
-def htons(x): return (x)
-
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_GGP = 3
-IPPROTO_TCP = 6
-IPPROTO_EGP = 8
-IPPROTO_PUP = 12
-IPPROTO_UDP = 17
-IPPROTO_IDP = 22
-IPPROTO_TP = 29
-IPPROTO_LOCAL = 63
-IPPROTO_EON = 80
-IPPROTO_BIP = 0x53
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IPPORT_TIMESERVER = 37
-def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
-
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
-
-def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_ANY = 0x00000000
-INADDR_LOOPBACK = 0x7f000001
-INADDR_BROADCAST = 0xffffffff
-INADDR_NONE = 0xffffffff
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
diff --git a/sys/lib/python/plat-aix3/regen b/sys/lib/python/plat-aix3/regen
deleted file mode 100755
index 58deb202a..000000000
--- a/sys/lib/python/plat-aix3/regen
+++ /dev/null
@@ -1,8 +0,0 @@
-#! /bin/sh
-case `uname -sv` in
-'AIX 3'*) ;;
-*) echo Probably not on an AIX 3 system 1>&2
- exit 1;;
-esac
-set -v
-h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-aix4/IN.py b/sys/lib/python/plat-aix4/IN.py
deleted file mode 100644
index 00f0e1f87..000000000
--- a/sys/lib/python/plat-aix4/IN.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from net/nh.h
-
-# Included from sys/machine.h
-LITTLE_ENDIAN = 1234
-BIG_ENDIAN = 4321
-PDP_ENDIAN = 3412
-BYTE_ORDER = BIG_ENDIAN
-DEFAULT_GPR = 0xDEADBEEF
-MSR_EE = 0x8000
-MSR_PR = 0x4000
-MSR_FP = 0x2000
-MSR_ME = 0x1000
-MSR_FE = 0x0800
-MSR_FE0 = 0x0800
-MSR_SE = 0x0400
-MSR_BE = 0x0200
-MSR_IE = 0x0100
-MSR_FE1 = 0x0100
-MSR_AL = 0x0080
-MSR_IP = 0x0040
-MSR_IR = 0x0020
-MSR_DR = 0x0010
-MSR_PM = 0x0004
-DEFAULT_MSR = (MSR_EE | MSR_ME | MSR_AL | MSR_IR | MSR_DR)
-DEFAULT_USER_MSR = (DEFAULT_MSR | MSR_PR)
-CR_LT = 0x80000000
-CR_GT = 0x40000000
-CR_EQ = 0x20000000
-CR_SO = 0x10000000
-CR_FX = 0x08000000
-CR_FEX = 0x04000000
-CR_VX = 0x02000000
-CR_OX = 0x01000000
-XER_SO = 0x80000000
-XER_OV = 0x40000000
-XER_CA = 0x20000000
-def XER_COMP_BYTE(xer): return ((xer >> 8) & 0x000000FF)
-
-def XER_LENGTH(xer): return (xer & 0x0000007F)
-
-DSISR_IO = 0x80000000
-DSISR_PFT = 0x40000000
-DSISR_LOCK = 0x20000000
-DSISR_FPIO = 0x10000000
-DSISR_PROT = 0x08000000
-DSISR_LOOP = 0x04000000
-DSISR_DRST = 0x04000000
-DSISR_ST = 0x02000000
-DSISR_SEGB = 0x01000000
-DSISR_DABR = 0x00400000
-DSISR_EAR = 0x00100000
-SRR_IS_PFT = 0x40000000
-SRR_IS_ISPEC = 0x20000000
-SRR_IS_IIO = 0x10000000
-SRR_IS_GUARD = 0x10000000
-SRR_IS_PROT = 0x08000000
-SRR_IS_LOOP = 0x04000000
-SRR_PR_FPEN = 0x00100000
-SRR_PR_INVAL = 0x00080000
-SRR_PR_PRIV = 0x00040000
-SRR_PR_TRAP = 0x00020000
-SRR_PR_IMPRE = 0x00010000
-def BUID_7F_SRVAL(raddr): return (0x87F00000 | (((uint)(raddr)) >> 28))
-
-BT_256M = 0x1FFC
-BT_128M = 0x0FFC
-BT_64M = 0x07FC
-BT_32M = 0x03FC
-BT_16M = 0x01FC
-BT_8M = 0x00FC
-BT_4M = 0x007C
-BT_2M = 0x003C
-BT_1M = 0x001C
-BT_512K = 0x000C
-BT_256K = 0x0004
-BT_128K = 0x0000
-BT_NOACCESS = 0x0
-BT_RDONLY = 0x1
-BT_WRITE = 0x2
-BT_VS = 0x2
-BT_VP = 0x1
-def BAT_ESEG(dbatu): return (((uint)(dbatu) >> 28))
-
-MIN_BAT_SIZE = 0x00020000
-MAX_BAT_SIZE = 0x10000000
-def ntohl(x): return (x)
-
-def ntohs(x): return (x)
-
-def htonl(x): return (x)
-
-def htons(x): return (x)
-
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_TCP = 6
-IPPROTO_EGP = 8
-IPPROTO_PUP = 12
-IPPROTO_UDP = 17
-IPPROTO_IDP = 22
-IPPROTO_TP = 29
-IPPROTO_LOCAL = 63
-IPPROTO_EON = 80
-IPPROTO_BIP = 0x53
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IPPORT_TIMESERVER = 37
-def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
-
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-INADDR_UNSPEC_GROUP = 0xe0000000
-INADDR_ALLHOSTS_GROUP = 0xe0000001
-INADDR_MAX_LOCAL_GROUP = 0xe00000ff
-def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
-
-def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_ANY = 0x00000000
-INADDR_BROADCAST = 0xffffffff
-INADDR_LOOPBACK = 0x7f000001
-INADDR_NONE = 0xffffffff
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
diff --git a/sys/lib/python/plat-aix4/regen b/sys/lib/python/plat-aix4/regen
deleted file mode 100755
index 57a71c4ed..000000000
--- a/sys/lib/python/plat-aix4/regen
+++ /dev/null
@@ -1,8 +0,0 @@
-#! /bin/sh
-case `uname -sv` in
-'AIX 4'*) ;;
-*) echo Probably not on an AIX 4 system 1>&2
- exit 1;;
-esac
-set -v
-h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-atheos/IN.py b/sys/lib/python/plat-atheos/IN.py
deleted file mode 100644
index 6588d6511..000000000
--- a/sys/lib/python/plat-atheos/IN.py
+++ /dev/null
@@ -1,944 +0,0 @@
-# Generated by h2py from /include/netinet/in.h
-_NETINET_IN_H = 1
-
-# Included from features.h
-_FEATURES_H = 1
-__USE_ANSI = 1
-__FAVOR_BSD = 1
-_ISOC9X_SOURCE = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 199506L
-_XOPEN_SOURCE = 500
-_XOPEN_SOURCE_EXTENDED = 1
-_LARGEFILE64_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-__USE_ISOC9X = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 2
-_POSIX_C_SOURCE = 199506L
-__USE_POSIX = 1
-__USE_POSIX2 = 1
-__USE_POSIX199309 = 1
-__USE_POSIX199506 = 1
-__USE_XOPEN = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_UNIX98 = 1
-_LARGEFILE_SOURCE = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_LARGEFILE = 1
-__USE_LARGEFILE64 = 1
-__USE_FILE_OFFSET64 = 1
-__USE_MISC = 1
-__USE_BSD = 1
-__USE_SVID = 1
-__USE_GNU = 1
-__USE_REENTRANT = 1
-__STDC_IEC_559__ = 1
-__STDC_IEC_559_COMPLEX__ = 1
-__GNU_LIBRARY__ = 6
-__GLIBC__ = 2
-__GLIBC_MINOR__ = 1
-
-# Included from sys/cdefs.h
-_SYS_CDEFS_H = 1
-def __PMT(args): return args
-
-def __P(args): return args
-
-def __PMT(args): return args
-
-def __P(args): return ()
-
-def __PMT(args): return ()
-
-def __STRING(x): return #x
-
-def __STRING(x): return "x"
-
-def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
-
-def __attribute__(xyz): return
-
-__USE_EXTERN_INLINES = 1
-
-# Included from gnu/stubs.h
-
-# Included from limits.h
-_LIBC_LIMITS_H_ = 1
-
-# Included from bits/posix1_lim.h
-_BITS_POSIX1_LIM_H = 1
-_POSIX_AIO_LISTIO_MAX = 2
-_POSIX_AIO_MAX = 1
-_POSIX_ARG_MAX = 4096
-_POSIX_CHILD_MAX = 6
-_POSIX_DELAYTIMER_MAX = 32
-_POSIX_LINK_MAX = 8
-_POSIX_MAX_CANON = 255
-_POSIX_MAX_INPUT = 255
-_POSIX_MQ_OPEN_MAX = 8
-_POSIX_MQ_PRIO_MAX = 32
-_POSIX_NGROUPS_MAX = 0
-_POSIX_OPEN_MAX = 16
-_POSIX_FD_SETSIZE = _POSIX_OPEN_MAX
-_POSIX_NAME_MAX = 14
-_POSIX_PATH_MAX = 255
-_POSIX_PIPE_BUF = 512
-_POSIX_RTSIG_MAX = 8
-_POSIX_SEM_NSEMS_MAX = 256
-_POSIX_SEM_VALUE_MAX = 32767
-_POSIX_SIGQUEUE_MAX = 32
-_POSIX_SSIZE_MAX = 32767
-_POSIX_STREAM_MAX = 8
-_POSIX_TZNAME_MAX = 3
-_POSIX_QLIMIT = 1
-_POSIX_HIWAT = _POSIX_PIPE_BUF
-_POSIX_UIO_MAXIOV = 16
-_POSIX_TTY_NAME_MAX = 9
-_POSIX_TIMER_MAX = 32
-_POSIX_LOGIN_NAME_MAX = 9
-_POSIX_CLOCKRES_MIN = 20000000
-
-# Included from bits/local_lim.h
-
-# Included from posix/limits.h
-CHAR_BIT = 8
-CHAR_MAX = 127
-CHAR_MIN = (-128)
-INT_MAX = 2147483647
-INT_MIN = (-2147483647-1)
-LONG_MAX = 2147483647L
-LONG_MIN = (-2147483647L-1L)
-SCHAR_MAX = 127
-SCHAR_MIN = (-128)
-SHRT_MAX = 32767
-SHRT_MIN = (-32768)
-UCHAR_MAX = 255
-USHRT_MAX = 65535
-_POSIX_ARG_MAX = 131072
-_POSIX_CHILD_MAX = 4096
-_POSIX_LINK_MAX = 1
-_POSIX_MAX_CANON = 126
-_POSIX_MAX_INPUT = 126
-_POSIX_NAME_MAX = 256
-_POSIX_NGROUPS_MAX = 32
-_POSIX_OPEN_MAX = 256
-_POSIX_PATH_MAX = 255
-_POSIX_PIPE_BUF = 512
-_POSIX_SSIZE_MAX = 2147483647
-_POSIX_STREAM_MAX = 256
-_POSIX_TZNAME_MAX = 5
-NGROUPS_MAX = 32
-ARG_MAX = 131072
-CHILD_MAX = 4096
-OPEN_MAX = 256
-LINK_MAX = 1
-MAX_CANON = 126
-MAX_INPUT = 126
-NAME_MAX = 255
-PATH_MAX = 4096
-PIPE_BUF = 4096
-SSIZE_MAX = 2147483647
-MAXSYMLINKS = 16
-AIO_PRIO_DELTA_MAX = 20
-SSIZE_MAX = INT_MAX
-NGROUPS_MAX = _POSIX_NGROUPS_MAX
-
-# Included from bits/posix2_lim.h
-_BITS_POSIX2_LIM_H = 1
-_POSIX2_BC_BASE_MAX = 99
-_POSIX2_BC_DIM_MAX = 2048
-_POSIX2_BC_SCALE_MAX = 99
-_POSIX2_BC_STRING_MAX = 1000
-_POSIX2_COLL_WEIGHTS_MAX = 255
-_POSIX2_EQUIV_CLASS_MAX = 255
-_POSIX2_EXPR_NEST_MAX = 32
-_POSIX2_LINE_MAX = 2048
-_POSIX2_RE_DUP_MAX = 255
-_POSIX2_CHARCLASS_NAME_MAX = 2048
-BC_BASE_MAX = _POSIX2_BC_BASE_MAX
-BC_DIM_MAX = _POSIX2_BC_DIM_MAX
-BC_SCALE_MAX = _POSIX2_BC_SCALE_MAX
-BC_STRING_MAX = _POSIX2_BC_STRING_MAX
-COLL_WEIGHTS_MAX = _POSIX2_COLL_WEIGHTS_MAX
-EQUIV_CLASS_MAX = _POSIX2_EQUIV_CLASS_MAX
-EXPR_NEST_MAX = _POSIX2_EXPR_NEST_MAX
-LINE_MAX = _POSIX2_LINE_MAX
-RE_DUP_MAX = _POSIX2_RE_DUP_MAX
-CHARCLASS_NAME_MAX = _POSIX2_CHARCLASS_NAME_MAX
-
-# Included from bits/xopen_lim.h
-_XOPEN_LIM_H = 1
-
-# Included from bits/stdio_lim.h
-L_tmpnam = 20
-TMP_MAX = 238328
-FILENAME_MAX = 4096
-L_ctermid = 9
-L_cuserid = 9
-FOPEN_MAX = 256
-STREAM_MAX = FOPEN_MAX
-TZNAME_MAX = _POSIX_TZNAME_MAX
-_XOPEN_IOV_MAX = _POSIX_UIO_MAXIOV
-NL_ARGMAX = _POSIX_ARG_MAX
-NL_LANGMAX = _POSIX2_LINE_MAX
-NL_MSGMAX = INT_MAX
-NL_NMAX = INT_MAX
-NL_SETMAX = INT_MAX
-NL_TEXTMAX = INT_MAX
-NZERO = 20
-MB_LEN_MAX = 6
-_LIMITS_H = 1
-CHAR_BIT = 8
-SCHAR_MIN = (-128)
-SCHAR_MAX = 127
-UCHAR_MAX = 255
-CHAR_MIN = 0
-CHAR_MAX = UCHAR_MAX
-CHAR_MIN = SCHAR_MIN
-CHAR_MAX = SCHAR_MAX
-SHRT_MIN = (-32768)
-SHRT_MAX = 32767
-USHRT_MAX = 65535
-INT_MIN = (-INT_MAX - 1)
-INT_MAX = 2147483647
-UINT_MAX = 4294967295
-LONG_MAX = 9223372036854775807L
-LONG_MAX = 2147483647L
-LONG_MIN = (-LONG_MAX - 1L)
-ULONG_MAX = 4294967295L
-
-# Included from stdint.h
-_STDINT_H = 1
-
-# Included from bits/wordsize.h
-__WORDSIZE = 32
-def __INT64_C(c): return c ## L
-
-def __UINT64_C(c): return c ## UL
-
-def __INT64_C(c): return c ## LL
-
-def __UINT64_C(c): return c ## ULL
-
-INT8_MIN = (-128)
-INT16_MIN = (-32767-1)
-INT32_MIN = (-2147483647-1)
-INT64_MIN = (-__INT64_C(9223372036854775807)-1)
-INT8_MAX = (127)
-INT16_MAX = (32767)
-INT32_MAX = (2147483647)
-INT64_MAX = (__INT64_C(9223372036854775807))
-UINT64_MAX = (__UINT64_C(18446744073709551615))
-INT_LEAST8_MIN = (-128)
-INT_LEAST16_MIN = (-32767-1)
-INT_LEAST32_MIN = (-2147483647-1)
-INT_LEAST64_MIN = (-__INT64_C(9223372036854775807)-1)
-INT_LEAST8_MAX = (127)
-INT_LEAST16_MAX = (32767)
-INT_LEAST32_MAX = (2147483647)
-INT_LEAST64_MAX = (__INT64_C(9223372036854775807))
-UINT_LEAST64_MAX = (__UINT64_C(18446744073709551615))
-INT_FAST8_MIN = (-128)
-INT_FAST16_MIN = (-9223372036854775807L-1)
-INT_FAST32_MIN = (-9223372036854775807L-1)
-INT_FAST16_MIN = (-2147483647-1)
-INT_FAST32_MIN = (-2147483647-1)
-INT_FAST64_MIN = (-__INT64_C(9223372036854775807)-1)
-INT_FAST8_MAX = (127)
-INT_FAST16_MAX = (9223372036854775807L)
-INT_FAST32_MAX = (9223372036854775807L)
-INT_FAST16_MAX = (2147483647)
-INT_FAST32_MAX = (2147483647)
-INT_FAST64_MAX = (__INT64_C(9223372036854775807))
-UINT_FAST64_MAX = (__UINT64_C(18446744073709551615))
-INTPTR_MIN = (-9223372036854775807L-1)
-INTPTR_MAX = (9223372036854775807L)
-INTPTR_MIN = (-2147483647-1)
-INTPTR_MAX = (2147483647)
-INTMAX_MIN = (-__INT64_C(9223372036854775807)-1)
-INTMAX_MAX = (__INT64_C(9223372036854775807))
-UINTMAX_MAX = (__UINT64_C(18446744073709551615))
-PTRDIFF_MIN = (-9223372036854775807L-1)
-PTRDIFF_MAX = (9223372036854775807L)
-PTRDIFF_MIN = (-2147483647-1)
-PTRDIFF_MAX = (2147483647)
-SIG_ATOMIC_MIN = (-2147483647-1)
-SIG_ATOMIC_MAX = (2147483647)
-WCHAR_MIN = (-2147483647-1)
-WCHAR_MAX = (2147483647)
-WINT_MIN = (0)
-def INT8_C(c): return c
-
-def INT16_C(c): return c
-
-def INT32_C(c): return c
-
-def INT64_C(c): return c ## L
-
-def INT64_C(c): return c ## LL
-
-def UINT8_C(c): return c ## U
-
-def UINT16_C(c): return c ## U
-
-def UINT32_C(c): return c ## U
-
-def UINT64_C(c): return c ## UL
-
-def UINT64_C(c): return c ## ULL
-
-def INTMAX_C(c): return c ## L
-
-def UINTMAX_C(c): return c ## UL
-
-def INTMAX_C(c): return c ## LL
-
-def UINTMAX_C(c): return c ## ULL
-
-
-# Included from sys/types.h
-_SYS_TYPES_H = 1
-
-# Included from bits/types.h
-_BITS_TYPES_H = 1
-__FD_SETSIZE = 1024
-def __FDELT(d): return ((d) / __NFDBITS)
-
-
-# Included from bits/pthreadtypes.h
-
-# Included from time.h
-_TIME_H = 1
-
-# Included from bits/time.h
-
-# Included from posix/time.h
-
-# Included from posix/types.h
-MAXHOSTNAMELEN = 64
-FD_SETSIZE = 1024
-CLOCKS_PER_SEC = 1000000
-_BITS_TIME_H = 1
-CLOCKS_PER_SEC = 1000000
-CLK_TCK = 100
-_STRUCT_TIMEVAL = 1
-CLK_TCK = CLOCKS_PER_SEC
-__clock_t_defined = 1
-__time_t_defined = 1
-__timespec_defined = 1
-def __isleap(year): return \
-
-__BIT_TYPES_DEFINED__ = 1
-
-# Included from endian.h
-_ENDIAN_H = 1
-__LITTLE_ENDIAN = 1234
-__BIG_ENDIAN = 4321
-__PDP_ENDIAN = 3412
-
-# Included from bits/endian.h
-__BYTE_ORDER = __LITTLE_ENDIAN
-__FLOAT_WORD_ORDER = __BYTE_ORDER
-LITTLE_ENDIAN = __LITTLE_ENDIAN
-BIG_ENDIAN = __BIG_ENDIAN
-PDP_ENDIAN = __PDP_ENDIAN
-BYTE_ORDER = __BYTE_ORDER
-
-# Included from sys/select.h
-_SYS_SELECT_H = 1
-
-# Included from bits/select.h
-def __FD_ZERO(fdsp): return \
-
-def __FD_ZERO(set): return \
-
-
-# Included from bits/sigset.h
-_SIGSET_H_types = 1
-_SIGSET_H_fns = 1
-def __sigmask(sig): return \
-
-def __sigemptyset(set): return \
-
-def __sigfillset(set): return \
-
-def __sigisemptyset(set): return \
-
-FD_SETSIZE = __FD_SETSIZE
-def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
-
-
-# Included from sys/sysmacros.h
-_SYS_SYSMACROS_H = 1
-def major(dev): return ( (( (dev) >> 8) & 0xff))
-
-def minor(dev): return ( ((dev) & 0xff))
-
-
-# Included from bits/socket.h
-PF_UNSPEC = 0
-PF_LOCAL = 1
-PF_UNIX = PF_LOCAL
-PF_FILE = PF_LOCAL
-PF_INET = 2
-PF_AX25 = 3
-PF_IPX = 4
-PF_APPLETALK = 5
-PF_NETROM = 6
-PF_BRIDGE = 7
-PF_ATMPVC = 8
-PF_X25 = 9
-PF_INET6 = 10
-PF_ROSE = 11
-PF_DECnet = 12
-PF_NETBEUI = 13
-PF_SECURITY = 14
-PF_KEY = 15
-PF_NETLINK = 16
-PF_ROUTE = PF_NETLINK
-PF_PACKET = 17
-PF_ASH = 18
-PF_ECONET = 19
-PF_ATMSVC = 20
-PF_SNA = 22
-PF_IRDA = 23
-PF_MAX = 32
-AF_UNSPEC = PF_UNSPEC
-AF_LOCAL = PF_LOCAL
-AF_UNIX = PF_UNIX
-AF_FILE = PF_FILE
-AF_INET = PF_INET
-AF_AX25 = PF_AX25
-AF_IPX = PF_IPX
-AF_APPLETALK = PF_APPLETALK
-AF_NETROM = PF_NETROM
-AF_BRIDGE = PF_BRIDGE
-AF_ATMPVC = PF_ATMPVC
-AF_X25 = PF_X25
-AF_INET6 = PF_INET6
-AF_ROSE = PF_ROSE
-AF_DECnet = PF_DECnet
-AF_NETBEUI = PF_NETBEUI
-AF_SECURITY = PF_SECURITY
-AF_KEY = PF_KEY
-AF_NETLINK = PF_NETLINK
-AF_ROUTE = PF_ROUTE
-AF_PACKET = PF_PACKET
-AF_ASH = PF_ASH
-AF_ECONET = PF_ECONET
-AF_ATMSVC = PF_ATMSVC
-AF_SNA = PF_SNA
-AF_IRDA = PF_IRDA
-AF_MAX = PF_MAX
-SOL_RAW = 255
-SOL_DECNET = 261
-SOL_X25 = 262
-SOL_PACKET = 263
-SOL_ATM = 264
-SOL_AAL = 265
-SOL_IRDA = 266
-SOMAXCONN = 128
-
-# Included from bits/sockaddr.h
-_BITS_SOCKADDR_H = 1
-def __SOCKADDR_COMMON(sa_prefix): return \
-
-_SS_SIZE = 128
-def CMSG_FIRSTHDR(mhdr): return \
-
-
-# Included from atheos/socket.h
-
-# Included from atheos/types.h
-OS_NAME_LENGTH = 64
-TRUE = 1
-FALSE = 0
-
-# Included from atheos/filesystem.h
-
-# Included from atheos/atomic.h
-
-# Included from atheos/typedefs.h
-
-# Included from atheos/fs_attribs.h
-
-# Included from atheos/kernel.h
-
-# Included from atheos/kdebug.h
-
-# Included from atheos/threads.h
-TF_DEADLOCK = 0x0001
-DB_PACKET_SIZE = 128
-DB_PORT_COUNT = 16
-DBP_PRINTK = 0
-DBP_DEBUGGER = 2
-
-# Included from atheos/stdlib.h
-
-# Included from atheos/string.h
-def COMMON(x): return \
-
-def COMMON(x): return \
-
-
-# Included from atheos/schedule.h
-
-# Included from atheos/timer.h
-
-# Included from posix/resource.h
-RUSAGE_SELF = 0
-RUSAGE_CHILDREN = -1
-RLIMIT_CPU = 0
-RLIMIT_FSIZE = 1
-RLIMIT_DATA = 2
-RLIMIT_STACK = 3
-RLIMIT_CORE = 4
-RLIMIT_RSS = 5
-RLIMIT_MEMLOCK = 6
-RLIMIT_NPROC = 7
-RLIMIT_NOFILE = 8
-RLIMIT_AS = 9
-RLIM_NLIMITS = 10
-
-# Included from atheos/v86.h
-
-# Included from atheos/areas.h
-MEMF_REAL = 0x00000002
-MEMF_USER = 0x00000004
-MEMF_BUFFER = 0x00000008
-MEMF_KERNEL = 0x00000010
-MEMF_OKTOFAILHACK = 0x00000020
-MEMF_PRI_MASK = 0x000000ff
-MEMF_NOBLOCK = 0x00000100
-MEMF_CLEAR = 0x00010000
-MEMF_LOCKED = 0x10000000
-PAGE_SHIFT = 12
-PGDIR_SHIFT = 22
-def PAGE_ALIGN(addr): return (((addr)+PAGE_SIZE-1)&PAGE_MASK)
-
-AREA_NO_LOCK = 0
-AREA_LAZY_LOCK = 1
-AREA_FULL_LOCK = 2
-AREA_CONTIGUOUS = 3
-AREA_READ = 0x00000001
-AREA_WRITE = 0x00000002
-AREA_EXEC = 0x00000004
-AREA_FULL_ACCESS = (AREA_READ | AREA_WRITE | AREA_EXEC)
-AREA_KERNEL = 0x00000008
-AREA_UNMAP_PHYS = 0x00000010
-AREA_ANY_ADDRESS = 0x00000000
-AREA_EXACT_ADDRESS = 0x00000100
-AREA_BASE_ADDRESS = 0x00000200
-AREA_CLONE_ADDRESS = 0x00000300
-AREA_ADDR_SPEC_MASK = 0x00000f00
-AREA_TOP_DOWN = 0x00001000
-AREA_REMAPPED = 0x0020
-AREA_SHARED = 0x0040
-AREA_GROWSDOWN = 0x0080
-AREA_FIRST_KERNEL_ADDRESS = 0x00100000
-AREA_LAST_KERNEL_ADDRESS = 0x7fffffff
-AREA_FIRST_USER_ADDRESS = 0x80000000
-AREA_LAST_USER_ADDRESS = 0xffffffff
-MAX_CPU_COUNT = 16
-def kfree(p): return kassertw( __kfree(p) == 0 )
-
-
-# Included from posix/dirent.h
-MAXNAMLEN = NAME_MAX
-MAXNAMLEN = 255
-
-# Included from dirent.h
-_DIRENT_H = 1
-
-# Included from bits/dirent.h
-def _D_ALLOC_NAMLEN(d): return (_D_EXACT_NAMLEN (d) + 1)
-
-def IFTODT(mode): return (((mode) & 0170000) >> 12)
-
-def DTTOIF(dirtype): return ((dirtype) << 12)
-
-def dirfd(dirp): return _DIR_dirfd (dirp)
-
-MAXNAMLEN = NAME_MAX
-MAXNAMLEN = 255
-
-# Included from posix/stat.h
-S_IFMT = 00170000
-S_IFSOCK = 0140000
-S_IFLNK = 0120000
-S_IFREG = 0100000
-S_IFBLK = 0060000
-S_IFDIR = 0040000
-S_IFCHR = 0020000
-S_IFIFO = 0010000
-S_ISUID = 0004000
-S_ISGID = 0002000
-S_ISVTX = 0001000
-def S_ISLNK(m): return (((m) & S_IFMT) == S_IFLNK)
-
-def S_ISREG(m): return (((m) & S_IFMT) == S_IFREG)
-
-def S_ISDIR(m): return (((m) & S_IFMT) == S_IFDIR)
-
-def S_ISCHR(m): return (((m) & S_IFMT) == S_IFCHR)
-
-def S_ISBLK(m): return (((m) & S_IFMT) == S_IFBLK)
-
-def S_ISFIFO(m): return (((m) & S_IFMT) == S_IFIFO)
-
-def S_ISSOCK(m): return (((m) & S_IFMT) == S_IFSOCK)
-
-S_IRWXU = 00700
-S_IRUSR = 00400
-S_IWUSR = 00200
-S_IXUSR = 00100
-S_IRWXG = 00070
-S_IRGRP = 00040
-S_IWGRP = 00020
-S_IXGRP = 00010
-S_IRWXO = 00007
-S_IROTH = 00004
-S_IWOTH = 00002
-S_IXOTH = 00001
-S_IRWXUGO = (S_IRWXU|S_IRWXG|S_IRWXO)
-S_IALLUGO = (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
-S_IRUGO = (S_IRUSR|S_IRGRP|S_IROTH)
-S_IWUGO = (S_IWUSR|S_IWGRP|S_IWOTH)
-S_IXUGO = (S_IXUSR|S_IXGRP|S_IXOTH)
-_STAT_VER_KERNEL = 0
-
-# Included from posix/fcntl.h
-O_ACCMODE = 0003
-O_RWMASK = O_ACCMODE
-O_RDONLY = 00
-O_WRONLY = 01
-O_RDWR = 02
-O_CREAT = 0100
-O_EXCL = 0200
-O_NOCTTY = 0400
-O_TRUNC = 01000
-O_APPEND = 02000
-O_NONBLOCK = 04000
-O_NDELAY = O_NONBLOCK
-O_SYNC = 010000
-O_FSYNC = O_SYNC
-O_ASYNC = 020000
-FASYNC = O_ASYNC
-O_DIRECTORY = 040000
-O_NOTRAVERSE = 0100000
-O_NOFOLLOW = O_NOTRAVERSE
-F_DUPFD = 0
-F_GETFD = 1
-F_SETFD = 2
-F_GETFL = 3
-F_SETFL = 4
-F_GETLK = 5
-F_SETLK = 6
-F_SETLKW = 7
-F_SETOWN = 8
-F_GETOWN = 9
-F_SETSIG = 10
-F_GETSIG = 11
-F_COPYFD = 12
-FD_CLOEXEC = 1
-F_RDLCK = 0
-F_WRLCK = 1
-F_UNLCK = 2
-F_EXLCK = 4
-F_SHLCK = 8
-LOCK_SH = 1
-LOCK_EX = 2
-LOCK_NB = 4
-LOCK_UN = 8
-
-# Included from posix/uio.h
-UIO_FASTIOV = 8
-UIO_MAXIOV = 1024
-MNTF_READONLY = 0x0001
-FS_IS_READONLY = 0x00000001
-FS_IS_REMOVABLE = 0x00000002
-FS_IS_PERSISTENT = 0x00000004
-FS_IS_SHARED = 0x00000008
-FS_IS_BLOCKBASED = 0x00000010
-FS_CAN_MOUNT = 0x00000020
-FS_HAS_MIME = 0x00010000
-FS_HAS_ATTR = 0x00020000
-FS_HAS_QUERY = 0x00040000
-FSINFO_VERSION = 1
-WSTAT_MODE = 0x0001
-WSTAT_UID = 0x0002
-WSTAT_GID = 0x0004
-WSTAT_SIZE = 0x0008
-WSTAT_ATIME = 0x0010
-WSTAT_MTIME = 0x0020
-WSTAT_CTIME = 0x0040
-WFSSTAT_NAME = 0x0001
-FSDRIVER_API_VERSION = 1
-
-# Included from net/nettypes.h
-IP_ADR_LEN = 4
-INADDR_ANY = 0x00000000
-INADDR_BROADCAST = 0xffffffff
-INADDR_LOOPBACK = 0x7f000001
-def CMSG_ALIGN(len): return ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )
-
-PROT_SOCK = 1024
-SHUTDOWN_MASK = 3
-RCV_SHUTDOWN = 1
-SEND_SHUTDOWN = 2
-SOCK_STREAM = 1
-SOCK_DGRAM = 2
-SOCK_RAW = 3
-SOCK_RDM = 4
-SOCK_SEQPACKET = 5
-SOCK_PACKET = 10
-PF_UNSPEC = 0
-PF_LOCAL = 1
-PF_UNIX = PF_LOCAL
-PF_FILE = PF_LOCAL
-PF_INET = 2
-PF_AX25 = 3
-PF_IPX = 4
-PF_APPLETALK = 5
-PF_NETROM = 6
-PF_BRIDGE = 7
-PF_ATMPVC = 8
-PF_X25 = 9
-PF_INET6 = 10
-PF_ROSE = 11
-PF_DECnet = 12
-PF_NETBEUI = 13
-PF_SECURITY = 14
-PF_KEY = 15
-PF_NETLINK = 16
-PF_ROUTE = PF_NETLINK
-PF_PACKET = 17
-PF_ASH = 18
-PF_ECONET = 19
-PF_ATMSVC = 20
-PF_SNA = 22
-PF_IRDA = 23
-PF_MAX = 32
-AF_UNSPEC = PF_UNSPEC
-AF_LOCAL = PF_LOCAL
-AF_UNIX = PF_UNIX
-AF_FILE = PF_FILE
-AF_INET = PF_INET
-AF_AX25 = PF_AX25
-AF_IPX = PF_IPX
-AF_APPLETALK = PF_APPLETALK
-AF_NETROM = PF_NETROM
-AF_BRIDGE = PF_BRIDGE
-AF_ATMPVC = PF_ATMPVC
-AF_X25 = PF_X25
-AF_INET6 = PF_INET6
-AF_ROSE = PF_ROSE
-AF_DECnet = PF_DECnet
-AF_NETBEUI = PF_NETBEUI
-AF_SECURITY = PF_SECURITY
-AF_KEY = PF_KEY
-AF_NETLINK = PF_NETLINK
-AF_ROUTE = PF_ROUTE
-AF_PACKET = PF_PACKET
-AF_ASH = PF_ASH
-AF_ECONET = PF_ECONET
-AF_ATMSVC = PF_ATMSVC
-AF_SNA = PF_SNA
-AF_IRDA = PF_IRDA
-AF_MAX = PF_MAX
-PF_UNIX = 1
-AF_UNIX = PF_UNIX
-PF_INET = 2
-AF_INET = PF_INET
-SOMAXCONN = 128
-MSG_OOB = 1
-MSG_PEEK = 2
-MSG_DONTROUTE = 4
-MSG_PROXY = 16
-SOL_SOCKET = 1
-SO_DEBUG = 1
-SO_REUSEADDR = 2
-SO_TYPE = 3
-SO_ERROR = 4
-SO_DONTROUTE = 5
-SO_BROADCAST = 6
-SO_SNDBUF = 7
-SO_RCVBUF = 8
-SO_KEEPALIVE = 9
-SO_OOBINLINE = 10
-SO_NO_CHECK = 11
-SO_PRIORITY = 12
-SO_LINGER = 13
-SO_BSDCOMPAT = 14
-SOL_IP = 0
-SOL_IPX = 256
-SOL_AX25 = 257
-SOL_ATALK = 258
-SOL_NETROM = 259
-SOL_TCP = 6
-SOL_UDP = 17
-IP_TOS = 1
-IPTOS_LOWDELAY = 0x10
-IPTOS_THROUGHPUT = 0x08
-IPTOS_RELIABILITY = 0x04
-IPTOS_MINCOST = 0x02
-IP_TTL = 2
-IP_HDRINCL = 3
-IP_OPTIONS = 4
-IP_MULTICAST_IF = 32
-IP_MULTICAST_TTL = 33
-IP_MULTICAST_LOOP = 34
-IP_ADD_MEMBERSHIP = 35
-IP_DROP_MEMBERSHIP = 36
-TCP_NODELAY = 0x01
-TCP_MAXSEG = 0x02
-def IN_CLASSA(a): return ((( (a)) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = (0xffffffff & ~IN_CLASSA_NET)
-IN_CLASSA_MAX = 128
-def IN_CLASSB(a): return ((( (a)) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = (0xffffffff & ~IN_CLASSB_NET)
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(a): return ((( (a)) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = (0xffffffff & ~IN_CLASSC_NET)
-def IN_CLASSD(a): return ((( (a)) & 0xf0000000) == 0xe0000000)
-
-def IN_MULTICAST(a): return IN_CLASSD(a)
-
-def IN_EXPERIMENTAL(a): return ((( (a)) & 0xe0000000) == 0xe0000000)
-
-def IN_BADCLASS(a): return ((( (a)) & 0xf0000000) == 0xf0000000)
-
-INADDR_ANY = ( 0x00000000)
-INADDR_BROADCAST = ( 0xffffffff)
-INADDR_NONE = ( 0xffffffff)
-IN_LOOPBACKNET = 127
-INADDR_LOOPBACK = ( 0x7f000001)
-INADDR_UNSPEC_GROUP = ( 0xe0000000)
-INADDR_ALLHOSTS_GROUP = ( 0xe0000001)
-INADDR_ALLRTRS_GROUP = ( 0xe0000002)
-INADDR_MAX_LOCAL_GROUP = ( 0xe00000ff)
-INET_ADDRSTRLEN = 16
-INET6_ADDRSTRLEN = 46
-
-# Included from bits/in.h
-IP_TOS = 1
-IP_TTL = 2
-IP_HDRINCL = 3
-IP_OPTIONS = 4
-IP_ROUTER_ALERT = 5
-IP_RECVOPTS = 6
-IP_RETOPTS = 7
-IP_PKTINFO = 8
-IP_PKTOPTIONS = 9
-IP_PMTUDISC = 10
-IP_MTU_DISCOVER = 10
-IP_RECVERR = 11
-IP_RECVTTL = 12
-IP_RECVTOS = 13
-IP_MULTICAST_IF = 32
-IP_MULTICAST_TTL = 33
-IP_MULTICAST_LOOP = 34
-IP_ADD_MEMBERSHIP = 35
-IP_DROP_MEMBERSHIP = 36
-IP_RECVRETOPTS = IP_RETOPTS
-IP_PMTUDISC_DONT = 0
-IP_PMTUDISC_WANT = 1
-IP_PMTUDISC_DO = 2
-SOL_IP = 0
-SOL_SOCKET = 1
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IPV6_ADDRFORM = 1
-IPV6_PKTINFO = 2
-IPV6_HOPOPTS = 3
-IPV6_DSTOPTS = 4
-IPV6_RXSRCRT = 5
-IPV6_PKTOPTIONS = 6
-IPV6_CHECKSUM = 7
-IPV6_HOPLIMIT = 8
-IPV6_NEXTHOP = 9
-IPV6_AUTHHDR = 10
-IPV6_UNICAST_HOPS = 16
-IPV6_MULTICAST_IF = 17
-IPV6_MULTICAST_HOPS = 18
-IPV6_MULTICAST_LOOP = 19
-IPV6_ADD_MEMBERSHIP = 20
-IPV6_DROP_MEMBERSHIP = 21
-IPV6_ROUTER_ALERT = 22
-SCM_SRCRT = IPV6_RXSRCRT
-IPV6_RXHOPOPTS = IPV6_HOPOPTS
-IPV6_RXDSTOPTS = IPV6_DSTOPTS
-IPV6_PMTUDISC_DONT = 0
-IPV6_PMTUDISC_WANT = 1
-IPV6_PMTUDISC_DO = 2
-SOL_IPV6 = 41
-SOL_ICMPV6 = 58
-
-# Included from bits/byteswap.h
-def __bswap_constant_16(x): return \
-
-def __bswap_16(x): return \
-
-def __bswap_16(x): return __bswap_constant_16 (x)
-
-def __bswap_constant_32(x): return \
-
-def __bswap_32(x): return \
-
-def __bswap_32(x): return \
-
-def __bswap_32(x): return __bswap_constant_32 (x)
-
-def __bswap_64(x): return \
-
-def ntohl(x): return (x)
-
-def ntohs(x): return (x)
-
-def htonl(x): return (x)
-
-def htons(x): return (x)
-
-def ntohl(x): return __bswap_32 (x)
-
-def ntohs(x): return __bswap_16 (x)
-
-def htonl(x): return __bswap_32 (x)
-
-def htons(x): return __bswap_16 (x)
-
-def IN6_IS_ADDR_UNSPECIFIED(a): return \
-
-def IN6_IS_ADDR_LOOPBACK(a): return \
-
-def IN6_IS_ADDR_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_V4MAPPED(a): return \
-
-def IN6_IS_ADDR_V4COMPAT(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return
diff --git a/sys/lib/python/plat-atheos/TYPES.py b/sys/lib/python/plat-atheos/TYPES.py
deleted file mode 100644
index 314ca736a..000000000
--- a/sys/lib/python/plat-atheos/TYPES.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Generated by h2py from /include/sys/types.h
-_SYS_TYPES_H = 1
-
-# Included from features.h
-_FEATURES_H = 1
-__USE_ANSI = 1
-__FAVOR_BSD = 1
-_ISOC9X_SOURCE = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 199506L
-_XOPEN_SOURCE = 500
-_XOPEN_SOURCE_EXTENDED = 1
-_LARGEFILE64_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-__USE_ISOC9X = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 2
-_POSIX_C_SOURCE = 199506L
-__USE_POSIX = 1
-__USE_POSIX2 = 1
-__USE_POSIX199309 = 1
-__USE_POSIX199506 = 1
-__USE_XOPEN = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_UNIX98 = 1
-_LARGEFILE_SOURCE = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_LARGEFILE = 1
-__USE_LARGEFILE64 = 1
-__USE_FILE_OFFSET64 = 1
-__USE_MISC = 1
-__USE_BSD = 1
-__USE_SVID = 1
-__USE_GNU = 1
-__USE_REENTRANT = 1
-__STDC_IEC_559__ = 1
-__STDC_IEC_559_COMPLEX__ = 1
-__GNU_LIBRARY__ = 6
-__GLIBC__ = 2
-__GLIBC_MINOR__ = 1
-
-# Included from sys/cdefs.h
-_SYS_CDEFS_H = 1
-def __PMT(args): return args
-
-def __P(args): return args
-
-def __PMT(args): return args
-
-def __P(args): return ()
-
-def __PMT(args): return ()
-
-def __STRING(x): return #x
-
-def __STRING(x): return "x"
-
-def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
-
-def __attribute__(xyz): return
-
-__USE_EXTERN_INLINES = 1
-
-# Included from gnu/stubs.h
-
-# Included from bits/types.h
-_BITS_TYPES_H = 1
-__FD_SETSIZE = 1024
-def __FDELT(d): return ((d) / __NFDBITS)
-
-
-# Included from bits/pthreadtypes.h
-
-# Included from time.h
-_TIME_H = 1
-
-# Included from bits/time.h
-
-# Included from posix/time.h
-
-# Included from posix/types.h
-MAXHOSTNAMELEN = 64
-FD_SETSIZE = 1024
-CLOCKS_PER_SEC = 1000000
-_BITS_TIME_H = 1
-CLOCKS_PER_SEC = 1000000
-CLK_TCK = 100
-_STRUCT_TIMEVAL = 1
-CLK_TCK = CLOCKS_PER_SEC
-__clock_t_defined = 1
-__time_t_defined = 1
-__timespec_defined = 1
-def __isleap(year): return \
-
-__BIT_TYPES_DEFINED__ = 1
-
-# Included from endian.h
-_ENDIAN_H = 1
-__LITTLE_ENDIAN = 1234
-__BIG_ENDIAN = 4321
-__PDP_ENDIAN = 3412
-
-# Included from bits/endian.h
-__BYTE_ORDER = __LITTLE_ENDIAN
-__FLOAT_WORD_ORDER = __BYTE_ORDER
-LITTLE_ENDIAN = __LITTLE_ENDIAN
-BIG_ENDIAN = __BIG_ENDIAN
-PDP_ENDIAN = __PDP_ENDIAN
-BYTE_ORDER = __BYTE_ORDER
-
-# Included from sys/select.h
-_SYS_SELECT_H = 1
-
-# Included from bits/select.h
-def __FD_ZERO(fdsp): return \
-
-def __FD_ZERO(set): return \
-
-
-# Included from bits/sigset.h
-_SIGSET_H_types = 1
-_SIGSET_H_fns = 1
-def __sigmask(sig): return \
-
-def __sigemptyset(set): return \
-
-def __sigfillset(set): return \
-
-def __sigisemptyset(set): return \
-
-FD_SETSIZE = __FD_SETSIZE
-def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
-
-
-# Included from sys/sysmacros.h
-_SYS_SYSMACROS_H = 1
-def major(dev): return ( (( (dev) >> 8) & 0xff))
-
-def minor(dev): return ( ((dev) & 0xff))
diff --git a/sys/lib/python/plat-atheos/regen b/sys/lib/python/plat-atheos/regen
deleted file mode 100644
index 7c002c367..000000000
--- a/sys/lib/python/plat-atheos/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-python$EXE ../../Tools/scripts/h2py.py -i '\(u_long\)' -i '\(uint32_t\)' -i '\(int\)' -i '\(unsigned int\)' /include/netinet/in.h /include/sys/types.h
diff --git a/sys/lib/python/plat-beos5/IN.py b/sys/lib/python/plat-beos5/IN.py
deleted file mode 100644
index 362cb41fa..000000000
--- a/sys/lib/python/plat-beos5/IN.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Generated by h2py from /boot/develop/headers/be/net/netinet/in.h
-
-# Included from socket.h
-
-# Included from BeBuild.h
-B_BEOS_VERSION_4 = 0x0400
-B_BEOS_VERSION_4_5 = 0x0450
-B_BEOS_VERSION_5 = 0x0500
-B_BEOS_VERSION = B_BEOS_VERSION_5
-B_BEOS_VERSION_MAUI = B_BEOS_VERSION_5
-_PR2_COMPATIBLE_ = 1
-_PR3_COMPATIBLE_ = 1
-_R4_COMPATIBLE_ = 1
-_R4_5_COMPATIBLE_ = 1
-_PR2_COMPATIBLE_ = 0
-_PR3_COMPATIBLE_ = 0
-_R4_COMPATIBLE_ = 1
-_R4_5_COMPATIBLE_ = 1
-def _UNUSED(x): return x
-
-
-# Included from sys/types.h
-
-# Included from time.h
-
-# Included from be_setup.h
-def __std(ref): return ref
-
-__be_os = 2
-__dest_os = __be_os
-__MSL__ = 0x4011
-__GLIBC__ = -2
-__GLIBC_MINOR__ = 1
-
-# Included from null.h
-NULL = (0)
-NULL = 0L
-
-# Included from size_t.h
-
-# Included from stddef.h
-
-# Included from wchar_t.h
-CLOCKS_PER_SEC = 1000
-CLK_TCK = CLOCKS_PER_SEC
-MAX_TIMESTR = 70
-
-# Included from sys/time.h
-
-# Included from ByteOrder.h
-
-# Included from endian.h
-__LITTLE_ENDIAN = 1234
-LITTLE_ENDIAN = __LITTLE_ENDIAN
-__BYTE_ORDER = __LITTLE_ENDIAN
-BYTE_ORDER = __BYTE_ORDER
-__BIG_ENDIAN = 0
-BIG_ENDIAN = 0
-__BIG_ENDIAN = 4321
-BIG_ENDIAN = __BIG_ENDIAN
-__BYTE_ORDER = __BIG_ENDIAN
-BYTE_ORDER = __BYTE_ORDER
-__LITTLE_ENDIAN = 0
-LITTLE_ENDIAN = 0
-__PDP_ENDIAN = 3412
-PDP_ENDIAN = __PDP_ENDIAN
-
-# Included from SupportDefs.h
-
-# Included from Errors.h
-
-# Included from limits.h
-
-# Included from float.h
-FLT_ROUNDS = 1
-FLT_RADIX = 2
-FLT_MANT_DIG = 24
-FLT_DIG = 6
-FLT_MIN_EXP = (-125)
-FLT_MIN_10_EXP = (-37)
-FLT_MAX_EXP = 128
-FLT_MAX_10_EXP = 38
-DBL_MANT_DIG = 53
-DBL_DIG = 15
-DBL_MIN_EXP = (-1021)
-DBL_MIN_10_EXP = (-308)
-DBL_MAX_EXP = 1024
-DBL_MAX_10_EXP = 308
-LDBL_MANT_DIG = DBL_MANT_DIG
-LDBL_DIG = DBL_DIG
-LDBL_MIN_EXP = DBL_MIN_EXP
-LDBL_MIN_10_EXP = DBL_MIN_10_EXP
-LDBL_MAX_EXP = DBL_MAX_EXP
-LDBL_MAX_10_EXP = DBL_MAX_10_EXP
-CHAR_BIT = (8)
-SCHAR_MIN = (-127-1)
-SCHAR_MAX = (127)
-CHAR_MIN = SCHAR_MIN
-CHAR_MAX = SCHAR_MAX
-MB_LEN_MAX = (1)
-SHRT_MIN = (-32767-1)
-SHRT_MAX = (32767)
-LONG_MIN = (-2147483647L-1)
-LONG_MAX = (2147483647L)
-INT_MIN = LONG_MIN
-INT_MAX = LONG_MAX
-ARG_MAX = (32768)
-ATEXIT_MAX = (32)
-CHILD_MAX = (1024)
-IOV_MAX = (256)
-FILESIZEBITS = (64)
-LINK_MAX = (1)
-LOGIN_NAME_MAX = (32)
-MAX_CANON = (255)
-MAX_INPUT = (255)
-NAME_MAX = (256)
-NGROUPS_MAX = (32)
-OPEN_MAX = (128)
-PATH_MAX = (1024)
-PIPE_MAX = (512)
-SSIZE_MAX = (2147483647L)
-TTY_NAME_MAX = (256)
-TZNAME_MAX = (32)
-SYMLINKS_MAX = (16)
-_POSIX_ARG_MAX = (32768)
-_POSIX_CHILD_MAX = (1024)
-_POSIX_LINK_MAX = (1)
-_POSIX_LOGIN_NAME_MAX = (9)
-_POSIX_MAX_CANON = (255)
-_POSIX_MAX_INPUT = (255)
-_POSIX_NAME_MAX = (255)
-_POSIX_NGROUPS_MAX = (0)
-_POSIX_OPEN_MAX = (128)
-_POSIX_PATH_MAX = (1024)
-_POSIX_PIPE_BUF = (512)
-_POSIX_SSIZE_MAX = (2147483647L)
-_POSIX_STREAM_MAX = (8)
-_POSIX_TTY_NAME_MAX = (256)
-_POSIX_TZNAME_MAX = (3)
-B_GENERAL_ERROR_BASE = LONG_MIN
-B_OS_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x1000
-B_APP_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x2000
-B_INTERFACE_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x3000
-B_MEDIA_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x4000
-B_TRANSLATION_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x4800
-B_MIDI_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x5000
-B_STORAGE_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x6000
-B_POSIX_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x7000
-B_MAIL_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x8000
-B_PRINT_ERROR_BASE = B_GENERAL_ERROR_BASE + 0x9000
-B_DEVICE_ERROR_BASE = B_GENERAL_ERROR_BASE + 0xa000
-B_ERRORS_END = (B_GENERAL_ERROR_BASE + 0xffff)
-E2BIG = (B_POSIX_ERROR_BASE + 1)
-ECHILD = (B_POSIX_ERROR_BASE + 2)
-EDEADLK = (B_POSIX_ERROR_BASE + 3)
-EFBIG = (B_POSIX_ERROR_BASE + 4)
-EMLINK = (B_POSIX_ERROR_BASE + 5)
-ENFILE = (B_POSIX_ERROR_BASE + 6)
-ENODEV = (B_POSIX_ERROR_BASE + 7)
-ENOLCK = (B_POSIX_ERROR_BASE + 8)
-ENOSYS = (B_POSIX_ERROR_BASE + 9)
-ENOTTY = (B_POSIX_ERROR_BASE + 10)
-ENXIO = (B_POSIX_ERROR_BASE + 11)
-ESPIPE = (B_POSIX_ERROR_BASE + 12)
-ESRCH = (B_POSIX_ERROR_BASE + 13)
-EFPOS = (B_POSIX_ERROR_BASE + 14)
-ESIGPARM = (B_POSIX_ERROR_BASE + 15)
-EDOM = (B_POSIX_ERROR_BASE + 16)
-ERANGE = (B_POSIX_ERROR_BASE + 17)
-EPROTOTYPE = (B_POSIX_ERROR_BASE + 18)
-EPROTONOSUPPORT = (B_POSIX_ERROR_BASE + 19)
-EPFNOSUPPORT = (B_POSIX_ERROR_BASE + 20)
-EAFNOSUPPORT = (B_POSIX_ERROR_BASE + 21)
-EADDRINUSE = (B_POSIX_ERROR_BASE + 22)
-EADDRNOTAVAIL = (B_POSIX_ERROR_BASE + 23)
-ENETDOWN = (B_POSIX_ERROR_BASE + 24)
-ENETUNREACH = (B_POSIX_ERROR_BASE + 25)
-ENETRESET = (B_POSIX_ERROR_BASE + 26)
-ECONNABORTED = (B_POSIX_ERROR_BASE + 27)
-ECONNRESET = (B_POSIX_ERROR_BASE + 28)
-EISCONN = (B_POSIX_ERROR_BASE + 29)
-ENOTCONN = (B_POSIX_ERROR_BASE + 30)
-ESHUTDOWN = (B_POSIX_ERROR_BASE + 31)
-ECONNREFUSED = (B_POSIX_ERROR_BASE + 32)
-EHOSTUNREACH = (B_POSIX_ERROR_BASE + 33)
-ENOPROTOOPT = (B_POSIX_ERROR_BASE + 34)
-ENOBUFS = (B_POSIX_ERROR_BASE + 35)
-EINPROGRESS = (B_POSIX_ERROR_BASE + 36)
-EALREADY = (B_POSIX_ERROR_BASE + 37)
-EILSEQ = (B_POSIX_ERROR_BASE + 38)
-ENOMSG = (B_POSIX_ERROR_BASE + 39)
-ESTALE = (B_POSIX_ERROR_BASE + 40)
-EOVERFLOW = (B_POSIX_ERROR_BASE + 41)
-EMSGSIZE = (B_POSIX_ERROR_BASE + 42)
-EOPNOTSUPP = (B_POSIX_ERROR_BASE + 43)
-ENOTSOCK = (B_POSIX_ERROR_BASE + 44)
-false = 0
-true = 1
-NULL = (0)
-FALSE = 0
-TRUE = 1
-
-# Included from TypeConstants.h
-B_HOST_IS_LENDIAN = 1
-B_HOST_IS_BENDIAN = 0
-def B_HOST_TO_LENDIAN_DOUBLE(arg): return (double)(arg)
-
-def B_HOST_TO_LENDIAN_FLOAT(arg): return (float)(arg)
-
-def B_HOST_TO_LENDIAN_INT64(arg): return (uint64)(arg)
-
-def B_HOST_TO_LENDIAN_INT32(arg): return (uint32)(arg)
-
-def B_HOST_TO_LENDIAN_INT16(arg): return (uint16)(arg)
-
-def B_HOST_TO_BENDIAN_DOUBLE(arg): return __swap_double(arg)
-
-def B_HOST_TO_BENDIAN_FLOAT(arg): return __swap_float(arg)
-
-def B_HOST_TO_BENDIAN_INT64(arg): return __swap_int64(arg)
-
-def B_HOST_TO_BENDIAN_INT32(arg): return __swap_int32(arg)
-
-def B_HOST_TO_BENDIAN_INT16(arg): return __swap_int16(arg)
-
-def B_LENDIAN_TO_HOST_DOUBLE(arg): return (double)(arg)
-
-def B_LENDIAN_TO_HOST_FLOAT(arg): return (float)(arg)
-
-def B_LENDIAN_TO_HOST_INT64(arg): return (uint64)(arg)
-
-def B_LENDIAN_TO_HOST_INT32(arg): return (uint32)(arg)
-
-def B_LENDIAN_TO_HOST_INT16(arg): return (uint16)(arg)
-
-def B_BENDIAN_TO_HOST_DOUBLE(arg): return __swap_double(arg)
-
-def B_BENDIAN_TO_HOST_FLOAT(arg): return __swap_float(arg)
-
-def B_BENDIAN_TO_HOST_INT64(arg): return __swap_int64(arg)
-
-def B_BENDIAN_TO_HOST_INT32(arg): return __swap_int32(arg)
-
-def B_BENDIAN_TO_HOST_INT16(arg): return __swap_int16(arg)
-
-B_HOST_IS_LENDIAN = 0
-B_HOST_IS_BENDIAN = 1
-def B_HOST_TO_LENDIAN_DOUBLE(arg): return __swap_double(arg)
-
-def B_HOST_TO_LENDIAN_FLOAT(arg): return __swap_float(arg)
-
-def B_HOST_TO_LENDIAN_INT64(arg): return __swap_int64(arg)
-
-def B_HOST_TO_LENDIAN_INT32(arg): return __swap_int32(arg)
-
-def B_HOST_TO_LENDIAN_INT16(arg): return __swap_int16(arg)
-
-def B_HOST_TO_BENDIAN_DOUBLE(arg): return (double)(arg)
-
-def B_HOST_TO_BENDIAN_FLOAT(arg): return (float)(arg)
-
-def B_HOST_TO_BENDIAN_INT64(arg): return (uint64)(arg)
-
-def B_HOST_TO_BENDIAN_INT32(arg): return (uint32)(arg)
-
-def B_HOST_TO_BENDIAN_INT16(arg): return (uint16)(arg)
-
-def B_LENDIAN_TO_HOST_DOUBLE(arg): return __swap_double(arg)
-
-def B_LENDIAN_TO_HOST_FLOAT(arg): return __swap_float(arg)
-
-def B_LENDIAN_TO_HOST_INT64(arg): return __swap_int64(arg)
-
-def B_LENDIAN_TO_HOST_INT32(arg): return __swap_int32(arg)
-
-def B_LENDIAN_TO_HOST_INT16(arg): return __swap_int16(arg)
-
-def B_BENDIAN_TO_HOST_DOUBLE(arg): return (double)(arg)
-
-def B_BENDIAN_TO_HOST_FLOAT(arg): return (float)(arg)
-
-def B_BENDIAN_TO_HOST_INT64(arg): return (uint64)(arg)
-
-def B_BENDIAN_TO_HOST_INT32(arg): return (uint32)(arg)
-
-def B_BENDIAN_TO_HOST_INT16(arg): return (uint16)(arg)
-
-def B_SWAP_DOUBLE(arg): return __swap_double(arg)
-
-def B_SWAP_FLOAT(arg): return __swap_float(arg)
-
-def B_SWAP_INT64(arg): return __swap_int64(arg)
-
-def B_SWAP_INT32(arg): return __swap_int32(arg)
-
-def B_SWAP_INT16(arg): return __swap_int16(arg)
-
-def htonl(x): return B_HOST_TO_BENDIAN_INT32(x)
-
-def ntohl(x): return B_BENDIAN_TO_HOST_INT32(x)
-
-def htons(x): return B_HOST_TO_BENDIAN_INT16(x)
-
-def ntohs(x): return B_BENDIAN_TO_HOST_INT16(x)
-
-AF_INET = 1
-INADDR_ANY = 0x00000000
-INADDR_BROADCAST = 0xffffffff
-INADDR_LOOPBACK = 0x7f000001
-SOL_SOCKET = 1
-SO_DEBUG = 1
-SO_REUSEADDR = 2
-SO_NONBLOCK = 3
-SO_REUSEPORT = 4
-MSG_OOB = 0x1
-SOCK_DGRAM = 1
-SOCK_STREAM = 2
-IPPROTO_UDP = 1
-IPPROTO_TCP = 2
-IPPROTO_ICMP = 3
-B_UDP_MAX_SIZE = (65536 - 1024)
-FD_SETSIZE = 256
-FDSETSIZE = FD_SETSIZE
-NFDBITS = 32
-def _FDMSKNO(fd): return ((fd) / NFDBITS)
-
-def _FDBITNO(fd): return ((fd) % NFDBITS)
diff --git a/sys/lib/python/plat-beos5/regen b/sys/lib/python/plat-beos5/regen
deleted file mode 100755
index c2d2f78b1..000000000
--- a/sys/lib/python/plat-beos5/regen
+++ /dev/null
@@ -1,7 +0,0 @@
-#! /bin/sh
-
-H2PY=../../Tools/scripts/h2py.py
-HEADERS=/boot/develop/headers
-
-set -v
-python $H2PY -i '(u_long)' $HEADERS/be/net/netinet/in.h
diff --git a/sys/lib/python/plat-darwin/IN.py b/sys/lib/python/plat-darwin/IN.py
deleted file mode 100644
index 9d7408654..000000000
--- a/sys/lib/python/plat-darwin/IN.py
+++ /dev/null
@@ -1,357 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from sys/appleapiopts.h
-IPPROTO_IP = 0
-IPPROTO_HOPOPTS = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPV4 = 4
-IPPROTO_IPIP = IPPROTO_IPV4
-IPPROTO_TCP = 6
-IPPROTO_ST = 7
-IPPROTO_EGP = 8
-IPPROTO_PIGP = 9
-IPPROTO_RCCMON = 10
-IPPROTO_NVPII = 11
-IPPROTO_PUP = 12
-IPPROTO_ARGUS = 13
-IPPROTO_EMCON = 14
-IPPROTO_XNET = 15
-IPPROTO_CHAOS = 16
-IPPROTO_UDP = 17
-IPPROTO_MUX = 18
-IPPROTO_MEAS = 19
-IPPROTO_HMP = 20
-IPPROTO_PRM = 21
-IPPROTO_IDP = 22
-IPPROTO_TRUNK1 = 23
-IPPROTO_TRUNK2 = 24
-IPPROTO_LEAF1 = 25
-IPPROTO_LEAF2 = 26
-IPPROTO_RDP = 27
-IPPROTO_IRTP = 28
-IPPROTO_TP = 29
-IPPROTO_BLT = 30
-IPPROTO_NSP = 31
-IPPROTO_INP = 32
-IPPROTO_SEP = 33
-IPPROTO_3PC = 34
-IPPROTO_IDPR = 35
-IPPROTO_XTP = 36
-IPPROTO_DDP = 37
-IPPROTO_CMTP = 38
-IPPROTO_TPXX = 39
-IPPROTO_IL = 40
-IPPROTO_IPV6 = 41
-IPPROTO_SDRP = 42
-IPPROTO_ROUTING = 43
-IPPROTO_FRAGMENT = 44
-IPPROTO_IDRP = 45
-IPPROTO_RSVP = 46
-IPPROTO_GRE = 47
-IPPROTO_MHRP = 48
-IPPROTO_BHA = 49
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_INLSP = 52
-IPPROTO_SWIPE = 53
-IPPROTO_NHRP = 54
-IPPROTO_ICMPV6 = 58
-IPPROTO_NONE = 59
-IPPROTO_DSTOPTS = 60
-IPPROTO_AHIP = 61
-IPPROTO_CFTP = 62
-IPPROTO_HELLO = 63
-IPPROTO_SATEXPAK = 64
-IPPROTO_KRYPTOLAN = 65
-IPPROTO_RVD = 66
-IPPROTO_IPPC = 67
-IPPROTO_ADFS = 68
-IPPROTO_SATMON = 69
-IPPROTO_VISA = 70
-IPPROTO_IPCV = 71
-IPPROTO_CPNX = 72
-IPPROTO_CPHB = 73
-IPPROTO_WSN = 74
-IPPROTO_PVP = 75
-IPPROTO_BRSATMON = 76
-IPPROTO_ND = 77
-IPPROTO_WBMON = 78
-IPPROTO_WBEXPAK = 79
-IPPROTO_EON = 80
-IPPROTO_VMTP = 81
-IPPROTO_SVMTP = 82
-IPPROTO_VINES = 83
-IPPROTO_TTP = 84
-IPPROTO_IGP = 85
-IPPROTO_DGP = 86
-IPPROTO_TCF = 87
-IPPROTO_IGRP = 88
-IPPROTO_OSPFIGP = 89
-IPPROTO_SRPC = 90
-IPPROTO_LARP = 91
-IPPROTO_MTP = 92
-IPPROTO_AX25 = 93
-IPPROTO_IPEIP = 94
-IPPROTO_MICP = 95
-IPPROTO_SCCSP = 96
-IPPROTO_ETHERIP = 97
-IPPROTO_ENCAP = 98
-IPPROTO_APES = 99
-IPPROTO_GMTP = 100
-IPPROTO_IPCOMP = 108
-IPPROTO_PIM = 103
-IPPROTO_PGM = 113
-IPPROTO_DIVERT = 254
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPROTO_DONE = 257
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IPPORT_HIFIRSTAUTO = 49152
-IPPORT_HILASTAUTO = 65535
-IPPORT_RESERVEDSTART = 600
-def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
-
-def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_NONE = 0xffffffff
-def IN_LINKLOCAL(i): return (((u_int32_t)(i) & IN_CLASSB_NET) == IN_LINKLOCALNETNUM)
-
-IN_LOOPBACKNET = 127
-INET_ADDRSTRLEN = 16
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_MULTICAST_VIF = 14
-IP_RSVP_ON = 15
-IP_RSVP_OFF = 16
-IP_RSVP_VIF_ON = 17
-IP_RSVP_VIF_OFF = 18
-IP_PORTRANGE = 19
-IP_RECVIF = 20
-IP_IPSEC_POLICY = 21
-IP_FAITH = 22
-IP_STRIPHDR = 23
-IP_FW_ADD = 40
-IP_FW_DEL = 41
-IP_FW_FLUSH = 42
-IP_FW_ZERO = 43
-IP_FW_GET = 44
-IP_FW_RESETLOG = 45
-IP_OLD_FW_ADD = 50
-IP_OLD_FW_DEL = 51
-IP_OLD_FW_FLUSH = 52
-IP_OLD_FW_ZERO = 53
-IP_OLD_FW_GET = 54
-IP_NAT__XXX = 55
-IP_OLD_FW_RESETLOG = 56
-IP_DUMMYNET_CONFIGURE = 60
-IP_DUMMYNET_DEL = 61
-IP_DUMMYNET_FLUSH = 62
-IP_DUMMYNET_GET = 64
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IP_PORTRANGE_DEFAULT = 0
-IP_PORTRANGE_HIGH = 1
-IP_PORTRANGE_LOW = 2
-IPPROTO_MAXID = (IPPROTO_AH + 1)
-IPCTL_FORWARDING = 1
-IPCTL_SENDREDIRECTS = 2
-IPCTL_DEFTTL = 3
-IPCTL_DEFMTU = 4
-IPCTL_RTEXPIRE = 5
-IPCTL_RTMINEXPIRE = 6
-IPCTL_RTMAXCACHE = 7
-IPCTL_SOURCEROUTE = 8
-IPCTL_DIRECTEDBROADCAST = 9
-IPCTL_INTRQMAXLEN = 10
-IPCTL_INTRQDROPS = 11
-IPCTL_STATS = 12
-IPCTL_ACCEPTSOURCEROUTE = 13
-IPCTL_FASTFORWARDING = 14
-IPCTL_KEEPFAITH = 15
-IPCTL_GIF_TTL = 16
-IPCTL_MAXID = 17
-
-# Included from netinet6/in6.h
-__KAME_VERSION = "20010528/apple-darwin"
-IPV6PORT_RESERVED = 1024
-IPV6PORT_ANONMIN = 49152
-IPV6PORT_ANONMAX = 65535
-IPV6PORT_RESERVEDMIN = 600
-IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
-INET6_ADDRSTRLEN = 46
-IPV6_ADDR_INT32_ONE = 1
-IPV6_ADDR_INT32_TWO = 2
-IPV6_ADDR_INT32_MNL = 0xff010000
-IPV6_ADDR_INT32_MLL = 0xff020000
-IPV6_ADDR_INT32_SMP = 0x0000ffff
-IPV6_ADDR_INT16_ULL = 0xfe80
-IPV6_ADDR_INT16_USL = 0xfec0
-IPV6_ADDR_INT16_MLL = 0xff02
-IPV6_ADDR_INT32_ONE = 0x01000000
-IPV6_ADDR_INT32_TWO = 0x02000000
-IPV6_ADDR_INT32_MNL = 0x000001ff
-IPV6_ADDR_INT32_MLL = 0x000002ff
-IPV6_ADDR_INT32_SMP = 0xffff0000
-IPV6_ADDR_INT16_ULL = 0x80fe
-IPV6_ADDR_INT16_USL = 0xc0fe
-IPV6_ADDR_INT16_MLL = 0x02ff
-def IN6_IS_ADDR_UNSPECIFIED(a): return \
-
-def IN6_IS_ADDR_LOOPBACK(a): return \
-
-def IN6_IS_ADDR_V4COMPAT(a): return \
-
-def IN6_IS_ADDR_V4MAPPED(a): return \
-
-IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-def IN6_IS_ADDR_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_SCOPE_LINKLOCAL(a): return \
-
-def IFA6_IS_DEPRECATED(a): return \
-
-def IFA6_IS_INVALID(a): return \
-
-IPV6_OPTIONS = 1
-IPV6_RECVOPTS = 5
-IPV6_RECVRETOPTS = 6
-IPV6_RECVDSTADDR = 7
-IPV6_RETOPTS = 8
-IPV6_SOCKOPT_RESERVED1 = 3
-IPV6_UNICAST_HOPS = 4
-IPV6_MULTICAST_IF = 9
-IPV6_MULTICAST_HOPS = 10
-IPV6_MULTICAST_LOOP = 11
-IPV6_JOIN_GROUP = 12
-IPV6_LEAVE_GROUP = 13
-IPV6_PORTRANGE = 14
-ICMP6_FILTER = 18
-IPV6_PKTINFO = 19
-IPV6_HOPLIMIT = 20
-IPV6_NEXTHOP = 21
-IPV6_HOPOPTS = 22
-IPV6_DSTOPTS = 23
-IPV6_RTHDR = 24
-IPV6_PKTOPTIONS = 25
-IPV6_CHECKSUM = 26
-IPV6_V6ONLY = 27
-IPV6_BINDV6ONLY = IPV6_V6ONLY
-IPV6_IPSEC_POLICY = 28
-IPV6_FAITH = 29
-IPV6_FW_ADD = 30
-IPV6_FW_DEL = 31
-IPV6_FW_FLUSH = 32
-IPV6_FW_ZERO = 33
-IPV6_FW_GET = 34
-IPV6_RTHDR_LOOSE = 0
-IPV6_RTHDR_STRICT = 1
-IPV6_RTHDR_TYPE_0 = 0
-IPV6_DEFAULT_MULTICAST_HOPS = 1
-IPV6_DEFAULT_MULTICAST_LOOP = 1
-IPV6_PORTRANGE_DEFAULT = 0
-IPV6_PORTRANGE_HIGH = 1
-IPV6_PORTRANGE_LOW = 2
-IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
-IPV6CTL_FORWARDING = 1
-IPV6CTL_SENDREDIRECTS = 2
-IPV6CTL_DEFHLIM = 3
-IPV6CTL_DEFMTU = 4
-IPV6CTL_FORWSRCRT = 5
-IPV6CTL_STATS = 6
-IPV6CTL_MRTSTATS = 7
-IPV6CTL_MRTPROTO = 8
-IPV6CTL_MAXFRAGPACKETS = 9
-IPV6CTL_SOURCECHECK = 10
-IPV6CTL_SOURCECHECK_LOGINT = 11
-IPV6CTL_ACCEPT_RTADV = 12
-IPV6CTL_KEEPFAITH = 13
-IPV6CTL_LOG_INTERVAL = 14
-IPV6CTL_HDRNESTLIMIT = 15
-IPV6CTL_DAD_COUNT = 16
-IPV6CTL_AUTO_FLOWLABEL = 17
-IPV6CTL_DEFMCASTHLIM = 18
-IPV6CTL_GIF_HLIM = 19
-IPV6CTL_KAME_VERSION = 20
-IPV6CTL_USE_DEPRECATED = 21
-IPV6CTL_RR_PRUNE = 22
-IPV6CTL_MAPPED_ADDR = 23
-IPV6CTL_V6ONLY = 24
-IPV6CTL_RTEXPIRE = 25
-IPV6CTL_RTMINEXPIRE = 26
-IPV6CTL_RTMAXCACHE = 27
-IPV6CTL_USETEMPADDR = 32
-IPV6CTL_TEMPPLTIME = 33
-IPV6CTL_TEMPVLTIME = 34
-IPV6CTL_AUTO_LINKLOCAL = 35
-IPV6CTL_RIP6STATS = 36
-IPV6CTL_MAXID = 37
diff --git a/sys/lib/python/plat-darwin/regen b/sys/lib/python/plat-darwin/regen
deleted file mode 100755
index a20cdc151..000000000
--- a/sys/lib/python/plat-darwin/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-python$EXE ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-freebsd2/IN.py b/sys/lib/python/plat-freebsd2/IN.py
deleted file mode 100644
index 9f7e0177e..000000000
--- a/sys/lib/python/plat-freebsd2/IN.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPIP = 4
-IPPROTO_TCP = 6
-IPPROTO_ST = 7
-IPPROTO_EGP = 8
-IPPROTO_PIGP = 9
-IPPROTO_RCCMON = 10
-IPPROTO_NVPII = 11
-IPPROTO_PUP = 12
-IPPROTO_ARGUS = 13
-IPPROTO_EMCON = 14
-IPPROTO_XNET = 15
-IPPROTO_CHAOS = 16
-IPPROTO_UDP = 17
-IPPROTO_MUX = 18
-IPPROTO_MEAS = 19
-IPPROTO_HMP = 20
-IPPROTO_PRM = 21
-IPPROTO_IDP = 22
-IPPROTO_TRUNK1 = 23
-IPPROTO_TRUNK2 = 24
-IPPROTO_LEAF1 = 25
-IPPROTO_LEAF2 = 26
-IPPROTO_RDP = 27
-IPPROTO_IRTP = 28
-IPPROTO_TP = 29
-IPPROTO_BLT = 30
-IPPROTO_NSP = 31
-IPPROTO_INP = 32
-IPPROTO_SEP = 33
-IPPROTO_3PC = 34
-IPPROTO_IDPR = 35
-IPPROTO_XTP = 36
-IPPROTO_DDP = 37
-IPPROTO_CMTP = 38
-IPPROTO_TPXX = 39
-IPPROTO_IL = 40
-IPPROTO_SIP = 41
-IPPROTO_SDRP = 42
-IPPROTO_SIPSR = 43
-IPPROTO_SIPFRAG = 44
-IPPROTO_IDRP = 45
-IPPROTO_RSVP = 46
-IPPROTO_GRE = 47
-IPPROTO_MHRP = 48
-IPPROTO_BHA = 49
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_INLSP = 52
-IPPROTO_SWIPE = 53
-IPPROTO_NHRP = 54
-IPPROTO_AHIP = 61
-IPPROTO_CFTP = 62
-IPPROTO_HELLO = 63
-IPPROTO_SATEXPAK = 64
-IPPROTO_KRYPTOLAN = 65
-IPPROTO_RVD = 66
-IPPROTO_IPPC = 67
-IPPROTO_ADFS = 68
-IPPROTO_SATMON = 69
-IPPROTO_VISA = 70
-IPPROTO_IPCV = 71
-IPPROTO_CPNX = 72
-IPPROTO_CPHB = 73
-IPPROTO_WSN = 74
-IPPROTO_PVP = 75
-IPPROTO_BRSATMON = 76
-IPPROTO_ND = 77
-IPPROTO_WBMON = 78
-IPPROTO_WBEXPAK = 79
-IPPROTO_EON = 80
-IPPROTO_VMTP = 81
-IPPROTO_SVMTP = 82
-IPPROTO_VINES = 83
-IPPROTO_TTP = 84
-IPPROTO_IGP = 85
-IPPROTO_DGP = 86
-IPPROTO_TCF = 87
-IPPROTO_IGRP = 88
-IPPROTO_OSPFIGP = 89
-IPPROTO_SRPC = 90
-IPPROTO_LARP = 91
-IPPROTO_MTP = 92
-IPPROTO_AX25 = 93
-IPPROTO_IPEIP = 94
-IPPROTO_MICP = 95
-IPPROTO_SCCSP = 96
-IPPROTO_ETHERIP = 97
-IPPROTO_ENCAP = 98
-IPPROTO_APES = 99
-IPPROTO_GMTP = 100
-IPPROTO_DIVERT = 254
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IPPORT_HIFIRSTAUTO = 40000
-IPPORT_HILASTAUTO = 44999
-IPPORT_RESERVEDSTART = 600
-def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_ANY = 0x00000000
-INADDR_BROADCAST = 0xffffffff
-INADDR_NONE = 0xffffffff
-INADDR_UNSPEC_GROUP = 0xe0000000
-INADDR_ALLHOSTS_GROUP = 0xe0000001
-INADDR_ALLRTRS_GROUP = 0xe0000002
-INADDR_MAX_LOCAL_GROUP = 0xe00000ff
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_MULTICAST_VIF = 14
-IP_RSVP_ON = 15
-IP_RSVP_OFF = 16
-IP_RSVP_VIF_ON = 17
-IP_RSVP_VIF_OFF = 18
-IP_PORTRANGE = 19
-IP_RECVIF = 20
-IP_FW_ADD = 50
-IP_FW_DEL = 51
-IP_FW_FLUSH = 52
-IP_FW_ZERO = 53
-IP_FW_GET = 54
-IP_NAT = 55
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IP_PORTRANGE_DEFAULT = 0
-IP_PORTRANGE_HIGH = 1
-IP_PORTRANGE_LOW = 2
-IPPROTO_MAXID = (IPPROTO_IDP + 1)
-IPCTL_FORWARDING = 1
-IPCTL_SENDREDIRECTS = 2
-IPCTL_DEFTTL = 3
-IPCTL_DEFMTU = 4
-IPCTL_RTEXPIRE = 5
-IPCTL_RTMINEXPIRE = 6
-IPCTL_RTMAXCACHE = 7
-IPCTL_SOURCEROUTE = 8
-IPCTL_DIRECTEDBROADCAST = 9
-IPCTL_INTRQMAXLEN = 10
-IPCTL_INTRQDROPS = 11
-IPCTL_ACCEPTSOURCEROUTE = 13
-IPCTL_MAXID = 13
-IP_NAT_IN = 0x00000001
-IP_NAT_OUT = 0x00000002
diff --git a/sys/lib/python/plat-freebsd2/regen b/sys/lib/python/plat-freebsd2/regen
deleted file mode 100755
index 8f7e15e70..000000000
--- a/sys/lib/python/plat-freebsd2/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-h2py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-freebsd3/IN.py b/sys/lib/python/plat-freebsd3/IN.py
deleted file mode 100644
index 17514454e..000000000
--- a/sys/lib/python/plat-freebsd3/IN.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPIP = 4
-IPPROTO_TCP = 6
-IPPROTO_ST = 7
-IPPROTO_EGP = 8
-IPPROTO_PIGP = 9
-IPPROTO_RCCMON = 10
-IPPROTO_NVPII = 11
-IPPROTO_PUP = 12
-IPPROTO_ARGUS = 13
-IPPROTO_EMCON = 14
-IPPROTO_XNET = 15
-IPPROTO_CHAOS = 16
-IPPROTO_UDP = 17
-IPPROTO_MUX = 18
-IPPROTO_MEAS = 19
-IPPROTO_HMP = 20
-IPPROTO_PRM = 21
-IPPROTO_IDP = 22
-IPPROTO_TRUNK1 = 23
-IPPROTO_TRUNK2 = 24
-IPPROTO_LEAF1 = 25
-IPPROTO_LEAF2 = 26
-IPPROTO_RDP = 27
-IPPROTO_IRTP = 28
-IPPROTO_TP = 29
-IPPROTO_BLT = 30
-IPPROTO_NSP = 31
-IPPROTO_INP = 32
-IPPROTO_SEP = 33
-IPPROTO_3PC = 34
-IPPROTO_IDPR = 35
-IPPROTO_XTP = 36
-IPPROTO_DDP = 37
-IPPROTO_CMTP = 38
-IPPROTO_TPXX = 39
-IPPROTO_IL = 40
-IPPROTO_SIP = 41
-IPPROTO_SDRP = 42
-IPPROTO_SIPSR = 43
-IPPROTO_SIPFRAG = 44
-IPPROTO_IDRP = 45
-IPPROTO_RSVP = 46
-IPPROTO_GRE = 47
-IPPROTO_MHRP = 48
-IPPROTO_BHA = 49
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_INLSP = 52
-IPPROTO_SWIPE = 53
-IPPROTO_NHRP = 54
-IPPROTO_AHIP = 61
-IPPROTO_CFTP = 62
-IPPROTO_HELLO = 63
-IPPROTO_SATEXPAK = 64
-IPPROTO_KRYPTOLAN = 65
-IPPROTO_RVD = 66
-IPPROTO_IPPC = 67
-IPPROTO_ADFS = 68
-IPPROTO_SATMON = 69
-IPPROTO_VISA = 70
-IPPROTO_IPCV = 71
-IPPROTO_CPNX = 72
-IPPROTO_CPHB = 73
-IPPROTO_WSN = 74
-IPPROTO_PVP = 75
-IPPROTO_BRSATMON = 76
-IPPROTO_ND = 77
-IPPROTO_WBMON = 78
-IPPROTO_WBEXPAK = 79
-IPPROTO_EON = 80
-IPPROTO_VMTP = 81
-IPPROTO_SVMTP = 82
-IPPROTO_VINES = 83
-IPPROTO_TTP = 84
-IPPROTO_IGP = 85
-IPPROTO_DGP = 86
-IPPROTO_TCF = 87
-IPPROTO_IGRP = 88
-IPPROTO_OSPFIGP = 89
-IPPROTO_SRPC = 90
-IPPROTO_LARP = 91
-IPPROTO_MTP = 92
-IPPROTO_AX25 = 93
-IPPROTO_IPEIP = 94
-IPPROTO_MICP = 95
-IPPROTO_SCCSP = 96
-IPPROTO_ETHERIP = 97
-IPPROTO_ENCAP = 98
-IPPROTO_APES = 99
-IPPROTO_GMTP = 100
-IPPROTO_DIVERT = 254
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IPPORT_HIFIRSTAUTO = 49152
-IPPORT_HILASTAUTO = 65535
-IPPORT_RESERVEDSTART = 600
-def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_ANY = 0x00000000
-INADDR_LOOPBACK = 0x7f000001
-INADDR_BROADCAST = 0xffffffff
-INADDR_NONE = 0xffffffff
-INADDR_UNSPEC_GROUP = 0xe0000000
-INADDR_ALLHOSTS_GROUP = 0xe0000001
-INADDR_ALLRTRS_GROUP = 0xe0000002
-INADDR_MAX_LOCAL_GROUP = 0xe00000ff
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_MULTICAST_VIF = 14
-IP_RSVP_ON = 15
-IP_RSVP_OFF = 16
-IP_RSVP_VIF_ON = 17
-IP_RSVP_VIF_OFF = 18
-IP_PORTRANGE = 19
-IP_RECVIF = 20
-IP_FW_ADD = 50
-IP_FW_DEL = 51
-IP_FW_FLUSH = 52
-IP_FW_ZERO = 53
-IP_FW_GET = 54
-IP_NAT = 55
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IP_PORTRANGE_DEFAULT = 0
-IP_PORTRANGE_HIGH = 1
-IP_PORTRANGE_LOW = 2
-IPPROTO_MAXID = (IPPROTO_IDP + 1)
-IPCTL_FORWARDING = 1
-IPCTL_SENDREDIRECTS = 2
-IPCTL_DEFTTL = 3
-IPCTL_DEFMTU = 4
-IPCTL_RTEXPIRE = 5
-IPCTL_RTMINEXPIRE = 6
-IPCTL_RTMAXCACHE = 7
-IPCTL_SOURCEROUTE = 8
-IPCTL_DIRECTEDBROADCAST = 9
-IPCTL_INTRQMAXLEN = 10
-IPCTL_INTRQDROPS = 11
-IPCTL_STATS = 12
-IPCTL_ACCEPTSOURCEROUTE = 13
-IPCTL_MAXID = 14
-IP_NAT_IN = 0x00000001
-IP_NAT_OUT = 0x00000002
diff --git a/sys/lib/python/plat-freebsd3/regen b/sys/lib/python/plat-freebsd3/regen
deleted file mode 100755
index 170f155d1..000000000
--- a/sys/lib/python/plat-freebsd3/regen
+++ /dev/null
@@ -1,4 +0,0 @@
-#! /bin/sh
-set -v
-h2py -i '(u_long)' /usr/include/netinet/in.h
-
diff --git a/sys/lib/python/plat-freebsd4/IN.py b/sys/lib/python/plat-freebsd4/IN.py
deleted file mode 100644
index bca241884..000000000
--- a/sys/lib/python/plat-freebsd4/IN.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-IPPROTO_IP = 0
-IPPROTO_HOPOPTS = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPV4 = 4
-IPPROTO_IPIP = IPPROTO_IPV4
-IPPROTO_TCP = 6
-IPPROTO_ST = 7
-IPPROTO_EGP = 8
-IPPROTO_PIGP = 9
-IPPROTO_RCCMON = 10
-IPPROTO_NVPII = 11
-IPPROTO_PUP = 12
-IPPROTO_ARGUS = 13
-IPPROTO_EMCON = 14
-IPPROTO_XNET = 15
-IPPROTO_CHAOS = 16
-IPPROTO_UDP = 17
-IPPROTO_MUX = 18
-IPPROTO_MEAS = 19
-IPPROTO_HMP = 20
-IPPROTO_PRM = 21
-IPPROTO_IDP = 22
-IPPROTO_TRUNK1 = 23
-IPPROTO_TRUNK2 = 24
-IPPROTO_LEAF1 = 25
-IPPROTO_LEAF2 = 26
-IPPROTO_RDP = 27
-IPPROTO_IRTP = 28
-IPPROTO_TP = 29
-IPPROTO_BLT = 30
-IPPROTO_NSP = 31
-IPPROTO_INP = 32
-IPPROTO_SEP = 33
-IPPROTO_3PC = 34
-IPPROTO_IDPR = 35
-IPPROTO_XTP = 36
-IPPROTO_DDP = 37
-IPPROTO_CMTP = 38
-IPPROTO_TPXX = 39
-IPPROTO_IL = 40
-IPPROTO_IPV6 = 41
-IPPROTO_SDRP = 42
-IPPROTO_ROUTING = 43
-IPPROTO_FRAGMENT = 44
-IPPROTO_IDRP = 45
-IPPROTO_RSVP = 46
-IPPROTO_GRE = 47
-IPPROTO_MHRP = 48
-IPPROTO_BHA = 49
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_INLSP = 52
-IPPROTO_SWIPE = 53
-IPPROTO_NHRP = 54
-IPPROTO_ICMPV6 = 58
-IPPROTO_NONE = 59
-IPPROTO_DSTOPTS = 60
-IPPROTO_AHIP = 61
-IPPROTO_CFTP = 62
-IPPROTO_HELLO = 63
-IPPROTO_SATEXPAK = 64
-IPPROTO_KRYPTOLAN = 65
-IPPROTO_RVD = 66
-IPPROTO_IPPC = 67
-IPPROTO_ADFS = 68
-IPPROTO_SATMON = 69
-IPPROTO_VISA = 70
-IPPROTO_IPCV = 71
-IPPROTO_CPNX = 72
-IPPROTO_CPHB = 73
-IPPROTO_WSN = 74
-IPPROTO_PVP = 75
-IPPROTO_BRSATMON = 76
-IPPROTO_ND = 77
-IPPROTO_WBMON = 78
-IPPROTO_WBEXPAK = 79
-IPPROTO_EON = 80
-IPPROTO_VMTP = 81
-IPPROTO_SVMTP = 82
-IPPROTO_VINES = 83
-IPPROTO_TTP = 84
-IPPROTO_IGP = 85
-IPPROTO_DGP = 86
-IPPROTO_TCF = 87
-IPPROTO_IGRP = 88
-IPPROTO_OSPFIGP = 89
-IPPROTO_SRPC = 90
-IPPROTO_LARP = 91
-IPPROTO_MTP = 92
-IPPROTO_AX25 = 93
-IPPROTO_IPEIP = 94
-IPPROTO_MICP = 95
-IPPROTO_SCCSP = 96
-IPPROTO_ETHERIP = 97
-IPPROTO_ENCAP = 98
-IPPROTO_APES = 99
-IPPROTO_GMTP = 100
-IPPROTO_IPCOMP = 108
-IPPROTO_PIM = 103
-IPPROTO_PGM = 113
-IPPROTO_DIVERT = 254
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPROTO_DONE = 257
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IPPORT_HIFIRSTAUTO = 49152
-IPPORT_HILASTAUTO = 65535
-IPPORT_RESERVEDSTART = 600
-def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
-
-def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_NONE = 0xffffffff
-IN_LOOPBACKNET = 127
-INET_ADDRSTRLEN = 16
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_MULTICAST_VIF = 14
-IP_RSVP_ON = 15
-IP_RSVP_OFF = 16
-IP_RSVP_VIF_ON = 17
-IP_RSVP_VIF_OFF = 18
-IP_PORTRANGE = 19
-IP_RECVIF = 20
-IP_IPSEC_POLICY = 21
-IP_FAITH = 22
-IP_FW_ADD = 50
-IP_FW_DEL = 51
-IP_FW_FLUSH = 52
-IP_FW_ZERO = 53
-IP_FW_GET = 54
-IP_FW_RESETLOG = 55
-IP_DUMMYNET_CONFIGURE = 60
-IP_DUMMYNET_DEL = 61
-IP_DUMMYNET_FLUSH = 62
-IP_DUMMYNET_GET = 64
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IP_PORTRANGE_DEFAULT = 0
-IP_PORTRANGE_HIGH = 1
-IP_PORTRANGE_LOW = 2
-IPPROTO_MAXID = (IPPROTO_AH + 1)
-IPCTL_FORWARDING = 1
-IPCTL_SENDREDIRECTS = 2
-IPCTL_DEFTTL = 3
-IPCTL_DEFMTU = 4
-IPCTL_RTEXPIRE = 5
-IPCTL_RTMINEXPIRE = 6
-IPCTL_RTMAXCACHE = 7
-IPCTL_SOURCEROUTE = 8
-IPCTL_DIRECTEDBROADCAST = 9
-IPCTL_INTRQMAXLEN = 10
-IPCTL_INTRQDROPS = 11
-IPCTL_STATS = 12
-IPCTL_ACCEPTSOURCEROUTE = 13
-IPCTL_FASTFORWARDING = 14
-IPCTL_KEEPFAITH = 15
-IPCTL_GIF_TTL = 16
-IPCTL_MAXID = 17
-
-# Included from netinet6/in6.h
-
-# Included from sys/queue.h
-def SLIST_HEAD_INITIALIZER(head): return \
-
-def SLIST_ENTRY(type): return \
-
-def STAILQ_HEAD_INITIALIZER(head): return \
-
-def STAILQ_ENTRY(type): return \
-
-def LIST_HEAD_INITIALIZER(head): return \
-
-def LIST_ENTRY(type): return \
-
-def TAILQ_HEAD_INITIALIZER(head): return \
-
-def TAILQ_ENTRY(type): return \
-
-def CIRCLEQ_ENTRY(type): return \
-
-__KAME_VERSION = "20000701/FreeBSD-current"
-IPV6PORT_RESERVED = 1024
-IPV6PORT_ANONMIN = 49152
-IPV6PORT_ANONMAX = 65535
-IPV6PORT_RESERVEDMIN = 600
-IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
-INET6_ADDRSTRLEN = 46
-IPV6_ADDR_INT32_ONE = 1
-IPV6_ADDR_INT32_TWO = 2
-IPV6_ADDR_INT32_MNL = 0xff010000
-IPV6_ADDR_INT32_MLL = 0xff020000
-IPV6_ADDR_INT32_SMP = 0x0000ffff
-IPV6_ADDR_INT16_ULL = 0xfe80
-IPV6_ADDR_INT16_USL = 0xfec0
-IPV6_ADDR_INT16_MLL = 0xff02
-IPV6_ADDR_INT32_ONE = 0x01000000
-IPV6_ADDR_INT32_TWO = 0x02000000
-IPV6_ADDR_INT32_MNL = 0x000001ff
-IPV6_ADDR_INT32_MLL = 0x000002ff
-IPV6_ADDR_INT32_SMP = 0xffff0000
-IPV6_ADDR_INT16_ULL = 0x80fe
-IPV6_ADDR_INT16_USL = 0xc0fe
-IPV6_ADDR_INT16_MLL = 0x02ff
-def IN6_IS_ADDR_UNSPECIFIED(a): return \
-
-def IN6_IS_ADDR_LOOPBACK(a): return \
-
-def IN6_IS_ADDR_V4COMPAT(a): return \
-
-def IN6_IS_ADDR_V4MAPPED(a): return \
-
-IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-def IN6_IS_ADDR_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_SCOPE_LINKLOCAL(a): return \
-
-IPV6_OPTIONS = 1
-IPV6_RECVOPTS = 5
-IPV6_RECVRETOPTS = 6
-IPV6_RECVDSTADDR = 7
-IPV6_RETOPTS = 8
-IPV6_SOCKOPT_RESERVED1 = 3
-IPV6_UNICAST_HOPS = 4
-IPV6_MULTICAST_IF = 9
-IPV6_MULTICAST_HOPS = 10
-IPV6_MULTICAST_LOOP = 11
-IPV6_JOIN_GROUP = 12
-IPV6_LEAVE_GROUP = 13
-IPV6_PORTRANGE = 14
-ICMP6_FILTER = 18
-IPV6_PKTINFO = 19
-IPV6_HOPLIMIT = 20
-IPV6_NEXTHOP = 21
-IPV6_HOPOPTS = 22
-IPV6_DSTOPTS = 23
-IPV6_RTHDR = 24
-IPV6_PKTOPTIONS = 25
-IPV6_CHECKSUM = 26
-IPV6_BINDV6ONLY = 27
-IPV6_IPSEC_POLICY = 28
-IPV6_FAITH = 29
-IPV6_FW_ADD = 30
-IPV6_FW_DEL = 31
-IPV6_FW_FLUSH = 32
-IPV6_FW_ZERO = 33
-IPV6_FW_GET = 34
-IPV6_RTHDR_LOOSE = 0
-IPV6_RTHDR_STRICT = 1
-IPV6_RTHDR_TYPE_0 = 0
-IPV6_DEFAULT_MULTICAST_HOPS = 1
-IPV6_DEFAULT_MULTICAST_LOOP = 1
-IPV6_PORTRANGE_DEFAULT = 0
-IPV6_PORTRANGE_HIGH = 1
-IPV6_PORTRANGE_LOW = 2
-IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
-IPV6CTL_FORWARDING = 1
-IPV6CTL_SENDREDIRECTS = 2
-IPV6CTL_DEFHLIM = 3
-IPV6CTL_DEFMTU = 4
-IPV6CTL_FORWSRCRT = 5
-IPV6CTL_STATS = 6
-IPV6CTL_MRTSTATS = 7
-IPV6CTL_MRTPROTO = 8
-IPV6CTL_MAXFRAGPACKETS = 9
-IPV6CTL_SOURCECHECK = 10
-IPV6CTL_SOURCECHECK_LOGINT = 11
-IPV6CTL_ACCEPT_RTADV = 12
-IPV6CTL_KEEPFAITH = 13
-IPV6CTL_LOG_INTERVAL = 14
-IPV6CTL_HDRNESTLIMIT = 15
-IPV6CTL_DAD_COUNT = 16
-IPV6CTL_AUTO_FLOWLABEL = 17
-IPV6CTL_DEFMCASTHLIM = 18
-IPV6CTL_GIF_HLIM = 19
-IPV6CTL_KAME_VERSION = 20
-IPV6CTL_USE_DEPRECATED = 21
-IPV6CTL_RR_PRUNE = 22
-IPV6CTL_MAPPED_ADDR = 23
-IPV6CTL_BINDV6ONLY = 24
-IPV6CTL_RTEXPIRE = 25
-IPV6CTL_RTMINEXPIRE = 26
-IPV6CTL_RTMAXCACHE = 27
-IPV6CTL_MAXID = 28
diff --git a/sys/lib/python/plat-freebsd4/regen b/sys/lib/python/plat-freebsd4/regen
deleted file mode 100644
index 8aa6898c6..000000000
--- a/sys/lib/python/plat-freebsd4/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-freebsd5/IN.py b/sys/lib/python/plat-freebsd5/IN.py
deleted file mode 100644
index bca241884..000000000
--- a/sys/lib/python/plat-freebsd5/IN.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-IPPROTO_IP = 0
-IPPROTO_HOPOPTS = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPV4 = 4
-IPPROTO_IPIP = IPPROTO_IPV4
-IPPROTO_TCP = 6
-IPPROTO_ST = 7
-IPPROTO_EGP = 8
-IPPROTO_PIGP = 9
-IPPROTO_RCCMON = 10
-IPPROTO_NVPII = 11
-IPPROTO_PUP = 12
-IPPROTO_ARGUS = 13
-IPPROTO_EMCON = 14
-IPPROTO_XNET = 15
-IPPROTO_CHAOS = 16
-IPPROTO_UDP = 17
-IPPROTO_MUX = 18
-IPPROTO_MEAS = 19
-IPPROTO_HMP = 20
-IPPROTO_PRM = 21
-IPPROTO_IDP = 22
-IPPROTO_TRUNK1 = 23
-IPPROTO_TRUNK2 = 24
-IPPROTO_LEAF1 = 25
-IPPROTO_LEAF2 = 26
-IPPROTO_RDP = 27
-IPPROTO_IRTP = 28
-IPPROTO_TP = 29
-IPPROTO_BLT = 30
-IPPROTO_NSP = 31
-IPPROTO_INP = 32
-IPPROTO_SEP = 33
-IPPROTO_3PC = 34
-IPPROTO_IDPR = 35
-IPPROTO_XTP = 36
-IPPROTO_DDP = 37
-IPPROTO_CMTP = 38
-IPPROTO_TPXX = 39
-IPPROTO_IL = 40
-IPPROTO_IPV6 = 41
-IPPROTO_SDRP = 42
-IPPROTO_ROUTING = 43
-IPPROTO_FRAGMENT = 44
-IPPROTO_IDRP = 45
-IPPROTO_RSVP = 46
-IPPROTO_GRE = 47
-IPPROTO_MHRP = 48
-IPPROTO_BHA = 49
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_INLSP = 52
-IPPROTO_SWIPE = 53
-IPPROTO_NHRP = 54
-IPPROTO_ICMPV6 = 58
-IPPROTO_NONE = 59
-IPPROTO_DSTOPTS = 60
-IPPROTO_AHIP = 61
-IPPROTO_CFTP = 62
-IPPROTO_HELLO = 63
-IPPROTO_SATEXPAK = 64
-IPPROTO_KRYPTOLAN = 65
-IPPROTO_RVD = 66
-IPPROTO_IPPC = 67
-IPPROTO_ADFS = 68
-IPPROTO_SATMON = 69
-IPPROTO_VISA = 70
-IPPROTO_IPCV = 71
-IPPROTO_CPNX = 72
-IPPROTO_CPHB = 73
-IPPROTO_WSN = 74
-IPPROTO_PVP = 75
-IPPROTO_BRSATMON = 76
-IPPROTO_ND = 77
-IPPROTO_WBMON = 78
-IPPROTO_WBEXPAK = 79
-IPPROTO_EON = 80
-IPPROTO_VMTP = 81
-IPPROTO_SVMTP = 82
-IPPROTO_VINES = 83
-IPPROTO_TTP = 84
-IPPROTO_IGP = 85
-IPPROTO_DGP = 86
-IPPROTO_TCF = 87
-IPPROTO_IGRP = 88
-IPPROTO_OSPFIGP = 89
-IPPROTO_SRPC = 90
-IPPROTO_LARP = 91
-IPPROTO_MTP = 92
-IPPROTO_AX25 = 93
-IPPROTO_IPEIP = 94
-IPPROTO_MICP = 95
-IPPROTO_SCCSP = 96
-IPPROTO_ETHERIP = 97
-IPPROTO_ENCAP = 98
-IPPROTO_APES = 99
-IPPROTO_GMTP = 100
-IPPROTO_IPCOMP = 108
-IPPROTO_PIM = 103
-IPPROTO_PGM = 113
-IPPROTO_DIVERT = 254
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPROTO_DONE = 257
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IPPORT_HIFIRSTAUTO = 49152
-IPPORT_HILASTAUTO = 65535
-IPPORT_RESERVEDSTART = 600
-def IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
-
-def IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_NONE = 0xffffffff
-IN_LOOPBACKNET = 127
-INET_ADDRSTRLEN = 16
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_MULTICAST_VIF = 14
-IP_RSVP_ON = 15
-IP_RSVP_OFF = 16
-IP_RSVP_VIF_ON = 17
-IP_RSVP_VIF_OFF = 18
-IP_PORTRANGE = 19
-IP_RECVIF = 20
-IP_IPSEC_POLICY = 21
-IP_FAITH = 22
-IP_FW_ADD = 50
-IP_FW_DEL = 51
-IP_FW_FLUSH = 52
-IP_FW_ZERO = 53
-IP_FW_GET = 54
-IP_FW_RESETLOG = 55
-IP_DUMMYNET_CONFIGURE = 60
-IP_DUMMYNET_DEL = 61
-IP_DUMMYNET_FLUSH = 62
-IP_DUMMYNET_GET = 64
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IP_PORTRANGE_DEFAULT = 0
-IP_PORTRANGE_HIGH = 1
-IP_PORTRANGE_LOW = 2
-IPPROTO_MAXID = (IPPROTO_AH + 1)
-IPCTL_FORWARDING = 1
-IPCTL_SENDREDIRECTS = 2
-IPCTL_DEFTTL = 3
-IPCTL_DEFMTU = 4
-IPCTL_RTEXPIRE = 5
-IPCTL_RTMINEXPIRE = 6
-IPCTL_RTMAXCACHE = 7
-IPCTL_SOURCEROUTE = 8
-IPCTL_DIRECTEDBROADCAST = 9
-IPCTL_INTRQMAXLEN = 10
-IPCTL_INTRQDROPS = 11
-IPCTL_STATS = 12
-IPCTL_ACCEPTSOURCEROUTE = 13
-IPCTL_FASTFORWARDING = 14
-IPCTL_KEEPFAITH = 15
-IPCTL_GIF_TTL = 16
-IPCTL_MAXID = 17
-
-# Included from netinet6/in6.h
-
-# Included from sys/queue.h
-def SLIST_HEAD_INITIALIZER(head): return \
-
-def SLIST_ENTRY(type): return \
-
-def STAILQ_HEAD_INITIALIZER(head): return \
-
-def STAILQ_ENTRY(type): return \
-
-def LIST_HEAD_INITIALIZER(head): return \
-
-def LIST_ENTRY(type): return \
-
-def TAILQ_HEAD_INITIALIZER(head): return \
-
-def TAILQ_ENTRY(type): return \
-
-def CIRCLEQ_ENTRY(type): return \
-
-__KAME_VERSION = "20000701/FreeBSD-current"
-IPV6PORT_RESERVED = 1024
-IPV6PORT_ANONMIN = 49152
-IPV6PORT_ANONMAX = 65535
-IPV6PORT_RESERVEDMIN = 600
-IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
-INET6_ADDRSTRLEN = 46
-IPV6_ADDR_INT32_ONE = 1
-IPV6_ADDR_INT32_TWO = 2
-IPV6_ADDR_INT32_MNL = 0xff010000
-IPV6_ADDR_INT32_MLL = 0xff020000
-IPV6_ADDR_INT32_SMP = 0x0000ffff
-IPV6_ADDR_INT16_ULL = 0xfe80
-IPV6_ADDR_INT16_USL = 0xfec0
-IPV6_ADDR_INT16_MLL = 0xff02
-IPV6_ADDR_INT32_ONE = 0x01000000
-IPV6_ADDR_INT32_TWO = 0x02000000
-IPV6_ADDR_INT32_MNL = 0x000001ff
-IPV6_ADDR_INT32_MLL = 0x000002ff
-IPV6_ADDR_INT32_SMP = 0xffff0000
-IPV6_ADDR_INT16_ULL = 0x80fe
-IPV6_ADDR_INT16_USL = 0xc0fe
-IPV6_ADDR_INT16_MLL = 0x02ff
-def IN6_IS_ADDR_UNSPECIFIED(a): return \
-
-def IN6_IS_ADDR_LOOPBACK(a): return \
-
-def IN6_IS_ADDR_V4COMPAT(a): return \
-
-def IN6_IS_ADDR_V4MAPPED(a): return \
-
-IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-def IN6_IS_ADDR_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_SCOPE_LINKLOCAL(a): return \
-
-IPV6_OPTIONS = 1
-IPV6_RECVOPTS = 5
-IPV6_RECVRETOPTS = 6
-IPV6_RECVDSTADDR = 7
-IPV6_RETOPTS = 8
-IPV6_SOCKOPT_RESERVED1 = 3
-IPV6_UNICAST_HOPS = 4
-IPV6_MULTICAST_IF = 9
-IPV6_MULTICAST_HOPS = 10
-IPV6_MULTICAST_LOOP = 11
-IPV6_JOIN_GROUP = 12
-IPV6_LEAVE_GROUP = 13
-IPV6_PORTRANGE = 14
-ICMP6_FILTER = 18
-IPV6_PKTINFO = 19
-IPV6_HOPLIMIT = 20
-IPV6_NEXTHOP = 21
-IPV6_HOPOPTS = 22
-IPV6_DSTOPTS = 23
-IPV6_RTHDR = 24
-IPV6_PKTOPTIONS = 25
-IPV6_CHECKSUM = 26
-IPV6_BINDV6ONLY = 27
-IPV6_IPSEC_POLICY = 28
-IPV6_FAITH = 29
-IPV6_FW_ADD = 30
-IPV6_FW_DEL = 31
-IPV6_FW_FLUSH = 32
-IPV6_FW_ZERO = 33
-IPV6_FW_GET = 34
-IPV6_RTHDR_LOOSE = 0
-IPV6_RTHDR_STRICT = 1
-IPV6_RTHDR_TYPE_0 = 0
-IPV6_DEFAULT_MULTICAST_HOPS = 1
-IPV6_DEFAULT_MULTICAST_LOOP = 1
-IPV6_PORTRANGE_DEFAULT = 0
-IPV6_PORTRANGE_HIGH = 1
-IPV6_PORTRANGE_LOW = 2
-IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
-IPV6CTL_FORWARDING = 1
-IPV6CTL_SENDREDIRECTS = 2
-IPV6CTL_DEFHLIM = 3
-IPV6CTL_DEFMTU = 4
-IPV6CTL_FORWSRCRT = 5
-IPV6CTL_STATS = 6
-IPV6CTL_MRTSTATS = 7
-IPV6CTL_MRTPROTO = 8
-IPV6CTL_MAXFRAGPACKETS = 9
-IPV6CTL_SOURCECHECK = 10
-IPV6CTL_SOURCECHECK_LOGINT = 11
-IPV6CTL_ACCEPT_RTADV = 12
-IPV6CTL_KEEPFAITH = 13
-IPV6CTL_LOG_INTERVAL = 14
-IPV6CTL_HDRNESTLIMIT = 15
-IPV6CTL_DAD_COUNT = 16
-IPV6CTL_AUTO_FLOWLABEL = 17
-IPV6CTL_DEFMCASTHLIM = 18
-IPV6CTL_GIF_HLIM = 19
-IPV6CTL_KAME_VERSION = 20
-IPV6CTL_USE_DEPRECATED = 21
-IPV6CTL_RR_PRUNE = 22
-IPV6CTL_MAPPED_ADDR = 23
-IPV6CTL_BINDV6ONLY = 24
-IPV6CTL_RTEXPIRE = 25
-IPV6CTL_RTMINEXPIRE = 26
-IPV6CTL_RTMAXCACHE = 27
-IPV6CTL_MAXID = 28
diff --git a/sys/lib/python/plat-freebsd5/regen b/sys/lib/python/plat-freebsd5/regen
deleted file mode 100644
index 8aa6898c6..000000000
--- a/sys/lib/python/plat-freebsd5/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-freebsd6/IN.py b/sys/lib/python/plat-freebsd6/IN.py
deleted file mode 100644
index 31e9e130a..000000000
--- a/sys/lib/python/plat-freebsd6/IN.py
+++ /dev/null
@@ -1,515 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from sys/cdefs.h
-def __P(protos): return protos
-
-def __STRING(x): return #x
-
-def __XSTRING(x): return __STRING(x)
-
-def __P(protos): return ()
-
-def __STRING(x): return "x"
-
-def __aligned(x): return __attribute__((__aligned__(x)))
-
-def __section(x): return __attribute__((__section__(x)))
-
-def __aligned(x): return __attribute__((__aligned__(x)))
-
-def __section(x): return __attribute__((__section__(x)))
-
-def __nonnull(x): return __attribute__((__nonnull__(x)))
-
-def __predict_true(exp): return __builtin_expect((exp), 1)
-
-def __predict_false(exp): return __builtin_expect((exp), 0)
-
-def __predict_true(exp): return (exp)
-
-def __predict_false(exp): return (exp)
-
-def __FBSDID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
-
-def __RCSID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
-
-def __RCSID_SOURCE(s): return __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
-
-def __SCCSID(s): return __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
-
-def __COPYRIGHT(s): return __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
-
-_POSIX_C_SOURCE = 199009
-_POSIX_C_SOURCE = 199209
-__XSI_VISIBLE = 600
-_POSIX_C_SOURCE = 200112
-__XSI_VISIBLE = 500
-_POSIX_C_SOURCE = 199506
-_POSIX_C_SOURCE = 198808
-__POSIX_VISIBLE = 200112
-__ISO_C_VISIBLE = 1999
-__POSIX_VISIBLE = 199506
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 199309
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 199209
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 199009
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 198808
-__ISO_C_VISIBLE = 0
-__POSIX_VISIBLE = 0
-__XSI_VISIBLE = 0
-__BSD_VISIBLE = 0
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 0
-__XSI_VISIBLE = 0
-__BSD_VISIBLE = 0
-__ISO_C_VISIBLE = 1999
-__POSIX_VISIBLE = 200112
-__XSI_VISIBLE = 600
-__BSD_VISIBLE = 1
-__ISO_C_VISIBLE = 1999
-
-# Included from sys/_types.h
-
-# Included from machine/_types.h
-
-# Included from machine/endian.h
-_QUAD_HIGHWORD = 1
-_QUAD_LOWWORD = 0
-_LITTLE_ENDIAN = 1234
-_BIG_ENDIAN = 4321
-_PDP_ENDIAN = 3412
-_BYTE_ORDER = _LITTLE_ENDIAN
-LITTLE_ENDIAN = _LITTLE_ENDIAN
-BIG_ENDIAN = _BIG_ENDIAN
-PDP_ENDIAN = _PDP_ENDIAN
-BYTE_ORDER = _BYTE_ORDER
-__INTEL_COMPILER_with_FreeBSD_endian = 1
-__INTEL_COMPILER_with_FreeBSD_endian = 1
-def __word_swap_int_var(x): return \
-
-def __word_swap_int_const(x): return \
-
-def __word_swap_int(x): return __word_swap_int_var(x)
-
-def __byte_swap_int_var(x): return \
-
-def __byte_swap_int_var(x): return \
-
-def __byte_swap_int_const(x): return \
-
-def __byte_swap_int(x): return __byte_swap_int_var(x)
-
-def __byte_swap_word_var(x): return \
-
-def __byte_swap_word_const(x): return \
-
-def __byte_swap_word(x): return __byte_swap_word_var(x)
-
-def __htonl(x): return __bswap32(x)
-
-def __htons(x): return __bswap16(x)
-
-def __ntohl(x): return __bswap32(x)
-
-def __ntohs(x): return __bswap16(x)
-
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_TCP = 6
-IPPROTO_UDP = 17
-def htonl(x): return __htonl(x)
-
-def htons(x): return __htons(x)
-
-def ntohl(x): return __ntohl(x)
-
-def ntohs(x): return __ntohs(x)
-
-IPPROTO_RAW = 255
-INET_ADDRSTRLEN = 16
-IPPROTO_HOPOPTS = 0
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPV4 = 4
-IPPROTO_IPIP = IPPROTO_IPV4
-IPPROTO_ST = 7
-IPPROTO_EGP = 8
-IPPROTO_PIGP = 9
-IPPROTO_RCCMON = 10
-IPPROTO_NVPII = 11
-IPPROTO_PUP = 12
-IPPROTO_ARGUS = 13
-IPPROTO_EMCON = 14
-IPPROTO_XNET = 15
-IPPROTO_CHAOS = 16
-IPPROTO_MUX = 18
-IPPROTO_MEAS = 19
-IPPROTO_HMP = 20
-IPPROTO_PRM = 21
-IPPROTO_IDP = 22
-IPPROTO_TRUNK1 = 23
-IPPROTO_TRUNK2 = 24
-IPPROTO_LEAF1 = 25
-IPPROTO_LEAF2 = 26
-IPPROTO_RDP = 27
-IPPROTO_IRTP = 28
-IPPROTO_TP = 29
-IPPROTO_BLT = 30
-IPPROTO_NSP = 31
-IPPROTO_INP = 32
-IPPROTO_SEP = 33
-IPPROTO_3PC = 34
-IPPROTO_IDPR = 35
-IPPROTO_XTP = 36
-IPPROTO_DDP = 37
-IPPROTO_CMTP = 38
-IPPROTO_TPXX = 39
-IPPROTO_IL = 40
-IPPROTO_IPV6 = 41
-IPPROTO_SDRP = 42
-IPPROTO_ROUTING = 43
-IPPROTO_FRAGMENT = 44
-IPPROTO_IDRP = 45
-IPPROTO_RSVP = 46
-IPPROTO_GRE = 47
-IPPROTO_MHRP = 48
-IPPROTO_BHA = 49
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_INLSP = 52
-IPPROTO_SWIPE = 53
-IPPROTO_NHRP = 54
-IPPROTO_MOBILE = 55
-IPPROTO_TLSP = 56
-IPPROTO_SKIP = 57
-IPPROTO_ICMPV6 = 58
-IPPROTO_NONE = 59
-IPPROTO_DSTOPTS = 60
-IPPROTO_AHIP = 61
-IPPROTO_CFTP = 62
-IPPROTO_HELLO = 63
-IPPROTO_SATEXPAK = 64
-IPPROTO_KRYPTOLAN = 65
-IPPROTO_RVD = 66
-IPPROTO_IPPC = 67
-IPPROTO_ADFS = 68
-IPPROTO_SATMON = 69
-IPPROTO_VISA = 70
-IPPROTO_IPCV = 71
-IPPROTO_CPNX = 72
-IPPROTO_CPHB = 73
-IPPROTO_WSN = 74
-IPPROTO_PVP = 75
-IPPROTO_BRSATMON = 76
-IPPROTO_ND = 77
-IPPROTO_WBMON = 78
-IPPROTO_WBEXPAK = 79
-IPPROTO_EON = 80
-IPPROTO_VMTP = 81
-IPPROTO_SVMTP = 82
-IPPROTO_VINES = 83
-IPPROTO_TTP = 84
-IPPROTO_IGP = 85
-IPPROTO_DGP = 86
-IPPROTO_TCF = 87
-IPPROTO_IGRP = 88
-IPPROTO_OSPFIGP = 89
-IPPROTO_SRPC = 90
-IPPROTO_LARP = 91
-IPPROTO_MTP = 92
-IPPROTO_AX25 = 93
-IPPROTO_IPEIP = 94
-IPPROTO_MICP = 95
-IPPROTO_SCCSP = 96
-IPPROTO_ETHERIP = 97
-IPPROTO_ENCAP = 98
-IPPROTO_APES = 99
-IPPROTO_GMTP = 100
-IPPROTO_IPCOMP = 108
-IPPROTO_PIM = 103
-IPPROTO_PGM = 113
-IPPROTO_PFSYNC = 240
-IPPROTO_OLD_DIVERT = 254
-IPPROTO_MAX = 256
-IPPROTO_DONE = 257
-IPPROTO_DIVERT = 258
-IPPORT_RESERVED = 1024
-IPPORT_HIFIRSTAUTO = 49152
-IPPORT_HILASTAUTO = 65535
-IPPORT_RESERVEDSTART = 600
-IPPORT_MAX = 65535
-def IN_CLASSA(i): return (((u_int32_t)(i) & (-2147483648)) == 0)
-
-IN_CLASSA_NET = (-16777216)
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((u_int32_t)(i) & (-1073741824)) == (-2147483648))
-
-IN_CLASSB_NET = (-65536)
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((u_int32_t)(i) & (-536870912)) == (-1073741824))
-
-IN_CLASSC_NET = (-256)
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((u_int32_t)(i) & (-268435456)) == (-536870912))
-
-IN_CLASSD_NET = (-268435456)
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
-
-def IN_BADCLASS(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
-
-INADDR_NONE = (-1)
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_SENDSRCADDR = IP_RECVDSTADDR
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_MULTICAST_VIF = 14
-IP_RSVP_ON = 15
-IP_RSVP_OFF = 16
-IP_RSVP_VIF_ON = 17
-IP_RSVP_VIF_OFF = 18
-IP_PORTRANGE = 19
-IP_RECVIF = 20
-IP_IPSEC_POLICY = 21
-IP_FAITH = 22
-IP_ONESBCAST = 23
-IP_FW_TABLE_ADD = 40
-IP_FW_TABLE_DEL = 41
-IP_FW_TABLE_FLUSH = 42
-IP_FW_TABLE_GETSIZE = 43
-IP_FW_TABLE_LIST = 44
-IP_FW_ADD = 50
-IP_FW_DEL = 51
-IP_FW_FLUSH = 52
-IP_FW_ZERO = 53
-IP_FW_GET = 54
-IP_FW_RESETLOG = 55
-IP_DUMMYNET_CONFIGURE = 60
-IP_DUMMYNET_DEL = 61
-IP_DUMMYNET_FLUSH = 62
-IP_DUMMYNET_GET = 64
-IP_RECVTTL = 65
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IP_PORTRANGE_DEFAULT = 0
-IP_PORTRANGE_HIGH = 1
-IP_PORTRANGE_LOW = 2
-IPPROTO_MAXID = (IPPROTO_AH + 1)
-IPCTL_FORWARDING = 1
-IPCTL_SENDREDIRECTS = 2
-IPCTL_DEFTTL = 3
-IPCTL_DEFMTU = 4
-IPCTL_RTEXPIRE = 5
-IPCTL_RTMINEXPIRE = 6
-IPCTL_RTMAXCACHE = 7
-IPCTL_SOURCEROUTE = 8
-IPCTL_DIRECTEDBROADCAST = 9
-IPCTL_INTRQMAXLEN = 10
-IPCTL_INTRQDROPS = 11
-IPCTL_STATS = 12
-IPCTL_ACCEPTSOURCEROUTE = 13
-IPCTL_FASTFORWARDING = 14
-IPCTL_KEEPFAITH = 15
-IPCTL_GIF_TTL = 16
-IPCTL_MAXID = 17
-def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
-
-
-# Included from netinet6/in6.h
-__KAME_VERSION = "20010528/FreeBSD"
-IPV6PORT_RESERVED = 1024
-IPV6PORT_ANONMIN = 49152
-IPV6PORT_ANONMAX = 65535
-IPV6PORT_RESERVEDMIN = 600
-IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
-INET6_ADDRSTRLEN = 46
-IPV6_ADDR_INT32_ONE = 1
-IPV6_ADDR_INT32_TWO = 2
-IPV6_ADDR_INT32_MNL = (-16711680)
-IPV6_ADDR_INT32_MLL = (-16646144)
-IPV6_ADDR_INT32_SMP = 0x0000ffff
-IPV6_ADDR_INT16_ULL = 0xfe80
-IPV6_ADDR_INT16_USL = 0xfec0
-IPV6_ADDR_INT16_MLL = 0xff02
-IPV6_ADDR_INT32_ONE = 0x01000000
-IPV6_ADDR_INT32_TWO = 0x02000000
-IPV6_ADDR_INT32_MNL = 0x000001ff
-IPV6_ADDR_INT32_MLL = 0x000002ff
-IPV6_ADDR_INT32_SMP = (-65536)
-IPV6_ADDR_INT16_ULL = 0x80fe
-IPV6_ADDR_INT16_USL = 0xc0fe
-IPV6_ADDR_INT16_MLL = 0x02ff
-def IN6_IS_ADDR_UNSPECIFIED(a): return \
-
-def IN6_IS_ADDR_LOOPBACK(a): return \
-
-def IN6_IS_ADDR_V4COMPAT(a): return \
-
-def IN6_IS_ADDR_V4MAPPED(a): return \
-
-IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
-IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-__IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
-__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-def IN6_IS_ADDR_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_INTFACELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_SCOPE_LINKLOCAL(a): return \
-
-def IFA6_IS_DEPRECATED(a): return \
-
-def IFA6_IS_INVALID(a): return \
-
-IPV6_OPTIONS = 1
-IPV6_RECVOPTS = 5
-IPV6_RECVRETOPTS = 6
-IPV6_RECVDSTADDR = 7
-IPV6_RETOPTS = 8
-IPV6_SOCKOPT_RESERVED1 = 3
-IPV6_UNICAST_HOPS = 4
-IPV6_MULTICAST_IF = 9
-IPV6_MULTICAST_HOPS = 10
-IPV6_MULTICAST_LOOP = 11
-IPV6_JOIN_GROUP = 12
-IPV6_LEAVE_GROUP = 13
-IPV6_PORTRANGE = 14
-ICMP6_FILTER = 18
-IPV6_2292PKTINFO = 19
-IPV6_2292HOPLIMIT = 20
-IPV6_2292NEXTHOP = 21
-IPV6_2292HOPOPTS = 22
-IPV6_2292DSTOPTS = 23
-IPV6_2292RTHDR = 24
-IPV6_2292PKTOPTIONS = 25
-IPV6_CHECKSUM = 26
-IPV6_V6ONLY = 27
-IPV6_BINDV6ONLY = IPV6_V6ONLY
-IPV6_IPSEC_POLICY = 28
-IPV6_FAITH = 29
-IPV6_FW_ADD = 30
-IPV6_FW_DEL = 31
-IPV6_FW_FLUSH = 32
-IPV6_FW_ZERO = 33
-IPV6_FW_GET = 34
-IPV6_RTHDRDSTOPTS = 35
-IPV6_RECVPKTINFO = 36
-IPV6_RECVHOPLIMIT = 37
-IPV6_RECVRTHDR = 38
-IPV6_RECVHOPOPTS = 39
-IPV6_RECVDSTOPTS = 40
-IPV6_RECVRTHDRDSTOPTS = 41
-IPV6_USE_MIN_MTU = 42
-IPV6_RECVPATHMTU = 43
-IPV6_PATHMTU = 44
-IPV6_REACHCONF = 45
-IPV6_PKTINFO = 46
-IPV6_HOPLIMIT = 47
-IPV6_NEXTHOP = 48
-IPV6_HOPOPTS = 49
-IPV6_DSTOPTS = 50
-IPV6_RTHDR = 51
-IPV6_PKTOPTIONS = 52
-IPV6_RECVTCLASS = 57
-IPV6_AUTOFLOWLABEL = 59
-IPV6_TCLASS = 61
-IPV6_DONTFRAG = 62
-IPV6_PREFER_TEMPADDR = 63
-IPV6_RTHDR_LOOSE = 0
-IPV6_RTHDR_STRICT = 1
-IPV6_RTHDR_TYPE_0 = 0
-IPV6_DEFAULT_MULTICAST_HOPS = 1
-IPV6_DEFAULT_MULTICAST_LOOP = 1
-IPV6_PORTRANGE_DEFAULT = 0
-IPV6_PORTRANGE_HIGH = 1
-IPV6_PORTRANGE_LOW = 2
-IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
-IPV6CTL_FORWARDING = 1
-IPV6CTL_SENDREDIRECTS = 2
-IPV6CTL_DEFHLIM = 3
-IPV6CTL_DEFMTU = 4
-IPV6CTL_FORWSRCRT = 5
-IPV6CTL_STATS = 6
-IPV6CTL_MRTSTATS = 7
-IPV6CTL_MRTPROTO = 8
-IPV6CTL_MAXFRAGPACKETS = 9
-IPV6CTL_SOURCECHECK = 10
-IPV6CTL_SOURCECHECK_LOGINT = 11
-IPV6CTL_ACCEPT_RTADV = 12
-IPV6CTL_KEEPFAITH = 13
-IPV6CTL_LOG_INTERVAL = 14
-IPV6CTL_HDRNESTLIMIT = 15
-IPV6CTL_DAD_COUNT = 16
-IPV6CTL_AUTO_FLOWLABEL = 17
-IPV6CTL_DEFMCASTHLIM = 18
-IPV6CTL_GIF_HLIM = 19
-IPV6CTL_KAME_VERSION = 20
-IPV6CTL_USE_DEPRECATED = 21
-IPV6CTL_RR_PRUNE = 22
-IPV6CTL_MAPPED_ADDR = 23
-IPV6CTL_V6ONLY = 24
-IPV6CTL_RTEXPIRE = 25
-IPV6CTL_RTMINEXPIRE = 26
-IPV6CTL_RTMAXCACHE = 27
-IPV6CTL_USETEMPADDR = 32
-IPV6CTL_TEMPPLTIME = 33
-IPV6CTL_TEMPVLTIME = 34
-IPV6CTL_AUTO_LINKLOCAL = 35
-IPV6CTL_RIP6STATS = 36
-IPV6CTL_PREFER_TEMPADDR = 37
-IPV6CTL_ADDRCTLPOLICY = 38
-IPV6CTL_MAXFRAGS = 41
-IPV6CTL_MAXID = 42
diff --git a/sys/lib/python/plat-freebsd6/regen b/sys/lib/python/plat-freebsd6/regen
deleted file mode 100644
index 8aa6898c6..000000000
--- a/sys/lib/python/plat-freebsd6/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-freebsd7/IN.py b/sys/lib/python/plat-freebsd7/IN.py
deleted file mode 100644
index 77314ac1a..000000000
--- a/sys/lib/python/plat-freebsd7/IN.py
+++ /dev/null
@@ -1,535 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from sys/cdefs.h
-__GNUCLIKE_ASM = 3
-__GNUCLIKE_ASM = 2
-__GNUCLIKE___TYPEOF = 1
-__GNUCLIKE___OFFSETOF = 1
-__GNUCLIKE___SECTION = 1
-__GNUCLIKE_ATTRIBUTE_MODE_DI = 1
-__GNUCLIKE_CTOR_SECTION_HANDLING = 1
-__GNUCLIKE_BUILTIN_CONSTANT_P = 1
-__GNUCLIKE_BUILTIN_VARARGS = 1
-__GNUCLIKE_BUILTIN_VAALIST = 1
-__GNUC_VA_LIST_COMPATIBILITY = 1
-__GNUCLIKE_BUILTIN_STDARG = 1
-__GNUCLIKE_BUILTIN_NEXT_ARG = 1
-__GNUCLIKE_BUILTIN_MEMCPY = 1
-__CC_SUPPORTS_INLINE = 1
-__CC_SUPPORTS___INLINE = 1
-__CC_SUPPORTS___INLINE__ = 1
-__CC_SUPPORTS___FUNC__ = 1
-__CC_SUPPORTS_WARNING = 1
-__CC_SUPPORTS_VARADIC_XXX = 1
-__CC_SUPPORTS_DYNAMIC_ARRAY_INIT = 1
-__CC_INT_IS_32BIT = 1
-def __P(protos): return protos
-
-def __STRING(x): return #x
-
-def __XSTRING(x): return __STRING(x)
-
-def __P(protos): return ()
-
-def __STRING(x): return "x"
-
-def __aligned(x): return __attribute__((__aligned__(x)))
-
-def __section(x): return __attribute__((__section__(x)))
-
-def __aligned(x): return __attribute__((__aligned__(x)))
-
-def __section(x): return __attribute__((__section__(x)))
-
-def __nonnull(x): return __attribute__((__nonnull__(x)))
-
-def __predict_true(exp): return __builtin_expect((exp), 1)
-
-def __predict_false(exp): return __builtin_expect((exp), 0)
-
-def __predict_true(exp): return (exp)
-
-def __predict_false(exp): return (exp)
-
-def __FBSDID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
-
-def __RCSID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
-
-def __RCSID_SOURCE(s): return __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
-
-def __SCCSID(s): return __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
-
-def __COPYRIGHT(s): return __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
-
-_POSIX_C_SOURCE = 199009
-_POSIX_C_SOURCE = 199209
-__XSI_VISIBLE = 600
-_POSIX_C_SOURCE = 200112
-__XSI_VISIBLE = 500
-_POSIX_C_SOURCE = 199506
-_POSIX_C_SOURCE = 198808
-__POSIX_VISIBLE = 200112
-__ISO_C_VISIBLE = 1999
-__POSIX_VISIBLE = 199506
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 199309
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 199209
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 199009
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 198808
-__ISO_C_VISIBLE = 0
-__POSIX_VISIBLE = 0
-__XSI_VISIBLE = 0
-__BSD_VISIBLE = 0
-__ISO_C_VISIBLE = 1990
-__POSIX_VISIBLE = 0
-__XSI_VISIBLE = 0
-__BSD_VISIBLE = 0
-__ISO_C_VISIBLE = 1999
-__POSIX_VISIBLE = 200112
-__XSI_VISIBLE = 600
-__BSD_VISIBLE = 1
-__ISO_C_VISIBLE = 1999
-
-# Included from sys/_types.h
-
-# Included from machine/_types.h
-
-# Included from machine/endian.h
-_QUAD_HIGHWORD = 1
-_QUAD_LOWWORD = 0
-_LITTLE_ENDIAN = 1234
-_BIG_ENDIAN = 4321
-_PDP_ENDIAN = 3412
-_BYTE_ORDER = _LITTLE_ENDIAN
-LITTLE_ENDIAN = _LITTLE_ENDIAN
-BIG_ENDIAN = _BIG_ENDIAN
-PDP_ENDIAN = _PDP_ENDIAN
-BYTE_ORDER = _BYTE_ORDER
-def __word_swap_int_var(x): return \
-
-def __word_swap_int_const(x): return \
-
-def __word_swap_int(x): return __word_swap_int_var(x)
-
-def __byte_swap_int_var(x): return \
-
-def __byte_swap_int_const(x): return \
-
-def __byte_swap_int(x): return __byte_swap_int_var(x)
-
-def __byte_swap_word_var(x): return \
-
-def __byte_swap_word_const(x): return \
-
-def __byte_swap_word(x): return __byte_swap_word_var(x)
-
-def __htonl(x): return __bswap32(x)
-
-def __htons(x): return __bswap16(x)
-
-def __ntohl(x): return __bswap32(x)
-
-def __ntohs(x): return __bswap16(x)
-
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_TCP = 6
-IPPROTO_UDP = 17
-def htonl(x): return __htonl(x)
-
-def htons(x): return __htons(x)
-
-def ntohl(x): return __ntohl(x)
-
-def ntohs(x): return __ntohs(x)
-
-IPPROTO_RAW = 255
-INET_ADDRSTRLEN = 16
-IPPROTO_HOPOPTS = 0
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPV4 = 4
-IPPROTO_IPIP = IPPROTO_IPV4
-IPPROTO_ST = 7
-IPPROTO_EGP = 8
-IPPROTO_PIGP = 9
-IPPROTO_RCCMON = 10
-IPPROTO_NVPII = 11
-IPPROTO_PUP = 12
-IPPROTO_ARGUS = 13
-IPPROTO_EMCON = 14
-IPPROTO_XNET = 15
-IPPROTO_CHAOS = 16
-IPPROTO_MUX = 18
-IPPROTO_MEAS = 19
-IPPROTO_HMP = 20
-IPPROTO_PRM = 21
-IPPROTO_IDP = 22
-IPPROTO_TRUNK1 = 23
-IPPROTO_TRUNK2 = 24
-IPPROTO_LEAF1 = 25
-IPPROTO_LEAF2 = 26
-IPPROTO_RDP = 27
-IPPROTO_IRTP = 28
-IPPROTO_TP = 29
-IPPROTO_BLT = 30
-IPPROTO_NSP = 31
-IPPROTO_INP = 32
-IPPROTO_SEP = 33
-IPPROTO_3PC = 34
-IPPROTO_IDPR = 35
-IPPROTO_XTP = 36
-IPPROTO_DDP = 37
-IPPROTO_CMTP = 38
-IPPROTO_TPXX = 39
-IPPROTO_IL = 40
-IPPROTO_IPV6 = 41
-IPPROTO_SDRP = 42
-IPPROTO_ROUTING = 43
-IPPROTO_FRAGMENT = 44
-IPPROTO_IDRP = 45
-IPPROTO_RSVP = 46
-IPPROTO_GRE = 47
-IPPROTO_MHRP = 48
-IPPROTO_BHA = 49
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_INLSP = 52
-IPPROTO_SWIPE = 53
-IPPROTO_NHRP = 54
-IPPROTO_MOBILE = 55
-IPPROTO_TLSP = 56
-IPPROTO_SKIP = 57
-IPPROTO_ICMPV6 = 58
-IPPROTO_NONE = 59
-IPPROTO_DSTOPTS = 60
-IPPROTO_AHIP = 61
-IPPROTO_CFTP = 62
-IPPROTO_HELLO = 63
-IPPROTO_SATEXPAK = 64
-IPPROTO_KRYPTOLAN = 65
-IPPROTO_RVD = 66
-IPPROTO_IPPC = 67
-IPPROTO_ADFS = 68
-IPPROTO_SATMON = 69
-IPPROTO_VISA = 70
-IPPROTO_IPCV = 71
-IPPROTO_CPNX = 72
-IPPROTO_CPHB = 73
-IPPROTO_WSN = 74
-IPPROTO_PVP = 75
-IPPROTO_BRSATMON = 76
-IPPROTO_ND = 77
-IPPROTO_WBMON = 78
-IPPROTO_WBEXPAK = 79
-IPPROTO_EON = 80
-IPPROTO_VMTP = 81
-IPPROTO_SVMTP = 82
-IPPROTO_VINES = 83
-IPPROTO_TTP = 84
-IPPROTO_IGP = 85
-IPPROTO_DGP = 86
-IPPROTO_TCF = 87
-IPPROTO_IGRP = 88
-IPPROTO_OSPFIGP = 89
-IPPROTO_SRPC = 90
-IPPROTO_LARP = 91
-IPPROTO_MTP = 92
-IPPROTO_AX25 = 93
-IPPROTO_IPEIP = 94
-IPPROTO_MICP = 95
-IPPROTO_SCCSP = 96
-IPPROTO_ETHERIP = 97
-IPPROTO_ENCAP = 98
-IPPROTO_APES = 99
-IPPROTO_GMTP = 100
-IPPROTO_IPCOMP = 108
-IPPROTO_PIM = 103
-IPPROTO_CARP = 112
-IPPROTO_PGM = 113
-IPPROTO_PFSYNC = 240
-IPPROTO_OLD_DIVERT = 254
-IPPROTO_MAX = 256
-IPPROTO_DONE = 257
-IPPROTO_DIVERT = 258
-IPPROTO_SPACER = 32767
-IPPORT_RESERVED = 1024
-IPPORT_HIFIRSTAUTO = 49152
-IPPORT_HILASTAUTO = 65535
-IPPORT_RESERVEDSTART = 600
-IPPORT_MAX = 65535
-def IN_CLASSA(i): return (((u_int32_t)(i) & (-2147483648)) == 0)
-
-IN_CLASSA_NET = (-16777216)
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((u_int32_t)(i) & (-1073741824)) == (-2147483648))
-
-IN_CLASSB_NET = (-65536)
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((u_int32_t)(i) & (-536870912)) == (-1073741824))
-
-IN_CLASSC_NET = (-256)
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((u_int32_t)(i) & (-268435456)) == (-536870912))
-
-IN_CLASSD_NET = (-268435456)
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
-
-def IN_BADCLASS(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
-
-INADDR_NONE = (-1)
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_SENDSRCADDR = IP_RECVDSTADDR
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_MULTICAST_VIF = 14
-IP_RSVP_ON = 15
-IP_RSVP_OFF = 16
-IP_RSVP_VIF_ON = 17
-IP_RSVP_VIF_OFF = 18
-IP_PORTRANGE = 19
-IP_RECVIF = 20
-IP_IPSEC_POLICY = 21
-IP_FAITH = 22
-IP_ONESBCAST = 23
-IP_FW_TABLE_ADD = 40
-IP_FW_TABLE_DEL = 41
-IP_FW_TABLE_FLUSH = 42
-IP_FW_TABLE_GETSIZE = 43
-IP_FW_TABLE_LIST = 44
-IP_FW_ADD = 50
-IP_FW_DEL = 51
-IP_FW_FLUSH = 52
-IP_FW_ZERO = 53
-IP_FW_GET = 54
-IP_FW_RESETLOG = 55
-IP_DUMMYNET_CONFIGURE = 60
-IP_DUMMYNET_DEL = 61
-IP_DUMMYNET_FLUSH = 62
-IP_DUMMYNET_GET = 64
-IP_RECVTTL = 65
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IP_PORTRANGE_DEFAULT = 0
-IP_PORTRANGE_HIGH = 1
-IP_PORTRANGE_LOW = 2
-IPPROTO_MAXID = (IPPROTO_AH + 1)
-IPCTL_FORWARDING = 1
-IPCTL_SENDREDIRECTS = 2
-IPCTL_DEFTTL = 3
-IPCTL_DEFMTU = 4
-IPCTL_RTEXPIRE = 5
-IPCTL_RTMINEXPIRE = 6
-IPCTL_RTMAXCACHE = 7
-IPCTL_SOURCEROUTE = 8
-IPCTL_DIRECTEDBROADCAST = 9
-IPCTL_INTRQMAXLEN = 10
-IPCTL_INTRQDROPS = 11
-IPCTL_STATS = 12
-IPCTL_ACCEPTSOURCEROUTE = 13
-IPCTL_FASTFORWARDING = 14
-IPCTL_KEEPFAITH = 15
-IPCTL_GIF_TTL = 16
-IPCTL_MAXID = 17
-def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
-
-
-# Included from netinet6/in6.h
-__KAME_VERSION = "20010528/FreeBSD"
-IPV6PORT_RESERVED = 1024
-IPV6PORT_ANONMIN = 49152
-IPV6PORT_ANONMAX = 65535
-IPV6PORT_RESERVEDMIN = 600
-IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
-INET6_ADDRSTRLEN = 46
-IPV6_ADDR_INT32_ONE = 1
-IPV6_ADDR_INT32_TWO = 2
-IPV6_ADDR_INT32_MNL = (-16711680)
-IPV6_ADDR_INT32_MLL = (-16646144)
-IPV6_ADDR_INT32_SMP = 0x0000ffff
-IPV6_ADDR_INT16_ULL = 0xfe80
-IPV6_ADDR_INT16_USL = 0xfec0
-IPV6_ADDR_INT16_MLL = 0xff02
-IPV6_ADDR_INT32_ONE = 0x01000000
-IPV6_ADDR_INT32_TWO = 0x02000000
-IPV6_ADDR_INT32_MNL = 0x000001ff
-IPV6_ADDR_INT32_MLL = 0x000002ff
-IPV6_ADDR_INT32_SMP = (-65536)
-IPV6_ADDR_INT16_ULL = 0x80fe
-IPV6_ADDR_INT16_USL = 0xc0fe
-IPV6_ADDR_INT16_MLL = 0x02ff
-def IN6_IS_ADDR_UNSPECIFIED(a): return \
-
-def IN6_IS_ADDR_LOOPBACK(a): return \
-
-def IN6_IS_ADDR_V4COMPAT(a): return \
-
-def IN6_IS_ADDR_V4MAPPED(a): return \
-
-IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
-IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
-__IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
-__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
-__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
-__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
-__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
-def IN6_IS_ADDR_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_INTFACELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-def IN6_IS_SCOPE_LINKLOCAL(a): return \
-
-def IFA6_IS_DEPRECATED(a): return \
-
-def IFA6_IS_INVALID(a): return \
-
-IPV6_OPTIONS = 1
-IPV6_RECVOPTS = 5
-IPV6_RECVRETOPTS = 6
-IPV6_RECVDSTADDR = 7
-IPV6_RETOPTS = 8
-IPV6_SOCKOPT_RESERVED1 = 3
-IPV6_UNICAST_HOPS = 4
-IPV6_MULTICAST_IF = 9
-IPV6_MULTICAST_HOPS = 10
-IPV6_MULTICAST_LOOP = 11
-IPV6_JOIN_GROUP = 12
-IPV6_LEAVE_GROUP = 13
-IPV6_PORTRANGE = 14
-ICMP6_FILTER = 18
-IPV6_2292PKTINFO = 19
-IPV6_2292HOPLIMIT = 20
-IPV6_2292NEXTHOP = 21
-IPV6_2292HOPOPTS = 22
-IPV6_2292DSTOPTS = 23
-IPV6_2292RTHDR = 24
-IPV6_2292PKTOPTIONS = 25
-IPV6_CHECKSUM = 26
-IPV6_V6ONLY = 27
-IPV6_BINDV6ONLY = IPV6_V6ONLY
-IPV6_IPSEC_POLICY = 28
-IPV6_FAITH = 29
-IPV6_FW_ADD = 30
-IPV6_FW_DEL = 31
-IPV6_FW_FLUSH = 32
-IPV6_FW_ZERO = 33
-IPV6_FW_GET = 34
-IPV6_RTHDRDSTOPTS = 35
-IPV6_RECVPKTINFO = 36
-IPV6_RECVHOPLIMIT = 37
-IPV6_RECVRTHDR = 38
-IPV6_RECVHOPOPTS = 39
-IPV6_RECVDSTOPTS = 40
-IPV6_RECVRTHDRDSTOPTS = 41
-IPV6_USE_MIN_MTU = 42
-IPV6_RECVPATHMTU = 43
-IPV6_PATHMTU = 44
-IPV6_REACHCONF = 45
-IPV6_PKTINFO = 46
-IPV6_HOPLIMIT = 47
-IPV6_NEXTHOP = 48
-IPV6_HOPOPTS = 49
-IPV6_DSTOPTS = 50
-IPV6_RTHDR = 51
-IPV6_PKTOPTIONS = 52
-IPV6_RECVTCLASS = 57
-IPV6_AUTOFLOWLABEL = 59
-IPV6_TCLASS = 61
-IPV6_DONTFRAG = 62
-IPV6_PREFER_TEMPADDR = 63
-IPV6_RTHDR_LOOSE = 0
-IPV6_RTHDR_STRICT = 1
-IPV6_RTHDR_TYPE_0 = 0
-IPV6_DEFAULT_MULTICAST_HOPS = 1
-IPV6_DEFAULT_MULTICAST_LOOP = 1
-IPV6_PORTRANGE_DEFAULT = 0
-IPV6_PORTRANGE_HIGH = 1
-IPV6_PORTRANGE_LOW = 2
-IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
-IPV6CTL_FORWARDING = 1
-IPV6CTL_SENDREDIRECTS = 2
-IPV6CTL_DEFHLIM = 3
-IPV6CTL_DEFMTU = 4
-IPV6CTL_FORWSRCRT = 5
-IPV6CTL_STATS = 6
-IPV6CTL_MRTSTATS = 7
-IPV6CTL_MRTPROTO = 8
-IPV6CTL_MAXFRAGPACKETS = 9
-IPV6CTL_SOURCECHECK = 10
-IPV6CTL_SOURCECHECK_LOGINT = 11
-IPV6CTL_ACCEPT_RTADV = 12
-IPV6CTL_KEEPFAITH = 13
-IPV6CTL_LOG_INTERVAL = 14
-IPV6CTL_HDRNESTLIMIT = 15
-IPV6CTL_DAD_COUNT = 16
-IPV6CTL_AUTO_FLOWLABEL = 17
-IPV6CTL_DEFMCASTHLIM = 18
-IPV6CTL_GIF_HLIM = 19
-IPV6CTL_KAME_VERSION = 20
-IPV6CTL_USE_DEPRECATED = 21
-IPV6CTL_RR_PRUNE = 22
-IPV6CTL_MAPPED_ADDR = 23
-IPV6CTL_V6ONLY = 24
-IPV6CTL_RTEXPIRE = 25
-IPV6CTL_RTMINEXPIRE = 26
-IPV6CTL_RTMAXCACHE = 27
-IPV6CTL_USETEMPADDR = 32
-IPV6CTL_TEMPPLTIME = 33
-IPV6CTL_TEMPVLTIME = 34
-IPV6CTL_AUTO_LINKLOCAL = 35
-IPV6CTL_RIP6STATS = 36
-IPV6CTL_PREFER_TEMPADDR = 37
-IPV6CTL_ADDRCTLPOLICY = 38
-IPV6CTL_MAXFRAGS = 41
-IPV6CTL_MAXID = 42
diff --git a/sys/lib/python/plat-freebsd7/regen b/sys/lib/python/plat-freebsd7/regen
deleted file mode 100644
index 8aa6898c6..000000000
--- a/sys/lib/python/plat-freebsd7/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-generic/regen b/sys/lib/python/plat-generic/regen
deleted file mode 100755
index a20cdc151..000000000
--- a/sys/lib/python/plat-generic/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-python$EXE ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-irix5/AL.py b/sys/lib/python/plat-irix5/AL.py
deleted file mode 100755
index ec941a2ba..000000000
--- a/sys/lib/python/plat-irix5/AL.py
+++ /dev/null
@@ -1,61 +0,0 @@
-RATE_48000 = 48000
-RATE_44100 = 44100
-RATE_32000 = 32000
-RATE_22050 = 22050
-RATE_16000 = 16000
-RATE_11025 = 11025
-RATE_8000 = 8000
-
-SAMPFMT_TWOSCOMP= 1
-SAMPFMT_FLOAT = 32
-SAMPFMT_DOUBLE = 64
-
-SAMPLE_8 = 1
-SAMPLE_16 = 2
- # SAMPLE_24 is the low 24 bits of a long, sign extended to 32 bits
-SAMPLE_24 = 4
-
-MONO = 1
-STEREO = 2
-QUADRO = 4 # 4CHANNEL is not a legal Python name
-
-INPUT_LINE = 0
-INPUT_MIC = 1
-INPUT_DIGITAL = 2
-
-MONITOR_OFF = 0
-MONITOR_ON = 1
-
-ERROR_NUMBER = 0
-ERROR_TYPE = 1
-ERROR_LOCATION_LSP = 2
-ERROR_LOCATION_MSP = 3
-ERROR_LENGTH = 4
-
-ERROR_INPUT_UNDERFLOW = 0
-ERROR_OUTPUT_OVERFLOW = 1
-
-# These seem to be not supported anymore:
-##HOLD, RELEASE = 0, 1
-##ATTAIL, ATHEAD, ATMARK, ATTIME = 0, 1, 2, 3
-
-DEFAULT_DEVICE = 1
-
-INPUT_SOURCE = 0
-LEFT_INPUT_ATTEN = 1
-RIGHT_INPUT_ATTEN = 2
-INPUT_RATE = 3
-OUTPUT_RATE = 4
-LEFT_SPEAKER_GAIN = 5
-RIGHT_SPEAKER_GAIN = 6
-INPUT_COUNT = 7
-OUTPUT_COUNT = 8
-UNUSED_COUNT = 9
-SYNC_INPUT_TO_AES = 10
-SYNC_OUTPUT_TO_AES = 11
-MONITOR_CTL = 12
-LEFT_MONITOR_ATTEN = 13
-RIGHT_MONITOR_ATTEN = 14
-
-ENUM_VALUE = 0 # only certain values are valid
-RANGE_VALUE = 1 # any value in range is valid
diff --git a/sys/lib/python/plat-irix5/CD.py b/sys/lib/python/plat-irix5/CD.py
deleted file mode 100755
index 8c1e03bc6..000000000
--- a/sys/lib/python/plat-irix5/CD.py
+++ /dev/null
@@ -1,34 +0,0 @@
-ERROR = 0
-NODISC = 1
-READY = 2
-PLAYING = 3
-PAUSED = 4
-STILL = 5
-
-AUDIO = 0
-PNUM = 1
-INDEX = 2
-PTIME = 3
-ATIME = 4
-CATALOG = 5
-IDENT = 6
-CONTROL = 7
-
-CDDA_DATASIZE = 2352
-
-##CDDA_SUBCODESIZE = (sizeof(struct subcodeQ))
-##CDDA_BLOCKSIZE = (sizeof(struct cdframe))
-##CDDA_NUMSAMPLES = (CDDA_DATASIZE/2)
-##
-##CDQ_PREEMP_MASK = 0xd
-##CDQ_COPY_MASK = 0xb
-##CDQ_DDATA_MASK = 0xd
-##CDQ_BROADCAST_MASK = 0x8
-##CDQ_PREEMPHASIS = 0x1
-##CDQ_COPY_PERMITTED = 0x2
-##CDQ_DIGITAL_DATA = 0x4
-##CDQ_BROADCAST_USE = 0x8
-##
-##CDQ_MODE1 = 0x1
-##CDQ_MODE2 = 0x2
-##CDQ_MODE3 = 0x3
diff --git a/sys/lib/python/plat-irix5/CL.py b/sys/lib/python/plat-irix5/CL.py
deleted file mode 100755
index 23259c554..000000000
--- a/sys/lib/python/plat-irix5/CL.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Backward compatible module CL.
-# All relevant symbols are now defined in the module cl.
-try:
- from cl import *
-except ImportError:
- from CL_old import *
-else:
- del CompressImage
- del DecompressImage
- del GetAlgorithmName
- del OpenCompressor
- del OpenDecompressor
- del QueryAlgorithms
- del QueryMaxHeaderSize
- del QueryScheme
- del QuerySchemeFromName
- del SetDefault
- del SetMax
- del SetMin
- try:
- del cvt_type
- except NameError:
- pass
- del error
diff --git a/sys/lib/python/plat-irix5/CL_old.py b/sys/lib/python/plat-irix5/CL_old.py
deleted file mode 100755
index 967b49708..000000000
--- a/sys/lib/python/plat-irix5/CL_old.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#
-# cl.h - Compression Library typedefs and prototypes
-#
-# 01/07/92 Cleanup by Brian Knittel
-# 02/18/92 Original Version by Brian Knittel
-#
-
-#
-# originalFormat parameter values
-#
-MAX_NUMBER_OF_ORIGINAL_FORMATS = 32
-
-# Audio
-MONO = 0
-STEREO_INTERLEAVED = 1
-
-# Video
-# YUV is defined to be the same thing as YCrCb (luma and two chroma components).
-# 422 is appended to YUV (or YCrCb) if the chroma is sub-sampled by 2
-# horizontally, packed as U Y1 V Y2 (byte order).
-# 422HC is appended to YUV (or YCrCb) if the chroma is sub-sampled by 2
-# vertically in addition to horizontally, and is packed the same as
-# 422 except that U & V are not valid on the second line.
-#
-RGB = 0
-RGBX = 1
-RGBA = 2
-RGB332 = 3
-
-GRAYSCALE = 4
-Y = 4
-YUV = 5
-YCbCr = 5
-YUV422 = 6 # 4:2:2 sampling
-YCbCr422 = 6 # 4:2:2 sampling
-YUV422HC = 7 # 4:1:1 sampling
-YCbCr422HC = 7 # 4:1:1 sampling
-YUV422DC = 7 # 4:1:1 sampling
-YCbCr422DC = 7 # 4:1:1 sampling
-
-BEST_FIT = -1
-
-def BytesPerSample(s):
- if s in (MONO, YUV):
- return 2
- elif s == STEREO_INTERLEAVED:
- return 4
- else:
- return 0
-
-def BytesPerPixel(f):
- if f in (RGB, YUV):
- return 3
- elif f in (RGBX, RGBA):
- return 4
- elif f in (RGB332, GRAYSCALE):
- return 1
- else:
- return 2
-
-def AudioFormatName(f):
- if f == MONO:
- return 'MONO'
- elif f == STEREO_INTERLEAVED:
- return 'STEREO_INTERLEAVED'
- else:
- return 'Not a valid format'
-
-def VideoFormatName(f):
- if f == RGB:
- return 'RGB'
- elif f == RGBX:
- return 'RGBX'
- elif f == RGBA:
- return 'RGBA'
- elif f == RGB332:
- return 'RGB332'
- elif f == GRAYSCALE:
- return 'GRAYSCALE'
- elif f == YUV:
- return 'YUV'
- elif f == YUV422:
- return 'YUV422'
- elif f == YUV422DC:
- return 'YUV422DC'
- else:
- return 'Not a valid format'
-
-MAX_NUMBER_OF_AUDIO_ALGORITHMS = 32
-MAX_NUMBER_OF_VIDEO_ALGORITHMS = 32
-
-#
-# Algorithm types
-#
-AUDIO = 0
-VIDEO = 1
-
-def AlgorithmNumber(scheme):
- return scheme & 0x7fff
-def AlgorithmType(scheme):
- return (scheme >> 15) & 1
-def Algorithm(type, n):
- return n | ((type & 1) << 15)
-
-#
-# "compressionScheme" argument values
-#
-UNKNOWN_SCHEME = -1
-
-UNCOMPRESSED_AUDIO = Algorithm(AUDIO, 0)
-G711_ULAW = Algorithm(AUDIO, 1)
-ULAW = Algorithm(AUDIO, 1)
-G711_ALAW = Algorithm(AUDIO, 2)
-ALAW = Algorithm(AUDIO, 2)
-AWARE_MPEG_AUDIO = Algorithm(AUDIO, 3)
-AWARE_MULTIRATE = Algorithm(AUDIO, 4)
-
-UNCOMPRESSED = Algorithm(VIDEO, 0)
-UNCOMPRESSED_VIDEO = Algorithm(VIDEO, 0)
-RLE = Algorithm(VIDEO, 1)
-JPEG = Algorithm(VIDEO, 2)
-MPEG_VIDEO = Algorithm(VIDEO, 3)
-MVC1 = Algorithm(VIDEO, 4)
-RTR = Algorithm(VIDEO, 5)
-RTR1 = Algorithm(VIDEO, 5)
-
-#
-# Parameters
-#
-MAX_NUMBER_OF_PARAMS = 256
-# Default Parameters
-IMAGE_WIDTH = 0
-IMAGE_HEIGHT = 1
-ORIGINAL_FORMAT = 2
-INTERNAL_FORMAT = 3
-COMPONENTS = 4
-BITS_PER_COMPONENT = 5
-FRAME_RATE = 6
-COMPRESSION_RATIO = 7
-EXACT_COMPRESSION_RATIO = 8
-FRAME_BUFFER_SIZE = 9
-COMPRESSED_BUFFER_SIZE = 10
-BLOCK_SIZE = 11
-PREROLL = 12
-FRAME_TYPE = 13
-ALGORITHM_ID = 14
-ALGORITHM_VERSION = 15
-ORIENTATION = 16
-NUMBER_OF_FRAMES = 17
-SPEED = 18
-LAST_FRAME_INDEX = 19
-NUMBER_OF_PARAMS = 20
-
-# JPEG Specific Parameters
-QUALITY_FACTOR = NUMBER_OF_PARAMS + 0
-
-# MPEG Specific Parameters
-END_OF_SEQUENCE = NUMBER_OF_PARAMS + 0
-
-# RTR Specific Parameters
-QUALITY_LEVEL = NUMBER_OF_PARAMS + 0
-ZOOM_X = NUMBER_OF_PARAMS + 1
-ZOOM_Y = NUMBER_OF_PARAMS + 2
-
-#
-# Parameter value types
-#
-ENUM_VALUE = 0 # only certain constant values are valid
-RANGE_VALUE = 1 # any value in a given range is valid
-FLOATING_ENUM_VALUE = 2 # only certain constant floating point values are valid
-FLOATING_RANGE_VALUE = 3 # any value in a given floating point range is valid
-
-#
-# Algorithm Functionality
-#
-DECOMPRESSOR = 1
-COMPRESSOR = 2
-CODEC = 3
-
-#
-# Buffer types
-#
-NONE = 0
-FRAME = 1
-DATA = 2
-
-#
-# Frame types
-#
-NONE = 0
-KEYFRAME = 1
-INTRA = 1
-PREDICTED = 2
-BIDIRECTIONAL = 3
-
-#
-# Orientations
-#
-TOP_DOWN = 0
-BOTTOM_UP = 1
-
-#
-# SGI Proprietary Algorithm Header Start Code
-#
-HEADER_START_CODE = 0xc1C0DEC
-
-#
-# error codes
-#
-
-BAD_NO_BUFFERSPACE = -2 # no space for internal buffers
-BAD_PVBUFFER = -3 # param/val buffer doesn't make sense
-BAD_BUFFERLENGTH_NEG = -4 # negative buffer length
-BAD_BUFFERLENGTH_ODD = -5 # odd length parameter/value buffer
-BAD_PARAM = -6 # invalid parameter
-BAD_COMPRESSION_SCHEME = -7 # compression scheme parameter invalid
-BAD_COMPRESSOR_HANDLE = -8 # compression handle parameter invalid
-BAD_COMPRESSOR_HANDLE_POINTER = -9 # compression handle pointer invalid
-BAD_BUFFER_HANDLE = -10 # buffer handle invalid
-BAD_BUFFER_QUERY_SIZE = -11 # buffer query size too large
-JPEG_ERROR = -12 # error from libjpeg
-BAD_FRAME_SIZE = -13 # frame size invalid
-PARAM_OUT_OF_RANGE = -14 # parameter out of range
-ADDED_ALGORITHM_ERROR = -15 # added algorithm had a unique error
-BAD_ALGORITHM_TYPE = -16 # bad algorithm type
-BAD_ALGORITHM_NAME = -17 # bad algorithm name
-BAD_BUFFERING = -18 # bad buffering calls
-BUFFER_NOT_CREATED = -19 # buffer not created
-BAD_BUFFER_EXISTS = -20 # buffer already created
-BAD_INTERNAL_FORMAT = -21 # invalid internal format
-BAD_BUFFER_POINTER = -22 # invalid buffer pointer
-FRAME_BUFFER_SIZE_ZERO = -23 # frame buffer has zero size
-BAD_STREAM_HEADER = -24 # invalid stream header
-
-BAD_LICENSE = -25 # netls license not valid
-AWARE_ERROR = -26 # error from libawcmp
diff --git a/sys/lib/python/plat-irix5/DEVICE.py b/sys/lib/python/plat-irix5/DEVICE.py
deleted file mode 100755
index 7ace8cb0b..000000000
--- a/sys/lib/python/plat-irix5/DEVICE.py
+++ /dev/null
@@ -1,400 +0,0 @@
-NULLDEV = 0
-BUTOFFSET = 1
-VALOFFSET = 256
-PSEUDOFFSET = 512
-BUT2OFFSET = 3840
-TIMOFFSET = 515
-XKBDOFFSET = 143
-BUTCOUNT = 255
-VALCOUNT = 256
-TIMCOUNT = 4
-XKBDCOUNT = 28
-USERBUTOFFSET = 4096
-USERVALOFFSET = 12288
-USERPSEUDOFFSET = 16384
-BUT0 = 1
-BUT1 = 2
-BUT2 = 3
-BUT3 = 4
-BUT4 = 5
-BUT5 = 6
-BUT6 = 7
-BUT7 = 8
-BUT8 = 9
-BUT9 = 10
-BUT10 = 11
-BUT11 = 12
-BUT12 = 13
-BUT13 = 14
-BUT14 = 15
-BUT15 = 16
-BUT16 = 17
-BUT17 = 18
-BUT18 = 19
-BUT19 = 20
-BUT20 = 21
-BUT21 = 22
-BUT22 = 23
-BUT23 = 24
-BUT24 = 25
-BUT25 = 26
-BUT26 = 27
-BUT27 = 28
-BUT28 = 29
-BUT29 = 30
-BUT30 = 31
-BUT31 = 32
-BUT32 = 33
-BUT33 = 34
-BUT34 = 35
-BUT35 = 36
-BUT36 = 37
-BUT37 = 38
-BUT38 = 39
-BUT39 = 40
-BUT40 = 41
-BUT41 = 42
-BUT42 = 43
-BUT43 = 44
-BUT44 = 45
-BUT45 = 46
-BUT46 = 47
-BUT47 = 48
-BUT48 = 49
-BUT49 = 50
-BUT50 = 51
-BUT51 = 52
-BUT52 = 53
-BUT53 = 54
-BUT54 = 55
-BUT55 = 56
-BUT56 = 57
-BUT57 = 58
-BUT58 = 59
-BUT59 = 60
-BUT60 = 61
-BUT61 = 62
-BUT62 = 63
-BUT63 = 64
-BUT64 = 65
-BUT65 = 66
-BUT66 = 67
-BUT67 = 68
-BUT68 = 69
-BUT69 = 70
-BUT70 = 71
-BUT71 = 72
-BUT72 = 73
-BUT73 = 74
-BUT74 = 75
-BUT75 = 76
-BUT76 = 77
-BUT77 = 78
-BUT78 = 79
-BUT79 = 80
-BUT80 = 81
-BUT81 = 82
-BUT82 = 83
-MAXKBDBUT = 83
-BUT100 = 101
-BUT101 = 102
-BUT102 = 103
-BUT103 = 104
-BUT104 = 105
-BUT105 = 106
-BUT106 = 107
-BUT107 = 108
-BUT108 = 109
-BUT109 = 110
-BUT110 = 111
-BUT111 = 112
-BUT112 = 113
-BUT113 = 114
-BUT114 = 115
-BUT115 = 116
-BUT116 = 117
-BUT117 = 118
-BUT118 = 119
-BUT119 = 120
-BUT120 = 121
-BUT121 = 122
-BUT122 = 123
-BUT123 = 124
-BUT124 = 125
-BUT125 = 126
-BUT126 = 127
-BUT127 = 128
-BUT128 = 129
-BUT129 = 130
-BUT130 = 131
-BUT131 = 132
-BUT132 = 133
-BUT133 = 134
-BUT134 = 135
-BUT135 = 136
-BUT136 = 137
-BUT137 = 138
-BUT138 = 139
-BUT139 = 140
-BUT140 = 141
-BUT141 = 142
-BUT142 = 143
-BUT143 = 144
-BUT144 = 145
-BUT145 = 146
-BUT146 = 147
-BUT147 = 148
-BUT148 = 149
-BUT149 = 150
-BUT150 = 151
-BUT151 = 152
-BUT152 = 153
-BUT153 = 154
-BUT154 = 155
-BUT155 = 156
-BUT156 = 157
-BUT157 = 158
-BUT158 = 159
-BUT159 = 160
-BUT160 = 161
-BUT161 = 162
-BUT162 = 163
-BUT163 = 164
-BUT164 = 165
-BUT165 = 166
-BUT166 = 167
-BUT167 = 168
-BUT168 = 169
-BUT181 = 182
-BUT182 = 183
-BUT183 = 184
-BUT184 = 185
-BUT185 = 186
-BUT186 = 187
-BUT187 = 188
-BUT188 = 189
-BUT189 = 190
-MOUSE1 = 101
-MOUSE2 = 102
-MOUSE3 = 103
-LEFTMOUSE = 103
-MIDDLEMOUSE = 102
-RIGHTMOUSE = 101
-LPENBUT = 104
-BPAD0 = 105
-BPAD1 = 106
-BPAD2 = 107
-BPAD3 = 108
-LPENVALID = 109
-SWBASE = 111
-SW0 = 111
-SW1 = 112
-SW2 = 113
-SW3 = 114
-SW4 = 115
-SW5 = 116
-SW6 = 117
-SW7 = 118
-SW8 = 119
-SW9 = 120
-SW10 = 121
-SW11 = 122
-SW12 = 123
-SW13 = 124
-SW14 = 125
-SW15 = 126
-SW16 = 127
-SW17 = 128
-SW18 = 129
-SW19 = 130
-SW20 = 131
-SW21 = 132
-SW22 = 133
-SW23 = 134
-SW24 = 135
-SW25 = 136
-SW26 = 137
-SW27 = 138
-SW28 = 139
-SW29 = 140
-SW30 = 141
-SW31 = 142
-SBBASE = 182
-SBPICK = 182
-SBBUT1 = 183
-SBBUT2 = 184
-SBBUT3 = 185
-SBBUT4 = 186
-SBBUT5 = 187
-SBBUT6 = 188
-SBBUT7 = 189
-SBBUT8 = 190
-AKEY = 11
-BKEY = 36
-CKEY = 28
-DKEY = 18
-EKEY = 17
-FKEY = 19
-GKEY = 26
-HKEY = 27
-IKEY = 40
-JKEY = 34
-KKEY = 35
-LKEY = 42
-MKEY = 44
-NKEY = 37
-OKEY = 41
-PKEY = 48
-QKEY = 10
-RKEY = 24
-SKEY = 12
-TKEY = 25
-UKEY = 33
-VKEY = 29
-WKEY = 16
-XKEY = 21
-YKEY = 32
-ZKEY = 20
-ZEROKEY = 46
-ONEKEY = 8
-TWOKEY = 14
-THREEKEY = 15
-FOURKEY = 22
-FIVEKEY = 23
-SIXKEY = 30
-SEVENKEY = 31
-EIGHTKEY = 38
-NINEKEY = 39
-BREAKKEY = 1
-SETUPKEY = 2
-CTRLKEY = 3
-LEFTCTRLKEY = CTRLKEY
-CAPSLOCKKEY = 4
-RIGHTSHIFTKEY = 5
-LEFTSHIFTKEY = 6
-NOSCRLKEY = 13
-ESCKEY = 7
-TABKEY = 9
-RETKEY = 51
-SPACEKEY = 83
-LINEFEEDKEY = 60
-BACKSPACEKEY = 61
-DELKEY = 62
-SEMICOLONKEY = 43
-PERIODKEY = 52
-COMMAKEY = 45
-QUOTEKEY = 50
-ACCENTGRAVEKEY = 55
-MINUSKEY = 47
-VIRGULEKEY = 53
-BACKSLASHKEY = 57
-EQUALKEY = 54
-LEFTBRACKETKEY = 49
-RIGHTBRACKETKEY = 56
-LEFTARROWKEY = 73
-DOWNARROWKEY = 74
-RIGHTARROWKEY = 80
-UPARROWKEY = 81
-PAD0 = 59
-PAD1 = 58
-PAD2 = 64
-PAD3 = 65
-PAD4 = 63
-PAD5 = 69
-PAD6 = 70
-PAD7 = 67
-PAD8 = 68
-PAD9 = 75
-PADPF1 = 72
-PADPF2 = 71
-PADPF3 = 79
-PADPF4 = 78
-PADPERIOD = 66
-PADMINUS = 76
-PADCOMMA = 77
-PADENTER = 82
-LEFTALTKEY = 143
-RIGHTALTKEY = 144
-RIGHTCTRLKEY = 145
-F1KEY = 146
-F2KEY = 147
-F3KEY = 148
-F4KEY = 149
-F5KEY = 150
-F6KEY = 151
-F7KEY = 152
-F8KEY = 153
-F9KEY = 154
-F10KEY = 155
-F11KEY = 156
-F12KEY = 157
-PRINTSCREENKEY = 158
-SCROLLLOCKKEY = 159
-PAUSEKEY = 160
-INSERTKEY = 161
-HOMEKEY = 162
-PAGEUPKEY = 163
-ENDKEY = 164
-PAGEDOWNKEY = 165
-NUMLOCKKEY = 166
-PADVIRGULEKEY = 167
-PADASTERKEY = 168
-PADPLUSKEY = 169
-SGIRESERVED = 256
-DIAL0 = 257
-DIAL1 = 258
-DIAL2 = 259
-DIAL3 = 260
-DIAL4 = 261
-DIAL5 = 262
-DIAL6 = 263
-DIAL7 = 264
-DIAL8 = 265
-MOUSEX = 266
-MOUSEY = 267
-LPENX = 268
-LPENY = 269
-BPADX = 270
-BPADY = 271
-CURSORX = 272
-CURSORY = 273
-GHOSTX = 274
-GHOSTY = 275
-SBTX = 276
-SBTY = 277
-SBTZ = 278
-SBRX = 279
-SBRY = 280
-SBRZ = 281
-SBPERIOD = 282
-TIMER0 = 515
-TIMER1 = 516
-TIMER2 = 517
-TIMER3 = 518
-KEYBD = 513
-RAWKEYBD = 514
-VALMARK = 523
-REDRAW = 528
-INPUTCHANGE = 534
-QFULL = 535
-QREADERROR = 538
-WINFREEZE = 539
-WINTHAW = 540
-REDRAWICONIC = 541
-WINQUIT = 542
-DEPTHCHANGE = 543
-WINSHUT = 546
-DRAWOVERLAY = 547
-VIDEO = 548
-MENUBUTTON = RIGHTMOUSE
-WINCLOSE = 537
-KEYBDFNAMES = 544
-KEYBDFSTRINGS = 545
-MAXSGIDEVICE = 20000
-GERROR = 524
-WMSEND = 529
-WMREPLY = 530
-WMGFCLOSE = 531
-WMTXCLOSE = 532
-MODECHANGE = 533
-PIECECHANGE = 536
diff --git a/sys/lib/python/plat-irix5/ERRNO.py b/sys/lib/python/plat-irix5/ERRNO.py
deleted file mode 100755
index d49e9641e..000000000
--- a/sys/lib/python/plat-irix5/ERRNO.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Generated by h2py from /usr/include/errno.h
-
-# Included from sys/errno.h
-__KBASE = 1000
-__IRIXBASE = 1000
-EPERM = 1
-ENOENT = 2
-ESRCH = 3
-EINTR = 4
-EIO = 5
-ENXIO = 6
-E2BIG = 7
-ENOEXEC = 8
-EBADF = 9
-ECHILD = 10
-EAGAIN = 11
-ENOMEM = 12
-EACCES = 13
-EFAULT = 14
-ENOTBLK = 15
-EBUSY = 16
-EEXIST = 17
-EXDEV = 18
-ENODEV = 19
-ENOTDIR = 20
-EISDIR = 21
-EINVAL = 22
-ENFILE = 23
-EMFILE = 24
-ENOTTY = 25
-ETXTBSY = 26
-EFBIG = 27
-ENOSPC = 28
-ESPIPE = 29
-EROFS = 30
-EMLINK = 31
-EPIPE = 32
-EDOM = 33
-ERANGE = 34
-ENOMSG = 35
-EIDRM = 36
-ECHRNG = 37
-EL2NSYNC = 38
-EL3HLT = 39
-EL3RST = 40
-ELNRNG = 41
-EUNATCH = 42
-ENOCSI = 43
-EL2HLT = 44
-EDEADLK = 45
-ENOLCK = 46
-EBADE = 50
-EBADR = 51
-EXFULL = 52
-ENOANO = 53
-EBADRQC = 54
-EBADSLT = 55
-EDEADLOCK = 56
-EBFONT = 57
-ENOSTR = 60
-ENODATA = 61
-ETIME = 62
-ENOSR = 63
-ENONET = 64
-ENOPKG = 65
-EREMOTE = 66
-ENOLINK = 67
-EADV = 68
-ESRMNT = 69
-ECOMM = 70
-EPROTO = 71
-EMULTIHOP = 74
-EBADMSG = 77
-ENAMETOOLONG = 78
-EOVERFLOW = 79
-ENOTUNIQ = 80
-EBADFD = 81
-EREMCHG = 82
-ELIBACC = 83
-ELIBBAD = 84
-ELIBSCN = 85
-ELIBMAX = 86
-ELIBEXEC = 87
-EILSEQ = 88
-ENOSYS = 89
-ELOOP = 90
-ERESTART = 91
-ESTRPIPE = 92
-ENOTEMPTY = 93
-EUSERS = 94
-ENOTSOCK = 95
-EDESTADDRREQ = 96
-EMSGSIZE = 97
-EPROTOTYPE = 98
-ENOPROTOOPT = 99
-EPROTONOSUPPORT = 120
-ESOCKTNOSUPPORT = 121
-EOPNOTSUPP = 122
-EPFNOSUPPORT = 123
-EAFNOSUPPORT = 124
-EADDRINUSE = 125
-EADDRNOTAVAIL = 126
-ENETDOWN = 127
-ENETUNREACH = 128
-ENETRESET = 129
-ECONNABORTED = 130
-ECONNRESET = 131
-ENOBUFS = 132
-EISCONN = 133
-ENOTCONN = 134
-ESHUTDOWN = 143
-ETOOMANYREFS = 144
-ETIMEDOUT = 145
-ECONNREFUSED = 146
-EHOSTDOWN = 147
-EHOSTUNREACH = 148
-EWOULDBLOCK = __KBASE+101
-EWOULDBLOCK = EAGAIN
-EALREADY = 149
-EINPROGRESS = 150
-ESTALE = 151
-EIORESID = 500
-EUCLEAN = 135
-ENOTNAM = 137
-ENAVAIL = 138
-EISNAM = 139
-EREMOTEIO = 140
-EINIT = 141
-EREMDEV = 142
-ECANCELED = 158
-ECANCELED = 1000
-EDQUOT = 1133
-ENFSREMOTE = 1135
-ETCP_EBASE = 100
-ETCP_ELIMIT = 129
-ENAMI_EBASE = 129
-ENAMI_ELIMIT = 131
-ENFS_EBASE = 131
-ENFS_ELIMIT = 135
-ELASTERRNO = 135
-TCP_EBASE = ETCP_EBASE
-TCP_ELIMIT = ETCP_ELIMIT
-NAMI_EBASE = ENAMI_EBASE
-NAMI_ELIMIT = ENAMI_ELIMIT
-NFS_EBASE = ENFS_EBASE
-NFS_ELIMIT = ENFS_ELIMIT
-LASTERRNO = ELASTERRNO
diff --git a/sys/lib/python/plat-irix5/FILE.py b/sys/lib/python/plat-irix5/FILE.py
deleted file mode 100755
index 05697c15e..000000000
--- a/sys/lib/python/plat-irix5/FILE.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# Generated by h2py from /usr/include/sys/file.h
-
-# Included from sys/types.h
-
-# Included from sgidefs.h
-_MIPS_ISA_MIPS1 = 1
-_MIPS_ISA_MIPS2 = 2
-_MIPS_ISA_MIPS3 = 3
-_MIPS_ISA_MIPS4 = 4
-_MIPS_SIM_ABI32 = 1
-_MIPS_SIM_NABI32 = 2
-_MIPS_SIM_ABI64 = 3
-P_MYID = (-1)
-P_MYHOSTID = (-1)
-
-# Included from sys/bsd_types.h
-
-# Included from sys/mkdev.h
-ONBITSMAJOR = 7
-ONBITSMINOR = 8
-OMAXMAJ = 0x7f
-OMAXMIN = 0xff
-NBITSMAJOR = 14
-NBITSMINOR = 18
-MAXMAJ = 0x1ff
-MAXMIN = 0x3ffff
-OLDDEV = 0
-NEWDEV = 1
-MKDEV_VER = NEWDEV
-def major(dev): return __major(MKDEV_VER, dev)
-
-def minor(dev): return __minor(MKDEV_VER, dev)
-
-
-# Included from sys/select.h
-FD_SETSIZE = 1024
-NBBY = 8
-
-# Included from sys/sema.h
-HP_NOPOLICY = 0
-HP_ADDOFF = 1
-HP_MULOFF = 2
-SEMA_NAMSZ = 8
-SEMA_NOHIST = 0x1
-SEMA_LIFO = 0x2
-SEMA_MUTEX = 0x4
-SEMA_METER = 0x8
-SEMAOP_PSEMA = 1
-SEMAOP_VSEMA = 2
-SEMAOP_CPSEMA = 3
-SEMAOP_CVSEMA = 4
-SEMAOP_WSEMA = 5
-SEMAOP_UNSEMA = 6
-SEMAOP_INIT = 7
-SEMAOP_FREE = 8
-SSOP_PHIT = 1
-SSOP_PSLP = 2
-SSOP_PWAKE = 6
-SSOP_PRESIG = 7
-SSOP_POSTSIG = 8
-SSOP_VNOWAKE = 3
-SSOP_VWAKE = 4
-SSOP_CPHIT = 1
-SSOP_CPMISS = 5
-SSOP_CVNOWAKE = 3
-SSOP_CVWAKE = 4
-SSOP_WMISS = 5
-SSOP_WWAKE = 4
-SSOP_RMV = 9
-TZERO = 10
-SEMA_NOP = 0
-SEMA_WAKE = 1
-SEMA_VSEMA = 2
-SEMA_SPINOP = 3
-MR_ACCESS = 0x1
-MR_UPDATE = 0x2
-def cv_signal(cv): return cvsema(cv);
-
-def cv_destroy(cv): return freesema(cv)
-
-def mutex_enter(m): return psema(m, PZERO | PNOSTOP)
-
-def mutex_exit(m): return vsema(m)
-
-def mutex_destroy(m): return freesema(m)
-
-def MUTEX_HELD(m): return (ownsema(m))
-
-def MUTEX_HELD(m): return (1)
-
-RW_READER = MR_ACCESS
-RW_WRITER = MR_UPDATE
-def rw_exit(r): return mrunlock(r)
-
-def rw_tryupgrade(r): return cmrpromote(r)
-
-def rw_downgrade(r): return mrdemote(r)
-
-def rw_destroy(r): return mrfree(r)
-
-def RW_WRITE_HELD(r): return ismrlocked(r, MR_UPDATE)
-
-def RW_READ_HELD(r): return ismrlocked(r, MR_ACCESS)
-
-
-# Included from sys/splock.h
-SPLOCKNAMSIZ = 8
-SPLOCK_NONE = 0
-SPLOCK_SOFT = 1
-SPLOCK_HARD = 2
-OWNER_NONE = -1
-MAP_LOCKID = 0
-SPLOCK_MAX = (96*1024)
-SPLOCK_MAX = 32768
-MIN_POOL_SIZE = 256
-MAX_POOL_SIZE = 16384
-DEF_SEMA_POOL = 8192
-DEF_VNODE_POOL = 1024
-DEF_FILE_POOL = 1024
-def ownlock(x): return 1
-
-def splock(x): return 1
-
-def io_splock(x): return 1
-
-def apvsema(x): return vsema(x)
-
-def apcpsema(x): return cpsema(x)
-
-def apcvsema(x): return cvsema(x)
-
-def mp_mrunlock(a): return mrunlock(a)
-
-def apvsema(x): return 0
-
-def apcpsema(x): return 1
-
-def apcvsema(x): return 0
-
-def mp_mrunlock(a): return 0
-
-
-# Included from sys/fcntl.h
-FNDELAY = 0x04
-FAPPEND = 0x08
-FSYNC = 0x10
-FNONBLOCK = 0x80
-FASYNC = 0x1000
-FNONBLK = FNONBLOCK
-FDIRECT = 0x8000
-FCREAT = 0x0100
-FTRUNC = 0x0200
-FEXCL = 0x0400
-FNOCTTY = 0x0800
-O_RDONLY = 0
-O_WRONLY = 1
-O_RDWR = 2
-O_NDELAY = 0x04
-O_APPEND = 0x08
-O_SYNC = 0x10
-O_NONBLOCK = 0x80
-O_DIRECT = 0x8000
-O_CREAT = 0x100
-O_TRUNC = 0x200
-O_EXCL = 0x400
-O_NOCTTY = 0x800
-F_DUPFD = 0
-F_GETFD = 1
-F_SETFD = 2
-F_GETFL = 3
-F_SETFL = 4
-F_GETLK = 14
-F_SETLK = 6
-F_SETLKW = 7
-F_CHKFL = 8
-F_ALLOCSP = 10
-F_FREESP = 11
-F_SETBSDLK = 12
-F_SETBSDLKW = 13
-F_DIOINFO = 30
-F_FSGETXATTR = 31
-F_FSSETXATTR = 32
-F_GETLK64 = 33
-F_SETLK64 = 34
-F_SETLKW64 = 35
-F_ALLOCSP64 = 36
-F_FREESP64 = 37
-F_GETBMAP = 38
-F_FSSETDM = 39
-F_RSETLK = 20
-F_RGETLK = 21
-F_RSETLKW = 22
-F_GETOWN = 23
-F_SETOWN = 24
-F_O_GETLK = 5
-F_O_GETOWN = 10
-F_O_SETOWN = 11
-F_RDLCK = 01
-F_WRLCK = 02
-F_UNLCK = 03
-O_ACCMODE = 3
-FD_CLOEXEC = 1
-FD_NODUP_FORK = 4
-FMASK = 0x90FF
-FOPEN = 0xFFFFFFFF
-FREAD = 0x01
-FWRITE = 0x02
-FNDELAY = 0x04
-FAPPEND = 0x08
-FSYNC = 0x10
-FNONBLOCK = 0x80
-FASYNC = 0x1000
-FNONBLK = FNONBLOCK
-FDIRECT = 0x8000
-FCREAT = 0x0100
-FTRUNC = 0x0200
-FEXCL = 0x0400
-FNOCTTY = 0x0800
-IRIX4_FASYNC = 0x40
-FMARK = 0x4000
-FDEFER = 0x2000
-FINPROGRESS = 0x0400
-FINVIS = 0x0100
-FNMFS = 0x2000
-FCLOSEXEC = 001
-FDSHD = 0x0001
-FDNOMARK = 0x0002
-FDIGNPROGRESS = 0x0004
-LOCK_SH = 1
-LOCK_EX = 2
-LOCK_NB = 4
-LOCK_UN = 8
-F_OK = 0
-X_OK = 1
-W_OK = 2
-R_OK = 4
-L_SET = 0
-L_INCR = 1
-L_XTND = 2
diff --git a/sys/lib/python/plat-irix5/FL.py b/sys/lib/python/plat-irix5/FL.py
deleted file mode 100755
index 727da4c9a..000000000
--- a/sys/lib/python/plat-irix5/FL.py
+++ /dev/null
@@ -1,289 +0,0 @@
-# Constants used by the FORMS library (module fl).
-# This corresponds to "forms.h".
-# Recommended use: import FL; ... FL.NORMAL_BOX ... etc.
-# Alternate use: from FL import *; ... NORMAL_BOX ... etc.
-
-_v20 = 1
-_v21 = 1
-##import fl
-##try:
-## _v20 = (fl.get_rgbmode is not None)
-##except:
-## _v20 = 0
-##del fl
-
-NULL = 0
-FALSE = 0
-TRUE = 1
-
-EVENT = -1
-
-LABEL_SIZE = 64
-if _v20:
- SHORTCUT_SIZE = 32
-PLACE_FREE = 0
-PLACE_SIZE = 1
-PLACE_ASPECT = 2
-PLACE_MOUSE = 3
-PLACE_CENTER = 4
-PLACE_POSITION = 5
-FL_PLACE_FULLSCREEN = 6
-FIND_INPUT = 0
-FIND_AUTOMATIC = 1
-FIND_MOUSE = 2
-BEGIN_GROUP = 10000
-END_GROUP = 20000
-ALIGN_TOP = 0
-ALIGN_BOTTOM = 1
-ALIGN_LEFT = 2
-ALIGN_RIGHT = 3
-ALIGN_CENTER = 4
-NO_BOX = 0
-UP_BOX = 1
-DOWN_BOX = 2
-FLAT_BOX = 3
-BORDER_BOX = 4
-SHADOW_BOX = 5
-FRAME_BOX = 6
-ROUNDED_BOX = 7
-RFLAT_BOX = 8
-RSHADOW_BOX = 9
-TOP_BOUND_COL = 51
-LEFT_BOUND_COL = 55
-BOT_BOUND_COL = 40
-RIGHT_BOUND_COL = 35
-COL1 = 47
-MCOL = 49
-LCOL = 0
-BOUND_WIDTH = 3.0
-DRAW = 0
-PUSH = 1
-RELEASE = 2
-ENTER = 3
-LEAVE = 4
-MOUSE = 5
-FOCUS = 6
-UNFOCUS = 7
-KEYBOARD = 8
-STEP = 9
-MOVE = 10
-FONT_NAME = 'Helvetica'
-FONT_BOLDNAME = 'Helvetica-Bold'
-FONT_ITALICNAME = 'Helvetica-Oblique'
-FONT_FIXEDNAME = 'Courier'
-FONT_ICONNAME = 'Icon'
-SMALL_FONT = 8.0
-NORMAL_FONT = 11.0
-LARGE_FONT = 20.0
-NORMAL_STYLE = 0
-BOLD_STYLE = 1
-ITALIC_STYLE = 2
-FIXED_STYLE = 3
-ENGRAVED_STYLE = 4
-ICON_STYLE = 5
-BITMAP = 3
-NORMAL_BITMAP = 0
-BITMAP_BOXTYPE = NO_BOX
-BITMAP_COL1 = 0
-BITMAP_COL2 = COL1
-BITMAP_LCOL = LCOL
-BITMAP_ALIGN = ALIGN_BOTTOM
-BITMAP_MAXSIZE = 128*128
-BITMAP_BW = BOUND_WIDTH
-BOX = 1
-BOX_BOXTYPE = UP_BOX
-BOX_COL1 = COL1
-BOX_LCOL = LCOL
-BOX_ALIGN = ALIGN_CENTER
-BOX_BW = BOUND_WIDTH
-BROWSER = 71
-NORMAL_BROWSER = 0
-SELECT_BROWSER = 1
-HOLD_BROWSER = 2
-MULTI_BROWSER = 3
-BROWSER_BOXTYPE = DOWN_BOX
-BROWSER_COL1 = COL1
-BROWSER_COL2 = 3
-BROWSER_LCOL = LCOL
-BROWSER_ALIGN = ALIGN_BOTTOM
-BROWSER_SLCOL = COL1
-BROWSER_BW = BOUND_WIDTH
-BROWSER_LINELENGTH = 128
-BROWSER_MAXLINE = 512
-BUTTON = 11
-NORMAL_BUTTON = 0
-PUSH_BUTTON = 1
-RADIO_BUTTON = 2
-HIDDEN_BUTTON = 3
-TOUCH_BUTTON = 4
-INOUT_BUTTON = 5
-RETURN_BUTTON = 6
-if _v20:
- HIDDEN_RET_BUTTON = 7
-BUTTON_BOXTYPE = UP_BOX
-BUTTON_COL1 = COL1
-BUTTON_COL2 = COL1
-BUTTON_LCOL = LCOL
-BUTTON_ALIGN = ALIGN_CENTER
-BUTTON_MCOL1 = MCOL
-BUTTON_MCOL2 = MCOL
-BUTTON_BW = BOUND_WIDTH
-if _v20:
- CHART = 4
- BAR_CHART = 0
- HORBAR_CHART = 1
- LINE_CHART = 2
- FILLED_CHART = 3
- SPIKE_CHART = 4
- PIE_CHART = 5
- SPECIALPIE_CHART = 6
- CHART_BOXTYPE = BORDER_BOX
- CHART_COL1 = COL1
- CHART_LCOL = LCOL
- CHART_ALIGN = ALIGN_BOTTOM
- CHART_BW = BOUND_WIDTH
- CHART_MAX = 128
-CHOICE = 42
-NORMAL_CHOICE = 0
-CHOICE_BOXTYPE = DOWN_BOX
-CHOICE_COL1 = COL1
-CHOICE_COL2 = LCOL
-CHOICE_LCOL = LCOL
-CHOICE_ALIGN = ALIGN_LEFT
-CHOICE_BW = BOUND_WIDTH
-CHOICE_MCOL = MCOL
-CHOICE_MAXITEMS = 128
-CHOICE_MAXSTR = 64
-CLOCK = 61
-SQUARE_CLOCK = 0
-ROUND_CLOCK = 1
-CLOCK_BOXTYPE = UP_BOX
-CLOCK_COL1 = 37
-CLOCK_COL2 = 42
-CLOCK_LCOL = LCOL
-CLOCK_ALIGN = ALIGN_BOTTOM
-CLOCK_TOPCOL = COL1
-CLOCK_BW = BOUND_WIDTH
-COUNTER = 25
-NORMAL_COUNTER = 0
-SIMPLE_COUNTER = 1
-COUNTER_BOXTYPE = UP_BOX
-COUNTER_COL1 = COL1
-COUNTER_COL2 = 4
-COUNTER_LCOL = LCOL
-COUNTER_ALIGN = ALIGN_BOTTOM
-if _v20:
- COUNTER_BW = BOUND_WIDTH
-else:
- DEFAULT = 51
- RETURN_DEFAULT = 0
- ALWAYS_DEFAULT = 1
-DIAL = 22
-NORMAL_DIAL = 0
-LINE_DIAL = 1
-DIAL_BOXTYPE = NO_BOX
-DIAL_COL1 = COL1
-DIAL_COL2 = 37
-DIAL_LCOL = LCOL
-DIAL_ALIGN = ALIGN_BOTTOM
-DIAL_TOPCOL = COL1
-DIAL_BW = BOUND_WIDTH
-FREE = 101
-NORMAL_FREE = 1
-SLEEPING_FREE = 2
-INPUT_FREE = 3
-CONTINUOUS_FREE = 4
-ALL_FREE = 5
-INPUT = 31
-NORMAL_INPUT = 0
-if _v20:
- FLOAT_INPUT = 1
- INT_INPUT = 2
- HIDDEN_INPUT = 3
- if _v21:
- MULTILINE_INPUT = 4
- SECRET_INPUT = 5
-else:
- ALWAYS_INPUT = 1
-INPUT_BOXTYPE = DOWN_BOX
-INPUT_COL1 = 13
-INPUT_COL2 = 5
-INPUT_LCOL = LCOL
-INPUT_ALIGN = ALIGN_LEFT
-INPUT_TCOL = LCOL
-INPUT_CCOL = 4
-INPUT_BW = BOUND_WIDTH
-INPUT_MAX = 128
-LIGHTBUTTON = 12
-LIGHTBUTTON_BOXTYPE = UP_BOX
-LIGHTBUTTON_COL1 = 39
-LIGHTBUTTON_COL2 = 3
-LIGHTBUTTON_LCOL = LCOL
-LIGHTBUTTON_ALIGN = ALIGN_CENTER
-LIGHTBUTTON_TOPCOL = COL1
-LIGHTBUTTON_MCOL = MCOL
-LIGHTBUTTON_BW1 = BOUND_WIDTH
-LIGHTBUTTON_BW2 = BOUND_WIDTH/2.0
-LIGHTBUTTON_MINSIZE = 12.0
-MENU = 41
-TOUCH_MENU = 0
-PUSH_MENU = 1
-MENU_BOXTYPE = BORDER_BOX
-MENU_COL1 = 55
-MENU_COL2 = 37
-MENU_LCOL = LCOL
-MENU_ALIGN = ALIGN_CENTER
-MENU_BW = BOUND_WIDTH
-MENU_MAX = 300
-POSITIONER = 23
-NORMAL_POSITIONER = 0
-POSITIONER_BOXTYPE = DOWN_BOX
-POSITIONER_COL1 = COL1
-POSITIONER_COL2 = 1
-POSITIONER_LCOL = LCOL
-POSITIONER_ALIGN = ALIGN_BOTTOM
-POSITIONER_BW = BOUND_WIDTH
-ROUNDBUTTON = 13
-ROUNDBUTTON_BOXTYPE = NO_BOX
-ROUNDBUTTON_COL1 = 7
-ROUNDBUTTON_COL2 = 3
-ROUNDBUTTON_LCOL = LCOL
-ROUNDBUTTON_ALIGN = ALIGN_CENTER
-ROUNDBUTTON_TOPCOL = COL1
-ROUNDBUTTON_MCOL = MCOL
-ROUNDBUTTON_BW = BOUND_WIDTH
-SLIDER = 21
-VALSLIDER = 24
-VERT_SLIDER = 0
-HOR_SLIDER = 1
-VERT_FILL_SLIDER = 2
-HOR_FILL_SLIDER = 3
-VERT_NICE_SLIDER = 4
-HOR_NICE_SLIDER = 5
-SLIDER_BOXTYPE = DOWN_BOX
-SLIDER_COL1 = COL1
-SLIDER_COL2 = COL1
-SLIDER_LCOL = LCOL
-SLIDER_ALIGN = ALIGN_BOTTOM
-SLIDER_BW1 = BOUND_WIDTH
-SLIDER_BW2 = BOUND_WIDTH*0.75
-SLIDER_FINE = 0.05
-SLIDER_WIDTH = 0.08
-TEXT = 2
-NORMAL_TEXT = 0
-TEXT_BOXTYPE = NO_BOX
-TEXT_COL1 = COL1
-TEXT_LCOL = LCOL
-TEXT_ALIGN = ALIGN_LEFT
-TEXT_BW = BOUND_WIDTH
-TIMER = 62
-NORMAL_TIMER = 0
-VALUE_TIMER = 1
-HIDDEN_TIMER = 2
-TIMER_BOXTYPE = DOWN_BOX
-TIMER_COL1 = COL1
-TIMER_COL2 = 1
-TIMER_LCOL = LCOL
-TIMER_ALIGN = ALIGN_CENTER
-TIMER_BW = BOUND_WIDTH
-TIMER_BLINKRATE = 0.2
diff --git a/sys/lib/python/plat-irix5/GET.py b/sys/lib/python/plat-irix5/GET.py
deleted file mode 100755
index 9c3d7d695..000000000
--- a/sys/lib/python/plat-irix5/GET.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Symbols from <gl/get.h>
-
-BCKBUFFER = 0x1
-FRNTBUFFER = 0x2
-DRAWZBUFFER = 0x4
-DMRGB = 0
-DMSINGLE = 1
-DMDOUBLE = 2
-DMRGBDOUBLE = 5
-HZ30 = 0
-HZ60 = 1
-NTSC = 2
-HDTV = 3
-VGA = 4
-IRIS3K = 5
-PR60 = 6
-PAL = 9
-HZ30_SG = 11
-A343 = 14
-STR_RECT = 15
-VOF0 = 16
-VOF1 = 17
-VOF2 = 18
-VOF3 = 19
-SGI0 = 20
-SGI1 = 21
-SGI2 = 22
-HZ72 = 23
-GL_VIDEO_REG = 0x00800000
-GLV_GENLOCK = 0x00000001
-GLV_UNBLANK = 0x00000002
-GLV_SRED = 0x00000004
-GLV_SGREEN = 0x00000008
-GLV_SBLUE = 0x00000010
-GLV_SALPHA = 0x00000020
-GLV_TTLGENLOCK = 0x00000080
-GLV_TTLSYNC = GLV_TTLGENLOCK
-GLV_GREENGENLOCK = 0x0000100
-LEFTPLANE = 0x0001
-RIGHTPLANE = 0x0002
-BOTTOMPLANE = 0x0004
-TOPPLANE = 0x0008
-NEARPLANE = 0x0010
-FARPLANE = 0x0020
-## GETDEF = __GL_GET_H__
-NOBUFFER = 0x0
-BOTHBUFFERS = 0x3
-DMINTENSITYSINGLE = 3
-DMINTENSITYDOUBLE = 4
-MONSPECIAL = 0x20
-HZ50 = 3
-MONA = 5
-MONB = 6
-MONC = 7
-MOND = 8
-MON_ALL = 12
-MON_GEN_ALL = 13
-CMAPMULTI = 0
-CMAPONE = 1
diff --git a/sys/lib/python/plat-irix5/GL.py b/sys/lib/python/plat-irix5/GL.py
deleted file mode 100755
index 9f02f65f3..000000000
--- a/sys/lib/python/plat-irix5/GL.py
+++ /dev/null
@@ -1,393 +0,0 @@
-NULL = 0
-FALSE = 0
-TRUE = 1
-ATTRIBSTACKDEPTH = 10
-VPSTACKDEPTH = 8
-MATRIXSTACKDEPTH = 32
-NAMESTACKDEPTH = 1025
-STARTTAG = -2
-ENDTAG = -3
-BLACK = 0
-RED = 1
-GREEN = 2
-YELLOW = 3
-BLUE = 4
-MAGENTA = 5
-CYAN = 6
-WHITE = 7
-PUP_CLEAR = 0
-PUP_COLOR = 1
-PUP_BLACK = 2
-PUP_WHITE = 3
-NORMALDRAW = 0x010
-PUPDRAW = 0x020
-OVERDRAW = 0x040
-UNDERDRAW = 0x080
-CURSORDRAW = 0x100
-DUALDRAW = 0x200
-PATTERN_16 = 16
-PATTERN_32 = 32
-PATTERN_64 = 64
-PATTERN_16_SIZE = 16
-PATTERN_32_SIZE = 64
-PATTERN_64_SIZE = 256
-SRC_AUTO = 0
-SRC_FRONT = 1
-SRC_BACK = 2
-SRC_ZBUFFER = 3
-SRC_PUP = 4
-SRC_OVER = 5
-SRC_UNDER = 6
-SRC_FRAMEGRABBER = 7
-BF_ZERO = 0
-BF_ONE = 1
-BF_DC = 2
-BF_SC = 2
-BF_MDC = 3
-BF_MSC = 3
-BF_SA = 4
-BF_MSA = 5
-BF_DA = 6
-BF_MDA = 7
-BF_MIN_SA_MDA = 8
-AF_NEVER = 0
-AF_LESS = 1
-AF_EQUAL = 2
-AF_LEQUAL = 3
-AF_GREATER = 4
-AF_NOTEQUAL = 5
-AF_GEQUAL = 6
-AF_ALWAYS = 7
-ZF_NEVER = 0
-ZF_LESS = 1
-ZF_EQUAL = 2
-ZF_LEQUAL = 3
-ZF_GREATER = 4
-ZF_NOTEQUAL = 5
-ZF_GEQUAL = 6
-ZF_ALWAYS = 7
-ZSRC_DEPTH = 0
-ZSRC_COLOR = 1
-SMP_OFF = 0x0
-SMP_ON = 0x1
-SMP_SMOOTHER = 0x2
-SML_OFF = 0x0
-SML_ON = 0x1
-SML_SMOOTHER = 0x2
-SML_END_CORRECT = 0x4
-PYSM_OFF = 0
-PYSM_ON = 1
-PYSM_SHRINK = 2
-DT_OFF = 0
-DT_ON = 1
-PUP_NONE = 0
-PUP_GREY = 0x1
-PUP_BOX = 0x2
-PUP_CHECK = 0x4
-GLC_OLDPOLYGON = 0
-GLC_ZRANGEMAP = 1
-GLC_MQUEUERATE = 2
-GLC_SOFTATTACH = 3
-GLC_MANAGEBG = 4
-GLC_SLOWMAPCOLORS = 5
-GLC_INPUTCHANGEBUG = 6
-GLC_NOBORDERBUG = 7
-GLC_SET_VSYNC = 8
-GLC_GET_VSYNC = 9
-GLC_VSYNC_SLEEP = 10
-GLC_COMPATRATE = 15
-C16X1 = 0
-C16X2 = 1
-C32X1 = 2
-C32X2 = 3
-CCROSS = 4
-FLAT = 0
-GOURAUD = 1
-LO_ZERO = 0x0
-LO_AND = 0x1
-LO_ANDR = 0x2
-LO_SRC = 0x3
-LO_ANDI = 0x4
-LO_DST = 0x5
-LO_XOR = 0x6
-LO_OR = 0x7
-LO_NOR = 0x8
-LO_XNOR = 0x9
-LO_NDST = 0xa
-LO_ORR = 0xb
-LO_NSRC = 0xc
-LO_ORI = 0xd
-LO_NAND = 0xe
-LO_ONE = 0xf
-INFOCUSSCRN = -2
-ST_KEEP = 0
-ST_ZERO = 1
-ST_REPLACE = 2
-ST_INCR = 3
-ST_DECR = 4
-ST_INVERT = 5
-SF_NEVER = 0
-SF_LESS = 1
-SF_EQUAL = 2
-SF_LEQUAL = 3
-SF_GREATER = 4
-SF_NOTEQUAL = 5
-SF_GEQUAL = 6
-SF_ALWAYS = 7
-SS_OFF = 0
-SS_DEPTH = 1
-PYM_FILL = 1
-PYM_POINT = 2
-PYM_LINE = 3
-PYM_HOLLOW = 4
-PYM_LINE_FAST = 5
-FG_OFF = 0
-FG_ON = 1
-FG_DEFINE = 2
-FG_VTX_EXP = 2
-FG_VTX_LIN = 3
-FG_PIX_EXP = 4
-FG_PIX_LIN = 5
-FG_VTX_EXP2 = 6
-FG_PIX_EXP2 = 7
-PM_SHIFT = 0
-PM_EXPAND = 1
-PM_C0 = 2
-PM_C1 = 3
-PM_ADD24 = 4
-PM_SIZE = 5
-PM_OFFSET = 6
-PM_STRIDE = 7
-PM_TTOB = 8
-PM_RTOL = 9
-PM_ZDATA = 10
-PM_WARP = 11
-PM_RDX = 12
-PM_RDY = 13
-PM_CDX = 14
-PM_CDY = 15
-PM_XSTART = 16
-PM_YSTART = 17
-PM_VO1 = 1000
-NAUTO = 0
-NNORMALIZE = 1
-AC_CLEAR = 0
-AC_ACCUMULATE = 1
-AC_CLEAR_ACCUMULATE = 2
-AC_RETURN = 3
-AC_MULT = 4
-AC_ADD = 5
-CP_OFF = 0
-CP_ON = 1
-CP_DEFINE = 2
-SB_RESET = 0
-SB_TRACK = 1
-SB_HOLD = 2
-RD_FREEZE = 0x00000001
-RD_ALPHAONE = 0x00000002
-RD_IGNORE_UNDERLAY = 0x00000004
-RD_IGNORE_OVERLAY = 0x00000008
-RD_IGNORE_PUP = 0x00000010
-RD_OFFSCREEN = 0x00000020
-GD_XPMAX = 0
-GD_YPMAX = 1
-GD_XMMAX = 2
-GD_YMMAX = 3
-GD_ZMIN = 4
-GD_ZMAX = 5
-GD_BITS_NORM_SNG_RED = 6
-GD_BITS_NORM_SNG_GREEN = 7
-GD_BITS_NORM_SNG_BLUE = 8
-GD_BITS_NORM_DBL_RED = 9
-GD_BITS_NORM_DBL_GREEN = 10
-GD_BITS_NORM_DBL_BLUE = 11
-GD_BITS_NORM_SNG_CMODE = 12
-GD_BITS_NORM_DBL_CMODE = 13
-GD_BITS_NORM_SNG_MMAP = 14
-GD_BITS_NORM_DBL_MMAP = 15
-GD_BITS_NORM_ZBUFFER = 16
-GD_BITS_OVER_SNG_CMODE = 17
-GD_BITS_UNDR_SNG_CMODE = 18
-GD_BITS_PUP_SNG_CMODE = 19
-GD_BITS_NORM_SNG_ALPHA = 21
-GD_BITS_NORM_DBL_ALPHA = 22
-GD_BITS_CURSOR = 23
-GD_OVERUNDER_SHARED = 24
-GD_BLEND = 25
-GD_CIFRACT = 26
-GD_CROSSHAIR_CINDEX = 27
-GD_DITHER = 28
-GD_LINESMOOTH_CMODE = 30
-GD_LINESMOOTH_RGB = 31
-GD_LOGICOP = 33
-GD_NSCRNS = 35
-GD_NURBS_ORDER = 36
-GD_NBLINKS = 37
-GD_NVERTEX_POLY = 39
-GD_PATSIZE_64 = 40
-GD_PNTSMOOTH_CMODE = 41
-GD_PNTSMOOTH_RGB = 42
-GD_PUP_TO_OVERUNDER = 43
-GD_READSOURCE = 44
-GD_READSOURCE_ZBUFFER = 48
-GD_STEREO = 50
-GD_SUBPIXEL_LINE = 51
-GD_SUBPIXEL_PNT = 52
-GD_SUBPIXEL_POLY = 53
-GD_TRIMCURVE_ORDER = 54
-GD_WSYS = 55
-GD_ZDRAW_GEOM = 57
-GD_ZDRAW_PIXELS = 58
-GD_SCRNTYPE = 61
-GD_TEXTPORT = 62
-GD_NMMAPS = 63
-GD_FRAMEGRABBER = 64
-GD_TIMERHZ = 66
-GD_DBBOX = 67
-GD_AFUNCTION = 68
-GD_ALPHA_OVERUNDER = 69
-GD_BITS_ACBUF = 70
-GD_BITS_ACBUF_HW = 71
-GD_BITS_STENCIL = 72
-GD_CLIPPLANES = 73
-GD_FOGVERTEX = 74
-GD_LIGHTING_TWOSIDE = 76
-GD_POLYMODE = 77
-GD_POLYSMOOTH = 78
-GD_SCRBOX = 79
-GD_TEXTURE = 80
-GD_FOGPIXEL = 81
-GD_TEXTURE_PERSP = 82
-GD_MUXPIPES = 83
-GD_NOLIMIT = -2
-GD_WSYS_NONE = 0
-GD_WSYS_4S = 1
-GD_SCRNTYPE_WM = 0
-GD_SCRNTYPE_NOWM = 1
-N_PIXEL_TOLERANCE = 1
-N_CULLING = 2
-N_DISPLAY = 3
-N_ERRORCHECKING = 4
-N_SUBDIVISIONS = 5
-N_S_STEPS = 6
-N_T_STEPS = 7
-N_TILES = 8
-N_TMP1 = 9
-N_TMP2 = 10
-N_TMP3 = 11
-N_TMP4 = 12
-N_TMP5 = 13
-N_TMP6 = 14
-N_FILL = 1.0
-N_OUTLINE_POLY = 2.0
-N_OUTLINE_PATCH = 5.0
-N_ISOLINE_S = 12.0
-N_ST = 0x8
-N_STW = 0xd
-N_XYZ = 0x4c
-N_XYZW = 0x51
-N_TEX = 0x88
-N_TEXW = 0x8d
-N_RGBA = 0xd0
-N_RGBAW = 0xd5
-N_P2D = 0x8
-N_P2DR = 0xd
-N_V3D = 0x4c
-N_V3DR = 0x51
-N_T2D = 0x88
-N_T2DR = 0x8d
-N_C4D = 0xd0
-N_C4DR = 0xd5
-LMNULL = 0.0
-MSINGLE = 0
-MPROJECTION = 1
-MVIEWING = 2
-MTEXTURE = 3
-MAXLIGHTS = 8
-MAXRESTRICTIONS = 4
-DEFMATERIAL = 0
-EMISSION = 1
-AMBIENT = 2
-DIFFUSE = 3
-SPECULAR = 4
-SHININESS = 5
-COLORINDEXES = 6
-ALPHA = 7
-DEFLIGHT = 100
-LCOLOR = 101
-POSITION = 102
-SPOTDIRECTION = 103
-SPOTLIGHT = 104
-DEFLMODEL = 200
-LOCALVIEWER = 201
-ATTENUATION = 202
-ATTENUATION2 = 203
-TWOSIDE = 204
-MATERIAL = 1000
-BACKMATERIAL = 1001
-LIGHT0 = 1100
-LIGHT1 = 1101
-LIGHT2 = 1102
-LIGHT3 = 1103
-LIGHT4 = 1104
-LIGHT5 = 1105
-LIGHT6 = 1106
-LIGHT7 = 1107
-LMODEL = 1200
-LMC_COLOR = 0
-LMC_EMISSION = 1
-LMC_AMBIENT = 2
-LMC_DIFFUSE = 3
-LMC_SPECULAR = 4
-LMC_AD = 5
-LMC_NULL = 6
-TX_MINFILTER = 0x100
-TX_MAGFILTER = 0x200
-TX_WRAP = 0x300
-TX_WRAP_S = 0x310
-TX_WRAP_T = 0x320
-TX_TILE = 0x400
-TX_BORDER = 0x500
-TX_NULL = 0x000
-TX_POINT = 0x110
-TX_BILINEAR = 0x220
-TX_MIPMAP = 0x120
-TX_MIPMAP_POINT = 0x121
-TX_MIPMAP_LINEAR = 0x122
-TX_MIPMAP_BILINEAR = 0x123
-TX_MIPMAP_TRILINEAR = 0x124
-TX_REPEAT = 0x301
-TX_CLAMP = 0x302
-TX_SELECT = 0x303
-TX_TEXTURE_0 = 0
-TV_MODULATE = 0x101
-TV_BLEND = 0x102
-TV_DECAL = 0x103
-TV_COLOR = 0x200
-TV_NULL = 0x000
-TV_ENV0 = 0
-TX_S = 0
-TX_T = 1
-TG_OFF = 0
-TG_ON = 1
-TG_CONTOUR = 2
-TG_LINEAR = 3
-TG_SPHEREMAP = 4
-TG_REFRACTMAP = 5
-DGLSINK = 0
-DGLLOCAL = 1
-DGLTSOCKET = 2
-DGL4DDN = 3
-PUP_CURSOR = PUP_COLOR
-FATAL = 1
-WARNING = 2
-ASK_CONT = 3
-ASK_RESTART = 4
-XMAXSCREEN = 1279
-YMAXSCREEN = 1023
-XMAXMEDIUM = 1023
-YMAXMEDIUM = 767
-XMAX170 = 645
-YMAX170 = 484
-XMAXPAL = 779
-YMAXPAL = 574
diff --git a/sys/lib/python/plat-irix5/GLWS.py b/sys/lib/python/plat-irix5/GLWS.py
deleted file mode 100755
index 69dab7143..000000000
--- a/sys/lib/python/plat-irix5/GLWS.py
+++ /dev/null
@@ -1,12 +0,0 @@
-NOERROR = 0
-NOCONTEXT = -1
-NODISPLAY = -2
-NOWINDOW = -3
-NOGRAPHICS = -4
-NOTTOP = -5
-NOVISUAL = -6
-BUFSIZE = -7
-BADWINDOW = -8
-ALREADYBOUND = -100
-BINDFAILED = -101
-SETFAILED = -102
diff --git a/sys/lib/python/plat-irix5/IN.py b/sys/lib/python/plat-irix5/IN.py
deleted file mode 100755
index 2d6789201..000000000
--- a/sys/lib/python/plat-irix5/IN.py
+++ /dev/null
@@ -1,141 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from sys/endian.h
-LITTLE_ENDIAN = 1234
-BIG_ENDIAN = 4321
-PDP_ENDIAN = 3412
-BYTE_ORDER = BIG_ENDIAN
-BYTE_ORDER = LITTLE_ENDIAN
-def ntohl(x): return (x)
-
-def ntohs(x): return (x)
-
-def htonl(x): return (x)
-
-def htons(x): return (x)
-
-def htonl(x): return ntohl(x)
-
-def htons(x): return ntohs(x)
-
-
-# Included from sys/bsd_types.h
-
-# Included from sys/mkdev.h
-ONBITSMAJOR = 7
-ONBITSMINOR = 8
-OMAXMAJ = 0x7f
-OMAXMIN = 0xff
-NBITSMAJOR = 14
-NBITSMINOR = 18
-MAXMAJ = 0x1ff
-MAXMIN = 0x3ffff
-OLDDEV = 0
-NEWDEV = 1
-MKDEV_VER = NEWDEV
-def major(dev): return __major(MKDEV_VER, dev)
-
-def minor(dev): return __minor(MKDEV_VER, dev)
-
-
-# Included from sys/select.h
-FD_SETSIZE = 1024
-NBBY = 8
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_ENCAP = 4
-IPPROTO_TCP = 6
-IPPROTO_EGP = 8
-IPPROTO_PUP = 12
-IPPROTO_UDP = 17
-IPPROTO_IDP = 22
-IPPROTO_TP = 29
-IPPROTO_XTP = 36
-IPPROTO_HELLO = 63
-IPPROTO_ND = 77
-IPPROTO_EON = 80
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IPPORT_MAXPORT = 65535
-def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_ANY = 0x00000000
-INADDR_BROADCAST = 0xffffffff
-INADDR_LOOPBACK = 0x7F000001
-INADDR_UNSPEC_GROUP = 0xe0000000
-INADDR_ALLHOSTS_GROUP = 0xe0000001
-INADDR_MAX_LOCAL_GROUP = 0xe00000ff
-INADDR_NONE = 0xffffffff
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_MULTICAST_IF = 2
-IP_MULTICAST_TTL = 3
-IP_MULTICAST_LOOP = 4
-IP_ADD_MEMBERSHIP = 5
-IP_DROP_MEMBERSHIP = 6
-IP_HDRINCL = 7
-IP_TOS = 8
-IP_TTL = 9
-IP_RECVOPTS = 10
-IP_RECVRETOPTS = 11
-IP_RECVDSTADDR = 12
-IP_RETOPTS = 13
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 20
-IP_MULTICAST_TTL = 21
-IP_MULTICAST_LOOP = 22
-IP_ADD_MEMBERSHIP = 23
-IP_DROP_MEMBERSHIP = 24
-IRIX4_IP_OPTIONS = 1
-IRIX4_IP_MULTICAST_IF = 2
-IRIX4_IP_MULTICAST_TTL = 3
-IRIX4_IP_MULTICAST_LOOP = 4
-IRIX4_IP_ADD_MEMBERSHIP = 5
-IRIX4_IP_DROP_MEMBERSHIP = 6
-IRIX4_IP_HDRINCL = 7
-IRIX4_IP_TOS = 8
-IRIX4_IP_TTL = 9
-IRIX4_IP_RECVOPTS = 10
-IRIX4_IP_RECVRETOPTS = 11
-IRIX4_IP_RECVDSTADDR = 12
-IRIX4_IP_RETOPTS = 13
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
diff --git a/sys/lib/python/plat-irix5/IOCTL.py b/sys/lib/python/plat-irix5/IOCTL.py
deleted file mode 100755
index cec3c3f6a..000000000
--- a/sys/lib/python/plat-irix5/IOCTL.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# These lines were mostly generated by h2py.py (see demo/scripts)
-# from <sys/ioctl.h>, <sys/termio.h> and <termios.h> on Irix 4.0.2
-# with some manual changes to cope with imperfections in h2py.py.
-# The applicability on other systems is not clear; especially non-SYSV
-# systems may have a totally different set of ioctls.
-
-IOCTYPE = 0xff00
-LIOC = (ord('l')<<8)
-LIOCGETP = (LIOC|1)
-LIOCSETP = (LIOC|2)
-LIOCGETS = (LIOC|5)
-LIOCSETS = (LIOC|6)
-DIOC = (ord('d')<<8)
-DIOCGETC = (DIOC|1)
-DIOCGETB = (DIOC|2)
-DIOCSETE = (DIOC|3)
-IOCPARM_MASK = 0x7f
-IOC_VOID = 0x20000000
-IOC_OUT = 0x40000000
-IOC_IN = 0x80000000
-IOC_INOUT = (IOC_IN|IOC_OUT)
-int = 'i'
-short = 'h'
-long = 'l'
-def sizeof(t): import struct; return struct.calcsize(t)
-def _IO(x,y): return (IOC_VOID|((x)<<8)|y)
-def _IOR(x,y,t): return (IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
-def _IOW(x,y,t): return (IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
-# this should be _IORW, but stdio got there first
-def _IOWR(x,y,t): return (IOC_INOUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
-FIONREAD = _IOR(ord('f'), 127, int)
-FIONBIO = _IOW(ord('f'), 126, int)
-FIOASYNC = _IOW(ord('f'), 125, int)
-FIOSETOWN = _IOW(ord('f'), 124, int)
-FIOGETOWN = _IOR(ord('f'), 123, int)
-NCC = 8
-NCC_PAD = 7
-NCC_EXT = 16
-NCCS = (NCC+NCC_PAD+NCC_EXT)
-VINTR = 0
-VQUIT = 1
-VERASE = 2
-VKILL = 3
-VEOF = 4
-VEOL = 5
-VEOL2 = 6
-VMIN = VEOF
-VTIME = VEOL
-VSWTCH = 7
-VLNEXT = (NCC+NCC_PAD+0)
-VWERASE = (NCC+NCC_PAD+1)
-VRPRNT = (NCC+NCC_PAD+2)
-VFLUSHO = (NCC+NCC_PAD+3)
-VSTOP = (NCC+NCC_PAD+4)
-VSTART = (NCC+NCC_PAD+5)
-CNUL = '\0'
-CDEL = '\377'
-CESC = '\\'
-CINTR = '\177'
-CQUIT = '\34'
-CBRK = '\377'
-def CTRL(c): return ord(c) & 0x0f
-CERASE = CTRL('H')
-CKILL = CTRL('U')
-CEOF = CTRL('d')
-CEOT = CEOF
-CSTART = CTRL('q')
-CSTOP = CTRL('s')
-CSWTCH = CTRL('z')
-CSUSP = CSWTCH
-CNSWTCH = 0
-CLNEXT = CTRL('v')
-CWERASE = CTRL('w')
-CFLUSHO = CTRL('o')
-CFLUSH = CFLUSHO
-CRPRNT = CTRL('r')
-CDSUSP = CTRL('y')
-IGNBRK = 0000001
-BRKINT = 0000002
-IGNPAR = 0000004
-PARMRK = 0000010
-INPCK = 0000020
-ISTRIP = 0000040
-INLCR = 0000100
-IGNCR = 0000200
-ICRNL = 0000400
-IUCLC = 0001000
-IXON = 0002000
-IXANY = 0004000
-IXOFF = 0010000
-IBLKMD = 0020000
-OPOST = 0000001
-OLCUC = 0000002
-ONLCR = 0000004
-OCRNL = 0000010
-ONOCR = 0000020
-ONLRET = 0000040
-OFILL = 0000100
-OFDEL = 0000200
-NLDLY = 0000400
-NL0 = 0
-NL1 = 0000400
-CRDLY = 0003000
-CR0 = 0
-CR1 = 0001000
-CR2 = 0002000
-CR3 = 0003000
-TABDLY = 0014000
-TAB0 = 0
-TAB1 = 0004000
-TAB2 = 0010000
-TAB3 = 0014000
-BSDLY = 0020000
-BS0 = 0
-BS1 = 0020000
-VTDLY = 0040000
-VT0 = 0
-VT1 = 0040000
-FFDLY = 0100000
-FF0 = 0
-FF1 = 0100000
-CBAUD = 0000017
-B0 = 0
-B50 = 0000001
-B75 = 0000002
-B110 = 0000003
-B134 = 0000004
-B150 = 0000005
-B200 = 0000006
-B300 = 0000007
-B600 = 0000010
-B1200 = 0000011
-B1800 = 0000012
-B2400 = 0000013
-B4800 = 0000014
-B9600 = 0000015
-B19200 = 0000016
-EXTA = 0000016
-B38400 = 0000017
-EXTB = 0000017
-CSIZE = 0000060
-CS5 = 0
-CS6 = 0000020
-CS7 = 0000040
-CS8 = 0000060
-CSTOPB = 0000100
-CREAD = 0000200
-PARENB = 0000400
-PARODD = 0001000
-HUPCL = 0002000
-CLOCAL = 0004000
-LOBLK = 0040000
-ISIG = 0000001
-ICANON = 0000002
-XCASE = 0000004
-ECHO = 0000010
-ECHOE = 0000020
-ECHOK = 0000040
-ECHONL = 0000100
-NOFLSH = 0000200
-IIEXTEN = 0000400
-ITOSTOP = 0001000
-SSPEED = B9600
-IOCTYPE = 0xff00
-TIOC = (ord('T')<<8)
-oTCGETA = (TIOC|1)
-oTCSETA = (TIOC|2)
-oTCSETAW = (TIOC|3)
-oTCSETAF = (TIOC|4)
-TCSBRK = (TIOC|5)
-TCXONC = (TIOC|6)
-TCFLSH = (TIOC|7)
-TCGETA = (TIOC|8)
-TCSETA = (TIOC|9)
-TCSETAW = (TIOC|10)
-TCSETAF = (TIOC|11)
-TIOCFLUSH = (TIOC|12)
-TCDSET = (TIOC|32)
-TCBLKMD = (TIOC|33)
-TIOCPKT = (TIOC|112)
-TIOCPKT_DATA = 0x00
-TIOCPKT_FLUSHREAD = 0x01
-TIOCPKT_FLUSHWRITE = 0x02
-TIOCPKT_NOSTOP = 0x10
-TIOCPKT_DOSTOP = 0x20
-TIOCNOTTY = (TIOC|113)
-TIOCSTI = (TIOC|114)
-TIOCSPGRP = _IOW(ord('t'), 118, int)
-TIOCGPGRP = _IOR(ord('t'), 119, int)
-TIOCCONS = _IOW(ord('t'), 120, int)
-struct_winsize = 'hhhh'
-TIOCGWINSZ = _IOR(ord('t'), 104, struct_winsize)
-TIOCSWINSZ = _IOW(ord('t'), 103, struct_winsize)
-TFIOC = (ord('F')<<8)
-oFIONREAD = (TFIOC|127)
-LDIOC = (ord('D')<<8)
-LDOPEN = (LDIOC|0)
-LDCLOSE = (LDIOC|1)
-LDCHG = (LDIOC|2)
-LDGETT = (LDIOC|8)
-LDSETT = (LDIOC|9)
-TERM_NONE = 0
-TERM_TEC = 1
-TERM_V61 = 2
-TERM_V10 = 3
-TERM_TEX = 4
-TERM_D40 = 5
-TERM_H45 = 6
-TERM_D42 = 7
-TM_NONE = 0000
-TM_SNL = 0001
-TM_ANL = 0002
-TM_LCF = 0004
-TM_CECHO = 0010
-TM_CINVIS = 0020
-TM_SET = 0200
-LDISC0 = 0
-LDISC1 = 1
-NTTYDISC = LDISC1
-VSUSP = VSWTCH
-TCSANOW = 0
-TCSADRAIN = 1
-TCSAFLUSH = 2
-TCIFLUSH = 0
-TCOFLUSH = 1
-TCIOFLUSH = 2
-TCOOFF = 0
-TCOON = 1
-TCIOFF = 2
-TCION = 3
-TO_STOP = LOBLK
-IEXTEN = IIEXTEN
-TOSTOP = ITOSTOP
diff --git a/sys/lib/python/plat-irix5/SV.py b/sys/lib/python/plat-irix5/SV.py
deleted file mode 100755
index db8efe52d..000000000
--- a/sys/lib/python/plat-irix5/SV.py
+++ /dev/null
@@ -1,120 +0,0 @@
-NTSC_XMAX = 640
-NTSC_YMAX = 480
-PAL_XMAX = 768
-PAL_YMAX = 576
-BLANKING_BUFFER_SIZE = 2
-
-MAX_SOURCES = 2
-
-# mode parameter for Bind calls
-IN_OFF = 0 # No Video
-IN_OVER = 1 # Video over graphics
-IN_UNDER = 2 # Video under graphics
-IN_REPLACE = 3 # Video replaces entire win
-
-# mode parameters for LoadMap calls. Specifies buffer, always 256 entries
-INPUT_COLORMAP = 0 # tuples of 8-bit RGB
-CHROMA_KEY_MAP = 1 # tuples of 8-bit RGB
-COLOR_SPACE_MAP = 2 # tuples of 8-bit RGB
-GAMMA_MAP = 3 # tuples of 24-bit red values
-
-# mode parameters for UseExclusive calls
-INPUT = 0
-OUTPUT = 1
-IN_OUT = 2
-
-# Format constants for the capture routines
-RGB8_FRAMES = 0 # noninterleaved 8 bit 3:2:3 RBG fields
-RGB32_FRAMES = 1 # 32-bit 8:8:8 RGB frames
-YUV411_FRAMES = 2 # interleaved, 8:2:2 YUV format
-YUV411_FRAMES_AND_BLANKING_BUFFER = 3
-
-#
-# sv.SetParam is passed variable length argument lists,
-# consisting of <name, value> pairs. The following
-# constants identify argument names.
-#
-_NAME_BASE = 1000
-SOURCE = (_NAME_BASE + 0)
-SOURCE1 = 0
-SOURCE2 = 1
-SOURCE3 = 2
-COLOR = (_NAME_BASE + 1)
-DEFAULT_COLOR = 0
-USER_COLOR = 1
-MONO = 2
-OUTPUTMODE = (_NAME_BASE + 2)
-LIVE_OUTPUT = 0
-STILL24_OUT = 1
-FREEZE = (_NAME_BASE + 3)
-DITHER = (_NAME_BASE + 4)
-OUTPUT_FILTER = (_NAME_BASE + 5)
-HUE = (_NAME_BASE + 6)
-GENLOCK = (_NAME_BASE + 7)
-GENLOCK_OFF = 0
-GENLOCK_ON = 1
-GENLOCK_HOUSE = 2
-BROADCAST = (_NAME_BASE + 8)
-NTSC = 0
-PAL = 1
-VIDEO_MODE = (_NAME_BASE + 9)
-COMP = 0
-SVIDEO = 1
-INPUT_BYPASS = (_NAME_BASE + 10)
-FIELDDROP = (_NAME_BASE + 11)
-SLAVE = (_NAME_BASE + 12)
-APERTURE_FACTOR = (_NAME_BASE + 13)
-AFACTOR_0 = 0
-AFACTOR_QTR = 1
-AFACTOR_HLF = 2
-AFACTOR_ONE = 3
-CORING = (_NAME_BASE + 14)
-COR_OFF = 0
-COR_1LSB = 1
-COR_2LSB = 2
-COR_3LSB = 3
-APERTURE_BANDPASS = (_NAME_BASE + 15)
-ABAND_F0 = 0
-ABAND_F1 = 1
-ABAND_F2 = 2
-ABAND_F3 = 3
-PREFILTER = (_NAME_BASE + 16)
-CHROMA_TRAP = (_NAME_BASE + 17)
-CK_THRESHOLD = (_NAME_BASE + 18)
-PAL_SENSITIVITY = (_NAME_BASE + 19)
-GAIN_CONTROL = (_NAME_BASE + 20)
-GAIN_SLOW = 0
-GAIN_MEDIUM = 1
-GAIN_FAST = 2
-GAIN_FROZEN = 3
-AUTO_CKILL = (_NAME_BASE + 21)
-VTR_MODE = (_NAME_BASE + 22)
-VTR_INPUT = 0
-CAMERA_INPUT = 1
-LUMA_DELAY = (_NAME_BASE + 23)
-VNOISE = (_NAME_BASE + 24)
-VNOISE_NORMAL = 0
-VNOISE_SEARCH = 1
-VNOISE_AUTO = 2
-VNOISE_BYPASS = 3
-CHCV_PAL = (_NAME_BASE + 25)
-CHCV_NTSC = (_NAME_BASE + 26)
-CCIR_LEVELS = (_NAME_BASE + 27)
-STD_CHROMA = (_NAME_BASE + 28)
-DENC_VTBYPASS = (_NAME_BASE + 29)
-FAST_TIMECONSTANT = (_NAME_BASE + 30)
-GENLOCK_DELAY = (_NAME_BASE + 31)
-PHASE_SYNC = (_NAME_BASE + 32)
-VIDEO_OUTPUT = (_NAME_BASE + 33)
-CHROMA_PHASEOUT = (_NAME_BASE + 34)
-CHROMA_CENTER = (_NAME_BASE + 35)
-YUV_TO_RGB_INVERT = (_NAME_BASE + 36)
-SOURCE1_BROADCAST = (_NAME_BASE + 37)
-SOURCE1_MODE = (_NAME_BASE + 38)
-SOURCE2_BROADCAST = (_NAME_BASE + 39)
-SOURCE2_MODE = (_NAME_BASE + 40)
-SOURCE3_BROADCAST = (_NAME_BASE + 41)
-SOURCE3_MODE = (_NAME_BASE + 42)
-SIGNAL_STD = (_NAME_BASE + 43)
-NOSIGNAL = 2
-SIGNAL_COLOR = (_NAME_BASE + 44)
diff --git a/sys/lib/python/plat-irix5/WAIT.py b/sys/lib/python/plat-irix5/WAIT.py
deleted file mode 100755
index bfd0133e8..000000000
--- a/sys/lib/python/plat-irix5/WAIT.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Generated by h2py from /usr/include/sys/wait.h
-_WSTOPPED = 0177
-WNOHANG = 0100
-WEXITED = 0001
-WTRAPPED = 0002
-WSTOPPED = 0004
-WCONTINUED = 0010
-WNOWAIT = 0200
-WOPTMASK = (WEXITED|WTRAPPED|WSTOPPED|WCONTINUED|WNOHANG|WNOWAIT)
-WSTOPFLG = 0177
-WCONTFLG = 0177777
-WCOREFLAG = 0200
-WSIGMASK = 0177
-WUNTRACED = 0004
diff --git a/sys/lib/python/plat-irix5/cddb.py b/sys/lib/python/plat-irix5/cddb.py
deleted file mode 100755
index 7b2711f0b..000000000
--- a/sys/lib/python/plat-irix5/cddb.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# This file implements a class which forms an interface to the .cddb
-# directory that is maintained by SGI's cdman program.
-#
-# Usage is as follows:
-#
-# import readcd
-# r = readcd.Readcd()
-# c = Cddb(r.gettrackinfo())
-#
-# Now you can use c.artist, c.title and c.track[trackno] (where trackno
-# starts at 1). When the CD is not recognized, all values will be the empty
-# string.
-# It is also possible to set the above mentioned variables to new values.
-# You can then use c.write() to write out the changed values to the
-# .cdplayerrc file.
-
-import string, posix, os
-
-_cddbrc = '.cddb'
-_DB_ID_NTRACKS = 5
-_dbid_map = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ@_=+abcdefghijklmnopqrstuvwxyz'
-def _dbid(v):
- if v >= len(_dbid_map):
- return string.zfill(v, 2)
- else:
- return _dbid_map[v]
-
-def tochash(toc):
- if type(toc) == type(''):
- tracklist = []
- for i in range(2, len(toc), 4):
- tracklist.append((None,
- (int(toc[i:i+2]),
- int(toc[i+2:i+4]))))
- else:
- tracklist = toc
- ntracks = len(tracklist)
- hash = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
- if ntracks <= _DB_ID_NTRACKS:
- nidtracks = ntracks
- else:
- nidtracks = _DB_ID_NTRACKS - 1
- min = 0
- sec = 0
- for track in tracklist:
- start, length = track
- min = min + length[0]
- sec = sec + length[1]
- min = min + sec / 60
- sec = sec % 60
- hash = hash + _dbid(min) + _dbid(sec)
- for i in range(nidtracks):
- start, length = tracklist[i]
- hash = hash + _dbid(length[0]) + _dbid(length[1])
- return hash
-
-class Cddb:
- def __init__(self, tracklist):
- if os.environ.has_key('CDDB_PATH'):
- path = os.environ['CDDB_PATH']
- cddb_path = path.split(',')
- else:
- home = os.environ['HOME']
- cddb_path = [home + '/' + _cddbrc]
-
- self._get_id(tracklist)
-
- for dir in cddb_path:
- file = dir + '/' + self.id + '.rdb'
- try:
- f = open(file, 'r')
- self.file = file
- break
- except IOError:
- pass
- ntracks = int(self.id[:2], 16)
- self.artist = ''
- self.title = ''
- self.track = [None] + [''] * ntracks
- self.trackartist = [None] + [''] * ntracks
- self.notes = []
- if not hasattr(self, 'file'):
- return
- import re
- reg = re.compile(r'^([^.]*)\.([^:]*):[\t ]+(.*)')
- while 1:
- line = f.readline()
- if not line:
- break
- match = reg.match(line)
- if not match:
- print 'syntax error in ' + file
- continue
- name1, name2, value = match.group(1, 2, 3)
- if name1 == 'album':
- if name2 == 'artist':
- self.artist = value
- elif name2 == 'title':
- self.title = value
- elif name2 == 'toc':
- if not self.toc:
- self.toc = value
- if self.toc != value:
- print 'toc\'s don\'t match'
- elif name2 == 'notes':
- self.notes.append(value)
- elif name1[:5] == 'track':
- try:
- trackno = int(name1[5:])
- except strings.atoi_error:
- print 'syntax error in ' + file
- continue
- if trackno > ntracks:
- print 'track number %r in file %r out of range' % (trackno, file)
- continue
- if name2 == 'title':
- self.track[trackno] = value
- elif name2 == 'artist':
- self.trackartist[trackno] = value
- f.close()
- for i in range(2, len(self.track)):
- track = self.track[i]
- # if track title starts with `,', use initial part
- # of previous track's title
- if track and track[0] == ',':
- try:
- off = self.track[i - 1].index(',')
- except ValueError:
- pass
- else:
- self.track[i] = self.track[i-1][:off] \
- + track
-
- def _get_id(self, tracklist):
- # fill in self.id and self.toc.
- # if the argument is a string ending in .rdb, the part
- # upto the suffix is taken as the id.
- if type(tracklist) == type(''):
- if tracklist[-4:] == '.rdb':
- self.id = tracklist[:-4]
- self.toc = ''
- return
- t = []
- for i in range(2, len(tracklist), 4):
- t.append((None, \
- (int(tracklist[i:i+2]), \
- int(tracklist[i+2:i+4]))))
- tracklist = t
- ntracks = len(tracklist)
- self.id = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
- if ntracks <= _DB_ID_NTRACKS:
- nidtracks = ntracks
- else:
- nidtracks = _DB_ID_NTRACKS - 1
- min = 0
- sec = 0
- for track in tracklist:
- start, length = track
- min = min + length[0]
- sec = sec + length[1]
- min = min + sec / 60
- sec = sec % 60
- self.id = self.id + _dbid(min) + _dbid(sec)
- for i in range(nidtracks):
- start, length = tracklist[i]
- self.id = self.id + _dbid(length[0]) + _dbid(length[1])
- self.toc = string.zfill(ntracks, 2)
- for track in tracklist:
- start, length = track
- self.toc = self.toc + string.zfill(length[0], 2) + \
- string.zfill(length[1], 2)
-
- def write(self):
- import posixpath
- if os.environ.has_key('CDDB_WRITE_DIR'):
- dir = os.environ['CDDB_WRITE_DIR']
- else:
- dir = os.environ['HOME'] + '/' + _cddbrc
- file = dir + '/' + self.id + '.rdb'
- if posixpath.exists(file):
- # make backup copy
- posix.rename(file, file + '~')
- f = open(file, 'w')
- f.write('album.title:\t' + self.title + '\n')
- f.write('album.artist:\t' + self.artist + '\n')
- f.write('album.toc:\t' + self.toc + '\n')
- for note in self.notes:
- f.write('album.notes:\t' + note + '\n')
- prevpref = None
- for i in range(1, len(self.track)):
- if self.trackartist[i]:
- f.write('track%r.artist:\t%s\n' % (i, self.trackartist[i]))
- track = self.track[i]
- try:
- off = track.index(',')
- except ValuError:
- prevpref = None
- else:
- if prevpref and track[:off] == prevpref:
- track = track[off:]
- else:
- prevpref = track[:off]
- f.write('track%r.title:\t%s\n' % (i, track))
- f.close()
diff --git a/sys/lib/python/plat-irix5/cdplayer.py b/sys/lib/python/plat-irix5/cdplayer.py
deleted file mode 100755
index 1c0168f7d..000000000
--- a/sys/lib/python/plat-irix5/cdplayer.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# This file implements a class which forms an interface to the .cdplayerrc
-# file that is maintained by SGI's cdplayer program.
-#
-# Usage is as follows:
-#
-# import readcd
-# r = readcd.Readcd()
-# c = Cdplayer(r.gettrackinfo())
-#
-# Now you can use c.artist, c.title and c.track[trackno] (where trackno
-# starts at 1). When the CD is not recognized, all values will be the empty
-# string.
-# It is also possible to set the above mentioned variables to new values.
-# You can then use c.write() to write out the changed values to the
-# .cdplayerrc file.
-
-cdplayerrc = '.cdplayerrc'
-
-class Cdplayer:
- def __init__(self, tracklist):
- import string
- self.artist = ''
- self.title = ''
- if type(tracklist) == type(''):
- t = []
- for i in range(2, len(tracklist), 4):
- t.append((None, \
- (string.atoi(tracklist[i:i+2]), \
- string.atoi(tracklist[i+2:i+4]))))
- tracklist = t
- self.track = [None] + [''] * len(tracklist)
- self.id = 'd' + string.zfill(len(tracklist), 2)
- for track in tracklist:
- start, length = track
- self.id = self.id + string.zfill(length[0], 2) + \
- string.zfill(length[1], 2)
- try:
- import posix
- f = open(posix.environ['HOME'] + '/' + cdplayerrc, 'r')
- except IOError:
- return
- import re
- reg = re.compile(r'^([^:]*):\t(.*)')
- s = self.id + '.'
- l = len(s)
- while 1:
- line = f.readline()
- if line == '':
- break
- if line[:l] == s:
- line = line[l:]
- match = reg.match(line)
- if not match:
- print 'syntax error in ~/' + cdplayerrc
- continue
- name, value = match.group(1, 2)
- if name == 'title':
- self.title = value
- elif name == 'artist':
- self.artist = value
- elif name[:5] == 'track':
- trackno = string.atoi(name[6:])
- self.track[trackno] = value
- f.close()
-
- def write(self):
- import posix
- filename = posix.environ['HOME'] + '/' + cdplayerrc
- try:
- old = open(filename, 'r')
- except IOError:
- old = open('/dev/null', 'r')
- new = open(filename + '.new', 'w')
- s = self.id + '.'
- l = len(s)
- while 1:
- line = old.readline()
- if line == '':
- break
- if line[:l] != s:
- new.write(line)
- new.write(self.id + '.title:\t' + self.title + '\n')
- new.write(self.id + '.artist:\t' + self.artist + '\n')
- for i in range(1, len(self.track)):
- new.write('%s.track.%r:\t%s\n' % (self.id, i, self.track[i]))
- old.close()
- new.close()
- posix.rename(filename + '.new', filename)
diff --git a/sys/lib/python/plat-irix5/flp.doc b/sys/lib/python/plat-irix5/flp.doc
deleted file mode 100755
index 1a2f374ae..000000000
--- a/sys/lib/python/plat-irix5/flp.doc
+++ /dev/null
@@ -1,117 +0,0 @@
-.SH
-Module flp
-.LP
-The flp module loads fl-forms from fd files, as generated
-by fdesign. The module is designed to be flexible enough to allow
-almost anything to be done with the loaded form.
-.LP
-Loadform defines
-two types of functions: functions to parse fd files and functions to
-create the forms from the templates returned by the parse functions.
-There are fairly low-level create functions that create single objects,
-and convenience routines that create complete forms, including callbacks,
-etc.
-.LP
-The exception flp.error is raised whenever an error occurs while parsing a forms
-definition file or creating a form.
-.SH 2
-Parsing functions
-.LP
-There are two parsing functions, parse_form() and parse_forms(). They
-take the following form:
-.LP
-.ft C
-ftuple = parse_form(filename, formname)
-.br
-ftdict = parse_forms(filename)
-.IP
-Parse_form parses a single form, and returns a tuple (ftmp, otmplist).
-Ftmp is a template for a form, otmplist is a list of templates for
-objects. See below for a description of these templates.
-.IP
-Parse_forms parses all forms in an fd file. It returns a dictionary of
-(ftmp, otmplist) tuples, indexed by formname.
-.IP
-Filename is the name of the forms definition file to inspect. The functions
-appends '.fd' if needed, and use 'sys.path' to locate the file.
-.IP
-formname is the name of the form to load. This argument is mandatory,
-even if the file only contains one form.
-.LP
-The form template and object template are structures that contain all
-the information read from the fd file, in 'natural' form. A form
-template record contains the following fields:
-.IP
-.nf
-"Name", the name of the form;
-"Width", the width of the form;
-"Height", the height of the form; and
-"Numberofobjects", the number of objects in the form.
-.LP
-An object template contains the following fields:
-.IP
-.nf
-"Class", the class of object (eg. FL.BUTTON);
-"Type", the sub-class (eg. FL.NORMALBUTTON);
-"Box", a list with four members: [x, y, width, height];
-"Boxtype", the type of box (eg. FL.DOWNBOX);
-"Colors", a list with the two object colors;
-"Alignment", the label alignment (eg. FL.ALIGNLEFT);
-"Style", the label style (eg. FL.BOLDSTYLE);
-"Lcol", the label color;
-"Label", a string containing the label;
-"Name", a string containing the name of the object;
-"Callback", a string containing the callback routine name; and
-"Argument", a string containing the callback routine extra argument.
-.SH
-Low-level create routines.
-.LP
-The three low-level creation routines are called as follows:
-.LP
-.ft C
-form = create_form(form_template)
-.IP
-Create an fl form from a form template. Returns the form created.
-.LP
-.ft C
-obj = create_object(form, obj_template)
-.IP
-Create an object in an fl form. Return the new object.
-An error is raised if the object has a callback routine.
-.SH
-High-level create routines.
-.LP
-The 'standard' way to handle forms in python is to define a class
-that contains the form and all the objects (insofar as they are named),
-and that defines all the callback functions, and use an instance of
-this class to handle the form interaction.
-Flp contains three routines that simplify handling this paradigm:
-.LP
-.ft C
-create_full_form(instance, ftuple)
-.IP
-This routine takes an instance of your form-handling class and an
-ftuple (as returned by the parsing routines) as parameters. It inserts
-the form into the instance, defines all object names and arranges that
-the callback methods are called. All the names inserted into the
-instance are the same as the names used for the objects, etc. in the
-fd file.
-.LP
-.ft C
-merge_full_form(instance, form, ftuple)
-.IP
-This function does the same as create_full_form, only it does not create
-the form itself nor the 'background box' that fdesign automatically
-adds to each form. This is useful if your class inherits a superclass
-that already defines a skeleton form (with 'OK' and 'Cancel' buttons,
-for instance), and you want to merge the new form into that existing
-form. The 'form' parameter is the form to which the new objects are
-added.
-.LP
-If you use the paradigm sketched here but need slightly more control
-over object creation there is a routine that creates a single object
-and inserts its name (and arranges for the callback routine to be
-called):
-.LP
-.ft C
-create_object_instance(instance, form, obj_template)
diff --git a/sys/lib/python/plat-irix5/flp.py b/sys/lib/python/plat-irix5/flp.py
deleted file mode 100755
index 4f9175f5f..000000000
--- a/sys/lib/python/plat-irix5/flp.py
+++ /dev/null
@@ -1,451 +0,0 @@
-#
-# flp - Module to load fl forms from fd files
-#
-# Jack Jansen, December 1991
-#
-import string
-import os
-import sys
-import FL
-
-SPLITLINE = '--------------------'
-FORMLINE = '=============== FORM ==============='
-ENDLINE = '=============================='
-
-class error(Exception):
- pass
-
-##################################################################
-# Part 1 - The parsing routines #
-##################################################################
-
-#
-# Externally visible function. Load form.
-#
-def parse_form(filename, formname):
- forms = checkcache(filename)
- if forms is None:
- forms = parse_forms(filename)
- if forms.has_key(formname):
- return forms[formname]
- else:
- raise error, 'No such form in fd file'
-
-#
-# Externally visible function. Load all forms.
-#
-def parse_forms(filename):
- forms = checkcache(filename)
- if forms is not None: return forms
- fp = _open_formfile(filename)
- nforms = _parse_fd_header(fp)
- forms = {}
- for i in range(nforms):
- form = _parse_fd_form(fp, None)
- forms[form[0].Name] = form
- writecache(filename, forms)
- return forms
-
-#
-# Internal: see if a cached version of the file exists
-#
-MAGIC = '.fdc'
-_internal_cache = {} # Used by frozen scripts only
-def checkcache(filename):
- if _internal_cache.has_key(filename):
- altforms = _internal_cache[filename]
- return _unpack_cache(altforms)
- import marshal
- fp, filename = _open_formfile2(filename)
- fp.close()
- cachename = filename + 'c'
- try:
- fp = open(cachename, 'r')
- except IOError:
- #print 'flp: no cache file', cachename
- return None
- try:
- if fp.read(4) != MAGIC:
- print 'flp: bad magic word in cache file', cachename
- return None
- cache_mtime = rdlong(fp)
- file_mtime = getmtime(filename)
- if cache_mtime != file_mtime:
- #print 'flp: outdated cache file', cachename
- return None
- #print 'flp: valid cache file', cachename
- altforms = marshal.load(fp)
- return _unpack_cache(altforms)
- finally:
- fp.close()
-
-def _unpack_cache(altforms):
- forms = {}
- for name in altforms.keys():
- altobj, altlist = altforms[name]
- obj = _newobj()
- obj.make(altobj)
- list = []
- for altobj in altlist:
- nobj = _newobj()
- nobj.make(altobj)
- list.append(nobj)
- forms[name] = obj, list
- return forms
-
-def rdlong(fp):
- s = fp.read(4)
- if len(s) != 4: return None
- a, b, c, d = s[0], s[1], s[2], s[3]
- return ord(a)<<24 | ord(b)<<16 | ord(c)<<8 | ord(d)
-
-def wrlong(fp, x):
- a, b, c, d = (x>>24)&0xff, (x>>16)&0xff, (x>>8)&0xff, x&0xff
- fp.write(chr(a) + chr(b) + chr(c) + chr(d))
-
-def getmtime(filename):
- import os
- from stat import ST_MTIME
- try:
- return os.stat(filename)[ST_MTIME]
- except os.error:
- return None
-
-#
-# Internal: write cached version of the form (parsing is too slow!)
-#
-def writecache(filename, forms):
- import marshal
- fp, filename = _open_formfile2(filename)
- fp.close()
- cachename = filename + 'c'
- try:
- fp = open(cachename, 'w')
- except IOError:
- print 'flp: can\'t create cache file', cachename
- return # Never mind
- fp.write('\0\0\0\0') # Seek back and write MAGIC when done
- wrlong(fp, getmtime(filename))
- altforms = _pack_cache(forms)
- marshal.dump(altforms, fp)
- fp.seek(0)
- fp.write(MAGIC)
- fp.close()
- #print 'flp: wrote cache file', cachename
-
-#
-# External: print some statements that set up the internal cache.
-# This is for use with the "freeze" script. You should call
-# flp.freeze(filename) for all forms used by the script, and collect
-# the output on a file in a module file named "frozenforms.py". Then
-# in the main program of the script import frozenforms.
-# (Don't forget to take this out when using the unfrozen version of
-# the script!)
-#
-def freeze(filename):
- forms = parse_forms(filename)
- altforms = _pack_cache(forms)
- print 'import flp'
- print 'flp._internal_cache[', repr(filename), '] =', altforms
-
-#
-# Internal: create the data structure to be placed in the cache
-#
-def _pack_cache(forms):
- altforms = {}
- for name in forms.keys():
- obj, list = forms[name]
- altobj = obj.__dict__
- altlist = []
- for obj in list: altlist.append(obj.__dict__)
- altforms[name] = altobj, altlist
- return altforms
-
-#
-# Internal: Locate form file (using PYTHONPATH) and open file
-#
-def _open_formfile(filename):
- return _open_formfile2(filename)[0]
-
-def _open_formfile2(filename):
- if filename[-3:] != '.fd':
- filename = filename + '.fd'
- if filename[0] == '/':
- try:
- fp = open(filename,'r')
- except IOError:
- fp = None
- else:
- for pc in sys.path:
- pn = os.path.join(pc, filename)
- try:
- fp = open(pn, 'r')
- filename = pn
- break
- except IOError:
- fp = None
- if fp is None:
- raise error, 'Cannot find forms file ' + filename
- return fp, filename
-
-#
-# Internal: parse the fd file header, return number of forms
-#
-def _parse_fd_header(file):
- # First read the magic header line
- datum = _parse_1_line(file)
- if datum != ('Magic', 12321):
- raise error, 'Not a forms definition file'
- # Now skip until we know number of forms
- while 1:
- datum = _parse_1_line(file)
- if type(datum) == type(()) and datum[0] == 'Numberofforms':
- break
- return datum[1]
-#
-# Internal: parse fd form, or skip if name doesn't match.
-# the special value None means 'always parse it'.
-#
-def _parse_fd_form(file, name):
- datum = _parse_1_line(file)
- if datum != FORMLINE:
- raise error, 'Missing === FORM === line'
- form = _parse_object(file)
- if form.Name == name or name is None:
- objs = []
- for j in range(form.Numberofobjects):
- obj = _parse_object(file)
- objs.append(obj)
- return (form, objs)
- else:
- for j in range(form.Numberofobjects):
- _skip_object(file)
- return None
-
-#
-# Internal class: a convenient place to store object info fields
-#
-class _newobj:
- def add(self, name, value):
- self.__dict__[name] = value
- def make(self, dict):
- for name in dict.keys():
- self.add(name, dict[name])
-
-#
-# Internal parsing routines.
-#
-def _parse_string(str):
- if '\\' in str:
- s = '\'' + str + '\''
- try:
- return eval(s)
- except:
- pass
- return str
-
-def _parse_num(str):
- return eval(str)
-
-def _parse_numlist(str):
- slist = string.split(str)
- nlist = []
- for i in slist:
- nlist.append(_parse_num(i))
- return nlist
-
-# This dictionary maps item names to parsing routines.
-# If no routine is given '_parse_num' is default.
-_parse_func = { \
- 'Name': _parse_string, \
- 'Box': _parse_numlist, \
- 'Colors': _parse_numlist, \
- 'Label': _parse_string, \
- 'Name': _parse_string, \
- 'Callback': _parse_string, \
- 'Argument': _parse_string }
-
-# This function parses a line, and returns either
-# a string or a tuple (name,value)
-
-import re
-prog = re.compile('^([^:]*): *(.*)')
-
-def _parse_line(line):
- match = prog.match(line)
- if not match:
- return line
- name, value = match.group(1, 2)
- if name[0] == 'N':
- name = string.join(string.split(name),'')
- name = string.lower(name)
- name = string.capitalize(name)
- try:
- pf = _parse_func[name]
- except KeyError:
- pf = _parse_num
- value = pf(value)
- return (name, value)
-
-def _readline(file):
- line = file.readline()
- if not line:
- raise EOFError
- return line[:-1]
-
-def _parse_1_line(file):
- line = _readline(file)
- while line == '':
- line = _readline(file)
- return _parse_line(line)
-
-def _skip_object(file):
- line = ''
- while not line in (SPLITLINE, FORMLINE, ENDLINE):
- pos = file.tell()
- line = _readline(file)
- if line == FORMLINE:
- file.seek(pos)
-
-def _parse_object(file):
- obj = _newobj()
- while 1:
- pos = file.tell()
- datum = _parse_1_line(file)
- if datum in (SPLITLINE, FORMLINE, ENDLINE):
- if datum == FORMLINE:
- file.seek(pos)
- return obj
- if type(datum) is not type(()) or len(datum) != 2:
- raise error, 'Parse error, illegal line in object: '+datum
- obj.add(datum[0], datum[1])
-
-#################################################################
-# Part 2 - High-level object/form creation routines #
-#################################################################
-
-#
-# External - Create a form an link to an instance variable.
-#
-def create_full_form(inst, (fdata, odatalist)):
- form = create_form(fdata)
- exec 'inst.'+fdata.Name+' = form\n'
- for odata in odatalist:
- create_object_instance(inst, form, odata)
-
-#
-# External - Merge a form into an existing form in an instance
-# variable.
-#
-def merge_full_form(inst, form, (fdata, odatalist)):
- exec 'inst.'+fdata.Name+' = form\n'
- if odatalist[0].Class != FL.BOX:
- raise error, 'merge_full_form() expects FL.BOX as first obj'
- for odata in odatalist[1:]:
- create_object_instance(inst, form, odata)
-
-
-#################################################################
-# Part 3 - Low-level object/form creation routines #
-#################################################################
-
-#
-# External Create_form - Create form from parameters
-#
-def create_form(fdata):
- import fl
- return fl.make_form(FL.NO_BOX, fdata.Width, fdata.Height)
-
-#
-# External create_object - Create an object. Make sure there are
-# no callbacks. Returns the object created.
-#
-def create_object(form, odata):
- obj = _create_object(form, odata)
- if odata.Callback:
- raise error, 'Creating free object with callback'
- return obj
-#
-# External create_object_instance - Create object in an instance.
-#
-def create_object_instance(inst, form, odata):
- obj = _create_object(form, odata)
- if odata.Callback:
- cbfunc = eval('inst.'+odata.Callback)
- obj.set_call_back(cbfunc, odata.Argument)
- if odata.Name:
- exec 'inst.' + odata.Name + ' = obj\n'
-#
-# Internal _create_object: Create the object and fill options
-#
-def _create_object(form, odata):
- crfunc = _select_crfunc(form, odata.Class)
- obj = crfunc(odata.Type, odata.Box[0], odata.Box[1], odata.Box[2], \
- odata.Box[3], odata.Label)
- if not odata.Class in (FL.BEGIN_GROUP, FL.END_GROUP):
- obj.boxtype = odata.Boxtype
- obj.col1 = odata.Colors[0]
- obj.col2 = odata.Colors[1]
- obj.align = odata.Alignment
- obj.lstyle = odata.Style
- obj.lsize = odata.Size
- obj.lcol = odata.Lcol
- return obj
-#
-# Internal crfunc: helper function that returns correct create function
-#
-def _select_crfunc(fm, cl):
- if cl == FL.BEGIN_GROUP: return fm.bgn_group
- elif cl == FL.END_GROUP: return fm.end_group
- elif cl == FL.BITMAP: return fm.add_bitmap
- elif cl == FL.BOX: return fm.add_box
- elif cl == FL.BROWSER: return fm.add_browser
- elif cl == FL.BUTTON: return fm.add_button
- elif cl == FL.CHART: return fm.add_chart
- elif cl == FL.CHOICE: return fm.add_choice
- elif cl == FL.CLOCK: return fm.add_clock
- elif cl == FL.COUNTER: return fm.add_counter
- elif cl == FL.DIAL: return fm.add_dial
- elif cl == FL.FREE: return fm.add_free
- elif cl == FL.INPUT: return fm.add_input
- elif cl == FL.LIGHTBUTTON: return fm.add_lightbutton
- elif cl == FL.MENU: return fm.add_menu
- elif cl == FL.POSITIONER: return fm.add_positioner
- elif cl == FL.ROUNDBUTTON: return fm.add_roundbutton
- elif cl == FL.SLIDER: return fm.add_slider
- elif cl == FL.VALSLIDER: return fm.add_valslider
- elif cl == FL.TEXT: return fm.add_text
- elif cl == FL.TIMER: return fm.add_timer
- else:
- raise error, 'Unknown object type: %r' % (cl,)
-
-
-def test():
- import time
- t0 = time.time()
- if len(sys.argv) == 2:
- forms = parse_forms(sys.argv[1])
- t1 = time.time()
- print 'parse time:', 0.001*(t1-t0), 'sec.'
- keys = forms.keys()
- keys.sort()
- for i in keys:
- _printform(forms[i])
- elif len(sys.argv) == 3:
- form = parse_form(sys.argv[1], sys.argv[2])
- t1 = time.time()
- print 'parse time:', round(t1-t0, 3), 'sec.'
- _printform(form)
- else:
- print 'Usage: test fdfile [form]'
-
-def _printform(form):
- f = form[0]
- objs = form[1]
- print 'Form ', f.Name, ', size: ', f.Width, f.Height, ' Nobj ', f.Numberofobjects
- for i in objs:
- print ' Obj ', i.Name, ' type ', i.Class, i.Type
- print ' Box ', i.Box, ' btype ', i.Boxtype
- print ' Label ', i.Label, ' size/style/col/align ', i.Size,i.Style, i.Lcol, i.Alignment
- print ' cols ', i.Colors
- print ' cback ', i.Callback, i.Argument
diff --git a/sys/lib/python/plat-irix5/jpeg.py b/sys/lib/python/plat-irix5/jpeg.py
deleted file mode 100755
index f8fc7e721..000000000
--- a/sys/lib/python/plat-irix5/jpeg.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Implement 'jpeg' interface using SGI's compression library
-
-# XXX Options 'smooth' and 'optimize' are ignored.
-
-# XXX It appears that compressing grayscale images doesn't work right;
-# XXX the resulting file causes weirdness.
-
-class error(Exception):
- pass
-
-options = {'quality': 75, 'optimize': 0, 'smooth': 0, 'forcegray': 0}
-
-comp = None
-decomp = None
-
-def compress(imgdata, width, height, bytesperpixel):
- global comp
- import cl
- if comp is None: comp = cl.OpenCompressor(cl.JPEG)
- if bytesperpixel == 1:
- format = cl.GRAYSCALE
- elif bytesperpixel == 4:
- format = cl.RGBX
- if options['forcegray']:
- iformat = cl.GRAYSCALE
- else:
- iformat = cl.YUV
- # XXX How to support 'optimize'?
- params = [cl.IMAGE_WIDTH, width, cl.IMAGE_HEIGHT, height, \
- cl.ORIGINAL_FORMAT, format, \
- cl.ORIENTATION, cl.BOTTOM_UP, \
- cl.QUALITY_FACTOR, options['quality'], \
- cl.INTERNAL_FORMAT, iformat, \
- ]
- comp.SetParams(params)
- jpegdata = comp.Compress(1, imgdata)
- return jpegdata
-
-def decompress(jpegdata):
- global decomp
- import cl
- if decomp is None: decomp = cl.OpenDecompressor(cl.JPEG)
- headersize = decomp.ReadHeader(jpegdata)
- params = [cl.IMAGE_WIDTH, 0, cl.IMAGE_HEIGHT, 0, cl.INTERNAL_FORMAT, 0]
- decomp.GetParams(params)
- width, height, format = params[1], params[3], params[5]
- if format == cl.GRAYSCALE or options['forcegray']:
- format = cl.GRAYSCALE
- bytesperpixel = 1
- else:
- format = cl.RGBX
- bytesperpixel = 4
- # XXX How to support 'smooth'?
- params = [cl.ORIGINAL_FORMAT, format, \
- cl.ORIENTATION, cl.BOTTOM_UP, \
- cl.FRAME_BUFFER_SIZE, width*height*bytesperpixel]
- decomp.SetParams(params)
- imgdata = decomp.Decompress(1, jpegdata)
- return imgdata, width, height, bytesperpixel
-
-def setoption(name, value):
- if type(value) is not type(0):
- raise TypeError, 'jpeg.setoption: numeric options only'
- if name == 'forcegrey':
- name = 'forcegray'
- if not options.has_key(name):
- raise KeyError, 'jpeg.setoption: unknown option name'
- options[name] = int(value)
-
-def test():
- import sys
- if sys.argv[1:2] == ['-g']:
- del sys.argv[1]
- setoption('forcegray', 1)
- if not sys.argv[1:]:
- sys.argv.append('/usr/local/images/data/jpg/asterix.jpg')
- for file in sys.argv[1:]:
- show(file)
-
-def show(file):
- import gl, GL, DEVICE
- jpegdata = open(file, 'r').read()
- imgdata, width, height, bytesperpixel = decompress(jpegdata)
- gl.foreground()
- gl.prefsize(width, height)
- win = gl.winopen(file)
- if bytesperpixel == 1:
- gl.cmode()
- gl.pixmode(GL.PM_SIZE, 8)
- gl.gconfig()
- for i in range(256):
- gl.mapcolor(i, i, i, i)
- else:
- gl.RGBmode()
- gl.pixmode(GL.PM_SIZE, 32)
- gl.gconfig()
- gl.qdevice(DEVICE.REDRAW)
- gl.qdevice(DEVICE.ESCKEY)
- gl.qdevice(DEVICE.WINQUIT)
- gl.qdevice(DEVICE.WINSHUT)
- gl.lrectwrite(0, 0, width-1, height-1, imgdata)
- while 1:
- dev, val = gl.qread()
- if dev in (DEVICE.ESCKEY, DEVICE.WINSHUT, DEVICE.WINQUIT):
- break
- if dev == DEVICE.REDRAW:
- gl.lrectwrite(0, 0, width-1, height-1, imgdata)
- gl.winclose(win)
- # Now test the compression and write the result to a fixed filename
- newjpegdata = compress(imgdata, width, height, bytesperpixel)
- open('/tmp/j.jpg', 'w').write(newjpegdata)
diff --git a/sys/lib/python/plat-irix5/panel.py b/sys/lib/python/plat-irix5/panel.py
deleted file mode 100755
index 12e62a51b..000000000
--- a/sys/lib/python/plat-irix5/panel.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Module 'panel'
-#
-# Support for the Panel library.
-# Uses built-in module 'pnl'.
-# Applications should use 'panel.function' instead of 'pnl.function';
-# most 'pnl' functions are transparently exported by 'panel',
-# but dopanel() is overridden and you have to use this version
-# if you want to use callbacks.
-
-
-import pnl
-
-
-debug = 0
-
-
-# Test if an object is a list.
-#
-def is_list(x):
- return type(x) == type([])
-
-
-# Reverse a list.
-#
-def reverse(list):
- res = []
- for item in list:
- res.insert(0, item)
- return res
-
-
-# Get an attribute of a list, which may itself be another list.
-# Don't use 'prop' for name.
-#
-def getattrlist(list, name):
- for item in list:
- if item and is_list(item) and item[0] == name:
- return item[1:]
- return []
-
-
-# Get a property of a list, which may itself be another list.
-#
-def getproplist(list, name):
- for item in list:
- if item and is_list(item) and item[0] == 'prop':
- if len(item) > 1 and item[1] == name:
- return item[2:]
- return []
-
-
-# Test if an actuator description contains the property 'end-of-group'
-#
-def is_endgroup(list):
- x = getproplist(list, 'end-of-group')
- return (x and x[0] == '#t')
-
-
-# Neatly display an actuator definition given as S-expression
-# the prefix string is printed before each line.
-#
-def show_actuator(prefix, a):
- for item in a:
- if not is_list(item):
- print prefix, item
- elif item and item[0] == 'al':
- print prefix, 'Subactuator list:'
- for a in item[1:]:
- show_actuator(prefix + ' ', a)
- elif len(item) == 2:
- print prefix, item[0], '=>', item[1]
- elif len(item) == 3 and item[0] == 'prop':
- print prefix, 'Prop', item[1], '=>',
- print item[2]
- else:
- print prefix, '?', item
-
-
-# Neatly display a panel.
-#
-def show_panel(prefix, p):
- for item in p:
- if not is_list(item):
- print prefix, item
- elif item and item[0] == 'al':
- print prefix, 'Actuator list:'
- for a in item[1:]:
- show_actuator(prefix + ' ', a)
- elif len(item) == 2:
- print prefix, item[0], '=>', item[1]
- elif len(item) == 3 and item[0] == 'prop':
- print prefix, 'Prop', item[1], '=>',
- print item[2]
- else:
- print prefix, '?', item
-
-
-# Exception raised by build_actuator or build_panel.
-#
-panel_error = 'panel error'
-
-
-# Dummy callback used to initialize the callbacks.
-#
-def dummy_callback(arg):
- pass
-
-
-# Assign attributes to members of the target.
-# Attribute names in exclist are ignored.
-# The member name is the attribute name prefixed with the prefix.
-#
-def assign_members(target, attrlist, exclist, prefix):
- for item in attrlist:
- if is_list(item) and len(item) == 2 and item[0] not in exclist:
- name, value = item[0], item[1]
- ok = 1
- if value[0] in '-0123456789':
- value = eval(value)
- elif value[0] == '"':
- value = value[1:-1]
- elif value == 'move-then-resize':
- # Strange default set by Panel Editor...
- ok = 0
- else:
- print 'unknown value', value, 'for', name
- ok = 0
- if ok:
- lhs = 'target.' + prefix + name
- stmt = lhs + '=' + repr(value)
- if debug: print 'exec', stmt
- try:
- exec stmt + '\n'
- except KeyboardInterrupt: # Don't catch this!
- raise KeyboardInterrupt
- except:
- print 'assign failed:', stmt
-
-
-# Build a real actuator from an actuator description.
-# Return a pair (actuator, name).
-#
-def build_actuator(descr):
- namelist = getattrlist(descr, 'name')
- if namelist:
- # Assume it is a string
- actuatorname = namelist[0][1:-1]
- else:
- actuatorname = ''
- type = descr[0]
- if type[:4] == 'pnl_': type = type[4:]
- act = pnl.mkact(type)
- act.downfunc = act.activefunc = act.upfunc = dummy_callback
- #
- assign_members(act, descr[1:], ['al', 'data', 'name'], '')
- #
- # Treat actuator-specific data
- #
- datalist = getattrlist(descr, 'data')
- prefix = ''
- if type[-4:] == 'puck':
- prefix = 'puck_'
- elif type == 'mouse':
- prefix = 'mouse_'
- assign_members(act, datalist, [], prefix)
- #
- return act, actuatorname
-
-
-# Build all sub-actuators and add them to the super-actuator.
-# The super-actuator must already have been added to the panel.
-# Sub-actuators with defined names are added as members to the panel
-# so they can be referenced as p.name.
-#
-# Note: I have no idea how panel.endgroup() works when applied
-# to a sub-actuator.
-#
-def build_subactuators(panel, super_act, al):
- #
- # This is nearly the same loop as below in build_panel(),
- # except a call is made to addsubact() instead of addact().
- #
- for a in al:
- act, name = build_actuator(a)
- act.addsubact(super_act)
- if name:
- stmt = 'panel.' + name + ' = act'
- if debug: print 'exec', stmt
- exec stmt + '\n'
- if is_endgroup(a):
- panel.endgroup()
- sub_al = getattrlist(a, 'al')
- if sub_al:
- build_subactuators(panel, act, sub_al)
- #
- # Fix the actuator to which whe just added subactuators.
- # This can't hurt (I hope) and is needed for the scroll actuator.
- #
- super_act.fixact()
-
-
-# Build a real panel from a panel definition.
-# Return a panel object p, where for each named actuator a, p.name is a
-# reference to a.
-#
-def build_panel(descr):
- #
- # Sanity check
- #
- if (not descr) or descr[0] != 'panel':
- raise panel_error, 'panel description must start with "panel"'
- #
- if debug: show_panel('', descr)
- #
- # Create an empty panel
- #
- panel = pnl.mkpanel()
- #
- # Assign panel attributes
- #
- assign_members(panel, descr[1:], ['al'], '')
- #
- # Look for actuator list
- #
- al = getattrlist(descr, 'al')
- #
- # The order in which actuators are created is important
- # because of the endgroup() operator.
- # Unfortunately the Panel Editor outputs the actuator list
- # in reverse order, so we reverse it here.
- #
- al = reverse(al)
- #
- for a in al:
- act, name = build_actuator(a)
- act.addact(panel)
- if name:
- stmt = 'panel.' + name + ' = act'
- exec stmt + '\n'
- if is_endgroup(a):
- panel.endgroup()
- sub_al = getattrlist(a, 'al')
- if sub_al:
- build_subactuators(panel, act, sub_al)
- #
- return panel
-
-
-# Wrapper around pnl.dopanel() which calls call-back functions.
-#
-def my_dopanel():
- # Extract only the first 4 elements to allow for future expansion
- a, down, active, up = pnl.dopanel()[:4]
- if down:
- down.downfunc(down)
- if active:
- active.activefunc(active)
- if up:
- up.upfunc(up)
- return a
-
-
-# Create one or more panels from a description file (S-expressions)
-# generated by the Panel Editor.
-#
-def defpanellist(file):
- import panelparser
- descrlist = panelparser.parse_file(open(file, 'r'))
- panellist = []
- for descr in descrlist:
- panellist.append(build_panel(descr))
- return panellist
-
-
-# Import everything from built-in method pnl, so the user can always
-# use panel.foo() instead of pnl.foo().
-# This gives *no* performance penalty once this module is imported.
-#
-from pnl import * # for export
-
-dopanel = my_dopanel # override pnl.dopanel
diff --git a/sys/lib/python/plat-irix5/panelparser.py b/sys/lib/python/plat-irix5/panelparser.py
deleted file mode 100755
index c831c4927..000000000
--- a/sys/lib/python/plat-irix5/panelparser.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Module 'parser'
-#
-# Parse S-expressions output by the Panel Editor
-# (which is written in Scheme so it can't help writing S-expressions).
-#
-# See notes at end of file.
-
-
-whitespace = ' \t\n'
-operators = '()\''
-separators = operators + whitespace + ';' + '"'
-
-
-# Tokenize a string.
-# Return a list of tokens (strings).
-#
-def tokenize_string(s):
- tokens = []
- while s:
- c = s[:1]
- if c in whitespace:
- s = s[1:]
- elif c == ';':
- s = ''
- elif c == '"':
- n = len(s)
- i = 1
- while i < n:
- c = s[i]
- i = i+1
- if c == '"': break
- if c == '\\': i = i+1
- tokens.append(s[:i])
- s = s[i:]
- elif c in operators:
- tokens.append(c)
- s = s[1:]
- else:
- n = len(s)
- i = 1
- while i < n:
- if s[i] in separators: break
- i = i+1
- tokens.append(s[:i])
- s = s[i:]
- return tokens
-
-
-# Tokenize a whole file (given as file object, not as file name).
-# Return a list of tokens (strings).
-#
-def tokenize_file(fp):
- tokens = []
- while 1:
- line = fp.readline()
- if not line: break
- tokens = tokens + tokenize_string(line)
- return tokens
-
-
-# Exception raised by parse_exr.
-#
-syntax_error = 'syntax error'
-
-
-# Parse an S-expression.
-# Input is a list of tokens as returned by tokenize_*().
-# Return a pair (expr, tokens)
-# where expr is a list representing the s-expression,
-# and tokens contains the remaining tokens.
-# May raise syntax_error.
-#
-def parse_expr(tokens):
- if (not tokens) or tokens[0] != '(':
- raise syntax_error, 'expected "("'
- tokens = tokens[1:]
- expr = []
- while 1:
- if not tokens:
- raise syntax_error, 'missing ")"'
- if tokens[0] == ')':
- return expr, tokens[1:]
- elif tokens[0] == '(':
- subexpr, tokens = parse_expr(tokens)
- expr.append(subexpr)
- else:
- expr.append(tokens[0])
- tokens = tokens[1:]
-
-
-# Parse a file (given as file object, not as file name).
-# Return a list of parsed S-expressions found at the top level.
-#
-def parse_file(fp):
- tokens = tokenize_file(fp)
- exprlist = []
- while tokens:
- expr, tokens = parse_expr(tokens)
- exprlist.append(expr)
- return exprlist
-
-
-# EXAMPLE:
-#
-# The input
-# '(hip (hop hur-ray))'
-#
-# passed to tokenize_string() returns the token list
-# ['(', 'hip', '(', 'hop', 'hur-ray', ')', ')']
-#
-# When this is passed to parse_expr() it returns the expression
-# ['hip', ['hop', 'hur-ray']]
-# plus an empty token list (because there are no tokens left.
-#
-# When a file containing the example is passed to parse_file() it returns
-# a list whose only element is the output of parse_expr() above:
-# [['hip', ['hop', 'hur-ray']]]
-
-
-# TOKENIZING:
-#
-# Comments start with semicolon (;) and continue till the end of the line.
-#
-# Tokens are separated by whitespace, except the following characters
-# always form a separate token (outside strings):
-# ( ) '
-# Strings are enclosed in double quotes (") and backslash (\) is used
-# as escape character in strings.
diff --git a/sys/lib/python/plat-irix5/readcd.doc b/sys/lib/python/plat-irix5/readcd.doc
deleted file mode 100755
index 1be549c79..000000000
--- a/sys/lib/python/plat-irix5/readcd.doc
+++ /dev/null
@@ -1,104 +0,0 @@
-Interface to CD-ROM player.
-
-This module implements an interface to the built-in cd module. The
-intention is to provide a more user-friendly interface than the
-built-in module.
-
-The module defines a class Readcd with several methods. The
-initialization of the class will try to open the CD player. This
-means that initialization will fail if the CD player is already in
-use. A RuntimeError will be raised by the cd module in that case.
-
-The way to work with this module is as follows. The user specifies
-the parts of the CD that are to be read and he specifies callback
-functions which are to be called by the system. At some point he can
-tell the system to play. The specified parts of the CD will then be
-read and the callbacks will be called.
-
-Initialization.
-===============
-
-r = readcd.Readcd([cd-player [, mode]])
-
-The optional arguments are the name of the CD device and the mode.
-When "mode" is not specified, it defaults to 'r' (which is the only
-possible value); when "cd-player" also isn't specified, it defaults
-to "None" which indicates the default CD player.
-
-Methods.
-========
-
-eject() -- Eject the CD from the player.
-
-reset() -- Reset the list of data stretches to be played.
-
-appendtrack(track) -- Append the specified track to the list of music
-stretches.
-
-appendstretch(first, last) -- Append the stretch from "first" to "last"
-to the list of music stretches. Both "first" and "last" can be in one
-of four forms. "None": for "first", the beginning of the CD, for
-"last" the end of the CD; a single integer: a track number--playing
-starts at the beginning of the track or ends at the end of the
-specified track; a three-tuple: the absolute time from the start of
-the CD in minutes, seconds, frames; a four-tuple: track number and
-relative time within the track in minutes, seconds, frames.
-
-settracks(tracklist) -- The argument is a list of integers. The list
-of stretches is set to argument list. The old list is discarded.
-
-setcallback(type, func, arg) -- Set a callback function for "type".
-The function will be called as func(arg, type, data) where "arg" is
-the third argument of setcallback, "type" is the type of callback,
-"data" is type-dependent data. See the CDsetcallback(3) manual page
-for more information. The possible "type" arguments are defined in
-the CD module.
-
-removecallback(type) -- Remove the callback for "type".
-
-gettrackinfo([tracklist]) -- Return a list of tuples. Each tuple
-consists of start and length information of a track. The start and
-length information consist of three-tuples with minutes, seconds and
-frames. The optional tracklist argument gives a list of interesting
-track numbers. If no tracklist is specified, information about all
-tracks is returned.
-
-getstatus() -- Return the status information of the CD.
-
-play() -- Play the preprogrammed stretches of music from the CD. When
-nothing was programmed, the whole CD is played.
-
-Specifying stretches.
-=====================
-
-There are three methods available to specify a stretch of music to be
-played. The easiest way is to use "settracklist(tracklist)" with which
-a list of tracks can be specified. "settracklist(tracklist)" is
-equivalent to the sequence
- reset()
- for track in tracklist:
- appendtrack(track)
-
-The next method is "appendtrack(track)" with which a whole track can be
-added to the list of music to be played. "appendtrack(track)" is
-equivalent to "appendstretch(track, track)".
-
-The most complete method is "appendstretch(first, last)". Using this
-method, it is possible to specify any stretch of music.
-
-When two consecutive tracks are played, it is possible to choose
-whether the pause that may be between the tracks is played as well or
-whether the pause should be skipped. When the end of a stretch is
-specified using a track number and the next stretch starts at the
-beginning of the following track and that was also specified using the
-track number (that is, both were specified as integers, not as tuples),
-the pause is played. When either value was specified using absolute
-time or track-relative time (that is, as three-tuple or as
-four-tuple), the pause will not be played.
-
-Errors.
-=======
-
-When an error occurs, an exception will be raised. Depending on where
-the error occurs, the exception may either be "readcd.Error" or
-"RuntimeError".
diff --git a/sys/lib/python/plat-irix5/readcd.py b/sys/lib/python/plat-irix5/readcd.py
deleted file mode 100755
index 5453ce421..000000000
--- a/sys/lib/python/plat-irix5/readcd.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Class interface to the CD module.
-
-import cd, CD
-
-class Error(Exception):
- pass
-class _Stop(Exception):
- pass
-
-def _doatime(self, cb_type, data):
- if ((data[0] * 60) + data[1]) * 75 + data[2] > self.end:
-## print 'done with list entry', repr(self.listindex)
- raise _Stop
- func, arg = self.callbacks[cb_type]
- if func:
- func(arg, cb_type, data)
-
-def _dopnum(self, cb_type, data):
- if data > self.end:
-## print 'done with list entry', repr(self.listindex)
- raise _Stop
- func, arg = self.callbacks[cb_type]
- if func:
- func(arg, cb_type, data)
-
-class Readcd:
- def __init__(self, *arg):
- if len(arg) == 0:
- self.player = cd.open()
- elif len(arg) == 1:
- self.player = cd.open(arg[0])
- elif len(arg) == 2:
- self.player = cd.open(arg[0], arg[1])
- else:
- raise Error, 'bad __init__ call'
- self.list = []
- self.callbacks = [(None, None)] * 8
- self.parser = cd.createparser()
- self.playing = 0
- self.end = 0
- self.status = None
- self.trackinfo = None
-
- def eject(self):
- self.player.eject()
- self.list = []
- self.end = 0
- self.listindex = 0
- self.status = None
- self.trackinfo = None
- if self.playing:
-## print 'stop playing from eject'
- raise _Stop
-
- def pmsf2msf(self, track, min, sec, frame):
- if not self.status:
- self.cachestatus()
- if track < self.status[5] or track > self.status[6]:
- raise Error, 'track number out of range'
- if not self.trackinfo:
- self.cacheinfo()
- start, total = self.trackinfo[track]
- start = ((start[0] * 60) + start[1]) * 75 + start[2]
- total = ((total[0] * 60) + total[1]) * 75 + total[2]
- block = ((min * 60) + sec) * 75 + frame
- if block > total:
- raise Error, 'out of range'
- block = start + block
- min, block = divmod(block, 75*60)
- sec, frame = divmod(block, 75)
- return min, sec, frame
-
- def reset(self):
- self.list = []
-
- def appendtrack(self, track):
- self.appendstretch(track, track)
-
- def appendstretch(self, start, end):
- if not self.status:
- self.cachestatus()
- if not start:
- start = 1
- if not end:
- end = self.status[6]
- if type(end) == type(0):
- if end < self.status[5] or end > self.status[6]:
- raise Error, 'range error'
- else:
- l = len(end)
- if l == 4:
- prog, min, sec, frame = end
- if prog < self.status[5] or prog > self.status[6]:
- raise Error, 'range error'
- end = self.pmsf2msf(prog, min, sec, frame)
- elif l != 3:
- raise Error, 'syntax error'
- if type(start) == type(0):
- if start < self.status[5] or start > self.status[6]:
- raise Error, 'range error'
- if len(self.list) > 0:
- s, e = self.list[-1]
- if type(e) == type(0):
- if start == e+1:
- start = s
- del self.list[-1]
- else:
- l = len(start)
- if l == 4:
- prog, min, sec, frame = start
- if prog < self.status[5] or prog > self.status[6]:
- raise Error, 'range error'
- start = self.pmsf2msf(prog, min, sec, frame)
- elif l != 3:
- raise Error, 'syntax error'
- self.list.append((start, end))
-
- def settracks(self, list):
- self.list = []
- for track in list:
- self.appendtrack(track)
-
- def setcallback(self, cb_type, func, arg):
- if cb_type < 0 or cb_type >= 8:
- raise Error, 'type out of range'
- self.callbacks[cb_type] = (func, arg)
- if self.playing:
- start, end = self.list[self.listindex]
- if type(end) == type(0):
- if cb_type != CD.PNUM:
- self.parser.setcallback(cb_type, func, arg)
- else:
- if cb_type != CD.ATIME:
- self.parser.setcallback(cb_type, func, arg)
-
- def removecallback(self, cb_type):
- if cb_type < 0 or cb_type >= 8:
- raise Error, 'type out of range'
- self.callbacks[cb_type] = (None, None)
- if self.playing:
- start, end = self.list[self.listindex]
- if type(end) == type(0):
- if cb_type != CD.PNUM:
- self.parser.removecallback(cb_type)
- else:
- if cb_type != CD.ATIME:
- self.parser.removecallback(cb_type)
-
- def gettrackinfo(self, *arg):
- if not self.status:
- self.cachestatus()
- if not self.trackinfo:
- self.cacheinfo()
- if len(arg) == 0:
- return self.trackinfo[self.status[5]:self.status[6]+1]
- result = []
- for i in arg:
- if i < self.status[5] or i > self.status[6]:
- raise Error, 'range error'
- result.append(self.trackinfo[i])
- return result
-
- def cacheinfo(self):
- if not self.status:
- self.cachestatus()
- self.trackinfo = []
- for i in range(self.status[5]):
- self.trackinfo.append(None)
- for i in range(self.status[5], self.status[6]+1):
- self.trackinfo.append(self.player.gettrackinfo(i))
-
- def cachestatus(self):
- self.status = self.player.getstatus()
- if self.status[0] == CD.NODISC:
- self.status = None
- raise Error, 'no disc in player'
-
- def getstatus(self):
- return self.player.getstatus()
-
- def play(self):
- if not self.status:
- self.cachestatus()
- size = self.player.bestreadsize()
- self.listindex = 0
- self.playing = 0
- for i in range(8):
- func, arg = self.callbacks[i]
- if func:
- self.parser.setcallback(i, func, arg)
- else:
- self.parser.removecallback(i)
- if len(self.list) == 0:
- for i in range(self.status[5], self.status[6]+1):
- self.appendtrack(i)
- try:
- while 1:
- if not self.playing:
- if self.listindex >= len(self.list):
- return
- start, end = self.list[self.listindex]
- if type(start) == type(0):
- dummy = self.player.seektrack(
- start)
- else:
- min, sec, frame = start
- dummy = self.player.seek(
- min, sec, frame)
- if type(end) == type(0):
- self.parser.setcallback(
- CD.PNUM, _dopnum, self)
- self.end = end
- func, arg = \
- self.callbacks[CD.ATIME]
- if func:
- self.parser.setcallback(CD.ATIME, func, arg)
- else:
- self.parser.removecallback(CD.ATIME)
- else:
- min, sec, frame = end
- self.parser.setcallback(
- CD.ATIME, _doatime,
- self)
- self.end = (min * 60 + sec) * \
- 75 + frame
- func, arg = \
- self.callbacks[CD.PNUM]
- if func:
- self.parser.setcallback(CD.PNUM, func, arg)
- else:
- self.parser.removecallback(CD.PNUM)
- self.playing = 1
- data = self.player.readda(size)
- if data == '':
- self.playing = 0
- self.listindex = self.listindex + 1
- continue
- try:
- self.parser.parseframe(data)
- except _Stop:
- self.playing = 0
- self.listindex = self.listindex + 1
- finally:
- self.playing = 0
diff --git a/sys/lib/python/plat-irix5/regen b/sys/lib/python/plat-irix5/regen
deleted file mode 100755
index c950a475c..000000000
--- a/sys/lib/python/plat-irix5/regen
+++ /dev/null
@@ -1,10 +0,0 @@
-#! /bin/sh
-case `uname -sr` in
-'IRIX '[45].*) ;;
-*) echo Probably not on an IRIX system 1>&2
- exit 1;;
-esac
-set -v
-h2py /usr/include/sys/file.h
-h2py -i '(u_long)' /usr/include/netinet/in.h
-h2py /usr/include/errno.h
diff --git a/sys/lib/python/plat-irix5/torgb.py b/sys/lib/python/plat-irix5/torgb.py
deleted file mode 100755
index 54c86c477..000000000
--- a/sys/lib/python/plat-irix5/torgb.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Convert "arbitrary" image files to rgb files (SGI's image format).
-# Input may be compressed.
-# The uncompressed file type may be PBM, PGM, PPM, GIF, TIFF, or Sun raster.
-# An exception is raised if the file is not of a recognized type.
-# Returned filename is either the input filename or a temporary filename;
-# in the latter case the caller must ensure that it is removed.
-# Other temporary files used are removed by the function.
-
-import os
-import tempfile
-import pipes
-import imghdr
-
-table = {}
-
-t = pipes.Template()
-t.append('fromppm $IN $OUT', 'ff')
-table['ppm'] = t
-
-t = pipes.Template()
-t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
-t.append('fromppm $IN $OUT', 'ff')
-table['pnm'] = t
-table['pgm'] = t
-table['pbm'] = t
-
-t = pipes.Template()
-t.append('fromgif $IN $OUT', 'ff')
-table['gif'] = t
-
-t = pipes.Template()
-t.append('tifftopnm', '--')
-t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
-t.append('fromppm $IN $OUT', 'ff')
-table['tiff'] = t
-
-t = pipes.Template()
-t.append('rasttopnm', '--')
-t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
-t.append('fromppm $IN $OUT', 'ff')
-table['rast'] = t
-
-t = pipes.Template()
-t.append('djpeg', '--')
-t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
-t.append('fromppm $IN $OUT', 'ff')
-table['jpeg'] = t
-
-uncompress = pipes.Template()
-uncompress.append('uncompress', '--')
-
-
-class error(Exception):
- pass
-
-def torgb(filename):
- temps = []
- ret = None
- try:
- ret = _torgb(filename, temps)
- finally:
- for temp in temps[:]:
- if temp != ret:
- try:
- os.unlink(temp)
- except os.error:
- pass
- temps.remove(temp)
- return ret
-
-def _torgb(filename, temps):
- if filename[-2:] == '.Z':
- (fd, fname) = tempfile.mkstemp()
- os.close(fd)
- temps.append(fname)
- sts = uncompress.copy(filename, fname)
- if sts:
- raise error, filename + ': uncompress failed'
- else:
- fname = filename
- try:
- ftype = imghdr.what(fname)
- except IOError, msg:
- if type(msg) == type(()) and len(msg) == 2 and \
- type(msg[0]) == type(0) and type(msg[1]) == type(''):
- msg = msg[1]
- if type(msg) is not type(''):
- msg = repr(msg)
- raise error, filename + ': ' + msg
- if ftype == 'rgb':
- return fname
- if ftype is None or not table.has_key(ftype):
- raise error, '%s: unsupported image file type %r' % (filename, ftype)
- (fd, temp) = tempfile.mkstemp()
- os.close(fd)
- sts = table[ftype].copy(fname, temp)
- if sts:
- raise error, filename + ': conversion to rgb failed'
- return temp
diff --git a/sys/lib/python/plat-irix6/AL.py b/sys/lib/python/plat-irix6/AL.py
deleted file mode 100644
index ec941a2ba..000000000
--- a/sys/lib/python/plat-irix6/AL.py
+++ /dev/null
@@ -1,61 +0,0 @@
-RATE_48000 = 48000
-RATE_44100 = 44100
-RATE_32000 = 32000
-RATE_22050 = 22050
-RATE_16000 = 16000
-RATE_11025 = 11025
-RATE_8000 = 8000
-
-SAMPFMT_TWOSCOMP= 1
-SAMPFMT_FLOAT = 32
-SAMPFMT_DOUBLE = 64
-
-SAMPLE_8 = 1
-SAMPLE_16 = 2
- # SAMPLE_24 is the low 24 bits of a long, sign extended to 32 bits
-SAMPLE_24 = 4
-
-MONO = 1
-STEREO = 2
-QUADRO = 4 # 4CHANNEL is not a legal Python name
-
-INPUT_LINE = 0
-INPUT_MIC = 1
-INPUT_DIGITAL = 2
-
-MONITOR_OFF = 0
-MONITOR_ON = 1
-
-ERROR_NUMBER = 0
-ERROR_TYPE = 1
-ERROR_LOCATION_LSP = 2
-ERROR_LOCATION_MSP = 3
-ERROR_LENGTH = 4
-
-ERROR_INPUT_UNDERFLOW = 0
-ERROR_OUTPUT_OVERFLOW = 1
-
-# These seem to be not supported anymore:
-##HOLD, RELEASE = 0, 1
-##ATTAIL, ATHEAD, ATMARK, ATTIME = 0, 1, 2, 3
-
-DEFAULT_DEVICE = 1
-
-INPUT_SOURCE = 0
-LEFT_INPUT_ATTEN = 1
-RIGHT_INPUT_ATTEN = 2
-INPUT_RATE = 3
-OUTPUT_RATE = 4
-LEFT_SPEAKER_GAIN = 5
-RIGHT_SPEAKER_GAIN = 6
-INPUT_COUNT = 7
-OUTPUT_COUNT = 8
-UNUSED_COUNT = 9
-SYNC_INPUT_TO_AES = 10
-SYNC_OUTPUT_TO_AES = 11
-MONITOR_CTL = 12
-LEFT_MONITOR_ATTEN = 13
-RIGHT_MONITOR_ATTEN = 14
-
-ENUM_VALUE = 0 # only certain values are valid
-RANGE_VALUE = 1 # any value in range is valid
diff --git a/sys/lib/python/plat-irix6/CD.py b/sys/lib/python/plat-irix6/CD.py
deleted file mode 100644
index 8c1e03bc6..000000000
--- a/sys/lib/python/plat-irix6/CD.py
+++ /dev/null
@@ -1,34 +0,0 @@
-ERROR = 0
-NODISC = 1
-READY = 2
-PLAYING = 3
-PAUSED = 4
-STILL = 5
-
-AUDIO = 0
-PNUM = 1
-INDEX = 2
-PTIME = 3
-ATIME = 4
-CATALOG = 5
-IDENT = 6
-CONTROL = 7
-
-CDDA_DATASIZE = 2352
-
-##CDDA_SUBCODESIZE = (sizeof(struct subcodeQ))
-##CDDA_BLOCKSIZE = (sizeof(struct cdframe))
-##CDDA_NUMSAMPLES = (CDDA_DATASIZE/2)
-##
-##CDQ_PREEMP_MASK = 0xd
-##CDQ_COPY_MASK = 0xb
-##CDQ_DDATA_MASK = 0xd
-##CDQ_BROADCAST_MASK = 0x8
-##CDQ_PREEMPHASIS = 0x1
-##CDQ_COPY_PERMITTED = 0x2
-##CDQ_DIGITAL_DATA = 0x4
-##CDQ_BROADCAST_USE = 0x8
-##
-##CDQ_MODE1 = 0x1
-##CDQ_MODE2 = 0x2
-##CDQ_MODE3 = 0x3
diff --git a/sys/lib/python/plat-irix6/CL.py b/sys/lib/python/plat-irix6/CL.py
deleted file mode 100644
index 23259c554..000000000
--- a/sys/lib/python/plat-irix6/CL.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Backward compatible module CL.
-# All relevant symbols are now defined in the module cl.
-try:
- from cl import *
-except ImportError:
- from CL_old import *
-else:
- del CompressImage
- del DecompressImage
- del GetAlgorithmName
- del OpenCompressor
- del OpenDecompressor
- del QueryAlgorithms
- del QueryMaxHeaderSize
- del QueryScheme
- del QuerySchemeFromName
- del SetDefault
- del SetMax
- del SetMin
- try:
- del cvt_type
- except NameError:
- pass
- del error
diff --git a/sys/lib/python/plat-irix6/DEVICE.py b/sys/lib/python/plat-irix6/DEVICE.py
deleted file mode 100644
index 7ace8cb0b..000000000
--- a/sys/lib/python/plat-irix6/DEVICE.py
+++ /dev/null
@@ -1,400 +0,0 @@
-NULLDEV = 0
-BUTOFFSET = 1
-VALOFFSET = 256
-PSEUDOFFSET = 512
-BUT2OFFSET = 3840
-TIMOFFSET = 515
-XKBDOFFSET = 143
-BUTCOUNT = 255
-VALCOUNT = 256
-TIMCOUNT = 4
-XKBDCOUNT = 28
-USERBUTOFFSET = 4096
-USERVALOFFSET = 12288
-USERPSEUDOFFSET = 16384
-BUT0 = 1
-BUT1 = 2
-BUT2 = 3
-BUT3 = 4
-BUT4 = 5
-BUT5 = 6
-BUT6 = 7
-BUT7 = 8
-BUT8 = 9
-BUT9 = 10
-BUT10 = 11
-BUT11 = 12
-BUT12 = 13
-BUT13 = 14
-BUT14 = 15
-BUT15 = 16
-BUT16 = 17
-BUT17 = 18
-BUT18 = 19
-BUT19 = 20
-BUT20 = 21
-BUT21 = 22
-BUT22 = 23
-BUT23 = 24
-BUT24 = 25
-BUT25 = 26
-BUT26 = 27
-BUT27 = 28
-BUT28 = 29
-BUT29 = 30
-BUT30 = 31
-BUT31 = 32
-BUT32 = 33
-BUT33 = 34
-BUT34 = 35
-BUT35 = 36
-BUT36 = 37
-BUT37 = 38
-BUT38 = 39
-BUT39 = 40
-BUT40 = 41
-BUT41 = 42
-BUT42 = 43
-BUT43 = 44
-BUT44 = 45
-BUT45 = 46
-BUT46 = 47
-BUT47 = 48
-BUT48 = 49
-BUT49 = 50
-BUT50 = 51
-BUT51 = 52
-BUT52 = 53
-BUT53 = 54
-BUT54 = 55
-BUT55 = 56
-BUT56 = 57
-BUT57 = 58
-BUT58 = 59
-BUT59 = 60
-BUT60 = 61
-BUT61 = 62
-BUT62 = 63
-BUT63 = 64
-BUT64 = 65
-BUT65 = 66
-BUT66 = 67
-BUT67 = 68
-BUT68 = 69
-BUT69 = 70
-BUT70 = 71
-BUT71 = 72
-BUT72 = 73
-BUT73 = 74
-BUT74 = 75
-BUT75 = 76
-BUT76 = 77
-BUT77 = 78
-BUT78 = 79
-BUT79 = 80
-BUT80 = 81
-BUT81 = 82
-BUT82 = 83
-MAXKBDBUT = 83
-BUT100 = 101
-BUT101 = 102
-BUT102 = 103
-BUT103 = 104
-BUT104 = 105
-BUT105 = 106
-BUT106 = 107
-BUT107 = 108
-BUT108 = 109
-BUT109 = 110
-BUT110 = 111
-BUT111 = 112
-BUT112 = 113
-BUT113 = 114
-BUT114 = 115
-BUT115 = 116
-BUT116 = 117
-BUT117 = 118
-BUT118 = 119
-BUT119 = 120
-BUT120 = 121
-BUT121 = 122
-BUT122 = 123
-BUT123 = 124
-BUT124 = 125
-BUT125 = 126
-BUT126 = 127
-BUT127 = 128
-BUT128 = 129
-BUT129 = 130
-BUT130 = 131
-BUT131 = 132
-BUT132 = 133
-BUT133 = 134
-BUT134 = 135
-BUT135 = 136
-BUT136 = 137
-BUT137 = 138
-BUT138 = 139
-BUT139 = 140
-BUT140 = 141
-BUT141 = 142
-BUT142 = 143
-BUT143 = 144
-BUT144 = 145
-BUT145 = 146
-BUT146 = 147
-BUT147 = 148
-BUT148 = 149
-BUT149 = 150
-BUT150 = 151
-BUT151 = 152
-BUT152 = 153
-BUT153 = 154
-BUT154 = 155
-BUT155 = 156
-BUT156 = 157
-BUT157 = 158
-BUT158 = 159
-BUT159 = 160
-BUT160 = 161
-BUT161 = 162
-BUT162 = 163
-BUT163 = 164
-BUT164 = 165
-BUT165 = 166
-BUT166 = 167
-BUT167 = 168
-BUT168 = 169
-BUT181 = 182
-BUT182 = 183
-BUT183 = 184
-BUT184 = 185
-BUT185 = 186
-BUT186 = 187
-BUT187 = 188
-BUT188 = 189
-BUT189 = 190
-MOUSE1 = 101
-MOUSE2 = 102
-MOUSE3 = 103
-LEFTMOUSE = 103
-MIDDLEMOUSE = 102
-RIGHTMOUSE = 101
-LPENBUT = 104
-BPAD0 = 105
-BPAD1 = 106
-BPAD2 = 107
-BPAD3 = 108
-LPENVALID = 109
-SWBASE = 111
-SW0 = 111
-SW1 = 112
-SW2 = 113
-SW3 = 114
-SW4 = 115
-SW5 = 116
-SW6 = 117
-SW7 = 118
-SW8 = 119
-SW9 = 120
-SW10 = 121
-SW11 = 122
-SW12 = 123
-SW13 = 124
-SW14 = 125
-SW15 = 126
-SW16 = 127
-SW17 = 128
-SW18 = 129
-SW19 = 130
-SW20 = 131
-SW21 = 132
-SW22 = 133
-SW23 = 134
-SW24 = 135
-SW25 = 136
-SW26 = 137
-SW27 = 138
-SW28 = 139
-SW29 = 140
-SW30 = 141
-SW31 = 142
-SBBASE = 182
-SBPICK = 182
-SBBUT1 = 183
-SBBUT2 = 184
-SBBUT3 = 185
-SBBUT4 = 186
-SBBUT5 = 187
-SBBUT6 = 188
-SBBUT7 = 189
-SBBUT8 = 190
-AKEY = 11
-BKEY = 36
-CKEY = 28
-DKEY = 18
-EKEY = 17
-FKEY = 19
-GKEY = 26
-HKEY = 27
-IKEY = 40
-JKEY = 34
-KKEY = 35
-LKEY = 42
-MKEY = 44
-NKEY = 37
-OKEY = 41
-PKEY = 48
-QKEY = 10
-RKEY = 24
-SKEY = 12
-TKEY = 25
-UKEY = 33
-VKEY = 29
-WKEY = 16
-XKEY = 21
-YKEY = 32
-ZKEY = 20
-ZEROKEY = 46
-ONEKEY = 8
-TWOKEY = 14
-THREEKEY = 15
-FOURKEY = 22
-FIVEKEY = 23
-SIXKEY = 30
-SEVENKEY = 31
-EIGHTKEY = 38
-NINEKEY = 39
-BREAKKEY = 1
-SETUPKEY = 2
-CTRLKEY = 3
-LEFTCTRLKEY = CTRLKEY
-CAPSLOCKKEY = 4
-RIGHTSHIFTKEY = 5
-LEFTSHIFTKEY = 6
-NOSCRLKEY = 13
-ESCKEY = 7
-TABKEY = 9
-RETKEY = 51
-SPACEKEY = 83
-LINEFEEDKEY = 60
-BACKSPACEKEY = 61
-DELKEY = 62
-SEMICOLONKEY = 43
-PERIODKEY = 52
-COMMAKEY = 45
-QUOTEKEY = 50
-ACCENTGRAVEKEY = 55
-MINUSKEY = 47
-VIRGULEKEY = 53
-BACKSLASHKEY = 57
-EQUALKEY = 54
-LEFTBRACKETKEY = 49
-RIGHTBRACKETKEY = 56
-LEFTARROWKEY = 73
-DOWNARROWKEY = 74
-RIGHTARROWKEY = 80
-UPARROWKEY = 81
-PAD0 = 59
-PAD1 = 58
-PAD2 = 64
-PAD3 = 65
-PAD4 = 63
-PAD5 = 69
-PAD6 = 70
-PAD7 = 67
-PAD8 = 68
-PAD9 = 75
-PADPF1 = 72
-PADPF2 = 71
-PADPF3 = 79
-PADPF4 = 78
-PADPERIOD = 66
-PADMINUS = 76
-PADCOMMA = 77
-PADENTER = 82
-LEFTALTKEY = 143
-RIGHTALTKEY = 144
-RIGHTCTRLKEY = 145
-F1KEY = 146
-F2KEY = 147
-F3KEY = 148
-F4KEY = 149
-F5KEY = 150
-F6KEY = 151
-F7KEY = 152
-F8KEY = 153
-F9KEY = 154
-F10KEY = 155
-F11KEY = 156
-F12KEY = 157
-PRINTSCREENKEY = 158
-SCROLLLOCKKEY = 159
-PAUSEKEY = 160
-INSERTKEY = 161
-HOMEKEY = 162
-PAGEUPKEY = 163
-ENDKEY = 164
-PAGEDOWNKEY = 165
-NUMLOCKKEY = 166
-PADVIRGULEKEY = 167
-PADASTERKEY = 168
-PADPLUSKEY = 169
-SGIRESERVED = 256
-DIAL0 = 257
-DIAL1 = 258
-DIAL2 = 259
-DIAL3 = 260
-DIAL4 = 261
-DIAL5 = 262
-DIAL6 = 263
-DIAL7 = 264
-DIAL8 = 265
-MOUSEX = 266
-MOUSEY = 267
-LPENX = 268
-LPENY = 269
-BPADX = 270
-BPADY = 271
-CURSORX = 272
-CURSORY = 273
-GHOSTX = 274
-GHOSTY = 275
-SBTX = 276
-SBTY = 277
-SBTZ = 278
-SBRX = 279
-SBRY = 280
-SBRZ = 281
-SBPERIOD = 282
-TIMER0 = 515
-TIMER1 = 516
-TIMER2 = 517
-TIMER3 = 518
-KEYBD = 513
-RAWKEYBD = 514
-VALMARK = 523
-REDRAW = 528
-INPUTCHANGE = 534
-QFULL = 535
-QREADERROR = 538
-WINFREEZE = 539
-WINTHAW = 540
-REDRAWICONIC = 541
-WINQUIT = 542
-DEPTHCHANGE = 543
-WINSHUT = 546
-DRAWOVERLAY = 547
-VIDEO = 548
-MENUBUTTON = RIGHTMOUSE
-WINCLOSE = 537
-KEYBDFNAMES = 544
-KEYBDFSTRINGS = 545
-MAXSGIDEVICE = 20000
-GERROR = 524
-WMSEND = 529
-WMREPLY = 530
-WMGFCLOSE = 531
-WMTXCLOSE = 532
-MODECHANGE = 533
-PIECECHANGE = 536
diff --git a/sys/lib/python/plat-irix6/ERRNO.py b/sys/lib/python/plat-irix6/ERRNO.py
deleted file mode 100644
index 1836fa04e..000000000
--- a/sys/lib/python/plat-irix6/ERRNO.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# Generated by h2py from /usr/include/errno.h
-
-# Included from sys/errno.h
-
-# Included from standards.h
-__KBASE = 1000
-__IRIXBASE = 1000
-__FTNBASE = 4000
-__FTNLAST = 5999
-EPERM = 1
-ENOENT = 2
-ESRCH = 3
-EINTR = 4
-EIO = 5
-ENXIO = 6
-E2BIG = 7
-ENOEXEC = 8
-EBADF = 9
-ECHILD = 10
-EAGAIN = 11
-ENOMEM = 12
-EACCES = 13
-EFAULT = 14
-ENOTBLK = 15
-EBUSY = 16
-EEXIST = 17
-EXDEV = 18
-ENODEV = 19
-ENOTDIR = 20
-EISDIR = 21
-EINVAL = 22
-ENFILE = 23
-EMFILE = 24
-ENOTTY = 25
-ETXTBSY = 26
-EFBIG = 27
-ENOSPC = 28
-ESPIPE = 29
-EROFS = 30
-EMLINK = 31
-EPIPE = 32
-EDOM = 33
-ERANGE = 34
-ENOMSG = 35
-EIDRM = 36
-ECHRNG = 37
-EL2NSYNC = 38
-EL3HLT = 39
-EL3RST = 40
-ELNRNG = 41
-EUNATCH = 42
-ENOCSI = 43
-EL2HLT = 44
-EDEADLK = 45
-ENOLCK = 46
-ECKPT = 47
-EBADE = 50
-EBADR = 51
-EXFULL = 52
-ENOANO = 53
-EBADRQC = 54
-EBADSLT = 55
-EDEADLOCK = 56
-EBFONT = 57
-ENOSTR = 60
-ENODATA = 61
-ETIME = 62
-ENOSR = 63
-ENONET = 64
-ENOPKG = 65
-EREMOTE = 66
-ENOLINK = 67
-EADV = 68
-ESRMNT = 69
-ECOMM = 70
-EPROTO = 71
-EMULTIHOP = 74
-EBADMSG = 77
-ENAMETOOLONG = 78
-EOVERFLOW = 79
-ENOTUNIQ = 80
-EBADFD = 81
-EREMCHG = 82
-ELIBACC = 83
-ELIBBAD = 84
-ELIBSCN = 85
-ELIBMAX = 86
-ELIBEXEC = 87
-EILSEQ = 88
-ENOSYS = 89
-ELOOP = 90
-ERESTART = 91
-ESTRPIPE = 92
-ENOTEMPTY = 93
-EUSERS = 94
-ENOTSOCK = 95
-EDESTADDRREQ = 96
-EMSGSIZE = 97
-EPROTOTYPE = 98
-ENOPROTOOPT = 99
-EPROTONOSUPPORT = 120
-ESOCKTNOSUPPORT = 121
-EOPNOTSUPP = 122
-EPFNOSUPPORT = 123
-EAFNOSUPPORT = 124
-EADDRINUSE = 125
-EADDRNOTAVAIL = 126
-ENETDOWN = 127
-ENETUNREACH = 128
-ENETRESET = 129
-ECONNABORTED = 130
-ECONNRESET = 131
-ENOBUFS = 132
-EISCONN = 133
-ENOTCONN = 134
-ESHUTDOWN = 143
-ETOOMANYREFS = 144
-ETIMEDOUT = 145
-ECONNREFUSED = 146
-EHOSTDOWN = 147
-EHOSTUNREACH = 148
-LASTERRNO = ENOTCONN
-EWOULDBLOCK = __KBASE+101
-EWOULDBLOCK = EAGAIN
-EALREADY = 149
-EINPROGRESS = 150
-ESTALE = 151
-EIORESID = 500
-EUCLEAN = 135
-ENOTNAM = 137
-ENAVAIL = 138
-EISNAM = 139
-EREMOTEIO = 140
-EINIT = 141
-EREMDEV = 142
-ECANCELED = 158
-ENOLIMFILE = 1001
-EPROCLIM = 1002
-EDISJOINT = 1003
-ENOLOGIN = 1004
-ELOGINLIM = 1005
-EGROUPLOOP = 1006
-ENOATTACH = 1007
-ENOTSUP = 1008
-ENOATTR = 1009
-EFSCORRUPTED = 1010
-EDIRCORRUPTED = 1010
-EWRONGFS = 1011
-EDQUOT = 1133
-ENFSREMOTE = 1135
-ECONTROLLER = 1300
-ENOTCONTROLLER = 1301
-EENQUEUED = 1302
-ENOTENQUEUED = 1303
-EJOINED = 1304
-ENOTJOINED = 1305
-ENOPROC = 1306
-EMUSTRUN = 1307
-ENOTSTOPPED = 1308
-ECLOCKCPU = 1309
-EINVALSTATE = 1310
-ENOEXIST = 1311
-EENDOFMINOR = 1312
-EBUFSIZE = 1313
-EEMPTY = 1314
-ENOINTRGROUP = 1315
-EINVALMODE = 1316
-ECANTEXTENT = 1317
-EINVALTIME = 1318
-EDESTROYED = 1319
-EBDHDL = 1400
-EDELAY = 1401
-ENOBWD = 1402
-EBADRSPEC = 1403
-EBADTSPEC = 1404
-EBADFILT = 1405
-EMIGRATED = 1500
-EMIGRATING = 1501
-ECELLDOWN = 1502
-EMEMRETRY = 1600
diff --git a/sys/lib/python/plat-irix6/FILE.py b/sys/lib/python/plat-irix6/FILE.py
deleted file mode 100644
index ab74d7c67..000000000
--- a/sys/lib/python/plat-irix6/FILE.py
+++ /dev/null
@@ -1,674 +0,0 @@
-# Generated by h2py from /usr/include/sys/file.h
-
-# Included from standards.h
-
-# Included from sys/types.h
-
-# Included from sgidefs.h
-_MIPS_ISA_MIPS1 = 1
-_MIPS_ISA_MIPS2 = 2
-_MIPS_ISA_MIPS3 = 3
-_MIPS_ISA_MIPS4 = 4
-_MIPS_SIM_ABI32 = 1
-_MIPS_SIM_NABI32 = 2
-_MIPS_SIM_ABI64 = 3
-
-# Included from sys/pthread.h
-P_MYID = (-1)
-P_MYHOSTID = (-1)
-
-# Included from sys/bsd_types.h
-
-# Included from sys/mkdev.h
-ONBITSMAJOR = 7
-ONBITSMINOR = 8
-OMAXMAJ = 0x7f
-OMAXMIN = 0xff
-NBITSMAJOR = 14
-NBITSMINOR = 18
-MAXMAJ = 0x1ff
-MAXMIN = 0x3ffff
-OLDDEV = 0
-NEWDEV = 1
-MKDEV_VER = NEWDEV
-def IS_STRING_SPEC_DEV(x): return ((dev_t)(x)==__makedev(MKDEV_VER, 0, 0))
-
-def major(dev): return __major(MKDEV_VER, dev)
-
-def minor(dev): return __minor(MKDEV_VER, dev)
-
-
-# Included from sys/select.h
-FD_SETSIZE = 1024
-__NBBY = 8
-
-# Included from string.h
-NULL = 0L
-NBBY = 8
-
-# Included from sys/cpumask.h
-MAXCPU = 128
-def CPUMASK_INDEX(bit): return ((bit) >> 6)
-
-def CPUMASK_SHFT(bit): return ((bit) & 0x3f)
-
-def CPUMASK_IS_ZERO(p): return ((p) == 0)
-
-def CPUMASK_IS_NONZERO(p): return ((p) != 0)
-
-
-# Included from sys/nodemask.h
-def CNODEMASK_IS_ZERO(p): return ((p) == 0)
-
-def CNODEMASK_IS_NONZERO(p): return ((p) != 0)
-
-
-# Included from sys/sema.h
-
-# Included from sys/timespec.h
-
-# Included from sys/param.h
-
-# Included from sys/signal.h
-SIGHUP = 1
-SIGINT = 2
-SIGQUIT = 3
-SIGILL = 4
-SIGTRAP = 5
-SIGIOT = 6
-SIGABRT = 6
-SIGEMT = 7
-SIGFPE = 8
-SIGKILL = 9
-SIGBUS = 10
-SIGSEGV = 11
-SIGSYS = 12
-SIGPIPE = 13
-SIGALRM = 14
-SIGTERM = 15
-SIGUSR1 = 16
-SIGUSR2 = 17
-SIGCLD = 18
-SIGCHLD = 18
-SIGPWR = 19
-SIGWINCH = 20
-SIGURG = 21
-SIGPOLL = 22
-SIGIO = 22
-SIGSTOP = 23
-SIGTSTP = 24
-SIGCONT = 25
-SIGTTIN = 26
-SIGTTOU = 27
-SIGVTALRM = 28
-SIGPROF = 29
-SIGXCPU = 30
-SIGXFSZ = 31
-SIGK32 = 32
-SIGCKPT = 33
-SIGRESTART = 34
-SIGUME = 35
-SIGPTINTR = 47
-SIGPTRESCHED = 48
-SIGRTMIN = 49
-SIGRTMAX = 64
-__sigargs = int
-
-# Included from sys/sigevent.h
-SIGEV_NONE = 128
-SIGEV_SIGNAL = 129
-SIGEV_CALLBACK = 130
-SIGEV_THREAD = 131
-
-# Included from sys/siginfo.h
-SI_MAXSZ = 128
-SI_USER = 0
-SI_KILL = SI_USER
-SI_QUEUE = -1
-SI_ASYNCIO = -2
-SI_TIMER = -3
-SI_MESGQ = -4
-ILL_ILLOPC = 1
-ILL_ILLOPN = 2
-ILL_ILLADR = 3
-ILL_ILLTRP = 4
-ILL_PRVOPC = 5
-ILL_PRVREG = 6
-ILL_COPROC = 7
-ILL_BADSTK = 8
-NSIGILL = 8
-FPE_INTDIV = 1
-FPE_INTOVF = 2
-FPE_FLTDIV = 3
-FPE_FLTOVF = 4
-FPE_FLTUND = 5
-FPE_FLTRES = 6
-FPE_FLTINV = 7
-FPE_FLTSUB = 8
-NSIGFPE = 8
-SEGV_MAPERR = 1
-SEGV_ACCERR = 2
-NSIGSEGV = 2
-BUS_ADRALN = 1
-BUS_ADRERR = 2
-BUS_OBJERR = 3
-NSIGBUS = 3
-TRAP_BRKPT = 1
-TRAP_TRACE = 2
-NSIGTRAP = 2
-CLD_EXITED = 1
-CLD_KILLED = 2
-CLD_DUMPED = 3
-CLD_TRAPPED = 4
-CLD_STOPPED = 5
-CLD_CONTINUED = 6
-NSIGCLD = 6
-POLL_IN = 1
-POLL_OUT = 2
-POLL_MSG = 3
-POLL_ERR = 4
-POLL_PRI = 5
-POLL_HUP = 6
-NSIGPOLL = 6
-UME_ECCERR = 1
-NSIGUME = 1
-SIG_NOP = 0
-SIG_BLOCK = 1
-SIG_UNBLOCK = 2
-SIG_SETMASK = 3
-SIG_SETMASK32 = 256
-SA_ONSTACK = 0x00000001
-SA_RESETHAND = 0x00000002
-SA_RESTART = 0x00000004
-SA_SIGINFO = 0x00000008
-SA_NODEFER = 0x00000010
-SA_NOCLDWAIT = 0x00010000
-SA_NOCLDSTOP = 0x00020000
-_SA_BSDCALL = 0x10000000
-MINSIGSTKSZ = 512
-SIGSTKSZ = 8192
-SS_ONSTACK = 0x00000001
-SS_DISABLE = 0x00000002
-
-# Included from sys/ucontext.h
-NGREG = 36
-NGREG = 37
-GETCONTEXT = 0
-SETCONTEXT = 1
-UC_SIGMASK = 001
-UC_STACK = 002
-UC_CPU = 004
-UC_MAU = 010
-UC_MCONTEXT = (UC_CPU|UC_MAU)
-UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
-CTX_R0 = 0
-CTX_AT = 1
-CTX_V0 = 2
-CTX_V1 = 3
-CTX_A0 = 4
-CTX_A1 = 5
-CTX_A2 = 6
-CTX_A3 = 7
-CTX_T0 = 8
-CTX_T1 = 9
-CTX_T2 = 10
-CTX_T3 = 11
-CTX_T4 = 12
-CTX_T5 = 13
-CTX_T6 = 14
-CTX_T7 = 15
-CTX_A4 = 8
-CTX_A5 = 9
-CTX_A6 = 10
-CTX_A7 = 11
-CTX_T0 = 12
-CTX_T1 = 13
-CTX_T2 = 14
-CTX_T3 = 15
-CTX_S0 = 16
-CTX_S1 = 17
-CTX_S2 = 18
-CTX_S3 = 19
-CTX_S4 = 20
-CTX_S5 = 21
-CTX_S6 = 22
-CTX_S7 = 23
-CTX_T8 = 24
-CTX_T9 = 25
-CTX_K0 = 26
-CTX_K1 = 27
-CTX_GP = 28
-CTX_SP = 29
-CTX_S8 = 30
-CTX_RA = 31
-CTX_MDLO = 32
-CTX_MDHI = 33
-CTX_CAUSE = 34
-CTX_EPC = 35
-CTX_SR = 36
-CXT_R0 = CTX_R0
-CXT_AT = CTX_AT
-CXT_V0 = CTX_V0
-CXT_V1 = CTX_V1
-CXT_A0 = CTX_A0
-CXT_A1 = CTX_A1
-CXT_A2 = CTX_A2
-CXT_A3 = CTX_A3
-CXT_T0 = CTX_T0
-CXT_T1 = CTX_T1
-CXT_T2 = CTX_T2
-CXT_T3 = CTX_T3
-CXT_T4 = CTX_T4
-CXT_T5 = CTX_T5
-CXT_T6 = CTX_T6
-CXT_T7 = CTX_T7
-CXT_S0 = CTX_S0
-CXT_S1 = CTX_S1
-CXT_S2 = CTX_S2
-CXT_S3 = CTX_S3
-CXT_S4 = CTX_S4
-CXT_S5 = CTX_S5
-CXT_S6 = CTX_S6
-CXT_S7 = CTX_S7
-CXT_T8 = CTX_T8
-CXT_T9 = CTX_T9
-CXT_K0 = CTX_K0
-CXT_K1 = CTX_K1
-CXT_GP = CTX_GP
-CXT_SP = CTX_SP
-CXT_S8 = CTX_S8
-CXT_RA = CTX_RA
-CXT_MDLO = CTX_MDLO
-CXT_MDHI = CTX_MDHI
-CXT_CAUSE = CTX_CAUSE
-CXT_EPC = CTX_EPC
-CXT_SR = CTX_SR
-CTX_FV0 = 0
-CTX_FV1 = 2
-CTX_FA0 = 12
-CTX_FA1 = 13
-CTX_FA2 = 14
-CTX_FA3 = 15
-CTX_FA4 = 16
-CTX_FA5 = 17
-CTX_FA6 = 18
-CTX_FA7 = 19
-CTX_FT0 = 4
-CTX_FT1 = 5
-CTX_FT2 = 6
-CTX_FT3 = 7
-CTX_FT4 = 8
-CTX_FT5 = 9
-CTX_FT6 = 10
-CTX_FT7 = 11
-CTX_FT8 = 20
-CTX_FT9 = 21
-CTX_FT10 = 22
-CTX_FT11 = 23
-CTX_FT12 = 1
-CTX_FT13 = 3
-CTX_FS0 = 24
-CTX_FS1 = 25
-CTX_FS2 = 26
-CTX_FS3 = 27
-CTX_FS4 = 28
-CTX_FS5 = 29
-CTX_FS6 = 30
-CTX_FS7 = 31
-CTX_FT8 = 21
-CTX_FT9 = 23
-CTX_FT10 = 25
-CTX_FT11 = 27
-CTX_FT12 = 29
-CTX_FT13 = 31
-CTX_FT14 = 1
-CTX_FT15 = 3
-CTX_FS0 = 20
-CTX_FS1 = 22
-CTX_FS2 = 24
-CTX_FS3 = 26
-CTX_FS4 = 28
-CTX_FS5 = 30
-SV_ONSTACK = 0x0001
-SV_INTERRUPT = 0x0002
-NUMBSDSIGS = (32)
-def sigmask(sig): return (1L << ((sig)-1))
-
-def sigmask(sig): return (1L << ((sig)-1))
-
-SIG_ERR = (-1)
-SIG_IGN = (1)
-SIG_HOLD = (2)
-SIG_DFL = (0)
-NSIG = 65
-MAXSIG = (NSIG-1)
-NUMSIGS = (NSIG-1)
-BRK_USERBP = 0
-BRK_KERNELBP = 1
-BRK_ABORT = 2
-BRK_BD_TAKEN = 3
-BRK_BD_NOTTAKEN = 4
-BRK_SSTEPBP = 5
-BRK_OVERFLOW = 6
-BRK_DIVZERO = 7
-BRK_RANGE = 8
-BRK_PSEUDO_OP_BIT = 0x80
-BRK_PSEUDO_OP_MAX = 0x3
-BRK_CACHE_SYNC = 0x80
-BRK_MULOVF = 1023
-_POSIX_VERSION = 199506L
-_POSIX_VERSION = 199506
-_POSIX_VDISABLE = 0
-MAX_INPUT = 512
-MAX_CANON = 256
-UID_NOBODY = 60001
-GID_NOBODY = UID_NOBODY
-UID_NOACCESS = 60002
-MAXPID = 0x7ffffff0
-MAXUID = 0x7fffffff
-MAXLINK = 30000
-SSIZE = 1
-SINCR = 1
-KSTKSIZE = 1
-EXTKSTKSIZE = 1
-KSTKIDX = 0
-KSTEIDX = 1
-EXTKSTKSIZE = 0
-KSTKIDX = 0
-CANBSIZ = 256
-HZ = 100
-TICK = 10000000
-NOFILE = 20
-NGROUPS_UMIN = 0
-NGROUPS_UMAX = 32
-NGROUPS = 16
-PMASK = 0177
-PCATCH = 0400
-PLTWAIT = 01000
-PRECALC = 01000
-PSWP = 0
-PINOD = 10
-PSNDD = PINOD
-PRIBIO = 20
-PZERO = 25
-PMEM = 0
-NZERO = 20
-PPIPE = 26
-PVFS = 27
-PWAIT = 30
-PSLEP = 39
-PUSER = 60
-PBATCH_CRITICAL = -1
-PTIME_SHARE = -2
-PTIME_SHARE_OVER = -3
-PBATCH = -4
-PWEIGHTLESS = -5
-IO_NBPC = 4096
-IO_BPCSHIFT = 12
-MIN_NBPC = 4096
-MIN_BPCSHIFT = 12
-MIN_CPSSHIFT = 10
-BPCSHIFT = 12
-CPSSHIFT = 10
-BPCSHIFT = 14
-CPSSHIFT = 12
-CPSSHIFT = 11
-BPSSHIFT = (BPCSHIFT+CPSSHIFT)
-NULL = 0L
-CMASK = 022
-NODEV = (-1)
-NOPAGE = (-1)
-NBPSCTR = 512
-SCTRSHFT = 9
-def BASEPRI(psw): return (((psw) & SR_IMASK) == SR_IMASK0)
-
-def BASEPRI(psw): return (((psw) & SR_IMASK) == SR_IMASK)
-
-def USERMODE(psw): return (((psw) & SR_KSU_MSK) == SR_KSU_USR)
-
-MAXPATHLEN = 1024
-MAXSYMLINKS = 30
-MAXNAMELEN = 256
-PIPE_BUF = 10240
-PIPE_MAX = 10240
-NBBY = 8
-BBSHIFT = 9
-BBSIZE = (1<<BBSHIFT)
-BBMASK = (BBSIZE-1)
-def BBTOB(bbs): return ((bbs) << BBSHIFT)
-
-def OFFTOBB(bytes): return (((__uint64_t)(bytes) + BBSIZE - 1) >> BBSHIFT)
-
-def OFFTOBBT(bytes): return ((off_t)(bytes) >> BBSHIFT)
-
-def BBTOOFF(bbs): return ((off_t)(bbs) << BBSHIFT)
-
-SEEKLIMIT32 = 0x7fffffff
-MAXBSIZE = 8192
-DEV_BSIZE = BBSIZE
-DEV_BSHIFT = BBSHIFT
-def btodb(bytes): return \
-
-def dbtob(db): return \
-
-BLKDEV_IOSHIFT = BPCSHIFT
-BLKDEV_IOSIZE = (1<<BLKDEV_IOSHIFT)
-def BLKDEV_OFF(off): return ((off) & (BLKDEV_IOSIZE - 1))
-
-def BLKDEV_LBN(off): return ((off) >> BLKDEV_IOSHIFT)
-
-def BLKDEV_LTOP(bn): return ((bn) * BLKDEV_BB)
-
-MAXHOSTNAMELEN = 256
-def DELAY(n): return us_delay(n)
-
-def DELAYBUS(n): return us_delaybus(n)
-
-TIMEPOKE_NOW = -100L
-MUTEX_DEFAULT = 0x0
-METER_NAMSZ = 16
-METER_NO_SEQ = -1
-def mutex_spinlock(l): return splhi()
-
-def mutex_spintrylock(l): return splhi()
-
-def spinlock_initialized(l): return 1
-
-SV_FIFO = 0x0
-SV_LIFO = 0x2
-SV_PRIO = 0x4
-SV_KEYED = 0x6
-SV_DEFAULT = SV_FIFO
-SEMA_NOHIST = 0x0001
-SEMA_LOCK = 0x0004
-NSCHEDCLASS = (-(PWEIGHTLESS)+1)
-MR_ACCESS = 1
-MR_UPDATE = 2
-MRLOCK_BARRIER = 0x1
-MRLOCK_BEHAVIOR = 0x2
-MRLOCK_DBLTRIPPABLE = 0x4
-MRLOCK_ALLOW_EQUAL_PRI = 0x8
-MRLOCK_DEFAULT = MRLOCK_BARRIER
-def mraccess(mrp): return mraccessf(mrp, 0)
-
-def mrupdate(mrp): return mrupdatef(mrp, 0)
-
-def mp_mutex_unlock(m): return mutex_unlock(m)
-
-def mp_mutex_trylock(m): return mutex_trylock(m)
-
-def mp_mutex_spinlock(m): return mutex_spinlock(m)
-
-
-# Included from sys/mon.h
-MON_LOCKED = 0x01
-MON_WAITING = 0x02
-MON_TIMEOUT = 0x04
-MON_DOSRV = 0x08
-MON_RUN = 0x10
-MR_READER_BUCKETS = 13
-def initlock(l): return spinlock_init(l,0)
-
-def ownlock(x): return 1
-
-def mutex_enter(m): return mutex_lock(m, PZERO)
-
-def mutex_tryenter(m): return mutex_trylock(m)
-
-def mutex_exit(m): return mutex_unlock(m)
-
-def cv_signal(cv): return sv_signal(cv)
-
-def cv_broadcast(cv): return sv_broadcast(cv)
-
-def cv_destroy(cv): return sv_destroy(cv)
-
-RW_READER = MR_ACCESS
-RW_WRITER = MR_UPDATE
-def rw_exit(r): return mrunlock(r)
-
-def rw_tryupgrade(r): return mrtrypromote(r)
-
-def rw_downgrade(r): return mrdemote(r)
-
-def rw_destroy(r): return mrfree(r)
-
-def RW_WRITE_HELD(r): return ismrlocked(r, MR_UPDATE)
-
-def RW_READ_HELD(r): return ismrlocked(r, MR_ACCESS)
-
-MS_FREE = 0
-MS_UPD = 1
-MS_ACC = 2
-MS_WAITERS = 4
-
-# Included from sys/fcntl.h
-FNDELAY = 0x04
-FAPPEND = 0x08
-FSYNC = 0x10
-FDSYNC = 0x20
-FRSYNC = 0x40
-FNONBLOCK = 0x80
-FASYNC = 0x1000
-FLARGEFILE = 0x2000
-FNONBLK = FNONBLOCK
-FDIRECT = 0x8000
-FBULK = 0x10000
-FDIRENT64 = 0x8000
-FCREAT = 0x0100
-FTRUNC = 0x0200
-FEXCL = 0x0400
-FNOCTTY = 0x0800
-O_RDONLY = 0
-O_WRONLY = 1
-O_RDWR = 2
-O_NDELAY = 0x04
-O_APPEND = 0x08
-O_SYNC = 0x10
-O_DSYNC = 0x20
-O_RSYNC = 0x40
-O_NONBLOCK = 0x80
-O_LARGEFILE = 0x2000
-O_DIRECT = 0x8000
-O_BULK = 0x10000
-O_CREAT = 0x100
-O_TRUNC = 0x200
-O_EXCL = 0x400
-O_NOCTTY = 0x800
-F_DUPFD = 0
-F_GETFD = 1
-F_SETFD = 2
-F_GETFL = 3
-F_SETFL = 4
-F_SETLK = 6
-F_SETLKW = 7
-F_CHKFL = 8
-F_ALLOCSP = 10
-F_FREESP = 11
-F_SETBSDLK = 12
-F_SETBSDLKW = 13
-F_GETLK = 14
-F_CHKLK = 15
-F_CHKLKW = 16
-F_CLNLK = 17
-F_RSETLK = 20
-F_RGETLK = 21
-F_RSETLKW = 22
-F_GETOWN = 23
-F_SETOWN = 24
-F_DIOINFO = 30
-F_FSGETXATTR = 31
-F_FSSETXATTR = 32
-F_GETLK64 = 33
-F_SETLK64 = 34
-F_SETLKW64 = 35
-F_ALLOCSP64 = 36
-F_FREESP64 = 37
-F_GETBMAP = 38
-F_FSSETDM = 39
-F_RESVSP = 40
-F_UNRESVSP = 41
-F_RESVSP64 = 42
-F_UNRESVSP64 = 43
-F_GETBMAPA = 44
-F_FSGETXATTRA = 45
-F_SETBIOSIZE = 46
-F_GETBIOSIZE = 47
-F_GETOPS = 50
-F_DMAPI = 51
-F_FSYNC = 52
-F_FSYNC64 = 53
-F_GETBDSATTR = 54
-F_SETBDSATTR = 55
-F_GETBMAPX = 56
-F_SETPRIO = 57
-F_GETPRIO = 58
-F_RDLCK = 01
-F_WRLCK = 02
-F_UNLCK = 03
-O_ACCMODE = 3
-FD_CLOEXEC = 1
-FD_NODUP_FORK = 4
-BMV_IF_ATTRFORK = 0x1
-BMV_IF_NO_DMAPI_READ = 0x2
-BMV_IF_PREALLOC = 0x4
-BMV_IF_VALID = (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC)
-BMV_OF_PREALLOC = 0x1
-BMV_IF_EXTENDED = 0x40000000
-FMASK = 0x190FF
-FOPEN = 0xFFFFFFFF
-FREAD = 0x01
-FWRITE = 0x02
-FNDELAY = 0x04
-FAPPEND = 0x08
-FSYNC = 0x10
-FDSYNC = 0x20
-FRSYNC = 0x40
-FNONBLOCK = 0x80
-FASYNC = 0x1000
-FNONBLK = FNONBLOCK
-FLARGEFILE = 0x2000
-FDIRECT = 0x8000
-FBULK = 0x10000
-FCREAT = 0x0100
-FTRUNC = 0x0200
-FEXCL = 0x0400
-FNOCTTY = 0x0800
-FINVIS = 0x0100
-FSOCKET = 0x0200
-FINPROGRESS = 0x0400
-FPRIORITY = 0x0800
-FPRIO = 0x4000
-FDIRENT64 = 0x8000
-FCLOSEXEC = 0x01
-LOCK_SH = 1
-LOCK_EX = 2
-LOCK_NB = 4
-LOCK_UN = 8
-L_SET = 0
-L_INCR = 1
-L_XTND = 2
-F_OK = 0
-X_OK = 1
-W_OK = 2
-R_OK = 4
diff --git a/sys/lib/python/plat-irix6/FL.py b/sys/lib/python/plat-irix6/FL.py
deleted file mode 100644
index 727da4c9a..000000000
--- a/sys/lib/python/plat-irix6/FL.py
+++ /dev/null
@@ -1,289 +0,0 @@
-# Constants used by the FORMS library (module fl).
-# This corresponds to "forms.h".
-# Recommended use: import FL; ... FL.NORMAL_BOX ... etc.
-# Alternate use: from FL import *; ... NORMAL_BOX ... etc.
-
-_v20 = 1
-_v21 = 1
-##import fl
-##try:
-## _v20 = (fl.get_rgbmode is not None)
-##except:
-## _v20 = 0
-##del fl
-
-NULL = 0
-FALSE = 0
-TRUE = 1
-
-EVENT = -1
-
-LABEL_SIZE = 64
-if _v20:
- SHORTCUT_SIZE = 32
-PLACE_FREE = 0
-PLACE_SIZE = 1
-PLACE_ASPECT = 2
-PLACE_MOUSE = 3
-PLACE_CENTER = 4
-PLACE_POSITION = 5
-FL_PLACE_FULLSCREEN = 6
-FIND_INPUT = 0
-FIND_AUTOMATIC = 1
-FIND_MOUSE = 2
-BEGIN_GROUP = 10000
-END_GROUP = 20000
-ALIGN_TOP = 0
-ALIGN_BOTTOM = 1
-ALIGN_LEFT = 2
-ALIGN_RIGHT = 3
-ALIGN_CENTER = 4
-NO_BOX = 0
-UP_BOX = 1
-DOWN_BOX = 2
-FLAT_BOX = 3
-BORDER_BOX = 4
-SHADOW_BOX = 5
-FRAME_BOX = 6
-ROUNDED_BOX = 7
-RFLAT_BOX = 8
-RSHADOW_BOX = 9
-TOP_BOUND_COL = 51
-LEFT_BOUND_COL = 55
-BOT_BOUND_COL = 40
-RIGHT_BOUND_COL = 35
-COL1 = 47
-MCOL = 49
-LCOL = 0
-BOUND_WIDTH = 3.0
-DRAW = 0
-PUSH = 1
-RELEASE = 2
-ENTER = 3
-LEAVE = 4
-MOUSE = 5
-FOCUS = 6
-UNFOCUS = 7
-KEYBOARD = 8
-STEP = 9
-MOVE = 10
-FONT_NAME = 'Helvetica'
-FONT_BOLDNAME = 'Helvetica-Bold'
-FONT_ITALICNAME = 'Helvetica-Oblique'
-FONT_FIXEDNAME = 'Courier'
-FONT_ICONNAME = 'Icon'
-SMALL_FONT = 8.0
-NORMAL_FONT = 11.0
-LARGE_FONT = 20.0
-NORMAL_STYLE = 0
-BOLD_STYLE = 1
-ITALIC_STYLE = 2
-FIXED_STYLE = 3
-ENGRAVED_STYLE = 4
-ICON_STYLE = 5
-BITMAP = 3
-NORMAL_BITMAP = 0
-BITMAP_BOXTYPE = NO_BOX
-BITMAP_COL1 = 0
-BITMAP_COL2 = COL1
-BITMAP_LCOL = LCOL
-BITMAP_ALIGN = ALIGN_BOTTOM
-BITMAP_MAXSIZE = 128*128
-BITMAP_BW = BOUND_WIDTH
-BOX = 1
-BOX_BOXTYPE = UP_BOX
-BOX_COL1 = COL1
-BOX_LCOL = LCOL
-BOX_ALIGN = ALIGN_CENTER
-BOX_BW = BOUND_WIDTH
-BROWSER = 71
-NORMAL_BROWSER = 0
-SELECT_BROWSER = 1
-HOLD_BROWSER = 2
-MULTI_BROWSER = 3
-BROWSER_BOXTYPE = DOWN_BOX
-BROWSER_COL1 = COL1
-BROWSER_COL2 = 3
-BROWSER_LCOL = LCOL
-BROWSER_ALIGN = ALIGN_BOTTOM
-BROWSER_SLCOL = COL1
-BROWSER_BW = BOUND_WIDTH
-BROWSER_LINELENGTH = 128
-BROWSER_MAXLINE = 512
-BUTTON = 11
-NORMAL_BUTTON = 0
-PUSH_BUTTON = 1
-RADIO_BUTTON = 2
-HIDDEN_BUTTON = 3
-TOUCH_BUTTON = 4
-INOUT_BUTTON = 5
-RETURN_BUTTON = 6
-if _v20:
- HIDDEN_RET_BUTTON = 7
-BUTTON_BOXTYPE = UP_BOX
-BUTTON_COL1 = COL1
-BUTTON_COL2 = COL1
-BUTTON_LCOL = LCOL
-BUTTON_ALIGN = ALIGN_CENTER
-BUTTON_MCOL1 = MCOL
-BUTTON_MCOL2 = MCOL
-BUTTON_BW = BOUND_WIDTH
-if _v20:
- CHART = 4
- BAR_CHART = 0
- HORBAR_CHART = 1
- LINE_CHART = 2
- FILLED_CHART = 3
- SPIKE_CHART = 4
- PIE_CHART = 5
- SPECIALPIE_CHART = 6
- CHART_BOXTYPE = BORDER_BOX
- CHART_COL1 = COL1
- CHART_LCOL = LCOL
- CHART_ALIGN = ALIGN_BOTTOM
- CHART_BW = BOUND_WIDTH
- CHART_MAX = 128
-CHOICE = 42
-NORMAL_CHOICE = 0
-CHOICE_BOXTYPE = DOWN_BOX
-CHOICE_COL1 = COL1
-CHOICE_COL2 = LCOL
-CHOICE_LCOL = LCOL
-CHOICE_ALIGN = ALIGN_LEFT
-CHOICE_BW = BOUND_WIDTH
-CHOICE_MCOL = MCOL
-CHOICE_MAXITEMS = 128
-CHOICE_MAXSTR = 64
-CLOCK = 61
-SQUARE_CLOCK = 0
-ROUND_CLOCK = 1
-CLOCK_BOXTYPE = UP_BOX
-CLOCK_COL1 = 37
-CLOCK_COL2 = 42
-CLOCK_LCOL = LCOL
-CLOCK_ALIGN = ALIGN_BOTTOM
-CLOCK_TOPCOL = COL1
-CLOCK_BW = BOUND_WIDTH
-COUNTER = 25
-NORMAL_COUNTER = 0
-SIMPLE_COUNTER = 1
-COUNTER_BOXTYPE = UP_BOX
-COUNTER_COL1 = COL1
-COUNTER_COL2 = 4
-COUNTER_LCOL = LCOL
-COUNTER_ALIGN = ALIGN_BOTTOM
-if _v20:
- COUNTER_BW = BOUND_WIDTH
-else:
- DEFAULT = 51
- RETURN_DEFAULT = 0
- ALWAYS_DEFAULT = 1
-DIAL = 22
-NORMAL_DIAL = 0
-LINE_DIAL = 1
-DIAL_BOXTYPE = NO_BOX
-DIAL_COL1 = COL1
-DIAL_COL2 = 37
-DIAL_LCOL = LCOL
-DIAL_ALIGN = ALIGN_BOTTOM
-DIAL_TOPCOL = COL1
-DIAL_BW = BOUND_WIDTH
-FREE = 101
-NORMAL_FREE = 1
-SLEEPING_FREE = 2
-INPUT_FREE = 3
-CONTINUOUS_FREE = 4
-ALL_FREE = 5
-INPUT = 31
-NORMAL_INPUT = 0
-if _v20:
- FLOAT_INPUT = 1
- INT_INPUT = 2
- HIDDEN_INPUT = 3
- if _v21:
- MULTILINE_INPUT = 4
- SECRET_INPUT = 5
-else:
- ALWAYS_INPUT = 1
-INPUT_BOXTYPE = DOWN_BOX
-INPUT_COL1 = 13
-INPUT_COL2 = 5
-INPUT_LCOL = LCOL
-INPUT_ALIGN = ALIGN_LEFT
-INPUT_TCOL = LCOL
-INPUT_CCOL = 4
-INPUT_BW = BOUND_WIDTH
-INPUT_MAX = 128
-LIGHTBUTTON = 12
-LIGHTBUTTON_BOXTYPE = UP_BOX
-LIGHTBUTTON_COL1 = 39
-LIGHTBUTTON_COL2 = 3
-LIGHTBUTTON_LCOL = LCOL
-LIGHTBUTTON_ALIGN = ALIGN_CENTER
-LIGHTBUTTON_TOPCOL = COL1
-LIGHTBUTTON_MCOL = MCOL
-LIGHTBUTTON_BW1 = BOUND_WIDTH
-LIGHTBUTTON_BW2 = BOUND_WIDTH/2.0
-LIGHTBUTTON_MINSIZE = 12.0
-MENU = 41
-TOUCH_MENU = 0
-PUSH_MENU = 1
-MENU_BOXTYPE = BORDER_BOX
-MENU_COL1 = 55
-MENU_COL2 = 37
-MENU_LCOL = LCOL
-MENU_ALIGN = ALIGN_CENTER
-MENU_BW = BOUND_WIDTH
-MENU_MAX = 300
-POSITIONER = 23
-NORMAL_POSITIONER = 0
-POSITIONER_BOXTYPE = DOWN_BOX
-POSITIONER_COL1 = COL1
-POSITIONER_COL2 = 1
-POSITIONER_LCOL = LCOL
-POSITIONER_ALIGN = ALIGN_BOTTOM
-POSITIONER_BW = BOUND_WIDTH
-ROUNDBUTTON = 13
-ROUNDBUTTON_BOXTYPE = NO_BOX
-ROUNDBUTTON_COL1 = 7
-ROUNDBUTTON_COL2 = 3
-ROUNDBUTTON_LCOL = LCOL
-ROUNDBUTTON_ALIGN = ALIGN_CENTER
-ROUNDBUTTON_TOPCOL = COL1
-ROUNDBUTTON_MCOL = MCOL
-ROUNDBUTTON_BW = BOUND_WIDTH
-SLIDER = 21
-VALSLIDER = 24
-VERT_SLIDER = 0
-HOR_SLIDER = 1
-VERT_FILL_SLIDER = 2
-HOR_FILL_SLIDER = 3
-VERT_NICE_SLIDER = 4
-HOR_NICE_SLIDER = 5
-SLIDER_BOXTYPE = DOWN_BOX
-SLIDER_COL1 = COL1
-SLIDER_COL2 = COL1
-SLIDER_LCOL = LCOL
-SLIDER_ALIGN = ALIGN_BOTTOM
-SLIDER_BW1 = BOUND_WIDTH
-SLIDER_BW2 = BOUND_WIDTH*0.75
-SLIDER_FINE = 0.05
-SLIDER_WIDTH = 0.08
-TEXT = 2
-NORMAL_TEXT = 0
-TEXT_BOXTYPE = NO_BOX
-TEXT_COL1 = COL1
-TEXT_LCOL = LCOL
-TEXT_ALIGN = ALIGN_LEFT
-TEXT_BW = BOUND_WIDTH
-TIMER = 62
-NORMAL_TIMER = 0
-VALUE_TIMER = 1
-HIDDEN_TIMER = 2
-TIMER_BOXTYPE = DOWN_BOX
-TIMER_COL1 = COL1
-TIMER_COL2 = 1
-TIMER_LCOL = LCOL
-TIMER_ALIGN = ALIGN_CENTER
-TIMER_BW = BOUND_WIDTH
-TIMER_BLINKRATE = 0.2
diff --git a/sys/lib/python/plat-irix6/GET.py b/sys/lib/python/plat-irix6/GET.py
deleted file mode 100644
index 9c3d7d695..000000000
--- a/sys/lib/python/plat-irix6/GET.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Symbols from <gl/get.h>
-
-BCKBUFFER = 0x1
-FRNTBUFFER = 0x2
-DRAWZBUFFER = 0x4
-DMRGB = 0
-DMSINGLE = 1
-DMDOUBLE = 2
-DMRGBDOUBLE = 5
-HZ30 = 0
-HZ60 = 1
-NTSC = 2
-HDTV = 3
-VGA = 4
-IRIS3K = 5
-PR60 = 6
-PAL = 9
-HZ30_SG = 11
-A343 = 14
-STR_RECT = 15
-VOF0 = 16
-VOF1 = 17
-VOF2 = 18
-VOF3 = 19
-SGI0 = 20
-SGI1 = 21
-SGI2 = 22
-HZ72 = 23
-GL_VIDEO_REG = 0x00800000
-GLV_GENLOCK = 0x00000001
-GLV_UNBLANK = 0x00000002
-GLV_SRED = 0x00000004
-GLV_SGREEN = 0x00000008
-GLV_SBLUE = 0x00000010
-GLV_SALPHA = 0x00000020
-GLV_TTLGENLOCK = 0x00000080
-GLV_TTLSYNC = GLV_TTLGENLOCK
-GLV_GREENGENLOCK = 0x0000100
-LEFTPLANE = 0x0001
-RIGHTPLANE = 0x0002
-BOTTOMPLANE = 0x0004
-TOPPLANE = 0x0008
-NEARPLANE = 0x0010
-FARPLANE = 0x0020
-## GETDEF = __GL_GET_H__
-NOBUFFER = 0x0
-BOTHBUFFERS = 0x3
-DMINTENSITYSINGLE = 3
-DMINTENSITYDOUBLE = 4
-MONSPECIAL = 0x20
-HZ50 = 3
-MONA = 5
-MONB = 6
-MONC = 7
-MOND = 8
-MON_ALL = 12
-MON_GEN_ALL = 13
-CMAPMULTI = 0
-CMAPONE = 1
diff --git a/sys/lib/python/plat-irix6/GL.py b/sys/lib/python/plat-irix6/GL.py
deleted file mode 100644
index 9f02f65f3..000000000
--- a/sys/lib/python/plat-irix6/GL.py
+++ /dev/null
@@ -1,393 +0,0 @@
-NULL = 0
-FALSE = 0
-TRUE = 1
-ATTRIBSTACKDEPTH = 10
-VPSTACKDEPTH = 8
-MATRIXSTACKDEPTH = 32
-NAMESTACKDEPTH = 1025
-STARTTAG = -2
-ENDTAG = -3
-BLACK = 0
-RED = 1
-GREEN = 2
-YELLOW = 3
-BLUE = 4
-MAGENTA = 5
-CYAN = 6
-WHITE = 7
-PUP_CLEAR = 0
-PUP_COLOR = 1
-PUP_BLACK = 2
-PUP_WHITE = 3
-NORMALDRAW = 0x010
-PUPDRAW = 0x020
-OVERDRAW = 0x040
-UNDERDRAW = 0x080
-CURSORDRAW = 0x100
-DUALDRAW = 0x200
-PATTERN_16 = 16
-PATTERN_32 = 32
-PATTERN_64 = 64
-PATTERN_16_SIZE = 16
-PATTERN_32_SIZE = 64
-PATTERN_64_SIZE = 256
-SRC_AUTO = 0
-SRC_FRONT = 1
-SRC_BACK = 2
-SRC_ZBUFFER = 3
-SRC_PUP = 4
-SRC_OVER = 5
-SRC_UNDER = 6
-SRC_FRAMEGRABBER = 7
-BF_ZERO = 0
-BF_ONE = 1
-BF_DC = 2
-BF_SC = 2
-BF_MDC = 3
-BF_MSC = 3
-BF_SA = 4
-BF_MSA = 5
-BF_DA = 6
-BF_MDA = 7
-BF_MIN_SA_MDA = 8
-AF_NEVER = 0
-AF_LESS = 1
-AF_EQUAL = 2
-AF_LEQUAL = 3
-AF_GREATER = 4
-AF_NOTEQUAL = 5
-AF_GEQUAL = 6
-AF_ALWAYS = 7
-ZF_NEVER = 0
-ZF_LESS = 1
-ZF_EQUAL = 2
-ZF_LEQUAL = 3
-ZF_GREATER = 4
-ZF_NOTEQUAL = 5
-ZF_GEQUAL = 6
-ZF_ALWAYS = 7
-ZSRC_DEPTH = 0
-ZSRC_COLOR = 1
-SMP_OFF = 0x0
-SMP_ON = 0x1
-SMP_SMOOTHER = 0x2
-SML_OFF = 0x0
-SML_ON = 0x1
-SML_SMOOTHER = 0x2
-SML_END_CORRECT = 0x4
-PYSM_OFF = 0
-PYSM_ON = 1
-PYSM_SHRINK = 2
-DT_OFF = 0
-DT_ON = 1
-PUP_NONE = 0
-PUP_GREY = 0x1
-PUP_BOX = 0x2
-PUP_CHECK = 0x4
-GLC_OLDPOLYGON = 0
-GLC_ZRANGEMAP = 1
-GLC_MQUEUERATE = 2
-GLC_SOFTATTACH = 3
-GLC_MANAGEBG = 4
-GLC_SLOWMAPCOLORS = 5
-GLC_INPUTCHANGEBUG = 6
-GLC_NOBORDERBUG = 7
-GLC_SET_VSYNC = 8
-GLC_GET_VSYNC = 9
-GLC_VSYNC_SLEEP = 10
-GLC_COMPATRATE = 15
-C16X1 = 0
-C16X2 = 1
-C32X1 = 2
-C32X2 = 3
-CCROSS = 4
-FLAT = 0
-GOURAUD = 1
-LO_ZERO = 0x0
-LO_AND = 0x1
-LO_ANDR = 0x2
-LO_SRC = 0x3
-LO_ANDI = 0x4
-LO_DST = 0x5
-LO_XOR = 0x6
-LO_OR = 0x7
-LO_NOR = 0x8
-LO_XNOR = 0x9
-LO_NDST = 0xa
-LO_ORR = 0xb
-LO_NSRC = 0xc
-LO_ORI = 0xd
-LO_NAND = 0xe
-LO_ONE = 0xf
-INFOCUSSCRN = -2
-ST_KEEP = 0
-ST_ZERO = 1
-ST_REPLACE = 2
-ST_INCR = 3
-ST_DECR = 4
-ST_INVERT = 5
-SF_NEVER = 0
-SF_LESS = 1
-SF_EQUAL = 2
-SF_LEQUAL = 3
-SF_GREATER = 4
-SF_NOTEQUAL = 5
-SF_GEQUAL = 6
-SF_ALWAYS = 7
-SS_OFF = 0
-SS_DEPTH = 1
-PYM_FILL = 1
-PYM_POINT = 2
-PYM_LINE = 3
-PYM_HOLLOW = 4
-PYM_LINE_FAST = 5
-FG_OFF = 0
-FG_ON = 1
-FG_DEFINE = 2
-FG_VTX_EXP = 2
-FG_VTX_LIN = 3
-FG_PIX_EXP = 4
-FG_PIX_LIN = 5
-FG_VTX_EXP2 = 6
-FG_PIX_EXP2 = 7
-PM_SHIFT = 0
-PM_EXPAND = 1
-PM_C0 = 2
-PM_C1 = 3
-PM_ADD24 = 4
-PM_SIZE = 5
-PM_OFFSET = 6
-PM_STRIDE = 7
-PM_TTOB = 8
-PM_RTOL = 9
-PM_ZDATA = 10
-PM_WARP = 11
-PM_RDX = 12
-PM_RDY = 13
-PM_CDX = 14
-PM_CDY = 15
-PM_XSTART = 16
-PM_YSTART = 17
-PM_VO1 = 1000
-NAUTO = 0
-NNORMALIZE = 1
-AC_CLEAR = 0
-AC_ACCUMULATE = 1
-AC_CLEAR_ACCUMULATE = 2
-AC_RETURN = 3
-AC_MULT = 4
-AC_ADD = 5
-CP_OFF = 0
-CP_ON = 1
-CP_DEFINE = 2
-SB_RESET = 0
-SB_TRACK = 1
-SB_HOLD = 2
-RD_FREEZE = 0x00000001
-RD_ALPHAONE = 0x00000002
-RD_IGNORE_UNDERLAY = 0x00000004
-RD_IGNORE_OVERLAY = 0x00000008
-RD_IGNORE_PUP = 0x00000010
-RD_OFFSCREEN = 0x00000020
-GD_XPMAX = 0
-GD_YPMAX = 1
-GD_XMMAX = 2
-GD_YMMAX = 3
-GD_ZMIN = 4
-GD_ZMAX = 5
-GD_BITS_NORM_SNG_RED = 6
-GD_BITS_NORM_SNG_GREEN = 7
-GD_BITS_NORM_SNG_BLUE = 8
-GD_BITS_NORM_DBL_RED = 9
-GD_BITS_NORM_DBL_GREEN = 10
-GD_BITS_NORM_DBL_BLUE = 11
-GD_BITS_NORM_SNG_CMODE = 12
-GD_BITS_NORM_DBL_CMODE = 13
-GD_BITS_NORM_SNG_MMAP = 14
-GD_BITS_NORM_DBL_MMAP = 15
-GD_BITS_NORM_ZBUFFER = 16
-GD_BITS_OVER_SNG_CMODE = 17
-GD_BITS_UNDR_SNG_CMODE = 18
-GD_BITS_PUP_SNG_CMODE = 19
-GD_BITS_NORM_SNG_ALPHA = 21
-GD_BITS_NORM_DBL_ALPHA = 22
-GD_BITS_CURSOR = 23
-GD_OVERUNDER_SHARED = 24
-GD_BLEND = 25
-GD_CIFRACT = 26
-GD_CROSSHAIR_CINDEX = 27
-GD_DITHER = 28
-GD_LINESMOOTH_CMODE = 30
-GD_LINESMOOTH_RGB = 31
-GD_LOGICOP = 33
-GD_NSCRNS = 35
-GD_NURBS_ORDER = 36
-GD_NBLINKS = 37
-GD_NVERTEX_POLY = 39
-GD_PATSIZE_64 = 40
-GD_PNTSMOOTH_CMODE = 41
-GD_PNTSMOOTH_RGB = 42
-GD_PUP_TO_OVERUNDER = 43
-GD_READSOURCE = 44
-GD_READSOURCE_ZBUFFER = 48
-GD_STEREO = 50
-GD_SUBPIXEL_LINE = 51
-GD_SUBPIXEL_PNT = 52
-GD_SUBPIXEL_POLY = 53
-GD_TRIMCURVE_ORDER = 54
-GD_WSYS = 55
-GD_ZDRAW_GEOM = 57
-GD_ZDRAW_PIXELS = 58
-GD_SCRNTYPE = 61
-GD_TEXTPORT = 62
-GD_NMMAPS = 63
-GD_FRAMEGRABBER = 64
-GD_TIMERHZ = 66
-GD_DBBOX = 67
-GD_AFUNCTION = 68
-GD_ALPHA_OVERUNDER = 69
-GD_BITS_ACBUF = 70
-GD_BITS_ACBUF_HW = 71
-GD_BITS_STENCIL = 72
-GD_CLIPPLANES = 73
-GD_FOGVERTEX = 74
-GD_LIGHTING_TWOSIDE = 76
-GD_POLYMODE = 77
-GD_POLYSMOOTH = 78
-GD_SCRBOX = 79
-GD_TEXTURE = 80
-GD_FOGPIXEL = 81
-GD_TEXTURE_PERSP = 82
-GD_MUXPIPES = 83
-GD_NOLIMIT = -2
-GD_WSYS_NONE = 0
-GD_WSYS_4S = 1
-GD_SCRNTYPE_WM = 0
-GD_SCRNTYPE_NOWM = 1
-N_PIXEL_TOLERANCE = 1
-N_CULLING = 2
-N_DISPLAY = 3
-N_ERRORCHECKING = 4
-N_SUBDIVISIONS = 5
-N_S_STEPS = 6
-N_T_STEPS = 7
-N_TILES = 8
-N_TMP1 = 9
-N_TMP2 = 10
-N_TMP3 = 11
-N_TMP4 = 12
-N_TMP5 = 13
-N_TMP6 = 14
-N_FILL = 1.0
-N_OUTLINE_POLY = 2.0
-N_OUTLINE_PATCH = 5.0
-N_ISOLINE_S = 12.0
-N_ST = 0x8
-N_STW = 0xd
-N_XYZ = 0x4c
-N_XYZW = 0x51
-N_TEX = 0x88
-N_TEXW = 0x8d
-N_RGBA = 0xd0
-N_RGBAW = 0xd5
-N_P2D = 0x8
-N_P2DR = 0xd
-N_V3D = 0x4c
-N_V3DR = 0x51
-N_T2D = 0x88
-N_T2DR = 0x8d
-N_C4D = 0xd0
-N_C4DR = 0xd5
-LMNULL = 0.0
-MSINGLE = 0
-MPROJECTION = 1
-MVIEWING = 2
-MTEXTURE = 3
-MAXLIGHTS = 8
-MAXRESTRICTIONS = 4
-DEFMATERIAL = 0
-EMISSION = 1
-AMBIENT = 2
-DIFFUSE = 3
-SPECULAR = 4
-SHININESS = 5
-COLORINDEXES = 6
-ALPHA = 7
-DEFLIGHT = 100
-LCOLOR = 101
-POSITION = 102
-SPOTDIRECTION = 103
-SPOTLIGHT = 104
-DEFLMODEL = 200
-LOCALVIEWER = 201
-ATTENUATION = 202
-ATTENUATION2 = 203
-TWOSIDE = 204
-MATERIAL = 1000
-BACKMATERIAL = 1001
-LIGHT0 = 1100
-LIGHT1 = 1101
-LIGHT2 = 1102
-LIGHT3 = 1103
-LIGHT4 = 1104
-LIGHT5 = 1105
-LIGHT6 = 1106
-LIGHT7 = 1107
-LMODEL = 1200
-LMC_COLOR = 0
-LMC_EMISSION = 1
-LMC_AMBIENT = 2
-LMC_DIFFUSE = 3
-LMC_SPECULAR = 4
-LMC_AD = 5
-LMC_NULL = 6
-TX_MINFILTER = 0x100
-TX_MAGFILTER = 0x200
-TX_WRAP = 0x300
-TX_WRAP_S = 0x310
-TX_WRAP_T = 0x320
-TX_TILE = 0x400
-TX_BORDER = 0x500
-TX_NULL = 0x000
-TX_POINT = 0x110
-TX_BILINEAR = 0x220
-TX_MIPMAP = 0x120
-TX_MIPMAP_POINT = 0x121
-TX_MIPMAP_LINEAR = 0x122
-TX_MIPMAP_BILINEAR = 0x123
-TX_MIPMAP_TRILINEAR = 0x124
-TX_REPEAT = 0x301
-TX_CLAMP = 0x302
-TX_SELECT = 0x303
-TX_TEXTURE_0 = 0
-TV_MODULATE = 0x101
-TV_BLEND = 0x102
-TV_DECAL = 0x103
-TV_COLOR = 0x200
-TV_NULL = 0x000
-TV_ENV0 = 0
-TX_S = 0
-TX_T = 1
-TG_OFF = 0
-TG_ON = 1
-TG_CONTOUR = 2
-TG_LINEAR = 3
-TG_SPHEREMAP = 4
-TG_REFRACTMAP = 5
-DGLSINK = 0
-DGLLOCAL = 1
-DGLTSOCKET = 2
-DGL4DDN = 3
-PUP_CURSOR = PUP_COLOR
-FATAL = 1
-WARNING = 2
-ASK_CONT = 3
-ASK_RESTART = 4
-XMAXSCREEN = 1279
-YMAXSCREEN = 1023
-XMAXMEDIUM = 1023
-YMAXMEDIUM = 767
-XMAX170 = 645
-YMAX170 = 484
-XMAXPAL = 779
-YMAXPAL = 574
diff --git a/sys/lib/python/plat-irix6/GLWS.py b/sys/lib/python/plat-irix6/GLWS.py
deleted file mode 100644
index 69dab7143..000000000
--- a/sys/lib/python/plat-irix6/GLWS.py
+++ /dev/null
@@ -1,12 +0,0 @@
-NOERROR = 0
-NOCONTEXT = -1
-NODISPLAY = -2
-NOWINDOW = -3
-NOGRAPHICS = -4
-NOTTOP = -5
-NOVISUAL = -6
-BUFSIZE = -7
-BADWINDOW = -8
-ALREADYBOUND = -100
-BINDFAILED = -101
-SETFAILED = -102
diff --git a/sys/lib/python/plat-irix6/IN.py b/sys/lib/python/plat-irix6/IN.py
deleted file mode 100644
index 9385bb0e0..000000000
--- a/sys/lib/python/plat-irix6/IN.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from standards.h
-
-# Included from sgidefs.h
-_MIPS_ISA_MIPS1 = 1
-_MIPS_ISA_MIPS2 = 2
-_MIPS_ISA_MIPS3 = 3
-_MIPS_ISA_MIPS4 = 4
-_MIPS_SIM_ABI32 = 1
-_MIPS_SIM_NABI32 = 2
-_MIPS_SIM_ABI64 = 3
-
-# Included from sys/bsd_types.h
-
-# Included from sys/mkdev.h
-ONBITSMAJOR = 7
-ONBITSMINOR = 8
-OMAXMAJ = 0x7f
-OMAXMIN = 0xff
-NBITSMAJOR = 14
-NBITSMINOR = 18
-MAXMAJ = 0x1ff
-MAXMIN = 0x3ffff
-OLDDEV = 0
-NEWDEV = 1
-MKDEV_VER = NEWDEV
-def IS_STRING_SPEC_DEV(x): return ((dev_t)(x)==__makedev(MKDEV_VER, 0, 0))
-
-def major(dev): return __major(MKDEV_VER, dev)
-
-def minor(dev): return __minor(MKDEV_VER, dev)
-
-
-# Included from sys/select.h
-FD_SETSIZE = 1024
-__NBBY = 8
-
-# Included from string.h
-NULL = 0L
-NBBY = 8
-
-# Included from sys/endian.h
-LITTLE_ENDIAN = 1234
-BIG_ENDIAN = 4321
-PDP_ENDIAN = 3412
-_LITTLE_ENDIAN = 1234
-_BIG_ENDIAN = 4321
-_PDP_ENDIAN = 3412
-_BYTE_ORDER = _BIG_ENDIAN
-_BYTE_ORDER = _LITTLE_ENDIAN
-def ntohl(x): return (x)
-
-def ntohs(x): return (x)
-
-def htonl(x): return (x)
-
-def htons(x): return (x)
-
-def htonl(x): return ntohl(x)
-
-def htons(x): return ntohs(x)
-
-
-# Included from sys/types.h
-
-# Included from sys/pthread.h
-P_MYID = (-1)
-P_MYHOSTID = (-1)
-
-# Included from sys/cpumask.h
-MAXCPU = 128
-def CPUMASK_INDEX(bit): return ((bit) >> 6)
-
-def CPUMASK_SHFT(bit): return ((bit) & 0x3f)
-
-def CPUMASK_IS_ZERO(p): return ((p) == 0)
-
-def CPUMASK_IS_NONZERO(p): return ((p) != 0)
-
-
-# Included from sys/nodemask.h
-def CNODEMASK_IS_ZERO(p): return ((p) == 0)
-
-def CNODEMASK_IS_NONZERO(p): return ((p) != 0)
-
-IPPROTO_IP = 0
-IPPROTO_HOPOPTS = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPIP = 4
-IPPROTO_ENCAP = IPPROTO_IPIP
-IPPROTO_ST = 5
-IPPROTO_TCP = 6
-IPPROTO_UCL = 7
-IPPROTO_EGP = 8
-IPPROTO_IGP = 9
-IPPROTO_BBN_RCC_MON = 10
-IPPROTO_NVP_II = 11
-IPPROTO_PUP = 12
-IPPROTO_ARGUS = 13
-IPPROTO_EMCON = 14
-IPPROTO_XNET = 15
-IPPROTO_CHAOS = 16
-IPPROTO_UDP = 17
-IPPROTO_MUX = 18
-IPPROTO_DCN_MEAS = 19
-IPPROTO_HMP = 20
-IPPROTO_PRM = 21
-IPPROTO_IDP = 22
-IPPROTO_TRUNK_1 = 23
-IPPROTO_TRUNK_2 = 24
-IPPROTO_LEAF_1 = 25
-IPPROTO_LEAF_2 = 26
-IPPROTO_RDP = 27
-IPPROTO_IRTP = 28
-IPPROTO_TP = 29
-IPPROTO_NETBLT = 30
-IPPROTO_MFE_NSP = 31
-IPPROTO_MERIT_INP = 32
-IPPROTO_SEP = 33
-IPPROTO_3PC = 34
-IPPROTO_IDPR = 35
-IPPROTO_XTP = 36
-IPPROTO_DDP = 37
-IPPROTO_IDPR_CMTP = 38
-IPPROTO_TPPP = 39
-IPPROTO_IL = 40
-IPPROTO_IPV6 = 41
-IPPROTO_ROUTING = 43
-IPPROTO_FRAGMENT = 44
-IPPROTO_RSVP = 46
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_ICMPV6 = 58
-IPPROTO_NONE = 59
-IPPROTO_DSTOPTS = 60
-IPPROTO_CFTP = 62
-IPPROTO_HELLO = 63
-IPPROTO_SAT_EXPAK = 64
-IPPROTO_KRYPTOLAN = 65
-IPPROTO_RVD = 66
-IPPROTO_IPPC = 67
-IPPROTO_SAT_MON = 69
-IPPROTO_VISA = 70
-IPPROTO_IPCV = 71
-IPPROTO_CPNX = 72
-IPPROTO_CPHB = 73
-IPPROTO_WSN = 74
-IPPROTO_PVP = 75
-IPPROTO_BR_SAT_MON = 76
-IPPROTO_ND = 77
-IPPROTO_WB_MON = 78
-IPPROTO_WB_EXPAK = 79
-IPPROTO_EON = 80
-IPPROTO_VMTP = 81
-IPPROTO_SECURE_VMTP = 82
-IPPROTO_VINES = 83
-IPPROTO_TTP = 84
-IPPROTO_NSFNET_IGP = 85
-IPPROTO_DGP = 86
-IPPROTO_TCF = 87
-IPPROTO_IGRP = 88
-IPPROTO_OSPF = 89
-IPPROTO_SPRITE_RPC = 90
-IPPROTO_LARP = 91
-IPPROTO_MTP = 92
-IPPROTO_AX25 = 93
-IPPROTO_SWIPE = 94
-IPPROTO_MICP = 95
-IPPROTO_AES_SP3_D = 96
-IPPROTO_ETHERIP = 97
-IPPROTO_ENCAPHDR = 98
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPROTO_STP = 257
-IPPORT_RESERVED = 1024
-IPPORT_MAXPORT = 65535
-INET_ADDRSTRLEN = 16
-INET6_ADDRSTRLEN = 46
-def IN_CLASSA(i): return (((__int32_t)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((__int32_t)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((__int32_t)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((__int32_t)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((__int32_t)(i) & 0xf0000000) == 0xf0000000)
-
-def IN_BADCLASS(i): return (((__int32_t)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_NONE = 0xffffffff
-IN_LOOPBACKNET = 127
-IPNGVERSION = 6
-IPV6_FLOWINFO_FLOWLABEL = 0x00ffffff
-IPV6_FLOWINFO_PRIORITY = 0x0f000000
-IPV6_FLOWINFO_PRIFLOW = 0x0fffffff
-IPV6_FLOWINFO_SRFLAG = 0x10000000
-IPV6_FLOWINFO_VERSION = 0xf0000000
-IPV6_PRIORITY_UNCHARACTERIZED = 0x00000000
-IPV6_PRIORITY_FILLER = 0x01000000
-IPV6_PRIORITY_UNATTENDED = 0x02000000
-IPV6_PRIORITY_RESERVED1 = 0x03000000
-IPV6_PRIORITY_BULK = 0x04000000
-IPV6_PRIORITY_RESERVED2 = 0x05000000
-IPV6_PRIORITY_INTERACTIVE = 0x06000000
-IPV6_PRIORITY_CONTROL = 0x07000000
-IPV6_PRIORITY_8 = 0x08000000
-IPV6_PRIORITY_9 = 0x09000000
-IPV6_PRIORITY_10 = 0x0a000000
-IPV6_PRIORITY_11 = 0x0b000000
-IPV6_PRIORITY_12 = 0x0c000000
-IPV6_PRIORITY_13 = 0x0d000000
-IPV6_PRIORITY_14 = 0x0e000000
-IPV6_PRIORITY_15 = 0x0f000000
-IPV6_SRFLAG_STRICT = 0x10000000
-IPV6_SRFLAG_LOOSE = 0x00000000
-IPV6_VERSION = 0x60000000
-IPV6_FLOWINFO_FLOWLABEL = 0xffffff00
-IPV6_FLOWINFO_PRIORITY = 0x0000000f
-IPV6_FLOWINFO_PRIFLOW = 0xffffff0f
-IPV6_FLOWINFO_SRFLAG = 0x00000010
-IPV6_FLOWINFO_VERSION = 0x000000f0
-IPV6_PRIORITY_UNCHARACTERIZED = 0x00000000
-IPV6_PRIORITY_FILLER = 0x00000001
-IPV6_PRIORITY_UNATTENDED = 0x00000002
-IPV6_PRIORITY_RESERVED1 = 0x00000003
-IPV6_PRIORITY_BULK = 0x00000004
-IPV6_PRIORITY_RESERVED2 = 0x00000005
-IPV6_PRIORITY_INTERACTIVE = 0x00000006
-IPV6_PRIORITY_CONTROL = 0x00000007
-IPV6_PRIORITY_8 = 0x00000008
-IPV6_PRIORITY_9 = 0x00000009
-IPV6_PRIORITY_10 = 0x0000000a
-IPV6_PRIORITY_11 = 0x0000000b
-IPV6_PRIORITY_12 = 0x0000000c
-IPV6_PRIORITY_13 = 0x0000000d
-IPV6_PRIORITY_14 = 0x0000000e
-IPV6_PRIORITY_15 = 0x0000000f
-IPV6_SRFLAG_STRICT = 0x00000010
-IPV6_SRFLAG_LOOSE = 0x00000000
-IPV6_VERSION = 0x00000060
-def IPV6_GET_FLOWLABEL(x): return (ntohl(x) & 0x00ffffff)
-
-def IPV6_GET_PRIORITY(x): return ((ntohl(x) >> 24) & 0xf)
-
-def IPV6_GET_VERSION(x): return ((ntohl(x) >> 28) & 0xf)
-
-def IPV6_SET_FLOWLABEL(x): return (htonl(x) & IPV6_FLOWINFO_FLOWLABEL)
-
-def IPV6_SET_PRIORITY(x): return (htonl((x & 0xf) << 24))
-
-def CLR_ADDR6(a): return \
-
-def IS_ANYSOCKADDR(a): return \
-
-def IS_ANYADDR6(a): return \
-
-def IS_COMPATSOCKADDR(a): return \
-
-def IS_COMPATADDR6(a): return \
-
-def IS_LOOPSOCKADDR(a): return \
-
-def IS_LOOPADDR6(a): return \
-
-def IS_IPV4SOCKADDR(a): return \
-
-def IS_IPV4ADDR6(a): return \
-
-def IS_LOOPSOCKADDR(a): return \
-
-def IS_LOOPADDR6(a): return \
-
-def IS_IPV4SOCKADDR(a): return \
-
-def IS_IPV4ADDR6(a): return \
-
-def IS_LOCALADDR6(a): return ((a).s6_addr8[0] == 0xfe)
-
-def IS_LINKLADDR6(a): return \
-
-def IS_SITELADDR6(a): return \
-
-def IS_MULTIADDR6(a): return ((a).s6_addr8[0] == 0xff)
-
-def MADDR6_FLAGS(a): return ((a).s6_addr8[1] >> 4)
-
-MADDR6_FLG_WK = 0
-MADDR6_FLG_TS = 1
-def MADDR6_SCOPE(a): return ((a).s6_addr8[1] & 0x0f)
-
-MADDR6_SCP_NODE = 0x1
-MADDR6_SCP_LINK = 0x2
-MADDR6_SCP_SITE = 0x5
-MADDR6_SCP_ORG = 0x8
-MADDR6_SCP_GLO = 0xe
-MADDR6_ALLNODES = 1
-MADDR6_ALLROUTERS = 2
-MADDR6_ALLHOSTS = 3
-def IN6_IS_ADDR_UNSPECIFIED(p): return IS_ANYADDR6(*p)
-
-def IN6_IS_ADDR_LOOPBACK(p): return IS_LOOPADDR6(*p)
-
-def IN6_IS_ADDR_MULTICAST(p): return IS_MULTIADDR6(*p)
-
-def IN6_IS_ADDR_LINKLOCAL(p): return IS_LINKLADDR6(*p)
-
-def IN6_IS_ADDR_SITELOCAL(p): return IS_SITELADDR6(*p)
-
-def IN6_IS_ADDR_V4MAPPED(p): return IS_IPV4ADDR6(*p)
-
-def IN6_IS_ADDR_V4COMPAT(p): return IS_COMPATADDR6(*p)
-
-def IN6_IS_ADDR_MC_NODELOCAL(p): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(p): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(p): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(p): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(p): return \
-
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 20
-IP_MULTICAST_TTL = 21
-IP_MULTICAST_LOOP = 22
-IP_ADD_MEMBERSHIP = 23
-IP_DROP_MEMBERSHIP = 24
-IP_MULTICAST_VIF = 25
-IP_RSVP_VIF_ON = 26
-IP_RSVP_VIF_OFF = 27
-IP_RSVP_ON = 28
-IP_SENDSRCADDR = 36
-IPV6_UNICAST_HOPS = IP_TTL
-IPV6_MULTICAST_IF = IP_MULTICAST_IF
-IPV6_MULTICAST_HOPS = IP_MULTICAST_TTL
-IPV6_MULTICAST_LOOP = IP_MULTICAST_LOOP
-IPV6_ADD_MEMBERSHIP = IP_ADD_MEMBERSHIP
-IPV6_DROP_MEMBERSHIP = IP_DROP_MEMBERSHIP
-IPV6_SENDIF = 40
-IPV6_NOPROBE = 42
-IPV6_RECVPKTINFO = 43
-IPV6_PKTINFO = 44
-IP_RECVTTL = 45
-IPV6_RECVHOPS = IP_RECVTTL
-IPV6_CHECKSUM = 46
-ICMP6_FILTER = 47
-IPV6_HOPLIMIT = 48
-IPV6_HOPOPTS = 49
-IPV6_DSTOPTS = 50
-IPV6_RTHDR = 51
-IPV6_PKTOPTIONS = 52
-IPV6_NEXTHOP = 53
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IPV6_RTHDR_LOOSE = 0
-IPV6_RTHDR_STRICT = 1
-IPV6_RTHDR_TYPE_0 = 0
diff --git a/sys/lib/python/plat-irix6/IOCTL.py b/sys/lib/python/plat-irix6/IOCTL.py
deleted file mode 100644
index cec3c3f6a..000000000
--- a/sys/lib/python/plat-irix6/IOCTL.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# These lines were mostly generated by h2py.py (see demo/scripts)
-# from <sys/ioctl.h>, <sys/termio.h> and <termios.h> on Irix 4.0.2
-# with some manual changes to cope with imperfections in h2py.py.
-# The applicability on other systems is not clear; especially non-SYSV
-# systems may have a totally different set of ioctls.
-
-IOCTYPE = 0xff00
-LIOC = (ord('l')<<8)
-LIOCGETP = (LIOC|1)
-LIOCSETP = (LIOC|2)
-LIOCGETS = (LIOC|5)
-LIOCSETS = (LIOC|6)
-DIOC = (ord('d')<<8)
-DIOCGETC = (DIOC|1)
-DIOCGETB = (DIOC|2)
-DIOCSETE = (DIOC|3)
-IOCPARM_MASK = 0x7f
-IOC_VOID = 0x20000000
-IOC_OUT = 0x40000000
-IOC_IN = 0x80000000
-IOC_INOUT = (IOC_IN|IOC_OUT)
-int = 'i'
-short = 'h'
-long = 'l'
-def sizeof(t): import struct; return struct.calcsize(t)
-def _IO(x,y): return (IOC_VOID|((x)<<8)|y)
-def _IOR(x,y,t): return (IOC_OUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
-def _IOW(x,y,t): return (IOC_IN|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
-# this should be _IORW, but stdio got there first
-def _IOWR(x,y,t): return (IOC_INOUT|((sizeof(t)&IOCPARM_MASK)<<16)|((x)<<8)|y)
-FIONREAD = _IOR(ord('f'), 127, int)
-FIONBIO = _IOW(ord('f'), 126, int)
-FIOASYNC = _IOW(ord('f'), 125, int)
-FIOSETOWN = _IOW(ord('f'), 124, int)
-FIOGETOWN = _IOR(ord('f'), 123, int)
-NCC = 8
-NCC_PAD = 7
-NCC_EXT = 16
-NCCS = (NCC+NCC_PAD+NCC_EXT)
-VINTR = 0
-VQUIT = 1
-VERASE = 2
-VKILL = 3
-VEOF = 4
-VEOL = 5
-VEOL2 = 6
-VMIN = VEOF
-VTIME = VEOL
-VSWTCH = 7
-VLNEXT = (NCC+NCC_PAD+0)
-VWERASE = (NCC+NCC_PAD+1)
-VRPRNT = (NCC+NCC_PAD+2)
-VFLUSHO = (NCC+NCC_PAD+3)
-VSTOP = (NCC+NCC_PAD+4)
-VSTART = (NCC+NCC_PAD+5)
-CNUL = '\0'
-CDEL = '\377'
-CESC = '\\'
-CINTR = '\177'
-CQUIT = '\34'
-CBRK = '\377'
-def CTRL(c): return ord(c) & 0x0f
-CERASE = CTRL('H')
-CKILL = CTRL('U')
-CEOF = CTRL('d')
-CEOT = CEOF
-CSTART = CTRL('q')
-CSTOP = CTRL('s')
-CSWTCH = CTRL('z')
-CSUSP = CSWTCH
-CNSWTCH = 0
-CLNEXT = CTRL('v')
-CWERASE = CTRL('w')
-CFLUSHO = CTRL('o')
-CFLUSH = CFLUSHO
-CRPRNT = CTRL('r')
-CDSUSP = CTRL('y')
-IGNBRK = 0000001
-BRKINT = 0000002
-IGNPAR = 0000004
-PARMRK = 0000010
-INPCK = 0000020
-ISTRIP = 0000040
-INLCR = 0000100
-IGNCR = 0000200
-ICRNL = 0000400
-IUCLC = 0001000
-IXON = 0002000
-IXANY = 0004000
-IXOFF = 0010000
-IBLKMD = 0020000
-OPOST = 0000001
-OLCUC = 0000002
-ONLCR = 0000004
-OCRNL = 0000010
-ONOCR = 0000020
-ONLRET = 0000040
-OFILL = 0000100
-OFDEL = 0000200
-NLDLY = 0000400
-NL0 = 0
-NL1 = 0000400
-CRDLY = 0003000
-CR0 = 0
-CR1 = 0001000
-CR2 = 0002000
-CR3 = 0003000
-TABDLY = 0014000
-TAB0 = 0
-TAB1 = 0004000
-TAB2 = 0010000
-TAB3 = 0014000
-BSDLY = 0020000
-BS0 = 0
-BS1 = 0020000
-VTDLY = 0040000
-VT0 = 0
-VT1 = 0040000
-FFDLY = 0100000
-FF0 = 0
-FF1 = 0100000
-CBAUD = 0000017
-B0 = 0
-B50 = 0000001
-B75 = 0000002
-B110 = 0000003
-B134 = 0000004
-B150 = 0000005
-B200 = 0000006
-B300 = 0000007
-B600 = 0000010
-B1200 = 0000011
-B1800 = 0000012
-B2400 = 0000013
-B4800 = 0000014
-B9600 = 0000015
-B19200 = 0000016
-EXTA = 0000016
-B38400 = 0000017
-EXTB = 0000017
-CSIZE = 0000060
-CS5 = 0
-CS6 = 0000020
-CS7 = 0000040
-CS8 = 0000060
-CSTOPB = 0000100
-CREAD = 0000200
-PARENB = 0000400
-PARODD = 0001000
-HUPCL = 0002000
-CLOCAL = 0004000
-LOBLK = 0040000
-ISIG = 0000001
-ICANON = 0000002
-XCASE = 0000004
-ECHO = 0000010
-ECHOE = 0000020
-ECHOK = 0000040
-ECHONL = 0000100
-NOFLSH = 0000200
-IIEXTEN = 0000400
-ITOSTOP = 0001000
-SSPEED = B9600
-IOCTYPE = 0xff00
-TIOC = (ord('T')<<8)
-oTCGETA = (TIOC|1)
-oTCSETA = (TIOC|2)
-oTCSETAW = (TIOC|3)
-oTCSETAF = (TIOC|4)
-TCSBRK = (TIOC|5)
-TCXONC = (TIOC|6)
-TCFLSH = (TIOC|7)
-TCGETA = (TIOC|8)
-TCSETA = (TIOC|9)
-TCSETAW = (TIOC|10)
-TCSETAF = (TIOC|11)
-TIOCFLUSH = (TIOC|12)
-TCDSET = (TIOC|32)
-TCBLKMD = (TIOC|33)
-TIOCPKT = (TIOC|112)
-TIOCPKT_DATA = 0x00
-TIOCPKT_FLUSHREAD = 0x01
-TIOCPKT_FLUSHWRITE = 0x02
-TIOCPKT_NOSTOP = 0x10
-TIOCPKT_DOSTOP = 0x20
-TIOCNOTTY = (TIOC|113)
-TIOCSTI = (TIOC|114)
-TIOCSPGRP = _IOW(ord('t'), 118, int)
-TIOCGPGRP = _IOR(ord('t'), 119, int)
-TIOCCONS = _IOW(ord('t'), 120, int)
-struct_winsize = 'hhhh'
-TIOCGWINSZ = _IOR(ord('t'), 104, struct_winsize)
-TIOCSWINSZ = _IOW(ord('t'), 103, struct_winsize)
-TFIOC = (ord('F')<<8)
-oFIONREAD = (TFIOC|127)
-LDIOC = (ord('D')<<8)
-LDOPEN = (LDIOC|0)
-LDCLOSE = (LDIOC|1)
-LDCHG = (LDIOC|2)
-LDGETT = (LDIOC|8)
-LDSETT = (LDIOC|9)
-TERM_NONE = 0
-TERM_TEC = 1
-TERM_V61 = 2
-TERM_V10 = 3
-TERM_TEX = 4
-TERM_D40 = 5
-TERM_H45 = 6
-TERM_D42 = 7
-TM_NONE = 0000
-TM_SNL = 0001
-TM_ANL = 0002
-TM_LCF = 0004
-TM_CECHO = 0010
-TM_CINVIS = 0020
-TM_SET = 0200
-LDISC0 = 0
-LDISC1 = 1
-NTTYDISC = LDISC1
-VSUSP = VSWTCH
-TCSANOW = 0
-TCSADRAIN = 1
-TCSAFLUSH = 2
-TCIFLUSH = 0
-TCOFLUSH = 1
-TCIOFLUSH = 2
-TCOOFF = 0
-TCOON = 1
-TCIOFF = 2
-TCION = 3
-TO_STOP = LOBLK
-IEXTEN = IIEXTEN
-TOSTOP = ITOSTOP
diff --git a/sys/lib/python/plat-irix6/SV.py b/sys/lib/python/plat-irix6/SV.py
deleted file mode 100644
index db8efe52d..000000000
--- a/sys/lib/python/plat-irix6/SV.py
+++ /dev/null
@@ -1,120 +0,0 @@
-NTSC_XMAX = 640
-NTSC_YMAX = 480
-PAL_XMAX = 768
-PAL_YMAX = 576
-BLANKING_BUFFER_SIZE = 2
-
-MAX_SOURCES = 2
-
-# mode parameter for Bind calls
-IN_OFF = 0 # No Video
-IN_OVER = 1 # Video over graphics
-IN_UNDER = 2 # Video under graphics
-IN_REPLACE = 3 # Video replaces entire win
-
-# mode parameters for LoadMap calls. Specifies buffer, always 256 entries
-INPUT_COLORMAP = 0 # tuples of 8-bit RGB
-CHROMA_KEY_MAP = 1 # tuples of 8-bit RGB
-COLOR_SPACE_MAP = 2 # tuples of 8-bit RGB
-GAMMA_MAP = 3 # tuples of 24-bit red values
-
-# mode parameters for UseExclusive calls
-INPUT = 0
-OUTPUT = 1
-IN_OUT = 2
-
-# Format constants for the capture routines
-RGB8_FRAMES = 0 # noninterleaved 8 bit 3:2:3 RBG fields
-RGB32_FRAMES = 1 # 32-bit 8:8:8 RGB frames
-YUV411_FRAMES = 2 # interleaved, 8:2:2 YUV format
-YUV411_FRAMES_AND_BLANKING_BUFFER = 3
-
-#
-# sv.SetParam is passed variable length argument lists,
-# consisting of <name, value> pairs. The following
-# constants identify argument names.
-#
-_NAME_BASE = 1000
-SOURCE = (_NAME_BASE + 0)
-SOURCE1 = 0
-SOURCE2 = 1
-SOURCE3 = 2
-COLOR = (_NAME_BASE + 1)
-DEFAULT_COLOR = 0
-USER_COLOR = 1
-MONO = 2
-OUTPUTMODE = (_NAME_BASE + 2)
-LIVE_OUTPUT = 0
-STILL24_OUT = 1
-FREEZE = (_NAME_BASE + 3)
-DITHER = (_NAME_BASE + 4)
-OUTPUT_FILTER = (_NAME_BASE + 5)
-HUE = (_NAME_BASE + 6)
-GENLOCK = (_NAME_BASE + 7)
-GENLOCK_OFF = 0
-GENLOCK_ON = 1
-GENLOCK_HOUSE = 2
-BROADCAST = (_NAME_BASE + 8)
-NTSC = 0
-PAL = 1
-VIDEO_MODE = (_NAME_BASE + 9)
-COMP = 0
-SVIDEO = 1
-INPUT_BYPASS = (_NAME_BASE + 10)
-FIELDDROP = (_NAME_BASE + 11)
-SLAVE = (_NAME_BASE + 12)
-APERTURE_FACTOR = (_NAME_BASE + 13)
-AFACTOR_0 = 0
-AFACTOR_QTR = 1
-AFACTOR_HLF = 2
-AFACTOR_ONE = 3
-CORING = (_NAME_BASE + 14)
-COR_OFF = 0
-COR_1LSB = 1
-COR_2LSB = 2
-COR_3LSB = 3
-APERTURE_BANDPASS = (_NAME_BASE + 15)
-ABAND_F0 = 0
-ABAND_F1 = 1
-ABAND_F2 = 2
-ABAND_F3 = 3
-PREFILTER = (_NAME_BASE + 16)
-CHROMA_TRAP = (_NAME_BASE + 17)
-CK_THRESHOLD = (_NAME_BASE + 18)
-PAL_SENSITIVITY = (_NAME_BASE + 19)
-GAIN_CONTROL = (_NAME_BASE + 20)
-GAIN_SLOW = 0
-GAIN_MEDIUM = 1
-GAIN_FAST = 2
-GAIN_FROZEN = 3
-AUTO_CKILL = (_NAME_BASE + 21)
-VTR_MODE = (_NAME_BASE + 22)
-VTR_INPUT = 0
-CAMERA_INPUT = 1
-LUMA_DELAY = (_NAME_BASE + 23)
-VNOISE = (_NAME_BASE + 24)
-VNOISE_NORMAL = 0
-VNOISE_SEARCH = 1
-VNOISE_AUTO = 2
-VNOISE_BYPASS = 3
-CHCV_PAL = (_NAME_BASE + 25)
-CHCV_NTSC = (_NAME_BASE + 26)
-CCIR_LEVELS = (_NAME_BASE + 27)
-STD_CHROMA = (_NAME_BASE + 28)
-DENC_VTBYPASS = (_NAME_BASE + 29)
-FAST_TIMECONSTANT = (_NAME_BASE + 30)
-GENLOCK_DELAY = (_NAME_BASE + 31)
-PHASE_SYNC = (_NAME_BASE + 32)
-VIDEO_OUTPUT = (_NAME_BASE + 33)
-CHROMA_PHASEOUT = (_NAME_BASE + 34)
-CHROMA_CENTER = (_NAME_BASE + 35)
-YUV_TO_RGB_INVERT = (_NAME_BASE + 36)
-SOURCE1_BROADCAST = (_NAME_BASE + 37)
-SOURCE1_MODE = (_NAME_BASE + 38)
-SOURCE2_BROADCAST = (_NAME_BASE + 39)
-SOURCE2_MODE = (_NAME_BASE + 40)
-SOURCE3_BROADCAST = (_NAME_BASE + 41)
-SOURCE3_MODE = (_NAME_BASE + 42)
-SIGNAL_STD = (_NAME_BASE + 43)
-NOSIGNAL = 2
-SIGNAL_COLOR = (_NAME_BASE + 44)
diff --git a/sys/lib/python/plat-irix6/WAIT.py b/sys/lib/python/plat-irix6/WAIT.py
deleted file mode 100644
index 741af3b99..000000000
--- a/sys/lib/python/plat-irix6/WAIT.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# Generated by h2py from /usr/include/sys/wait.h
-
-# Included from standards.h
-def _W_INT(i): return (i)
-
-WUNTRACED = 0004
-WNOHANG = 0100
-_WSTOPPED = 0177
-def WIFEXITED(stat): return ((_W_INT(stat)&0377)==0)
-
-def WEXITSTATUS(stat): return ((_W_INT(stat)>>8)&0377)
-
-def WTERMSIG(stat): return (_W_INT(stat)&0177)
-
-def WSTOPSIG(stat): return ((_W_INT(stat)>>8)&0377)
-
-WEXITED = 0001
-WTRAPPED = 0002
-WSTOPPED = 0004
-WCONTINUED = 0010
-WNOWAIT = 0200
-WOPTMASK = (WEXITED|WTRAPPED|WSTOPPED|WCONTINUED|WNOHANG|WNOWAIT)
-WSTOPFLG = 0177
-WCONTFLG = 0177777
-WCOREFLAG = 0200
-WSIGMASK = 0177
-def WWORD(stat): return (_W_INT(stat)&0177777)
-
-def WIFCONTINUED(stat): return (WWORD(stat)==WCONTFLG)
-
-def WCOREDUMP(stat): return (_W_INT(stat) & WCOREFLAG)
-
-
-# Included from sys/types.h
-
-# Included from sgidefs.h
-_MIPS_ISA_MIPS1 = 1
-_MIPS_ISA_MIPS2 = 2
-_MIPS_ISA_MIPS3 = 3
-_MIPS_ISA_MIPS4 = 4
-_MIPS_SIM_ABI32 = 1
-_MIPS_SIM_NABI32 = 2
-_MIPS_SIM_ABI64 = 3
-P_MYID = (-1)
-P_MYHOSTID = (-1)
-
-# Included from sys/bsd_types.h
-
-# Included from sys/mkdev.h
-ONBITSMAJOR = 7
-ONBITSMINOR = 8
-OMAXMAJ = 0x7f
-OMAXMIN = 0xff
-NBITSMAJOR = 14
-NBITSMINOR = 18
-MAXMAJ = 0x1ff
-MAXMIN = 0x3ffff
-OLDDEV = 0
-NEWDEV = 1
-MKDEV_VER = NEWDEV
-def major(dev): return __major(MKDEV_VER, dev)
-
-def minor(dev): return __minor(MKDEV_VER, dev)
-
-
-# Included from sys/select.h
-FD_SETSIZE = 1024
-__NBBY = 8
-
-# Included from string.h
-NULL = 0L
-NBBY = 8
-
-# Included from sys/procset.h
-P_INITPID = 1
-P_INITUID = 0
-P_INITPGID = 0
-
-# Included from sys/signal.h
-SIGHUP = 1
-SIGINT = 2
-SIGQUIT = 3
-SIGILL = 4
-SIGTRAP = 5
-SIGIOT = 6
-SIGABRT = 6
-SIGEMT = 7
-SIGFPE = 8
-SIGKILL = 9
-SIGBUS = 10
-SIGSEGV = 11
-SIGSYS = 12
-SIGPIPE = 13
-SIGALRM = 14
-SIGTERM = 15
-SIGUSR1 = 16
-SIGUSR2 = 17
-SIGCLD = 18
-SIGCHLD = 18
-SIGPWR = 19
-SIGWINCH = 20
-SIGURG = 21
-SIGPOLL = 22
-SIGIO = 22
-SIGSTOP = 23
-SIGTSTP = 24
-SIGCONT = 25
-SIGTTIN = 26
-SIGTTOU = 27
-SIGVTALRM = 28
-SIGPROF = 29
-SIGXCPU = 30
-SIGXFSZ = 31
-SIG32 = 32
-SIGCKPT = 33
-SIGRTMIN = 49
-SIGRTMAX = 64
-SIGPTINTR = 47
-SIGPTRESCHED = 48
-__sigargs = int
-SIGEV_NONE = 128
-SIGEV_SIGNAL = 129
-SIGEV_CALLBACK = 130
-
-# Included from sys/siginfo.h
-ILL_ILLOPC = 1
-ILL_ILLOPN = 2
-ILL_ILLADR = 3
-ILL_ILLTRP = 4
-ILL_PRVOPC = 5
-ILL_PRVREG = 6
-ILL_COPROC = 7
-ILL_BADSTK = 8
-NSIGILL = 8
-FPE_INTDIV = 1
-FPE_INTOVF = 2
-FPE_FLTDIV = 3
-FPE_FLTOVF = 4
-FPE_FLTUND = 5
-FPE_FLTRES = 6
-FPE_FLTINV = 7
-FPE_FLTSUB = 8
-NSIGFPE = 8
-SEGV_MAPERR = 1
-SEGV_ACCERR = 2
-NSIGSEGV = 2
-BUS_ADRALN = 1
-BUS_ADRERR = 2
-BUS_OBJERR = 3
-NSIGBUS = 3
-TRAP_BRKPT = 1
-TRAP_TRACE = 2
-NSIGTRAP = 2
-CLD_EXITED = 1
-CLD_KILLED = 2
-CLD_DUMPED = 3
-CLD_TRAPPED = 4
-CLD_STOPPED = 5
-CLD_CONTINUED = 6
-NSIGCLD = 6
-POLL_IN = 1
-POLL_OUT = 2
-POLL_MSG = 3
-POLL_ERR = 4
-POLL_PRI = 5
-POLL_HUP = 6
-NSIGPOLL = 6
-SI_MAXSZ = 128
-SI_USER = 0
-SI_KILL = SI_USER
-SI_QUEUE = -1
-SI_ASYNCIO = -2
-SI_TIMER = -3
-SI_MESGQ = -4
-SIG_NOP = 0
-SIG_BLOCK = 1
-SIG_UNBLOCK = 2
-SIG_SETMASK = 3
-SIG_SETMASK32 = 256
-SA_ONSTACK = 0x00000001
-SA_RESETHAND = 0x00000002
-SA_RESTART = 0x00000004
-SA_SIGINFO = 0x00000008
-SA_NODEFER = 0x00000010
-SA_NOCLDWAIT = 0x00010000
-SA_NOCLDSTOP = 0x00020000
-_SA_BSDCALL = 0x10000000
-MINSIGSTKSZ = 512
-SIGSTKSZ = 8192
-SS_ONSTACK = 0x00000001
-SS_DISABLE = 0x00000002
-
-# Included from sys/ucontext.h
-NGREG = 36
-NGREG = 37
-GETCONTEXT = 0
-SETCONTEXT = 1
-UC_SIGMASK = 001
-UC_STACK = 002
-UC_CPU = 004
-UC_MAU = 010
-UC_MCONTEXT = (UC_CPU|UC_MAU)
-UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
-CTX_R0 = 0
-CTX_AT = 1
-CTX_V0 = 2
-CTX_V1 = 3
-CTX_A0 = 4
-CTX_A1 = 5
-CTX_A2 = 6
-CTX_A3 = 7
-CTX_T0 = 8
-CTX_T1 = 9
-CTX_T2 = 10
-CTX_T3 = 11
-CTX_T4 = 12
-CTX_T5 = 13
-CTX_T6 = 14
-CTX_T7 = 15
-CTX_A4 = 8
-CTX_A5 = 9
-CTX_A6 = 10
-CTX_A7 = 11
-CTX_T0 = 12
-CTX_T1 = 13
-CTX_T2 = 14
-CTX_T3 = 15
-CTX_S0 = 16
-CTX_S1 = 17
-CTX_S2 = 18
-CTX_S3 = 19
-CTX_S4 = 20
-CTX_S5 = 21
-CTX_S6 = 22
-CTX_S7 = 23
-CTX_T8 = 24
-CTX_T9 = 25
-CTX_K0 = 26
-CTX_K1 = 27
-CTX_GP = 28
-CTX_SP = 29
-CTX_S8 = 30
-CTX_RA = 31
-CTX_MDLO = 32
-CTX_MDHI = 33
-CTX_CAUSE = 34
-CTX_EPC = 35
-CTX_SR = 36
-CXT_R0 = CTX_R0
-CXT_AT = CTX_AT
-CXT_V0 = CTX_V0
-CXT_V1 = CTX_V1
-CXT_A0 = CTX_A0
-CXT_A1 = CTX_A1
-CXT_A2 = CTX_A2
-CXT_A3 = CTX_A3
-CXT_T0 = CTX_T0
-CXT_T1 = CTX_T1
-CXT_T2 = CTX_T2
-CXT_T3 = CTX_T3
-CXT_T4 = CTX_T4
-CXT_T5 = CTX_T5
-CXT_T6 = CTX_T6
-CXT_T7 = CTX_T7
-CXT_S0 = CTX_S0
-CXT_S1 = CTX_S1
-CXT_S2 = CTX_S2
-CXT_S3 = CTX_S3
-CXT_S4 = CTX_S4
-CXT_S5 = CTX_S5
-CXT_S6 = CTX_S6
-CXT_S7 = CTX_S7
-CXT_T8 = CTX_T8
-CXT_T9 = CTX_T9
-CXT_K0 = CTX_K0
-CXT_K1 = CTX_K1
-CXT_GP = CTX_GP
-CXT_SP = CTX_SP
-CXT_S8 = CTX_S8
-CXT_RA = CTX_RA
-CXT_MDLO = CTX_MDLO
-CXT_MDHI = CTX_MDHI
-CXT_CAUSE = CTX_CAUSE
-CXT_EPC = CTX_EPC
-CXT_SR = CTX_SR
-SV_ONSTACK = 0x0001
-SV_INTERRUPT = 0x0002
-NUMBSDSIGS = (32)
-def sigmask(sig): return (1L << ((sig)-1))
-
-def sigmask(sig): return (1L << ((sig)-1))
-
-SIG_ERR = (-1)
-SIG_IGN = (1)
-SIG_HOLD = (2)
-SIG_DFL = (0)
-NSIG = 65
-MAXSIG = (NSIG-1)
-NUMSIGS = (NSIG-1)
-BRK_USERBP = 0
-BRK_KERNELBP = 1
-BRK_ABORT = 2
-BRK_BD_TAKEN = 3
-BRK_BD_NOTTAKEN = 4
-BRK_SSTEPBP = 5
-BRK_OVERFLOW = 6
-BRK_DIVZERO = 7
-BRK_RANGE = 8
-BRK_PSEUDO_OP_BIT = 0x80
-BRK_PSEUDO_OP_MAX = 0x3
-BRK_CACHE_SYNC = 0x80
-BRK_SWASH_FLUSH = 0x81
-BRK_SWASH_SWTCH = 0x82
-BRK_MULOVF = 1023
-
-# Included from sys/resource.h
-PRIO_MIN = -20
-PRIO_MAX = 20
-PRIO_PROCESS = 0
-PRIO_PGRP = 1
-PRIO_USER = 2
-RUSAGE_SELF = 0
-RUSAGE_CHILDREN = -1
-RLIMIT_CPU = 0
-RLIMIT_FSIZE = 1
-RLIMIT_DATA = 2
-RLIMIT_STACK = 3
-RLIMIT_CORE = 4
-RLIMIT_NOFILE = 5
-RLIMIT_VMEM = 6
-RLIMIT_RSS = 7
-RLIMIT_AS = RLIMIT_VMEM
-RLIM_NLIMITS = 8
-RLIM32_INFINITY = 0x7fffffff
-RLIM_INFINITY = 0x7fffffff
diff --git a/sys/lib/python/plat-irix6/cddb.py b/sys/lib/python/plat-irix6/cddb.py
deleted file mode 100644
index 45883054f..000000000
--- a/sys/lib/python/plat-irix6/cddb.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# This file implements a class which forms an interface to the .cddb
-# directory that is maintained by SGI's cdman program.
-#
-# Usage is as follows:
-#
-# import readcd
-# r = readcd.Readcd()
-# c = Cddb(r.gettrackinfo())
-#
-# Now you can use c.artist, c.title and c.track[trackno] (where trackno
-# starts at 1). When the CD is not recognized, all values will be the empty
-# string.
-# It is also possible to set the above mentioned variables to new values.
-# You can then use c.write() to write out the changed values to the
-# .cdplayerrc file.
-
-import string, posix, os
-
-_cddbrc = '.cddb'
-_DB_ID_NTRACKS = 5
-_dbid_map = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ@_=+abcdefghijklmnopqrstuvwxyz'
-def _dbid(v):
- if v >= len(_dbid_map):
- return string.zfill(v, 2)
- else:
- return _dbid_map[v]
-
-def tochash(toc):
- if type(toc) == type(''):
- tracklist = []
- for i in range(2, len(toc), 4):
- tracklist.append((None,
- (int(toc[i:i+2]),
- int(toc[i+2:i+4]))))
- else:
- tracklist = toc
- ntracks = len(tracklist)
- hash = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
- if ntracks <= _DB_ID_NTRACKS:
- nidtracks = ntracks
- else:
- nidtracks = _DB_ID_NTRACKS - 1
- min = 0
- sec = 0
- for track in tracklist:
- start, length = track
- min = min + length[0]
- sec = sec + length[1]
- min = min + sec / 60
- sec = sec % 60
- hash = hash + _dbid(min) + _dbid(sec)
- for i in range(nidtracks):
- start, length = tracklist[i]
- hash = hash + _dbid(length[0]) + _dbid(length[1])
- return hash
-
-class Cddb:
- def __init__(self, tracklist):
- if os.environ.has_key('CDDB_PATH'):
- path = os.environ['CDDB_PATH']
- cddb_path = path.split(',')
- else:
- home = os.environ['HOME']
- cddb_path = [home + '/' + _cddbrc]
-
- self._get_id(tracklist)
-
- for dir in cddb_path:
- file = dir + '/' + self.id + '.rdb'
- try:
- f = open(file, 'r')
- self.file = file
- break
- except IOError:
- pass
- ntracks = int(self.id[:2], 16)
- self.artist = ''
- self.title = ''
- self.track = [None] + [''] * ntracks
- self.trackartist = [None] + [''] * ntracks
- self.notes = []
- if not hasattr(self, 'file'):
- return
- import re
- reg = re.compile(r'^([^.]*)\.([^:]*):[\t ]+(.*)')
- while 1:
- line = f.readline()
- if not line:
- break
- match = reg.match(line)
- if not match:
- print 'syntax error in ' + file
- continue
- name1, name2, value = match.group(1, 2, 3)
- if name1 == 'album':
- if name2 == 'artist':
- self.artist = value
- elif name2 == 'title':
- self.title = value
- elif name2 == 'toc':
- if not self.toc:
- self.toc = value
- if self.toc != value:
- print 'toc\'s don\'t match'
- elif name2 == 'notes':
- self.notes.append(value)
- elif name1[:5] == 'track':
- try:
- trackno = int(name1[5:])
- except ValueError:
- print 'syntax error in ' + file
- continue
- if trackno > ntracks:
- print 'track number %r in file %s out of range' % (trackno, file)
- continue
- if name2 == 'title':
- self.track[trackno] = value
- elif name2 == 'artist':
- self.trackartist[trackno] = value
- f.close()
- for i in range(2, len(self.track)):
- track = self.track[i]
- # if track title starts with `,', use initial part
- # of previous track's title
- if track and track[0] == ',':
- try:
- off = self.track[i - 1].index(',')
- except ValueError:
- pass
- else:
- self.track[i] = self.track[i-1][:off] \
- + track
-
- def _get_id(self, tracklist):
- # fill in self.id and self.toc.
- # if the argument is a string ending in .rdb, the part
- # upto the suffix is taken as the id.
- if type(tracklist) == type(''):
- if tracklist[-4:] == '.rdb':
- self.id = tracklist[:-4]
- self.toc = ''
- return
- t = []
- for i in range(2, len(tracklist), 4):
- t.append((None, \
- (int(tracklist[i:i+2]), \
- int(tracklist[i+2:i+4]))))
- tracklist = t
- ntracks = len(tracklist)
- self.id = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
- if ntracks <= _DB_ID_NTRACKS:
- nidtracks = ntracks
- else:
- nidtracks = _DB_ID_NTRACKS - 1
- min = 0
- sec = 0
- for track in tracklist:
- start, length = track
- min = min + length[0]
- sec = sec + length[1]
- min = min + sec / 60
- sec = sec % 60
- self.id = self.id + _dbid(min) + _dbid(sec)
- for i in range(nidtracks):
- start, length = tracklist[i]
- self.id = self.id + _dbid(length[0]) + _dbid(length[1])
- self.toc = string.zfill(ntracks, 2)
- for track in tracklist:
- start, length = track
- self.toc = self.toc + string.zfill(length[0], 2) + \
- string.zfill(length[1], 2)
-
- def write(self):
- import posixpath
- if os.environ.has_key('CDDB_WRITE_DIR'):
- dir = os.environ['CDDB_WRITE_DIR']
- else:
- dir = os.environ['HOME'] + '/' + _cddbrc
- file = dir + '/' + self.id + '.rdb'
- if posixpath.exists(file):
- # make backup copy
- posix.rename(file, file + '~')
- f = open(file, 'w')
- f.write('album.title:\t' + self.title + '\n')
- f.write('album.artist:\t' + self.artist + '\n')
- f.write('album.toc:\t' + self.toc + '\n')
- for note in self.notes:
- f.write('album.notes:\t' + note + '\n')
- prevpref = None
- for i in range(1, len(self.track)):
- if self.trackartist[i]:
- f.write('track%r.artist:\t%s\n' % (i, self.trackartist[i]))
- track = self.track[i]
- try:
- off = track.index(',')
- except ValueError:
- prevpref = None
- else:
- if prevpref and track[:off] == prevpref:
- track = track[off:]
- else:
- prevpref = track[:off]
- f.write('track%r.title:\t%s\n' % (i, track))
- f.close()
diff --git a/sys/lib/python/plat-irix6/cdplayer.py b/sys/lib/python/plat-irix6/cdplayer.py
deleted file mode 100644
index d4bc7328a..000000000
--- a/sys/lib/python/plat-irix6/cdplayer.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# This file implements a class which forms an interface to the .cdplayerrc
-# file that is maintained by SGI's cdplayer program.
-#
-# Usage is as follows:
-#
-# import readcd
-# r = readcd.Readcd()
-# c = Cdplayer(r.gettrackinfo())
-#
-# Now you can use c.artist, c.title and c.track[trackno] (where trackno
-# starts at 1). When the CD is not recognized, all values will be the empty
-# string.
-# It is also possible to set the above mentioned variables to new values.
-# You can then use c.write() to write out the changed values to the
-# .cdplayerrc file.
-
-cdplayerrc = '.cdplayerrc'
-
-class Cdplayer:
- def __init__(self, tracklist):
- import string
- self.artist = ''
- self.title = ''
- if type(tracklist) == type(''):
- t = []
- for i in range(2, len(tracklist), 4):
- t.append((None, \
- (int(tracklist[i:i+2]), \
- int(tracklist[i+2:i+4]))))
- tracklist = t
- self.track = [None] + [''] * len(tracklist)
- self.id = 'd' + string.zfill(len(tracklist), 2)
- for track in tracklist:
- start, length = track
- self.id = self.id + string.zfill(length[0], 2) + \
- string.zfill(length[1], 2)
- try:
- import posix
- f = open(posix.environ['HOME'] + '/' + cdplayerrc, 'r')
- except IOError:
- return
- import re
- reg = re.compile(r'^([^:]*):\t(.*)')
- s = self.id + '.'
- l = len(s)
- while 1:
- line = f.readline()
- if line == '':
- break
- if line[:l] == s:
- line = line[l:]
- match = reg.match(line)
- if not match:
- print 'syntax error in ~/' + cdplayerrc
- continue
- name, value = match.group(1, 2)
- if name == 'title':
- self.title = value
- elif name == 'artist':
- self.artist = value
- elif name[:5] == 'track':
- trackno = int(name[6:])
- self.track[trackno] = value
- f.close()
-
- def write(self):
- import posix
- filename = posix.environ['HOME'] + '/' + cdplayerrc
- try:
- old = open(filename, 'r')
- except IOError:
- old = open('/dev/null', 'r')
- new = open(filename + '.new', 'w')
- s = self.id + '.'
- l = len(s)
- while 1:
- line = old.readline()
- if line == '':
- break
- if line[:l] != s:
- new.write(line)
- new.write(self.id + '.title:\t' + self.title + '\n')
- new.write(self.id + '.artist:\t' + self.artist + '\n')
- for i in range(1, len(self.track)):
- new.write('%s.track.%r:\t%s\n' % (i, track))
- old.close()
- new.close()
- posix.rename(filename + '.new', filename)
diff --git a/sys/lib/python/plat-irix6/flp.doc b/sys/lib/python/plat-irix6/flp.doc
deleted file mode 100644
index 1a2f374ae..000000000
--- a/sys/lib/python/plat-irix6/flp.doc
+++ /dev/null
@@ -1,117 +0,0 @@
-.SH
-Module flp
-.LP
-The flp module loads fl-forms from fd files, as generated
-by fdesign. The module is designed to be flexible enough to allow
-almost anything to be done with the loaded form.
-.LP
-Loadform defines
-two types of functions: functions to parse fd files and functions to
-create the forms from the templates returned by the parse functions.
-There are fairly low-level create functions that create single objects,
-and convenience routines that create complete forms, including callbacks,
-etc.
-.LP
-The exception flp.error is raised whenever an error occurs while parsing a forms
-definition file or creating a form.
-.SH 2
-Parsing functions
-.LP
-There are two parsing functions, parse_form() and parse_forms(). They
-take the following form:
-.LP
-.ft C
-ftuple = parse_form(filename, formname)
-.br
-ftdict = parse_forms(filename)
-.IP
-Parse_form parses a single form, and returns a tuple (ftmp, otmplist).
-Ftmp is a template for a form, otmplist is a list of templates for
-objects. See below for a description of these templates.
-.IP
-Parse_forms parses all forms in an fd file. It returns a dictionary of
-(ftmp, otmplist) tuples, indexed by formname.
-.IP
-Filename is the name of the forms definition file to inspect. The functions
-appends '.fd' if needed, and use 'sys.path' to locate the file.
-.IP
-formname is the name of the form to load. This argument is mandatory,
-even if the file only contains one form.
-.LP
-The form template and object template are structures that contain all
-the information read from the fd file, in 'natural' form. A form
-template record contains the following fields:
-.IP
-.nf
-"Name", the name of the form;
-"Width", the width of the form;
-"Height", the height of the form; and
-"Numberofobjects", the number of objects in the form.
-.LP
-An object template contains the following fields:
-.IP
-.nf
-"Class", the class of object (eg. FL.BUTTON);
-"Type", the sub-class (eg. FL.NORMALBUTTON);
-"Box", a list with four members: [x, y, width, height];
-"Boxtype", the type of box (eg. FL.DOWNBOX);
-"Colors", a list with the two object colors;
-"Alignment", the label alignment (eg. FL.ALIGNLEFT);
-"Style", the label style (eg. FL.BOLDSTYLE);
-"Lcol", the label color;
-"Label", a string containing the label;
-"Name", a string containing the name of the object;
-"Callback", a string containing the callback routine name; and
-"Argument", a string containing the callback routine extra argument.
-.SH
-Low-level create routines.
-.LP
-The three low-level creation routines are called as follows:
-.LP
-.ft C
-form = create_form(form_template)
-.IP
-Create an fl form from a form template. Returns the form created.
-.LP
-.ft C
-obj = create_object(form, obj_template)
-.IP
-Create an object in an fl form. Return the new object.
-An error is raised if the object has a callback routine.
-.SH
-High-level create routines.
-.LP
-The 'standard' way to handle forms in python is to define a class
-that contains the form and all the objects (insofar as they are named),
-and that defines all the callback functions, and use an instance of
-this class to handle the form interaction.
-Flp contains three routines that simplify handling this paradigm:
-.LP
-.ft C
-create_full_form(instance, ftuple)
-.IP
-This routine takes an instance of your form-handling class and an
-ftuple (as returned by the parsing routines) as parameters. It inserts
-the form into the instance, defines all object names and arranges that
-the callback methods are called. All the names inserted into the
-instance are the same as the names used for the objects, etc. in the
-fd file.
-.LP
-.ft C
-merge_full_form(instance, form, ftuple)
-.IP
-This function does the same as create_full_form, only it does not create
-the form itself nor the 'background box' that fdesign automatically
-adds to each form. This is useful if your class inherits a superclass
-that already defines a skeleton form (with 'OK' and 'Cancel' buttons,
-for instance), and you want to merge the new form into that existing
-form. The 'form' parameter is the form to which the new objects are
-added.
-.LP
-If you use the paradigm sketched here but need slightly more control
-over object creation there is a routine that creates a single object
-and inserts its name (and arranges for the callback routine to be
-called):
-.LP
-.ft C
-create_object_instance(instance, form, obj_template)
diff --git a/sys/lib/python/plat-irix6/flp.py b/sys/lib/python/plat-irix6/flp.py
deleted file mode 100644
index f745472a7..000000000
--- a/sys/lib/python/plat-irix6/flp.py
+++ /dev/null
@@ -1,450 +0,0 @@
-#
-# flp - Module to load fl forms from fd files
-#
-# Jack Jansen, December 1991
-#
-import os
-import sys
-import FL
-
-SPLITLINE = '--------------------'
-FORMLINE = '=============== FORM ==============='
-ENDLINE = '=============================='
-
-class error(Exception):
- pass
-
-##################################################################
-# Part 1 - The parsing routines #
-##################################################################
-
-#
-# Externally visible function. Load form.
-#
-def parse_form(filename, formname):
- forms = checkcache(filename)
- if forms is None:
- forms = parse_forms(filename)
- if forms.has_key(formname):
- return forms[formname]
- else:
- raise error, 'No such form in fd file'
-
-#
-# Externally visible function. Load all forms.
-#
-def parse_forms(filename):
- forms = checkcache(filename)
- if forms is not None: return forms
- fp = _open_formfile(filename)
- nforms = _parse_fd_header(fp)
- forms = {}
- for i in range(nforms):
- form = _parse_fd_form(fp, None)
- forms[form[0].Name] = form
- writecache(filename, forms)
- return forms
-
-#
-# Internal: see if a cached version of the file exists
-#
-MAGIC = '.fdc'
-_internal_cache = {} # Used by frozen scripts only
-def checkcache(filename):
- if _internal_cache.has_key(filename):
- altforms = _internal_cache[filename]
- return _unpack_cache(altforms)
- import marshal
- fp, filename = _open_formfile2(filename)
- fp.close()
- cachename = filename + 'c'
- try:
- fp = open(cachename, 'r')
- except IOError:
- #print 'flp: no cache file', cachename
- return None
- try:
- if fp.read(4) != MAGIC:
- print 'flp: bad magic word in cache file', cachename
- return None
- cache_mtime = rdlong(fp)
- file_mtime = getmtime(filename)
- if cache_mtime != file_mtime:
- #print 'flp: outdated cache file', cachename
- return None
- #print 'flp: valid cache file', cachename
- altforms = marshal.load(fp)
- return _unpack_cache(altforms)
- finally:
- fp.close()
-
-def _unpack_cache(altforms):
- forms = {}
- for name in altforms.keys():
- altobj, altlist = altforms[name]
- obj = _newobj()
- obj.make(altobj)
- list = []
- for altobj in altlist:
- nobj = _newobj()
- nobj.make(altobj)
- list.append(nobj)
- forms[name] = obj, list
- return forms
-
-def rdlong(fp):
- s = fp.read(4)
- if len(s) != 4: return None
- a, b, c, d = s[0], s[1], s[2], s[3]
- return ord(a)<<24 | ord(b)<<16 | ord(c)<<8 | ord(d)
-
-def wrlong(fp, x):
- a, b, c, d = (x>>24)&0xff, (x>>16)&0xff, (x>>8)&0xff, x&0xff
- fp.write(chr(a) + chr(b) + chr(c) + chr(d))
-
-def getmtime(filename):
- import os
- from stat import ST_MTIME
- try:
- return os.stat(filename)[ST_MTIME]
- except os.error:
- return None
-
-#
-# Internal: write cached version of the form (parsing is too slow!)
-#
-def writecache(filename, forms):
- import marshal
- fp, filename = _open_formfile2(filename)
- fp.close()
- cachename = filename + 'c'
- try:
- fp = open(cachename, 'w')
- except IOError:
- print 'flp: can\'t create cache file', cachename
- return # Never mind
- fp.write('\0\0\0\0') # Seek back and write MAGIC when done
- wrlong(fp, getmtime(filename))
- altforms = _pack_cache(forms)
- marshal.dump(altforms, fp)
- fp.seek(0)
- fp.write(MAGIC)
- fp.close()
- #print 'flp: wrote cache file', cachename
-
-#
-# External: print some statements that set up the internal cache.
-# This is for use with the "freeze" script. You should call
-# flp.freeze(filename) for all forms used by the script, and collect
-# the output on a file in a module file named "frozenforms.py". Then
-# in the main program of the script import frozenforms.
-# (Don't forget to take this out when using the unfrozen version of
-# the script!)
-#
-def freeze(filename):
- forms = parse_forms(filename)
- altforms = _pack_cache(forms)
- print 'import flp'
- print 'flp._internal_cache[', repr(filename), '] =', altforms
-
-#
-# Internal: create the data structure to be placed in the cache
-#
-def _pack_cache(forms):
- altforms = {}
- for name in forms.keys():
- obj, list = forms[name]
- altobj = obj.__dict__
- altlist = []
- for obj in list: altlist.append(obj.__dict__)
- altforms[name] = altobj, altlist
- return altforms
-
-#
-# Internal: Locate form file (using PYTHONPATH) and open file
-#
-def _open_formfile(filename):
- return _open_formfile2(filename)[0]
-
-def _open_formfile2(filename):
- if filename[-3:] != '.fd':
- filename = filename + '.fd'
- if filename[0] == '/':
- try:
- fp = open(filename,'r')
- except IOError:
- fp = None
- else:
- for pc in sys.path:
- pn = os.path.join(pc, filename)
- try:
- fp = open(pn, 'r')
- filename = pn
- break
- except IOError:
- fp = None
- if fp is None:
- raise error, 'Cannot find forms file ' + filename
- return fp, filename
-
-#
-# Internal: parse the fd file header, return number of forms
-#
-def _parse_fd_header(file):
- # First read the magic header line
- datum = _parse_1_line(file)
- if datum != ('Magic', 12321):
- raise error, 'Not a forms definition file'
- # Now skip until we know number of forms
- while 1:
- datum = _parse_1_line(file)
- if type(datum) == type(()) and datum[0] == 'Numberofforms':
- break
- return datum[1]
-#
-# Internal: parse fd form, or skip if name doesn't match.
-# the special value None means 'always parse it'.
-#
-def _parse_fd_form(file, name):
- datum = _parse_1_line(file)
- if datum != FORMLINE:
- raise error, 'Missing === FORM === line'
- form = _parse_object(file)
- if form.Name == name or name is None:
- objs = []
- for j in range(form.Numberofobjects):
- obj = _parse_object(file)
- objs.append(obj)
- return (form, objs)
- else:
- for j in range(form.Numberofobjects):
- _skip_object(file)
- return None
-
-#
-# Internal class: a convenient place to store object info fields
-#
-class _newobj:
- def add(self, name, value):
- self.__dict__[name] = value
- def make(self, dict):
- for name in dict.keys():
- self.add(name, dict[name])
-
-#
-# Internal parsing routines.
-#
-def _parse_string(str):
- if '\\' in str:
- s = '\'' + str + '\''
- try:
- return eval(s)
- except:
- pass
- return str
-
-def _parse_num(str):
- return eval(str)
-
-def _parse_numlist(str):
- slist = str.split()
- nlist = []
- for i in slist:
- nlist.append(_parse_num(i))
- return nlist
-
-# This dictionary maps item names to parsing routines.
-# If no routine is given '_parse_num' is default.
-_parse_func = { \
- 'Name': _parse_string, \
- 'Box': _parse_numlist, \
- 'Colors': _parse_numlist, \
- 'Label': _parse_string, \
- 'Name': _parse_string, \
- 'Callback': _parse_string, \
- 'Argument': _parse_string }
-
-# This function parses a line, and returns either
-# a string or a tuple (name,value)
-
-import re
-prog = re.compile('^([^:]*): *(.*)')
-
-def _parse_line(line):
- match = prog.match(line)
- if not match:
- return line
- name, value = match.group(1, 2)
- if name[0] == 'N':
- name = ''.join(name.split())
- name = name.lower()
- name = name.capitalize()
- try:
- pf = _parse_func[name]
- except KeyError:
- pf = _parse_num
- value = pf(value)
- return (name, value)
-
-def _readline(file):
- line = file.readline()
- if not line:
- raise EOFError
- return line[:-1]
-
-def _parse_1_line(file):
- line = _readline(file)
- while line == '':
- line = _readline(file)
- return _parse_line(line)
-
-def _skip_object(file):
- line = ''
- while not line in (SPLITLINE, FORMLINE, ENDLINE):
- pos = file.tell()
- line = _readline(file)
- if line == FORMLINE:
- file.seek(pos)
-
-def _parse_object(file):
- obj = _newobj()
- while 1:
- pos = file.tell()
- datum = _parse_1_line(file)
- if datum in (SPLITLINE, FORMLINE, ENDLINE):
- if datum == FORMLINE:
- file.seek(pos)
- return obj
- if type(datum) is not type(()) or len(datum) != 2:
- raise error, 'Parse error, illegal line in object: '+datum
- obj.add(datum[0], datum[1])
-
-#################################################################
-# Part 2 - High-level object/form creation routines #
-#################################################################
-
-#
-# External - Create a form an link to an instance variable.
-#
-def create_full_form(inst, (fdata, odatalist)):
- form = create_form(fdata)
- exec 'inst.'+fdata.Name+' = form\n'
- for odata in odatalist:
- create_object_instance(inst, form, odata)
-
-#
-# External - Merge a form into an existing form in an instance
-# variable.
-#
-def merge_full_form(inst, form, (fdata, odatalist)):
- exec 'inst.'+fdata.Name+' = form\n'
- if odatalist[0].Class != FL.BOX:
- raise error, 'merge_full_form() expects FL.BOX as first obj'
- for odata in odatalist[1:]:
- create_object_instance(inst, form, odata)
-
-
-#################################################################
-# Part 3 - Low-level object/form creation routines #
-#################################################################
-
-#
-# External Create_form - Create form from parameters
-#
-def create_form(fdata):
- import fl
- return fl.make_form(FL.NO_BOX, fdata.Width, fdata.Height)
-
-#
-# External create_object - Create an object. Make sure there are
-# no callbacks. Returns the object created.
-#
-def create_object(form, odata):
- obj = _create_object(form, odata)
- if odata.Callback:
- raise error, 'Creating free object with callback'
- return obj
-#
-# External create_object_instance - Create object in an instance.
-#
-def create_object_instance(inst, form, odata):
- obj = _create_object(form, odata)
- if odata.Callback:
- cbfunc = eval('inst.'+odata.Callback)
- obj.set_call_back(cbfunc, odata.Argument)
- if odata.Name:
- exec 'inst.' + odata.Name + ' = obj\n'
-#
-# Internal _create_object: Create the object and fill options
-#
-def _create_object(form, odata):
- crfunc = _select_crfunc(form, odata.Class)
- obj = crfunc(odata.Type, odata.Box[0], odata.Box[1], odata.Box[2], \
- odata.Box[3], odata.Label)
- if not odata.Class in (FL.BEGIN_GROUP, FL.END_GROUP):
- obj.boxtype = odata.Boxtype
- obj.col1 = odata.Colors[0]
- obj.col2 = odata.Colors[1]
- obj.align = odata.Alignment
- obj.lstyle = odata.Style
- obj.lsize = odata.Size
- obj.lcol = odata.Lcol
- return obj
-#
-# Internal crfunc: helper function that returns correct create function
-#
-def _select_crfunc(fm, cl):
- if cl == FL.BEGIN_GROUP: return fm.bgn_group
- elif cl == FL.END_GROUP: return fm.end_group
- elif cl == FL.BITMAP: return fm.add_bitmap
- elif cl == FL.BOX: return fm.add_box
- elif cl == FL.BROWSER: return fm.add_browser
- elif cl == FL.BUTTON: return fm.add_button
- elif cl == FL.CHART: return fm.add_chart
- elif cl == FL.CHOICE: return fm.add_choice
- elif cl == FL.CLOCK: return fm.add_clock
- elif cl == FL.COUNTER: return fm.add_counter
- elif cl == FL.DIAL: return fm.add_dial
- elif cl == FL.FREE: return fm.add_free
- elif cl == FL.INPUT: return fm.add_input
- elif cl == FL.LIGHTBUTTON: return fm.add_lightbutton
- elif cl == FL.MENU: return fm.add_menu
- elif cl == FL.POSITIONER: return fm.add_positioner
- elif cl == FL.ROUNDBUTTON: return fm.add_roundbutton
- elif cl == FL.SLIDER: return fm.add_slider
- elif cl == FL.VALSLIDER: return fm.add_valslider
- elif cl == FL.TEXT: return fm.add_text
- elif cl == FL.TIMER: return fm.add_timer
- else:
- raise error, 'Unknown object type: %r' % (cl,)
-
-
-def test():
- import time
- t0 = time.time()
- if len(sys.argv) == 2:
- forms = parse_forms(sys.argv[1])
- t1 = time.time()
- print 'parse time:', 0.001*(t1-t0), 'sec.'
- keys = forms.keys()
- keys.sort()
- for i in keys:
- _printform(forms[i])
- elif len(sys.argv) == 3:
- form = parse_form(sys.argv[1], sys.argv[2])
- t1 = time.time()
- print 'parse time:', round(t1-t0, 3), 'sec.'
- _printform(form)
- else:
- print 'Usage: test fdfile [form]'
-
-def _printform(form):
- f = form[0]
- objs = form[1]
- print 'Form ', f.Name, ', size: ', f.Width, f.Height, ' Nobj ', f.Numberofobjects
- for i in objs:
- print ' Obj ', i.Name, ' type ', i.Class, i.Type
- print ' Box ', i.Box, ' btype ', i.Boxtype
- print ' Label ', i.Label, ' size/style/col/align ', i.Size,i.Style, i.Lcol, i.Alignment
- print ' cols ', i.Colors
- print ' cback ', i.Callback, i.Argument
diff --git a/sys/lib/python/plat-irix6/jpeg.py b/sys/lib/python/plat-irix6/jpeg.py
deleted file mode 100644
index 0b52031aa..000000000
--- a/sys/lib/python/plat-irix6/jpeg.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Implement 'jpeg' interface using SGI's compression library
-
-# XXX Options 'smooth' and 'optimize' are ignored.
-
-# XXX It appears that compressing grayscale images doesn't work right;
-# XXX the resulting file causes weirdness.
-
-class error(Exception):
- pass
-
-options = {'quality': 75, 'optimize': 0, 'smooth': 0, 'forcegray': 0}
-
-comp = None
-decomp = None
-
-def compress(imgdata, width, height, bytesperpixel):
- global comp
- import cl
- if comp is None: comp = cl.OpenCompressor(cl.JPEG)
- if bytesperpixel == 1:
- format = cl.GRAYSCALE
- elif bytesperpixel == 4:
- format = cl.RGBX
- if options['forcegray']:
- iformat = cl.GRAYSCALE
- else:
- iformat = cl.YUV
- # XXX How to support 'optimize'?
- params = [cl.IMAGE_WIDTH, width, cl.IMAGE_HEIGHT, height,
- cl.ORIGINAL_FORMAT, format,
- cl.ORIENTATION, cl.BOTTOM_UP,
- cl.QUALITY_FACTOR, options['quality'],
- cl.INTERNAL_FORMAT, iformat,
- ]
- comp.SetParams(params)
- jpegdata = comp.Compress(1, imgdata)
- return jpegdata
-
-def decompress(jpegdata):
- global decomp
- import cl
- if decomp is None: decomp = cl.OpenDecompressor(cl.JPEG)
- headersize = decomp.ReadHeader(jpegdata)
- params = [cl.IMAGE_WIDTH, 0, cl.IMAGE_HEIGHT, 0, cl.INTERNAL_FORMAT, 0]
- decomp.GetParams(params)
- width, height, format = params[1], params[3], params[5]
- if format == cl.GRAYSCALE or options['forcegray']:
- format = cl.GRAYSCALE
- bytesperpixel = 1
- else:
- format = cl.RGBX
- bytesperpixel = 4
- # XXX How to support 'smooth'?
- params = [cl.ORIGINAL_FORMAT, format,
- cl.ORIENTATION, cl.BOTTOM_UP,
- cl.FRAME_BUFFER_SIZE, width*height*bytesperpixel]
- decomp.SetParams(params)
- imgdata = decomp.Decompress(1, jpegdata)
- return imgdata, width, height, bytesperpixel
-
-def setoption(name, value):
- if type(value) is not type(0):
- raise TypeError, 'jpeg.setoption: numeric options only'
- if name == 'forcegrey':
- name = 'forcegray'
- if not options.has_key(name):
- raise KeyError, 'jpeg.setoption: unknown option name'
- options[name] = int(value)
-
-def test():
- import sys
- if sys.argv[1:2] == ['-g']:
- del sys.argv[1]
- setoption('forcegray', 1)
- if not sys.argv[1:]:
- sys.argv.append('/usr/local/images/data/jpg/asterix.jpg')
- for file in sys.argv[1:]:
- show(file)
-
-def show(file):
- import gl, GL, DEVICE
- jpegdata = open(file, 'r').read()
- imgdata, width, height, bytesperpixel = decompress(jpegdata)
- gl.foreground()
- gl.prefsize(width, height)
- win = gl.winopen(file)
- if bytesperpixel == 1:
- gl.cmode()
- gl.pixmode(GL.PM_SIZE, 8)
- gl.gconfig()
- for i in range(256):
- gl.mapcolor(i, i, i, i)
- else:
- gl.RGBmode()
- gl.pixmode(GL.PM_SIZE, 32)
- gl.gconfig()
- gl.qdevice(DEVICE.REDRAW)
- gl.qdevice(DEVICE.ESCKEY)
- gl.qdevice(DEVICE.WINQUIT)
- gl.qdevice(DEVICE.WINSHUT)
- gl.lrectwrite(0, 0, width-1, height-1, imgdata)
- while 1:
- dev, val = gl.qread()
- if dev in (DEVICE.ESCKEY, DEVICE.WINSHUT, DEVICE.WINQUIT):
- break
- if dev == DEVICE.REDRAW:
- gl.lrectwrite(0, 0, width-1, height-1, imgdata)
- gl.winclose(win)
- # Now test the compression and write the result to a fixed filename
- newjpegdata = compress(imgdata, width, height, bytesperpixel)
- open('/tmp/j.jpg', 'w').write(newjpegdata)
diff --git a/sys/lib/python/plat-irix6/panel.py b/sys/lib/python/plat-irix6/panel.py
deleted file mode 100644
index 12e62a51b..000000000
--- a/sys/lib/python/plat-irix6/panel.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Module 'panel'
-#
-# Support for the Panel library.
-# Uses built-in module 'pnl'.
-# Applications should use 'panel.function' instead of 'pnl.function';
-# most 'pnl' functions are transparently exported by 'panel',
-# but dopanel() is overridden and you have to use this version
-# if you want to use callbacks.
-
-
-import pnl
-
-
-debug = 0
-
-
-# Test if an object is a list.
-#
-def is_list(x):
- return type(x) == type([])
-
-
-# Reverse a list.
-#
-def reverse(list):
- res = []
- for item in list:
- res.insert(0, item)
- return res
-
-
-# Get an attribute of a list, which may itself be another list.
-# Don't use 'prop' for name.
-#
-def getattrlist(list, name):
- for item in list:
- if item and is_list(item) and item[0] == name:
- return item[1:]
- return []
-
-
-# Get a property of a list, which may itself be another list.
-#
-def getproplist(list, name):
- for item in list:
- if item and is_list(item) and item[0] == 'prop':
- if len(item) > 1 and item[1] == name:
- return item[2:]
- return []
-
-
-# Test if an actuator description contains the property 'end-of-group'
-#
-def is_endgroup(list):
- x = getproplist(list, 'end-of-group')
- return (x and x[0] == '#t')
-
-
-# Neatly display an actuator definition given as S-expression
-# the prefix string is printed before each line.
-#
-def show_actuator(prefix, a):
- for item in a:
- if not is_list(item):
- print prefix, item
- elif item and item[0] == 'al':
- print prefix, 'Subactuator list:'
- for a in item[1:]:
- show_actuator(prefix + ' ', a)
- elif len(item) == 2:
- print prefix, item[0], '=>', item[1]
- elif len(item) == 3 and item[0] == 'prop':
- print prefix, 'Prop', item[1], '=>',
- print item[2]
- else:
- print prefix, '?', item
-
-
-# Neatly display a panel.
-#
-def show_panel(prefix, p):
- for item in p:
- if not is_list(item):
- print prefix, item
- elif item and item[0] == 'al':
- print prefix, 'Actuator list:'
- for a in item[1:]:
- show_actuator(prefix + ' ', a)
- elif len(item) == 2:
- print prefix, item[0], '=>', item[1]
- elif len(item) == 3 and item[0] == 'prop':
- print prefix, 'Prop', item[1], '=>',
- print item[2]
- else:
- print prefix, '?', item
-
-
-# Exception raised by build_actuator or build_panel.
-#
-panel_error = 'panel error'
-
-
-# Dummy callback used to initialize the callbacks.
-#
-def dummy_callback(arg):
- pass
-
-
-# Assign attributes to members of the target.
-# Attribute names in exclist are ignored.
-# The member name is the attribute name prefixed with the prefix.
-#
-def assign_members(target, attrlist, exclist, prefix):
- for item in attrlist:
- if is_list(item) and len(item) == 2 and item[0] not in exclist:
- name, value = item[0], item[1]
- ok = 1
- if value[0] in '-0123456789':
- value = eval(value)
- elif value[0] == '"':
- value = value[1:-1]
- elif value == 'move-then-resize':
- # Strange default set by Panel Editor...
- ok = 0
- else:
- print 'unknown value', value, 'for', name
- ok = 0
- if ok:
- lhs = 'target.' + prefix + name
- stmt = lhs + '=' + repr(value)
- if debug: print 'exec', stmt
- try:
- exec stmt + '\n'
- except KeyboardInterrupt: # Don't catch this!
- raise KeyboardInterrupt
- except:
- print 'assign failed:', stmt
-
-
-# Build a real actuator from an actuator description.
-# Return a pair (actuator, name).
-#
-def build_actuator(descr):
- namelist = getattrlist(descr, 'name')
- if namelist:
- # Assume it is a string
- actuatorname = namelist[0][1:-1]
- else:
- actuatorname = ''
- type = descr[0]
- if type[:4] == 'pnl_': type = type[4:]
- act = pnl.mkact(type)
- act.downfunc = act.activefunc = act.upfunc = dummy_callback
- #
- assign_members(act, descr[1:], ['al', 'data', 'name'], '')
- #
- # Treat actuator-specific data
- #
- datalist = getattrlist(descr, 'data')
- prefix = ''
- if type[-4:] == 'puck':
- prefix = 'puck_'
- elif type == 'mouse':
- prefix = 'mouse_'
- assign_members(act, datalist, [], prefix)
- #
- return act, actuatorname
-
-
-# Build all sub-actuators and add them to the super-actuator.
-# The super-actuator must already have been added to the panel.
-# Sub-actuators with defined names are added as members to the panel
-# so they can be referenced as p.name.
-#
-# Note: I have no idea how panel.endgroup() works when applied
-# to a sub-actuator.
-#
-def build_subactuators(panel, super_act, al):
- #
- # This is nearly the same loop as below in build_panel(),
- # except a call is made to addsubact() instead of addact().
- #
- for a in al:
- act, name = build_actuator(a)
- act.addsubact(super_act)
- if name:
- stmt = 'panel.' + name + ' = act'
- if debug: print 'exec', stmt
- exec stmt + '\n'
- if is_endgroup(a):
- panel.endgroup()
- sub_al = getattrlist(a, 'al')
- if sub_al:
- build_subactuators(panel, act, sub_al)
- #
- # Fix the actuator to which whe just added subactuators.
- # This can't hurt (I hope) and is needed for the scroll actuator.
- #
- super_act.fixact()
-
-
-# Build a real panel from a panel definition.
-# Return a panel object p, where for each named actuator a, p.name is a
-# reference to a.
-#
-def build_panel(descr):
- #
- # Sanity check
- #
- if (not descr) or descr[0] != 'panel':
- raise panel_error, 'panel description must start with "panel"'
- #
- if debug: show_panel('', descr)
- #
- # Create an empty panel
- #
- panel = pnl.mkpanel()
- #
- # Assign panel attributes
- #
- assign_members(panel, descr[1:], ['al'], '')
- #
- # Look for actuator list
- #
- al = getattrlist(descr, 'al')
- #
- # The order in which actuators are created is important
- # because of the endgroup() operator.
- # Unfortunately the Panel Editor outputs the actuator list
- # in reverse order, so we reverse it here.
- #
- al = reverse(al)
- #
- for a in al:
- act, name = build_actuator(a)
- act.addact(panel)
- if name:
- stmt = 'panel.' + name + ' = act'
- exec stmt + '\n'
- if is_endgroup(a):
- panel.endgroup()
- sub_al = getattrlist(a, 'al')
- if sub_al:
- build_subactuators(panel, act, sub_al)
- #
- return panel
-
-
-# Wrapper around pnl.dopanel() which calls call-back functions.
-#
-def my_dopanel():
- # Extract only the first 4 elements to allow for future expansion
- a, down, active, up = pnl.dopanel()[:4]
- if down:
- down.downfunc(down)
- if active:
- active.activefunc(active)
- if up:
- up.upfunc(up)
- return a
-
-
-# Create one or more panels from a description file (S-expressions)
-# generated by the Panel Editor.
-#
-def defpanellist(file):
- import panelparser
- descrlist = panelparser.parse_file(open(file, 'r'))
- panellist = []
- for descr in descrlist:
- panellist.append(build_panel(descr))
- return panellist
-
-
-# Import everything from built-in method pnl, so the user can always
-# use panel.foo() instead of pnl.foo().
-# This gives *no* performance penalty once this module is imported.
-#
-from pnl import * # for export
-
-dopanel = my_dopanel # override pnl.dopanel
diff --git a/sys/lib/python/plat-irix6/panelparser.py b/sys/lib/python/plat-irix6/panelparser.py
deleted file mode 100644
index c831c4927..000000000
--- a/sys/lib/python/plat-irix6/panelparser.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Module 'parser'
-#
-# Parse S-expressions output by the Panel Editor
-# (which is written in Scheme so it can't help writing S-expressions).
-#
-# See notes at end of file.
-
-
-whitespace = ' \t\n'
-operators = '()\''
-separators = operators + whitespace + ';' + '"'
-
-
-# Tokenize a string.
-# Return a list of tokens (strings).
-#
-def tokenize_string(s):
- tokens = []
- while s:
- c = s[:1]
- if c in whitespace:
- s = s[1:]
- elif c == ';':
- s = ''
- elif c == '"':
- n = len(s)
- i = 1
- while i < n:
- c = s[i]
- i = i+1
- if c == '"': break
- if c == '\\': i = i+1
- tokens.append(s[:i])
- s = s[i:]
- elif c in operators:
- tokens.append(c)
- s = s[1:]
- else:
- n = len(s)
- i = 1
- while i < n:
- if s[i] in separators: break
- i = i+1
- tokens.append(s[:i])
- s = s[i:]
- return tokens
-
-
-# Tokenize a whole file (given as file object, not as file name).
-# Return a list of tokens (strings).
-#
-def tokenize_file(fp):
- tokens = []
- while 1:
- line = fp.readline()
- if not line: break
- tokens = tokens + tokenize_string(line)
- return tokens
-
-
-# Exception raised by parse_exr.
-#
-syntax_error = 'syntax error'
-
-
-# Parse an S-expression.
-# Input is a list of tokens as returned by tokenize_*().
-# Return a pair (expr, tokens)
-# where expr is a list representing the s-expression,
-# and tokens contains the remaining tokens.
-# May raise syntax_error.
-#
-def parse_expr(tokens):
- if (not tokens) or tokens[0] != '(':
- raise syntax_error, 'expected "("'
- tokens = tokens[1:]
- expr = []
- while 1:
- if not tokens:
- raise syntax_error, 'missing ")"'
- if tokens[0] == ')':
- return expr, tokens[1:]
- elif tokens[0] == '(':
- subexpr, tokens = parse_expr(tokens)
- expr.append(subexpr)
- else:
- expr.append(tokens[0])
- tokens = tokens[1:]
-
-
-# Parse a file (given as file object, not as file name).
-# Return a list of parsed S-expressions found at the top level.
-#
-def parse_file(fp):
- tokens = tokenize_file(fp)
- exprlist = []
- while tokens:
- expr, tokens = parse_expr(tokens)
- exprlist.append(expr)
- return exprlist
-
-
-# EXAMPLE:
-#
-# The input
-# '(hip (hop hur-ray))'
-#
-# passed to tokenize_string() returns the token list
-# ['(', 'hip', '(', 'hop', 'hur-ray', ')', ')']
-#
-# When this is passed to parse_expr() it returns the expression
-# ['hip', ['hop', 'hur-ray']]
-# plus an empty token list (because there are no tokens left.
-#
-# When a file containing the example is passed to parse_file() it returns
-# a list whose only element is the output of parse_expr() above:
-# [['hip', ['hop', 'hur-ray']]]
-
-
-# TOKENIZING:
-#
-# Comments start with semicolon (;) and continue till the end of the line.
-#
-# Tokens are separated by whitespace, except the following characters
-# always form a separate token (outside strings):
-# ( ) '
-# Strings are enclosed in double quotes (") and backslash (\) is used
-# as escape character in strings.
diff --git a/sys/lib/python/plat-irix6/readcd.doc b/sys/lib/python/plat-irix6/readcd.doc
deleted file mode 100644
index 1be549c79..000000000
--- a/sys/lib/python/plat-irix6/readcd.doc
+++ /dev/null
@@ -1,104 +0,0 @@
-Interface to CD-ROM player.
-
-This module implements an interface to the built-in cd module. The
-intention is to provide a more user-friendly interface than the
-built-in module.
-
-The module defines a class Readcd with several methods. The
-initialization of the class will try to open the CD player. This
-means that initialization will fail if the CD player is already in
-use. A RuntimeError will be raised by the cd module in that case.
-
-The way to work with this module is as follows. The user specifies
-the parts of the CD that are to be read and he specifies callback
-functions which are to be called by the system. At some point he can
-tell the system to play. The specified parts of the CD will then be
-read and the callbacks will be called.
-
-Initialization.
-===============
-
-r = readcd.Readcd([cd-player [, mode]])
-
-The optional arguments are the name of the CD device and the mode.
-When "mode" is not specified, it defaults to 'r' (which is the only
-possible value); when "cd-player" also isn't specified, it defaults
-to "None" which indicates the default CD player.
-
-Methods.
-========
-
-eject() -- Eject the CD from the player.
-
-reset() -- Reset the list of data stretches to be played.
-
-appendtrack(track) -- Append the specified track to the list of music
-stretches.
-
-appendstretch(first, last) -- Append the stretch from "first" to "last"
-to the list of music stretches. Both "first" and "last" can be in one
-of four forms. "None": for "first", the beginning of the CD, for
-"last" the end of the CD; a single integer: a track number--playing
-starts at the beginning of the track or ends at the end of the
-specified track; a three-tuple: the absolute time from the start of
-the CD in minutes, seconds, frames; a four-tuple: track number and
-relative time within the track in minutes, seconds, frames.
-
-settracks(tracklist) -- The argument is a list of integers. The list
-of stretches is set to argument list. The old list is discarded.
-
-setcallback(type, func, arg) -- Set a callback function for "type".
-The function will be called as func(arg, type, data) where "arg" is
-the third argument of setcallback, "type" is the type of callback,
-"data" is type-dependent data. See the CDsetcallback(3) manual page
-for more information. The possible "type" arguments are defined in
-the CD module.
-
-removecallback(type) -- Remove the callback for "type".
-
-gettrackinfo([tracklist]) -- Return a list of tuples. Each tuple
-consists of start and length information of a track. The start and
-length information consist of three-tuples with minutes, seconds and
-frames. The optional tracklist argument gives a list of interesting
-track numbers. If no tracklist is specified, information about all
-tracks is returned.
-
-getstatus() -- Return the status information of the CD.
-
-play() -- Play the preprogrammed stretches of music from the CD. When
-nothing was programmed, the whole CD is played.
-
-Specifying stretches.
-=====================
-
-There are three methods available to specify a stretch of music to be
-played. The easiest way is to use "settracklist(tracklist)" with which
-a list of tracks can be specified. "settracklist(tracklist)" is
-equivalent to the sequence
- reset()
- for track in tracklist:
- appendtrack(track)
-
-The next method is "appendtrack(track)" with which a whole track can be
-added to the list of music to be played. "appendtrack(track)" is
-equivalent to "appendstretch(track, track)".
-
-The most complete method is "appendstretch(first, last)". Using this
-method, it is possible to specify any stretch of music.
-
-When two consecutive tracks are played, it is possible to choose
-whether the pause that may be between the tracks is played as well or
-whether the pause should be skipped. When the end of a stretch is
-specified using a track number and the next stretch starts at the
-beginning of the following track and that was also specified using the
-track number (that is, both were specified as integers, not as tuples),
-the pause is played. When either value was specified using absolute
-time or track-relative time (that is, as three-tuple or as
-four-tuple), the pause will not be played.
-
-Errors.
-=======
-
-When an error occurs, an exception will be raised. Depending on where
-the error occurs, the exception may either be "readcd.Error" or
-"RuntimeError".
diff --git a/sys/lib/python/plat-irix6/readcd.py b/sys/lib/python/plat-irix6/readcd.py
deleted file mode 100644
index 5453ce421..000000000
--- a/sys/lib/python/plat-irix6/readcd.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Class interface to the CD module.
-
-import cd, CD
-
-class Error(Exception):
- pass
-class _Stop(Exception):
- pass
-
-def _doatime(self, cb_type, data):
- if ((data[0] * 60) + data[1]) * 75 + data[2] > self.end:
-## print 'done with list entry', repr(self.listindex)
- raise _Stop
- func, arg = self.callbacks[cb_type]
- if func:
- func(arg, cb_type, data)
-
-def _dopnum(self, cb_type, data):
- if data > self.end:
-## print 'done with list entry', repr(self.listindex)
- raise _Stop
- func, arg = self.callbacks[cb_type]
- if func:
- func(arg, cb_type, data)
-
-class Readcd:
- def __init__(self, *arg):
- if len(arg) == 0:
- self.player = cd.open()
- elif len(arg) == 1:
- self.player = cd.open(arg[0])
- elif len(arg) == 2:
- self.player = cd.open(arg[0], arg[1])
- else:
- raise Error, 'bad __init__ call'
- self.list = []
- self.callbacks = [(None, None)] * 8
- self.parser = cd.createparser()
- self.playing = 0
- self.end = 0
- self.status = None
- self.trackinfo = None
-
- def eject(self):
- self.player.eject()
- self.list = []
- self.end = 0
- self.listindex = 0
- self.status = None
- self.trackinfo = None
- if self.playing:
-## print 'stop playing from eject'
- raise _Stop
-
- def pmsf2msf(self, track, min, sec, frame):
- if not self.status:
- self.cachestatus()
- if track < self.status[5] or track > self.status[6]:
- raise Error, 'track number out of range'
- if not self.trackinfo:
- self.cacheinfo()
- start, total = self.trackinfo[track]
- start = ((start[0] * 60) + start[1]) * 75 + start[2]
- total = ((total[0] * 60) + total[1]) * 75 + total[2]
- block = ((min * 60) + sec) * 75 + frame
- if block > total:
- raise Error, 'out of range'
- block = start + block
- min, block = divmod(block, 75*60)
- sec, frame = divmod(block, 75)
- return min, sec, frame
-
- def reset(self):
- self.list = []
-
- def appendtrack(self, track):
- self.appendstretch(track, track)
-
- def appendstretch(self, start, end):
- if not self.status:
- self.cachestatus()
- if not start:
- start = 1
- if not end:
- end = self.status[6]
- if type(end) == type(0):
- if end < self.status[5] or end > self.status[6]:
- raise Error, 'range error'
- else:
- l = len(end)
- if l == 4:
- prog, min, sec, frame = end
- if prog < self.status[5] or prog > self.status[6]:
- raise Error, 'range error'
- end = self.pmsf2msf(prog, min, sec, frame)
- elif l != 3:
- raise Error, 'syntax error'
- if type(start) == type(0):
- if start < self.status[5] or start > self.status[6]:
- raise Error, 'range error'
- if len(self.list) > 0:
- s, e = self.list[-1]
- if type(e) == type(0):
- if start == e+1:
- start = s
- del self.list[-1]
- else:
- l = len(start)
- if l == 4:
- prog, min, sec, frame = start
- if prog < self.status[5] or prog > self.status[6]:
- raise Error, 'range error'
- start = self.pmsf2msf(prog, min, sec, frame)
- elif l != 3:
- raise Error, 'syntax error'
- self.list.append((start, end))
-
- def settracks(self, list):
- self.list = []
- for track in list:
- self.appendtrack(track)
-
- def setcallback(self, cb_type, func, arg):
- if cb_type < 0 or cb_type >= 8:
- raise Error, 'type out of range'
- self.callbacks[cb_type] = (func, arg)
- if self.playing:
- start, end = self.list[self.listindex]
- if type(end) == type(0):
- if cb_type != CD.PNUM:
- self.parser.setcallback(cb_type, func, arg)
- else:
- if cb_type != CD.ATIME:
- self.parser.setcallback(cb_type, func, arg)
-
- def removecallback(self, cb_type):
- if cb_type < 0 or cb_type >= 8:
- raise Error, 'type out of range'
- self.callbacks[cb_type] = (None, None)
- if self.playing:
- start, end = self.list[self.listindex]
- if type(end) == type(0):
- if cb_type != CD.PNUM:
- self.parser.removecallback(cb_type)
- else:
- if cb_type != CD.ATIME:
- self.parser.removecallback(cb_type)
-
- def gettrackinfo(self, *arg):
- if not self.status:
- self.cachestatus()
- if not self.trackinfo:
- self.cacheinfo()
- if len(arg) == 0:
- return self.trackinfo[self.status[5]:self.status[6]+1]
- result = []
- for i in arg:
- if i < self.status[5] or i > self.status[6]:
- raise Error, 'range error'
- result.append(self.trackinfo[i])
- return result
-
- def cacheinfo(self):
- if not self.status:
- self.cachestatus()
- self.trackinfo = []
- for i in range(self.status[5]):
- self.trackinfo.append(None)
- for i in range(self.status[5], self.status[6]+1):
- self.trackinfo.append(self.player.gettrackinfo(i))
-
- def cachestatus(self):
- self.status = self.player.getstatus()
- if self.status[0] == CD.NODISC:
- self.status = None
- raise Error, 'no disc in player'
-
- def getstatus(self):
- return self.player.getstatus()
-
- def play(self):
- if not self.status:
- self.cachestatus()
- size = self.player.bestreadsize()
- self.listindex = 0
- self.playing = 0
- for i in range(8):
- func, arg = self.callbacks[i]
- if func:
- self.parser.setcallback(i, func, arg)
- else:
- self.parser.removecallback(i)
- if len(self.list) == 0:
- for i in range(self.status[5], self.status[6]+1):
- self.appendtrack(i)
- try:
- while 1:
- if not self.playing:
- if self.listindex >= len(self.list):
- return
- start, end = self.list[self.listindex]
- if type(start) == type(0):
- dummy = self.player.seektrack(
- start)
- else:
- min, sec, frame = start
- dummy = self.player.seek(
- min, sec, frame)
- if type(end) == type(0):
- self.parser.setcallback(
- CD.PNUM, _dopnum, self)
- self.end = end
- func, arg = \
- self.callbacks[CD.ATIME]
- if func:
- self.parser.setcallback(CD.ATIME, func, arg)
- else:
- self.parser.removecallback(CD.ATIME)
- else:
- min, sec, frame = end
- self.parser.setcallback(
- CD.ATIME, _doatime,
- self)
- self.end = (min * 60 + sec) * \
- 75 + frame
- func, arg = \
- self.callbacks[CD.PNUM]
- if func:
- self.parser.setcallback(CD.PNUM, func, arg)
- else:
- self.parser.removecallback(CD.PNUM)
- self.playing = 1
- data = self.player.readda(size)
- if data == '':
- self.playing = 0
- self.listindex = self.listindex + 1
- continue
- try:
- self.parser.parseframe(data)
- except _Stop:
- self.playing = 0
- self.listindex = self.listindex + 1
- finally:
- self.playing = 0
diff --git a/sys/lib/python/plat-irix6/regen b/sys/lib/python/plat-irix6/regen
deleted file mode 100755
index 6a2cb408d..000000000
--- a/sys/lib/python/plat-irix6/regen
+++ /dev/null
@@ -1,11 +0,0 @@
-#! /bin/sh
-case `uname -sr` in
-'IRIX '[456].*) ;;
-'IRIX64 '[456].*) ;;
-*) echo Probably not on an IRIX system 1>&2
- exit 1;;
-esac
-set -v
-h2py /usr/include/sys/file.h
-h2py -i '(u_long)' /usr/include/netinet/in.h
-h2py /usr/include/errno.h
diff --git a/sys/lib/python/plat-irix6/torgb.py b/sys/lib/python/plat-irix6/torgb.py
deleted file mode 100644
index 54c86c477..000000000
--- a/sys/lib/python/plat-irix6/torgb.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Convert "arbitrary" image files to rgb files (SGI's image format).
-# Input may be compressed.
-# The uncompressed file type may be PBM, PGM, PPM, GIF, TIFF, or Sun raster.
-# An exception is raised if the file is not of a recognized type.
-# Returned filename is either the input filename or a temporary filename;
-# in the latter case the caller must ensure that it is removed.
-# Other temporary files used are removed by the function.
-
-import os
-import tempfile
-import pipes
-import imghdr
-
-table = {}
-
-t = pipes.Template()
-t.append('fromppm $IN $OUT', 'ff')
-table['ppm'] = t
-
-t = pipes.Template()
-t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
-t.append('fromppm $IN $OUT', 'ff')
-table['pnm'] = t
-table['pgm'] = t
-table['pbm'] = t
-
-t = pipes.Template()
-t.append('fromgif $IN $OUT', 'ff')
-table['gif'] = t
-
-t = pipes.Template()
-t.append('tifftopnm', '--')
-t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
-t.append('fromppm $IN $OUT', 'ff')
-table['tiff'] = t
-
-t = pipes.Template()
-t.append('rasttopnm', '--')
-t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
-t.append('fromppm $IN $OUT', 'ff')
-table['rast'] = t
-
-t = pipes.Template()
-t.append('djpeg', '--')
-t.append('(PATH=$PATH:/ufs/guido/bin/sgi; exec pnmtoppm)', '--')
-t.append('fromppm $IN $OUT', 'ff')
-table['jpeg'] = t
-
-uncompress = pipes.Template()
-uncompress.append('uncompress', '--')
-
-
-class error(Exception):
- pass
-
-def torgb(filename):
- temps = []
- ret = None
- try:
- ret = _torgb(filename, temps)
- finally:
- for temp in temps[:]:
- if temp != ret:
- try:
- os.unlink(temp)
- except os.error:
- pass
- temps.remove(temp)
- return ret
-
-def _torgb(filename, temps):
- if filename[-2:] == '.Z':
- (fd, fname) = tempfile.mkstemp()
- os.close(fd)
- temps.append(fname)
- sts = uncompress.copy(filename, fname)
- if sts:
- raise error, filename + ': uncompress failed'
- else:
- fname = filename
- try:
- ftype = imghdr.what(fname)
- except IOError, msg:
- if type(msg) == type(()) and len(msg) == 2 and \
- type(msg[0]) == type(0) and type(msg[1]) == type(''):
- msg = msg[1]
- if type(msg) is not type(''):
- msg = repr(msg)
- raise error, filename + ': ' + msg
- if ftype == 'rgb':
- return fname
- if ftype is None or not table.has_key(ftype):
- raise error, '%s: unsupported image file type %r' % (filename, ftype)
- (fd, temp) = tempfile.mkstemp()
- os.close(fd)
- sts = table[ftype].copy(fname, temp)
- if sts:
- raise error, filename + ': conversion to rgb failed'
- return temp
diff --git a/sys/lib/python/plat-linux2/CDROM.py b/sys/lib/python/plat-linux2/CDROM.py
deleted file mode 100644
index 434093684..000000000
--- a/sys/lib/python/plat-linux2/CDROM.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Generated by h2py from /usr/include/linux/cdrom.h
-
-CDROMPAUSE = 0x5301
-CDROMRESUME = 0x5302
-CDROMPLAYMSF = 0x5303
-CDROMPLAYTRKIND = 0x5304
-CDROMREADTOCHDR = 0x5305
-CDROMREADTOCENTRY = 0x5306
-CDROMSTOP = 0x5307
-CDROMSTART = 0x5308
-CDROMEJECT = 0x5309
-CDROMVOLCTRL = 0x530a
-CDROMSUBCHNL = 0x530b
-CDROMREADMODE2 = 0x530c
-CDROMREADMODE1 = 0x530d
-CDROMREADAUDIO = 0x530e
-CDROMEJECT_SW = 0x530f
-CDROMMULTISESSION = 0x5310
-CDROM_GET_MCN = 0x5311
-CDROM_GET_UPC = CDROM_GET_MCN
-CDROMRESET = 0x5312
-CDROMVOLREAD = 0x5313
-CDROMREADRAW = 0x5314
-CDROMREADCOOKED = 0x5315
-CDROMSEEK = 0x5316
-CDROMPLAYBLK = 0x5317
-CDROMREADALL = 0x5318
-CDROMGETSPINDOWN = 0x531d
-CDROMSETSPINDOWN = 0x531e
-CDROMCLOSETRAY = 0x5319
-CDROM_SET_OPTIONS = 0x5320
-CDROM_CLEAR_OPTIONS = 0x5321
-CDROM_SELECT_SPEED = 0x5322
-CDROM_SELECT_DISC = 0x5323
-CDROM_MEDIA_CHANGED = 0x5325
-CDROM_DRIVE_STATUS = 0x5326
-CDROM_DISC_STATUS = 0x5327
-CDROM_CHANGER_NSLOTS = 0x5328
-CDROM_LOCKDOOR = 0x5329
-CDROM_DEBUG = 0x5330
-CDROM_GET_CAPABILITY = 0x5331
-CDROMAUDIOBUFSIZ = 0x5382
-DVD_READ_STRUCT = 0x5390
-DVD_WRITE_STRUCT = 0x5391
-DVD_AUTH = 0x5392
-CDROM_SEND_PACKET = 0x5393
-CDROM_NEXT_WRITABLE = 0x5394
-CDROM_LAST_WRITTEN = 0x5395
-CDROM_PACKET_SIZE = 12
-CGC_DATA_UNKNOWN = 0
-CGC_DATA_WRITE = 1
-CGC_DATA_READ = 2
-CGC_DATA_NONE = 3
-CD_MINS = 74
-CD_SECS = 60
-CD_FRAMES = 75
-CD_SYNC_SIZE = 12
-CD_MSF_OFFSET = 150
-CD_CHUNK_SIZE = 24
-CD_NUM_OF_CHUNKS = 98
-CD_FRAMESIZE_SUB = 96
-CD_HEAD_SIZE = 4
-CD_SUBHEAD_SIZE = 8
-CD_EDC_SIZE = 4
-CD_ZERO_SIZE = 8
-CD_ECC_SIZE = 276
-CD_FRAMESIZE = 2048
-CD_FRAMESIZE_RAW = 2352
-CD_FRAMESIZE_RAWER = 2646
-CD_FRAMESIZE_RAW1 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE)
-CD_FRAMESIZE_RAW0 = (CD_FRAMESIZE_RAW-CD_SYNC_SIZE-CD_HEAD_SIZE)
-CD_XA_HEAD = (CD_HEAD_SIZE+CD_SUBHEAD_SIZE)
-CD_XA_TAIL = (CD_EDC_SIZE+CD_ECC_SIZE)
-CD_XA_SYNC_HEAD = (CD_SYNC_SIZE+CD_XA_HEAD)
-CDROM_LBA = 0x01
-CDROM_MSF = 0x02
-CDROM_DATA_TRACK = 0x04
-CDROM_LEADOUT = 0xAA
-CDROM_AUDIO_INVALID = 0x00
-CDROM_AUDIO_PLAY = 0x11
-CDROM_AUDIO_PAUSED = 0x12
-CDROM_AUDIO_COMPLETED = 0x13
-CDROM_AUDIO_ERROR = 0x14
-CDROM_AUDIO_NO_STATUS = 0x15
-CDC_CLOSE_TRAY = 0x1
-CDC_OPEN_TRAY = 0x2
-CDC_LOCK = 0x4
-CDC_SELECT_SPEED = 0x8
-CDC_SELECT_DISC = 0x10
-CDC_MULTI_SESSION = 0x20
-CDC_MCN = 0x40
-CDC_MEDIA_CHANGED = 0x80
-CDC_PLAY_AUDIO = 0x100
-CDC_RESET = 0x200
-CDC_IOCTLS = 0x400
-CDC_DRIVE_STATUS = 0x800
-CDC_GENERIC_PACKET = 0x1000
-CDC_CD_R = 0x2000
-CDC_CD_RW = 0x4000
-CDC_DVD = 0x8000
-CDC_DVD_R = 0x10000
-CDC_DVD_RAM = 0x20000
-CDS_NO_INFO = 0
-CDS_NO_DISC = 1
-CDS_TRAY_OPEN = 2
-CDS_DRIVE_NOT_READY = 3
-CDS_DISC_OK = 4
-CDS_AUDIO = 100
-CDS_DATA_1 = 101
-CDS_DATA_2 = 102
-CDS_XA_2_1 = 103
-CDS_XA_2_2 = 104
-CDS_MIXED = 105
-CDO_AUTO_CLOSE = 0x1
-CDO_AUTO_EJECT = 0x2
-CDO_USE_FFLAGS = 0x4
-CDO_LOCK = 0x8
-CDO_CHECK_TYPE = 0x10
-CD_PART_MAX = 64
-CD_PART_MASK = (CD_PART_MAX - 1)
-GPCMD_BLANK = 0xa1
-GPCMD_CLOSE_TRACK = 0x5b
-GPCMD_FLUSH_CACHE = 0x35
-GPCMD_FORMAT_UNIT = 0x04
-GPCMD_GET_CONFIGURATION = 0x46
-GPCMD_GET_EVENT_STATUS_NOTIFICATION = 0x4a
-GPCMD_GET_PERFORMANCE = 0xac
-GPCMD_INQUIRY = 0x12
-GPCMD_LOAD_UNLOAD = 0xa6
-GPCMD_MECHANISM_STATUS = 0xbd
-GPCMD_MODE_SELECT_10 = 0x55
-GPCMD_MODE_SENSE_10 = 0x5a
-GPCMD_PAUSE_RESUME = 0x4b
-GPCMD_PLAY_AUDIO_10 = 0x45
-GPCMD_PLAY_AUDIO_MSF = 0x47
-GPCMD_PLAY_AUDIO_TI = 0x48
-GPCMD_PLAY_CD = 0xbc
-GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL = 0x1e
-GPCMD_READ_10 = 0x28
-GPCMD_READ_12 = 0xa8
-GPCMD_READ_CDVD_CAPACITY = 0x25
-GPCMD_READ_CD = 0xbe
-GPCMD_READ_CD_MSF = 0xb9
-GPCMD_READ_DISC_INFO = 0x51
-GPCMD_READ_DVD_STRUCTURE = 0xad
-GPCMD_READ_FORMAT_CAPACITIES = 0x23
-GPCMD_READ_HEADER = 0x44
-GPCMD_READ_TRACK_RZONE_INFO = 0x52
-GPCMD_READ_SUBCHANNEL = 0x42
-GPCMD_READ_TOC_PMA_ATIP = 0x43
-GPCMD_REPAIR_RZONE_TRACK = 0x58
-GPCMD_REPORT_KEY = 0xa4
-GPCMD_REQUEST_SENSE = 0x03
-GPCMD_RESERVE_RZONE_TRACK = 0x53
-GPCMD_SCAN = 0xba
-GPCMD_SEEK = 0x2b
-GPCMD_SEND_DVD_STRUCTURE = 0xad
-GPCMD_SEND_EVENT = 0xa2
-GPCMD_SEND_KEY = 0xa3
-GPCMD_SEND_OPC = 0x54
-GPCMD_SET_READ_AHEAD = 0xa7
-GPCMD_SET_STREAMING = 0xb6
-GPCMD_START_STOP_UNIT = 0x1b
-GPCMD_STOP_PLAY_SCAN = 0x4e
-GPCMD_TEST_UNIT_READY = 0x00
-GPCMD_VERIFY_10 = 0x2f
-GPCMD_WRITE_10 = 0x2a
-GPCMD_WRITE_AND_VERIFY_10 = 0x2e
-GPCMD_SET_SPEED = 0xbb
-GPCMD_PLAYAUDIO_TI = 0x48
-GPCMD_GET_MEDIA_STATUS = 0xda
-GPMODE_R_W_ERROR_PAGE = 0x01
-GPMODE_WRITE_PARMS_PAGE = 0x05
-GPMODE_AUDIO_CTL_PAGE = 0x0e
-GPMODE_POWER_PAGE = 0x1a
-GPMODE_FAULT_FAIL_PAGE = 0x1c
-GPMODE_TO_PROTECT_PAGE = 0x1d
-GPMODE_CAPABILITIES_PAGE = 0x2a
-GPMODE_ALL_PAGES = 0x3f
-GPMODE_CDROM_PAGE = 0x0d
-DVD_STRUCT_PHYSICAL = 0x00
-DVD_STRUCT_COPYRIGHT = 0x01
-DVD_STRUCT_DISCKEY = 0x02
-DVD_STRUCT_BCA = 0x03
-DVD_STRUCT_MANUFACT = 0x04
-DVD_LAYERS = 4
-DVD_LU_SEND_AGID = 0
-DVD_HOST_SEND_CHALLENGE = 1
-DVD_LU_SEND_KEY1 = 2
-DVD_LU_SEND_CHALLENGE = 3
-DVD_HOST_SEND_KEY2 = 4
-DVD_AUTH_ESTABLISHED = 5
-DVD_AUTH_FAILURE = 6
-DVD_LU_SEND_TITLE_KEY = 7
-DVD_LU_SEND_ASF = 8
-DVD_INVALIDATE_AGID = 9
-DVD_LU_SEND_RPC_STATE = 10
-DVD_HOST_SEND_RPC_STATE = 11
-DVD_CPM_NO_COPYRIGHT = 0
-DVD_CPM_COPYRIGHTED = 1
-DVD_CP_SEC_NONE = 0
-DVD_CP_SEC_EXIST = 1
-DVD_CGMS_UNRESTRICTED = 0
-DVD_CGMS_SINGLE = 2
-DVD_CGMS_RESTRICTED = 3
-
-CDROM_MAX_SLOTS = 256
diff --git a/sys/lib/python/plat-linux2/DLFCN.py b/sys/lib/python/plat-linux2/DLFCN.py
deleted file mode 100644
index e23340a13..000000000
--- a/sys/lib/python/plat-linux2/DLFCN.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Generated by h2py from /usr/include/dlfcn.h
-_DLFCN_H = 1
-
-# Included from features.h
-_FEATURES_H = 1
-__USE_ANSI = 1
-__FAVOR_BSD = 1
-_ISOC99_SOURCE = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 199506L
-_XOPEN_SOURCE = 600
-_XOPEN_SOURCE_EXTENDED = 1
-_LARGEFILE64_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-__USE_ISOC99 = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 2
-_POSIX_C_SOURCE = 199506L
-__USE_POSIX = 1
-__USE_POSIX2 = 1
-__USE_POSIX199309 = 1
-__USE_POSIX199506 = 1
-__USE_XOPEN = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_UNIX98 = 1
-_LARGEFILE_SOURCE = 1
-__USE_XOPEN2K = 1
-__USE_ISOC99 = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_LARGEFILE = 1
-__USE_LARGEFILE64 = 1
-__USE_FILE_OFFSET64 = 1
-__USE_MISC = 1
-__USE_BSD = 1
-__USE_SVID = 1
-__USE_GNU = 1
-__USE_REENTRANT = 1
-__STDC_IEC_559__ = 1
-__STDC_IEC_559_COMPLEX__ = 1
-__STDC_ISO_10646__ = 200009L
-__GNU_LIBRARY__ = 6
-__GLIBC__ = 2
-__GLIBC_MINOR__ = 2
-
-# Included from sys/cdefs.h
-_SYS_CDEFS_H = 1
-def __PMT(args): return args
-
-def __P(args): return args
-
-def __PMT(args): return args
-
-def __STRING(x): return #x
-
-__flexarr = []
-__flexarr = [0]
-__flexarr = []
-__flexarr = [1]
-def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
-
-def __attribute__(xyz): return
-
-def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
-
-def __attribute_format_arg__(x): return
-
-__USE_LARGEFILE = 1
-__USE_LARGEFILE64 = 1
-__USE_EXTERN_INLINES = 1
-
-# Included from gnu/stubs.h
-
-# Included from bits/dlfcn.h
-RTLD_LAZY = 0x00001
-RTLD_NOW = 0x00002
-RTLD_BINDING_MASK = 0x3
-RTLD_NOLOAD = 0x00004
-RTLD_GLOBAL = 0x00100
-RTLD_LOCAL = 0
-RTLD_NODELETE = 0x01000
diff --git a/sys/lib/python/plat-linux2/IN.py b/sys/lib/python/plat-linux2/IN.py
deleted file mode 100644
index ad307f653..000000000
--- a/sys/lib/python/plat-linux2/IN.py
+++ /dev/null
@@ -1,615 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-_NETINET_IN_H = 1
-
-# Included from features.h
-_FEATURES_H = 1
-__USE_ANSI = 1
-__FAVOR_BSD = 1
-_ISOC99_SOURCE = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 199506L
-_XOPEN_SOURCE = 600
-_XOPEN_SOURCE_EXTENDED = 1
-_LARGEFILE64_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-__USE_ISOC99 = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 2
-_POSIX_C_SOURCE = 199506L
-__USE_POSIX = 1
-__USE_POSIX2 = 1
-__USE_POSIX199309 = 1
-__USE_POSIX199506 = 1
-__USE_XOPEN = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_UNIX98 = 1
-_LARGEFILE_SOURCE = 1
-__USE_XOPEN2K = 1
-__USE_ISOC99 = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_LARGEFILE = 1
-__USE_LARGEFILE64 = 1
-__USE_FILE_OFFSET64 = 1
-__USE_MISC = 1
-__USE_BSD = 1
-__USE_SVID = 1
-__USE_GNU = 1
-__USE_REENTRANT = 1
-__STDC_IEC_559__ = 1
-__STDC_IEC_559_COMPLEX__ = 1
-__STDC_ISO_10646__ = 200009L
-__GNU_LIBRARY__ = 6
-__GLIBC__ = 2
-__GLIBC_MINOR__ = 2
-
-# Included from sys/cdefs.h
-_SYS_CDEFS_H = 1
-def __PMT(args): return args
-
-def __P(args): return args
-
-def __PMT(args): return args
-
-def __STRING(x): return #x
-
-__flexarr = []
-__flexarr = [0]
-__flexarr = []
-__flexarr = [1]
-def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
-
-def __attribute__(xyz): return
-
-def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
-
-def __attribute_format_arg__(x): return
-
-__USE_LARGEFILE = 1
-__USE_LARGEFILE64 = 1
-__USE_EXTERN_INLINES = 1
-
-# Included from gnu/stubs.h
-
-# Included from stdint.h
-_STDINT_H = 1
-
-# Included from bits/wchar.h
-_BITS_WCHAR_H = 1
-__WCHAR_MIN = (-2147483647l - 1l)
-__WCHAR_MAX = (2147483647l)
-
-# Included from bits/wordsize.h
-__WORDSIZE = 32
-def __INT64_C(c): return c ## L
-
-def __UINT64_C(c): return c ## UL
-
-def __INT64_C(c): return c ## LL
-
-def __UINT64_C(c): return c ## ULL
-
-INT8_MIN = (-128)
-INT16_MIN = (-32767-1)
-INT32_MIN = (-2147483647-1)
-INT64_MIN = (-__INT64_C(9223372036854775807)-1)
-INT8_MAX = (127)
-INT16_MAX = (32767)
-INT32_MAX = (2147483647)
-INT64_MAX = (__INT64_C(9223372036854775807))
-UINT8_MAX = (255)
-UINT16_MAX = (65535)
-UINT64_MAX = (__UINT64_C(18446744073709551615))
-INT_LEAST8_MIN = (-128)
-INT_LEAST16_MIN = (-32767-1)
-INT_LEAST32_MIN = (-2147483647-1)
-INT_LEAST64_MIN = (-__INT64_C(9223372036854775807)-1)
-INT_LEAST8_MAX = (127)
-INT_LEAST16_MAX = (32767)
-INT_LEAST32_MAX = (2147483647)
-INT_LEAST64_MAX = (__INT64_C(9223372036854775807))
-UINT_LEAST8_MAX = (255)
-UINT_LEAST16_MAX = (65535)
-UINT_LEAST64_MAX = (__UINT64_C(18446744073709551615))
-INT_FAST8_MIN = (-128)
-INT_FAST16_MIN = (-9223372036854775807L-1)
-INT_FAST32_MIN = (-9223372036854775807L-1)
-INT_FAST16_MIN = (-2147483647-1)
-INT_FAST32_MIN = (-2147483647-1)
-INT_FAST64_MIN = (-__INT64_C(9223372036854775807)-1)
-INT_FAST8_MAX = (127)
-INT_FAST16_MAX = (9223372036854775807L)
-INT_FAST32_MAX = (9223372036854775807L)
-INT_FAST16_MAX = (2147483647)
-INT_FAST32_MAX = (2147483647)
-INT_FAST64_MAX = (__INT64_C(9223372036854775807))
-UINT_FAST8_MAX = (255)
-UINT_FAST64_MAX = (__UINT64_C(18446744073709551615))
-INTPTR_MIN = (-9223372036854775807L-1)
-INTPTR_MAX = (9223372036854775807L)
-INTPTR_MIN = (-2147483647-1)
-INTPTR_MAX = (2147483647)
-INTMAX_MIN = (-__INT64_C(9223372036854775807)-1)
-INTMAX_MAX = (__INT64_C(9223372036854775807))
-UINTMAX_MAX = (__UINT64_C(18446744073709551615))
-PTRDIFF_MIN = (-9223372036854775807L-1)
-PTRDIFF_MAX = (9223372036854775807L)
-PTRDIFF_MIN = (-2147483647-1)
-PTRDIFF_MAX = (2147483647)
-SIG_ATOMIC_MIN = (-2147483647-1)
-SIG_ATOMIC_MAX = (2147483647)
-WCHAR_MIN = __WCHAR_MIN
-WCHAR_MAX = __WCHAR_MAX
-def INT8_C(c): return c
-
-def INT16_C(c): return c
-
-def INT32_C(c): return c
-
-def INT64_C(c): return c ## L
-
-def INT64_C(c): return c ## LL
-
-def UINT8_C(c): return c ## U
-
-def UINT16_C(c): return c ## U
-
-def UINT32_C(c): return c ## U
-
-def UINT64_C(c): return c ## UL
-
-def UINT64_C(c): return c ## ULL
-
-def INTMAX_C(c): return c ## L
-
-def UINTMAX_C(c): return c ## UL
-
-def INTMAX_C(c): return c ## LL
-
-def UINTMAX_C(c): return c ## ULL
-
-
-# Included from bits/types.h
-_BITS_TYPES_H = 1
-__FD_SETSIZE = 1024
-
-# Included from bits/pthreadtypes.h
-_BITS_PTHREADTYPES_H = 1
-
-# Included from bits/sched.h
-SCHED_OTHER = 0
-SCHED_FIFO = 1
-SCHED_RR = 2
-CSIGNAL = 0x000000ff
-CLONE_VM = 0x00000100
-CLONE_FS = 0x00000200
-CLONE_FILES = 0x00000400
-CLONE_SIGHAND = 0x00000800
-CLONE_PID = 0x00001000
-CLONE_PTRACE = 0x00002000
-CLONE_VFORK = 0x00004000
-__defined_schedparam = 1
-def IN_CLASSA(a): return ((((in_addr_t)(a)) & (-2147483648)) == 0)
-
-IN_CLASSA_NET = (-16777216)
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = ((-1) & ~IN_CLASSA_NET)
-IN_CLASSA_MAX = 128
-def IN_CLASSB(a): return ((((in_addr_t)(a)) & (-1073741824)) == (-2147483648))
-
-IN_CLASSB_NET = (-65536)
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = ((-1) & ~IN_CLASSB_NET)
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(a): return ((((in_addr_t)(a)) & (-536870912)) == (-1073741824))
-
-IN_CLASSC_NET = (-256)
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = ((-1) & ~IN_CLASSC_NET)
-def IN_CLASSD(a): return ((((in_addr_t)(a)) & (-268435456)) == (-536870912))
-
-def IN_MULTICAST(a): return IN_CLASSD(a)
-
-def IN_EXPERIMENTAL(a): return ((((in_addr_t)(a)) & (-536870912)) == (-536870912))
-
-def IN_BADCLASS(a): return ((((in_addr_t)(a)) & (-268435456)) == (-268435456))
-
-IN_LOOPBACKNET = 127
-INET_ADDRSTRLEN = 16
-INET6_ADDRSTRLEN = 46
-
-# Included from bits/socket.h
-
-# Included from limits.h
-_LIBC_LIMITS_H_ = 1
-MB_LEN_MAX = 16
-_LIMITS_H = 1
-CHAR_BIT = 8
-SCHAR_MIN = (-128)
-SCHAR_MAX = 127
-UCHAR_MAX = 255
-CHAR_MIN = 0
-CHAR_MAX = UCHAR_MAX
-CHAR_MIN = SCHAR_MIN
-CHAR_MAX = SCHAR_MAX
-SHRT_MIN = (-32768)
-SHRT_MAX = 32767
-USHRT_MAX = 65535
-INT_MAX = 2147483647
-LONG_MAX = 9223372036854775807L
-LONG_MAX = 2147483647L
-LONG_MIN = (-LONG_MAX - 1L)
-
-# Included from bits/posix1_lim.h
-_BITS_POSIX1_LIM_H = 1
-_POSIX_AIO_LISTIO_MAX = 2
-_POSIX_AIO_MAX = 1
-_POSIX_ARG_MAX = 4096
-_POSIX_CHILD_MAX = 6
-_POSIX_DELAYTIMER_MAX = 32
-_POSIX_LINK_MAX = 8
-_POSIX_MAX_CANON = 255
-_POSIX_MAX_INPUT = 255
-_POSIX_MQ_OPEN_MAX = 8
-_POSIX_MQ_PRIO_MAX = 32
-_POSIX_NGROUPS_MAX = 0
-_POSIX_OPEN_MAX = 16
-_POSIX_FD_SETSIZE = _POSIX_OPEN_MAX
-_POSIX_NAME_MAX = 14
-_POSIX_PATH_MAX = 256
-_POSIX_PIPE_BUF = 512
-_POSIX_RTSIG_MAX = 8
-_POSIX_SEM_NSEMS_MAX = 256
-_POSIX_SEM_VALUE_MAX = 32767
-_POSIX_SIGQUEUE_MAX = 32
-_POSIX_SSIZE_MAX = 32767
-_POSIX_STREAM_MAX = 8
-_POSIX_TZNAME_MAX = 6
-_POSIX_QLIMIT = 1
-_POSIX_HIWAT = _POSIX_PIPE_BUF
-_POSIX_UIO_MAXIOV = 16
-_POSIX_TTY_NAME_MAX = 9
-_POSIX_TIMER_MAX = 32
-_POSIX_LOGIN_NAME_MAX = 9
-_POSIX_CLOCKRES_MIN = 20000000
-
-# Included from bits/local_lim.h
-
-# Included from linux/limits.h
-NR_OPEN = 1024
-NGROUPS_MAX = 32
-ARG_MAX = 131072
-CHILD_MAX = 999
-OPEN_MAX = 256
-LINK_MAX = 127
-MAX_CANON = 255
-MAX_INPUT = 255
-NAME_MAX = 255
-PATH_MAX = 4096
-PIPE_BUF = 4096
-RTSIG_MAX = 32
-_POSIX_THREAD_KEYS_MAX = 128
-PTHREAD_KEYS_MAX = 1024
-_POSIX_THREAD_DESTRUCTOR_ITERATIONS = 4
-PTHREAD_DESTRUCTOR_ITERATIONS = _POSIX_THREAD_DESTRUCTOR_ITERATIONS
-_POSIX_THREAD_THREADS_MAX = 64
-PTHREAD_THREADS_MAX = 1024
-AIO_PRIO_DELTA_MAX = 20
-PTHREAD_STACK_MIN = 16384
-TIMER_MAX = 256
-SSIZE_MAX = LONG_MAX
-NGROUPS_MAX = _POSIX_NGROUPS_MAX
-
-# Included from bits/posix2_lim.h
-_BITS_POSIX2_LIM_H = 1
-_POSIX2_BC_BASE_MAX = 99
-_POSIX2_BC_DIM_MAX = 2048
-_POSIX2_BC_SCALE_MAX = 99
-_POSIX2_BC_STRING_MAX = 1000
-_POSIX2_COLL_WEIGHTS_MAX = 2
-_POSIX2_EXPR_NEST_MAX = 32
-_POSIX2_LINE_MAX = 2048
-_POSIX2_RE_DUP_MAX = 255
-_POSIX2_CHARCLASS_NAME_MAX = 14
-BC_BASE_MAX = _POSIX2_BC_BASE_MAX
-BC_DIM_MAX = _POSIX2_BC_DIM_MAX
-BC_SCALE_MAX = _POSIX2_BC_SCALE_MAX
-BC_STRING_MAX = _POSIX2_BC_STRING_MAX
-COLL_WEIGHTS_MAX = 255
-EXPR_NEST_MAX = _POSIX2_EXPR_NEST_MAX
-LINE_MAX = _POSIX2_LINE_MAX
-CHARCLASS_NAME_MAX = 2048
-RE_DUP_MAX = (0x7fff)
-
-# Included from bits/xopen_lim.h
-_XOPEN_LIM_H = 1
-
-# Included from bits/stdio_lim.h
-L_tmpnam = 20
-TMP_MAX = 238328
-FILENAME_MAX = 4096
-L_ctermid = 9
-L_cuserid = 9
-FOPEN_MAX = 16
-IOV_MAX = 1024
-_XOPEN_IOV_MAX = _POSIX_UIO_MAXIOV
-NL_ARGMAX = _POSIX_ARG_MAX
-NL_LANGMAX = _POSIX2_LINE_MAX
-NL_MSGMAX = INT_MAX
-NL_NMAX = INT_MAX
-NL_SETMAX = INT_MAX
-NL_TEXTMAX = INT_MAX
-NZERO = 20
-WORD_BIT = 16
-WORD_BIT = 32
-WORD_BIT = 64
-WORD_BIT = 16
-WORD_BIT = 32
-WORD_BIT = 64
-WORD_BIT = 32
-LONG_BIT = 32
-LONG_BIT = 64
-LONG_BIT = 32
-LONG_BIT = 64
-LONG_BIT = 64
-LONG_BIT = 32
-from TYPES import *
-PF_UNSPEC = 0
-PF_LOCAL = 1
-PF_UNIX = PF_LOCAL
-PF_FILE = PF_LOCAL
-PF_INET = 2
-PF_AX25 = 3
-PF_IPX = 4
-PF_APPLETALK = 5
-PF_NETROM = 6
-PF_BRIDGE = 7
-PF_ATMPVC = 8
-PF_X25 = 9
-PF_INET6 = 10
-PF_ROSE = 11
-PF_DECnet = 12
-PF_NETBEUI = 13
-PF_SECURITY = 14
-PF_KEY = 15
-PF_NETLINK = 16
-PF_ROUTE = PF_NETLINK
-PF_PACKET = 17
-PF_ASH = 18
-PF_ECONET = 19
-PF_ATMSVC = 20
-PF_SNA = 22
-PF_IRDA = 23
-PF_PPPOX = 24
-PF_WANPIPE = 25
-PF_BLUETOOTH = 31
-PF_MAX = 32
-AF_UNSPEC = PF_UNSPEC
-AF_LOCAL = PF_LOCAL
-AF_UNIX = PF_UNIX
-AF_FILE = PF_FILE
-AF_INET = PF_INET
-AF_AX25 = PF_AX25
-AF_IPX = PF_IPX
-AF_APPLETALK = PF_APPLETALK
-AF_NETROM = PF_NETROM
-AF_BRIDGE = PF_BRIDGE
-AF_ATMPVC = PF_ATMPVC
-AF_X25 = PF_X25
-AF_INET6 = PF_INET6
-AF_ROSE = PF_ROSE
-AF_DECnet = PF_DECnet
-AF_NETBEUI = PF_NETBEUI
-AF_SECURITY = PF_SECURITY
-AF_KEY = PF_KEY
-AF_NETLINK = PF_NETLINK
-AF_ROUTE = PF_ROUTE
-AF_PACKET = PF_PACKET
-AF_ASH = PF_ASH
-AF_ECONET = PF_ECONET
-AF_ATMSVC = PF_ATMSVC
-AF_SNA = PF_SNA
-AF_IRDA = PF_IRDA
-AF_PPPOX = PF_PPPOX
-AF_WANPIPE = PF_WANPIPE
-AF_BLUETOOTH = PF_BLUETOOTH
-AF_MAX = PF_MAX
-SOL_RAW = 255
-SOL_DECNET = 261
-SOL_X25 = 262
-SOL_PACKET = 263
-SOL_ATM = 264
-SOL_AAL = 265
-SOL_IRDA = 266
-SOMAXCONN = 128
-
-# Included from bits/sockaddr.h
-_BITS_SOCKADDR_H = 1
-def __SOCKADDR_COMMON(sa_prefix): return \
-
-_SS_SIZE = 128
-def CMSG_FIRSTHDR(mhdr): return \
-
-
-# Included from asm/socket.h
-
-# Included from asm/sockios.h
-FIOSETOWN = 0x8901
-SIOCSPGRP = 0x8902
-FIOGETOWN = 0x8903
-SIOCGPGRP = 0x8904
-SIOCATMARK = 0x8905
-SIOCGSTAMP = 0x8906
-SOL_SOCKET = 1
-SO_DEBUG = 1
-SO_REUSEADDR = 2
-SO_TYPE = 3
-SO_ERROR = 4
-SO_DONTROUTE = 5
-SO_BROADCAST = 6
-SO_SNDBUF = 7
-SO_RCVBUF = 8
-SO_KEEPALIVE = 9
-SO_OOBINLINE = 10
-SO_NO_CHECK = 11
-SO_PRIORITY = 12
-SO_LINGER = 13
-SO_BSDCOMPAT = 14
-SO_PASSCRED = 16
-SO_PEERCRED = 17
-SO_RCVLOWAT = 18
-SO_SNDLOWAT = 19
-SO_RCVTIMEO = 20
-SO_SNDTIMEO = 21
-SO_SECURITY_AUTHENTICATION = 22
-SO_SECURITY_ENCRYPTION_TRANSPORT = 23
-SO_SECURITY_ENCRYPTION_NETWORK = 24
-SO_BINDTODEVICE = 25
-SO_ATTACH_FILTER = 26
-SO_DETACH_FILTER = 27
-SO_PEERNAME = 28
-SO_TIMESTAMP = 29
-SCM_TIMESTAMP = SO_TIMESTAMP
-SO_ACCEPTCONN = 30
-SOCK_STREAM = 1
-SOCK_DGRAM = 2
-SOCK_RAW = 3
-SOCK_RDM = 4
-SOCK_SEQPACKET = 5
-SOCK_PACKET = 10
-SOCK_MAX = (SOCK_PACKET+1)
-
-# Included from bits/in.h
-IP_TOS = 1
-IP_TTL = 2
-IP_HDRINCL = 3
-IP_OPTIONS = 4
-IP_ROUTER_ALERT = 5
-IP_RECVOPTS = 6
-IP_RETOPTS = 7
-IP_PKTINFO = 8
-IP_PKTOPTIONS = 9
-IP_PMTUDISC = 10
-IP_MTU_DISCOVER = 10
-IP_RECVERR = 11
-IP_RECVTTL = 12
-IP_RECVTOS = 13
-IP_MULTICAST_IF = 32
-IP_MULTICAST_TTL = 33
-IP_MULTICAST_LOOP = 34
-IP_ADD_MEMBERSHIP = 35
-IP_DROP_MEMBERSHIP = 36
-IP_RECVRETOPTS = IP_RETOPTS
-IP_PMTUDISC_DONT = 0
-IP_PMTUDISC_WANT = 1
-IP_PMTUDISC_DO = 2
-SOL_IP = 0
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IPV6_ADDRFORM = 1
-IPV6_PKTINFO = 2
-IPV6_HOPOPTS = 3
-IPV6_DSTOPTS = 4
-IPV6_RTHDR = 5
-IPV6_PKTOPTIONS = 6
-IPV6_CHECKSUM = 7
-IPV6_HOPLIMIT = 8
-IPV6_NEXTHOP = 9
-IPV6_AUTHHDR = 10
-IPV6_UNICAST_HOPS = 16
-IPV6_MULTICAST_IF = 17
-IPV6_MULTICAST_HOPS = 18
-IPV6_MULTICAST_LOOP = 19
-IPV6_JOIN_GROUP = 20
-IPV6_LEAVE_GROUP = 21
-IPV6_ROUTER_ALERT = 22
-IPV6_MTU_DISCOVER = 23
-IPV6_MTU = 24
-IPV6_RECVERR = 25
-IPV6_RXHOPOPTS = IPV6_HOPOPTS
-IPV6_RXDSTOPTS = IPV6_DSTOPTS
-IPV6_ADD_MEMBERSHIP = IPV6_JOIN_GROUP
-IPV6_DROP_MEMBERSHIP = IPV6_LEAVE_GROUP
-IPV6_PMTUDISC_DONT = 0
-IPV6_PMTUDISC_WANT = 1
-IPV6_PMTUDISC_DO = 2
-SOL_IPV6 = 41
-SOL_ICMPV6 = 58
-IPV6_RTHDR_LOOSE = 0
-IPV6_RTHDR_STRICT = 1
-IPV6_RTHDR_TYPE_0 = 0
-
-# Included from endian.h
-_ENDIAN_H = 1
-__LITTLE_ENDIAN = 1234
-__BIG_ENDIAN = 4321
-__PDP_ENDIAN = 3412
-
-# Included from bits/endian.h
-__BYTE_ORDER = __LITTLE_ENDIAN
-__FLOAT_WORD_ORDER = __BYTE_ORDER
-LITTLE_ENDIAN = __LITTLE_ENDIAN
-BIG_ENDIAN = __BIG_ENDIAN
-PDP_ENDIAN = __PDP_ENDIAN
-BYTE_ORDER = __BYTE_ORDER
-
-# Included from bits/byteswap.h
-_BITS_BYTESWAP_H = 1
-def __bswap_constant_16(x): return \
-
-def __bswap_16(x): return \
-
-def __bswap_16(x): return __bswap_constant_16 (x)
-
-def __bswap_constant_32(x): return \
-
-def __bswap_32(x): return \
-
-def __bswap_32(x): return \
-
-def __bswap_32(x): return __bswap_constant_32 (x)
-
-def __bswap_constant_64(x): return \
-
-def __bswap_64(x): return \
-
-def ntohl(x): return (x)
-
-def ntohs(x): return (x)
-
-def htonl(x): return (x)
-
-def htons(x): return (x)
-
-def ntohl(x): return __bswap_32 (x)
-
-def ntohs(x): return __bswap_16 (x)
-
-def htonl(x): return __bswap_32 (x)
-
-def htons(x): return __bswap_16 (x)
-
-def IN6_IS_ADDR_UNSPECIFIED(a): return \
-
-def IN6_IS_ADDR_LOOPBACK(a): return \
-
-def IN6_IS_ADDR_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_V4MAPPED(a): return \
-
-def IN6_IS_ADDR_V4COMPAT(a): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return
diff --git a/sys/lib/python/plat-linux2/TYPES.py b/sys/lib/python/plat-linux2/TYPES.py
deleted file mode 100644
index 0cdd5995d..000000000
--- a/sys/lib/python/plat-linux2/TYPES.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# Generated by h2py from /usr/include/sys/types.h
-_SYS_TYPES_H = 1
-
-# Included from features.h
-_FEATURES_H = 1
-__USE_ANSI = 1
-__FAVOR_BSD = 1
-_ISOC99_SOURCE = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 199506L
-_XOPEN_SOURCE = 600
-_XOPEN_SOURCE_EXTENDED = 1
-_LARGEFILE64_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-_BSD_SOURCE = 1
-_SVID_SOURCE = 1
-__USE_ISOC99 = 1
-_POSIX_SOURCE = 1
-_POSIX_C_SOURCE = 2
-_POSIX_C_SOURCE = 199506L
-__USE_POSIX = 1
-__USE_POSIX2 = 1
-__USE_POSIX199309 = 1
-__USE_POSIX199506 = 1
-__USE_XOPEN = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_UNIX98 = 1
-_LARGEFILE_SOURCE = 1
-__USE_XOPEN2K = 1
-__USE_ISOC99 = 1
-__USE_XOPEN_EXTENDED = 1
-__USE_LARGEFILE = 1
-__USE_LARGEFILE64 = 1
-__USE_FILE_OFFSET64 = 1
-__USE_MISC = 1
-__USE_BSD = 1
-__USE_SVID = 1
-__USE_GNU = 1
-__USE_REENTRANT = 1
-__STDC_IEC_559__ = 1
-__STDC_IEC_559_COMPLEX__ = 1
-__STDC_ISO_10646__ = 200009L
-__GNU_LIBRARY__ = 6
-__GLIBC__ = 2
-__GLIBC_MINOR__ = 2
-
-# Included from sys/cdefs.h
-_SYS_CDEFS_H = 1
-def __PMT(args): return args
-
-def __P(args): return args
-
-def __PMT(args): return args
-
-def __STRING(x): return #x
-
-__flexarr = []
-__flexarr = [0]
-__flexarr = []
-__flexarr = [1]
-def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
-
-def __attribute__(xyz): return
-
-def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
-
-def __attribute_format_arg__(x): return
-
-__USE_LARGEFILE = 1
-__USE_LARGEFILE64 = 1
-__USE_EXTERN_INLINES = 1
-
-# Included from gnu/stubs.h
-
-# Included from bits/types.h
-_BITS_TYPES_H = 1
-__FD_SETSIZE = 1024
-
-# Included from bits/pthreadtypes.h
-_BITS_PTHREADTYPES_H = 1
-
-# Included from bits/sched.h
-SCHED_OTHER = 0
-SCHED_FIFO = 1
-SCHED_RR = 2
-CSIGNAL = 0x000000ff
-CLONE_VM = 0x00000100
-CLONE_FS = 0x00000200
-CLONE_FILES = 0x00000400
-CLONE_SIGHAND = 0x00000800
-CLONE_PID = 0x00001000
-CLONE_PTRACE = 0x00002000
-CLONE_VFORK = 0x00004000
-__defined_schedparam = 1
-
-# Included from time.h
-_TIME_H = 1
-
-# Included from bits/time.h
-_BITS_TIME_H = 1
-CLOCKS_PER_SEC = 1000000l
-CLOCK_REALTIME = 0
-CLOCK_PROCESS_CPUTIME_ID = 2
-CLOCK_THREAD_CPUTIME_ID = 3
-TIMER_ABSTIME = 1
-_STRUCT_TIMEVAL = 1
-CLK_TCK = CLOCKS_PER_SEC
-__clock_t_defined = 1
-__time_t_defined = 1
-__clockid_t_defined = 1
-__timer_t_defined = 1
-__timespec_defined = 1
-def __isleap(year): return \
-
-__BIT_TYPES_DEFINED__ = 1
-
-# Included from endian.h
-_ENDIAN_H = 1
-__LITTLE_ENDIAN = 1234
-__BIG_ENDIAN = 4321
-__PDP_ENDIAN = 3412
-
-# Included from bits/endian.h
-__BYTE_ORDER = __LITTLE_ENDIAN
-__FLOAT_WORD_ORDER = __BYTE_ORDER
-LITTLE_ENDIAN = __LITTLE_ENDIAN
-BIG_ENDIAN = __BIG_ENDIAN
-PDP_ENDIAN = __PDP_ENDIAN
-BYTE_ORDER = __BYTE_ORDER
-
-# Included from sys/select.h
-_SYS_SELECT_H = 1
-
-# Included from bits/select.h
-def __FD_ZERO(fdsp): return \
-
-def __FD_ZERO(set): return \
-
-
-# Included from bits/sigset.h
-_SIGSET_H_types = 1
-_SIGSET_H_fns = 1
-def __sigmask(sig): return \
-
-def __sigemptyset(set): return \
-
-def __sigfillset(set): return \
-
-def __sigisemptyset(set): return \
-
-def __FDELT(d): return ((d) / __NFDBITS)
-
-FD_SETSIZE = __FD_SETSIZE
-def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
-
-
-# Included from sys/sysmacros.h
-_SYS_SYSMACROS_H = 1
-def major(dev): return ((int)(((dev) >> 8) & 0xff))
-
-def minor(dev): return ((int)((dev) & 0xff))
-
-def major(dev): return (((dev).__val[1] >> 8) & 0xff)
-
-def minor(dev): return ((dev).__val[1] & 0xff)
-
-def major(dev): return (((dev).__val[0] >> 8) & 0xff)
-
-def minor(dev): return ((dev).__val[0] & 0xff)
diff --git a/sys/lib/python/plat-linux2/regen b/sys/lib/python/plat-linux2/regen
deleted file mode 100755
index c76950e23..000000000
--- a/sys/lib/python/plat-linux2/regen
+++ /dev/null
@@ -1,8 +0,0 @@
-#! /bin/sh
-case `uname` in
-Linux*) ;;
-*) echo Probably not on a Linux system 1>&2
- exit 1;;
-esac
-set -v
-h2py -i '(u_long)' /usr/include/sys/types.h /usr/include/netinet/in.h /usr/include/dlfcn.h
diff --git a/sys/lib/python/plat-netbsd1/IN.py b/sys/lib/python/plat-netbsd1/IN.py
deleted file mode 100644
index 474c51e6d..000000000
--- a/sys/lib/python/plat-netbsd1/IN.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPIP = 4
-IPPROTO_TCP = 6
-IPPROTO_EGP = 8
-IPPROTO_PUP = 12
-IPPROTO_UDP = 17
-IPPROTO_IDP = 22
-IPPROTO_TP = 29
-IPPROTO_EON = 80
-IPPROTO_ENCAP = 98
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-def __IPADDR(x): return ((u_int32_t)(x))
-
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_MAX = 128
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_MAX = 65536
-IN_CLASSC_NSHIFT = 8
-IN_CLASSD_NSHIFT = 28
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_TTL = 10
-IP_MULTICAST_LOOP = 11
-IP_ADD_MEMBERSHIP = 12
-IP_DROP_MEMBERSHIP = 13
-IP_RECVIF = 20
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-IPPROTO_MAXID = (IPPROTO_IDP + 1)
-IPCTL_FORWARDING = 1
-IPCTL_SENDREDIRECTS = 2
-IPCTL_DEFTTL = 3
-IPCTL_DEFMTU = 4
-IPCTL_FORWSRCRT = 5
-IPCTL_DIRECTEDBCAST = 6
-IPCTL_ALLOWSRCRT = 7
-IPCTL_MAXID = 8
-def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
diff --git a/sys/lib/python/plat-netbsd1/regen b/sys/lib/python/plat-netbsd1/regen
deleted file mode 100755
index 8aa6898c6..000000000
--- a/sys/lib/python/plat-netbsd1/regen
+++ /dev/null
@@ -1,3 +0,0 @@
-#! /bin/sh
-set -v
-python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/netinet/in.h
diff --git a/sys/lib/python/plat-next3/regen b/sys/lib/python/plat-next3/regen
deleted file mode 100755
index 7a036135a..000000000
--- a/sys/lib/python/plat-next3/regen
+++ /dev/null
@@ -1,6 +0,0 @@
-#! /bin/sh
-set -v
-INCLUDE="/NextDeveloper/Headers;/NextDeveloper/Headers/ansi;/NextDeveloper/Headers/bsd"
-export INCLUDE
-
-python ../../Tools/scripts/h2py.py -i '(u_long)' /usr/include/bsd/netinet/in.h
diff --git a/sys/lib/python/plat-os2emx/IN.py b/sys/lib/python/plat-os2emx/IN.py
deleted file mode 100644
index 8b06eb8f0..000000000
--- a/sys/lib/python/plat-os2emx/IN.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Generated by h2py from f:/emx/include/netinet/in.h
-
-# Included from sys/param.h
-PAGE_SIZE = 0x1000
-HZ = 100
-MAXNAMLEN = 260
-MAXPATHLEN = 260
-def htonl(X): return _swapl(X)
-
-def ntohl(X): return _swapl(X)
-
-def htons(X): return _swaps(X)
-
-def ntohs(X): return _swaps(X)
-
-IPPROTO_IP = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_TCP = 6
-IPPROTO_EGP = 8
-IPPROTO_PUP = 12
-IPPROTO_UDP = 17
-IPPROTO_IDP = 22
-IPPROTO_TP = 29
-IPPROTO_EON = 80
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
-
-def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_ANY = 0x00000000
-INADDR_LOOPBACK = 0x7f000001
-INADDR_BROADCAST = 0xffffffff
-INADDR_NONE = 0xffffffff
-INADDR_UNSPEC_GROUP = 0xe0000000
-INADDR_ALLHOSTS_GROUP = 0xe0000001
-INADDR_MAX_LOCAL_GROUP = 0xe00000ff
-IN_LOOPBACKNET = 127
-IP_OPTIONS = 1
-IP_MULTICAST_IF = 2
-IP_MULTICAST_TTL = 3
-IP_MULTICAST_LOOP = 4
-IP_ADD_MEMBERSHIP = 5
-IP_DROP_MEMBERSHIP = 6
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
diff --git a/sys/lib/python/plat-os2emx/SOCKET.py b/sys/lib/python/plat-os2emx/SOCKET.py
deleted file mode 100644
index dac594ad7..000000000
--- a/sys/lib/python/plat-os2emx/SOCKET.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Generated by h2py from f:/emx/include/sys/socket.h
-
-# Included from sys/types.h
-FD_SETSIZE = 256
-
-# Included from sys/uio.h
-FREAD = 1
-FWRITE = 2
-SOCK_STREAM = 1
-SOCK_DGRAM = 2
-SOCK_RAW = 3
-SOCK_RDM = 4
-SOCK_SEQPACKET = 5
-SO_DEBUG = 0x0001
-SO_ACCEPTCONN = 0x0002
-SO_REUSEADDR = 0x0004
-SO_KEEPALIVE = 0x0008
-SO_DONTROUTE = 0x0010
-SO_BROADCAST = 0x0020
-SO_USELOOPBACK = 0x0040
-SO_LINGER = 0x0080
-SO_OOBINLINE = 0x0100
-SO_L_BROADCAST = 0x0200
-SO_RCV_SHUTDOWN = 0x0400
-SO_SND_SHUTDOWN = 0x0800
-SO_SNDBUF = 0x1001
-SO_RCVBUF = 0x1002
-SO_SNDLOWAT = 0x1003
-SO_RCVLOWAT = 0x1004
-SO_SNDTIMEO = 0x1005
-SO_RCVTIMEO = 0x1006
-SO_ERROR = 0x1007
-SO_TYPE = 0x1008
-SO_OPTIONS = 0x1010
-SOL_SOCKET = 0xffff
-AF_UNSPEC = 0
-AF_UNIX = 1
-AF_INET = 2
-AF_IMPLINK = 3
-AF_PUP = 4
-AF_CHAOS = 5
-AF_NS = 6
-AF_NBS = 7
-AF_ISO = 7
-AF_OSI = AF_ISO
-AF_ECMA = 8
-AF_DATAKIT = 9
-AF_CCITT = 10
-AF_SNA = 11
-AF_DECnet = 12
-AF_DLI = 13
-AF_LAT = 14
-AF_HYLINK = 15
-AF_APPLETALK = 16
-AF_NB = 17
-AF_NETBIOS = AF_NB
-AF_OS2 = AF_UNIX
-AF_MAX = 18
-PF_UNSPEC = AF_UNSPEC
-PF_UNIX = AF_UNIX
-PF_INET = AF_INET
-PF_IMPLINK = AF_IMPLINK
-PF_PUP = AF_PUP
-PF_CHAOS = AF_CHAOS
-PF_NS = AF_NS
-PF_NBS = AF_NBS
-PF_ISO = AF_ISO
-PF_OSI = AF_ISO
-PF_ECMA = AF_ECMA
-PF_DATAKIT = AF_DATAKIT
-PF_CCITT = AF_CCITT
-PF_SNA = AF_SNA
-PF_DECnet = AF_DECnet
-PF_DLI = AF_DLI
-PF_LAT = AF_LAT
-PF_HYLINK = AF_HYLINK
-PF_APPLETALK = AF_APPLETALK
-PF_NB = AF_NB
-PF_NETBIOS = AF_NB
-PF_OS2 = AF_UNIX
-PF_MAX = AF_MAX
-SOMAXCONN = 5
-MSG_OOB = 0x1
-MSG_PEEK = 0x2
-MSG_DONTROUTE = 0x4
-MSG_EOR = 0x8
-MSG_TRUNC = 0x10
-MSG_CTRUNC = 0x20
-MSG_WAITALL = 0x40
-MSG_MAXIOVLEN = 16
-SCM_RIGHTS = 0x01
-MT_FREE = 0
-MT_DATA = 1
-MT_HEADER = 2
-MT_SOCKET = 3
-MT_PCB = 4
-MT_RTABLE = 5
-MT_HTABLE = 6
-MT_ATABLE = 7
-MT_SONAME = 8
-MT_ZOMBIE = 9
-MT_SOOPTS = 10
-MT_FTABLE = 11
-MT_RIGHTS = 12
-MT_IFADDR = 13
-MAXSOCKETS = 2048
diff --git a/sys/lib/python/plat-os2emx/_emx_link.py b/sys/lib/python/plat-os2emx/_emx_link.py
deleted file mode 100644
index 422c2bbf6..000000000
--- a/sys/lib/python/plat-os2emx/_emx_link.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# _emx_link.py
-
-# Written by Andrew I MacIntyre, December 2002.
-
-"""_emx_link.py is a simplistic emulation of the Unix link(2) library routine
-for creating so-called hard links. It is intended to be imported into
-the os module in place of the unimplemented (on OS/2) Posix link()
-function (os.link()).
-
-We do this on OS/2 by implementing a file copy, with link(2) semantics:-
- - the target cannot already exist;
- - we hope that the actual file open (if successful) is actually
- atomic...
-
-Limitations of this approach/implementation include:-
- - no support for correct link counts (EMX stat(target).st_nlink
- is always 1);
- - thread safety undefined;
- - default file permissions (r+w) used, can't be over-ridden;
- - implemented in Python so comparatively slow, especially for large
- source files;
- - need sufficient free disk space to store the copy.
-
-Behaviour:-
- - any exception should propagate to the caller;
- - want target to be an exact copy of the source, so use binary mode;
- - returns None, same as os.link() which is implemented in posixmodule.c;
- - target removed in the event of a failure where possible;
- - given the motivation to write this emulation came from trying to
- support a Unix resource lock implementation, where minimal overhead
- during creation of the target is desirable and the files are small,
- we read a source block before attempting to create the target so that
- we're ready to immediately write some data into it.
-"""
-
-import os
-import errno
-
-__all__ = ['link']
-
-def link(source, target):
- """link(source, target) -> None
-
- Attempt to hard link the source file to the target file name.
- On OS/2, this creates a complete copy of the source file.
- """
-
- s = os.open(source, os.O_RDONLY | os.O_BINARY)
- if os.isatty(s):
- raise OSError, (errno.EXDEV, 'Cross-device link')
- data = os.read(s, 1024)
-
- try:
- t = os.open(target, os.O_WRONLY | os.O_BINARY | os.O_CREAT | os.O_EXCL)
- except OSError:
- os.close(s)
- raise
-
- try:
- while data:
- os.write(t, data)
- data = os.read(s, 1024)
- except OSError:
- os.close(s)
- os.close(t)
- os.unlink(target)
- raise
-
- os.close(s)
- os.close(t)
-
-if __name__ == '__main__':
- import sys
- try:
- link(sys.argv[1], sys.argv[2])
- except IndexError:
- print 'Usage: emx_link <source> <target>'
- except OSError:
- print 'emx_link: %s' % str(sys.exc_info()[1])
diff --git a/sys/lib/python/plat-os2emx/grp.py b/sys/lib/python/plat-os2emx/grp.py
deleted file mode 100644
index fceb4c94f..000000000
--- a/sys/lib/python/plat-os2emx/grp.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# this module is an OS/2 oriented replacement for the grp standard
-# extension module.
-
-# written by Andrew MacIntyre, April 2001.
-# updated July 2003, adding field accessor support
-
-# note that this implementation checks whether ":" or ";" as used as
-# the field separator character.
-
-"""Replacement for grp standard extension module, intended for use on
-OS/2 and similar systems which don't normally have an /etc/group file.
-
-The standard Unix group database is an ASCII text file with 4 fields per
-record (line), separated by a colon:
- - group name (string)
- - group password (optional encrypted string)
- - group id (integer)
- - group members (comma delimited list of userids, with no spaces)
-
-Note that members are only included in the group file for groups that
-aren't their primary groups.
-(see the section 8.2 of the Python Library Reference)
-
-This implementation differs from the standard Unix implementation by
-allowing use of the platform's native path separator character - ';' on OS/2,
-DOS and MS-Windows - as the field separator in addition to the Unix
-standard ":".
-
-The module looks for the group database at the following locations
-(in order first to last):
- - ${ETC_GROUP} (or %ETC_GROUP%)
- - ${ETC}/group (or %ETC%/group)
- - ${PYTHONHOME}/Etc/group (or %PYTHONHOME%/Etc/group)
-
-Classes
--------
-
-None
-
-Functions
----------
-
-getgrgid(gid) - return the record for group-id gid as a 4-tuple
-
-getgrnam(name) - return the record for group 'name' as a 4-tuple
-
-getgrall() - return a list of 4-tuples, each tuple being one record
- (NOTE: the order is arbitrary)
-
-Attributes
-----------
-
-group_file - the path of the group database file
-
-"""
-
-import os
-
-# try and find the group file
-__group_path = []
-if os.environ.has_key('ETC_GROUP'):
- __group_path.append(os.environ['ETC_GROUP'])
-if os.environ.has_key('ETC'):
- __group_path.append('%s/group' % os.environ['ETC'])
-if os.environ.has_key('PYTHONHOME'):
- __group_path.append('%s/Etc/group' % os.environ['PYTHONHOME'])
-
-group_file = None
-for __i in __group_path:
- try:
- __f = open(__i, 'r')
- __f.close()
- group_file = __i
- break
- except:
- pass
-
-# decide what field separator we can try to use - Unix standard, with
-# the platform's path separator as an option. No special field conversion
-# handlers are required for the group file.
-__field_sep = [':']
-if os.pathsep:
- if os.pathsep != ':':
- __field_sep.append(os.pathsep)
-
-# helper routine to identify which separator character is in use
-def __get_field_sep(record):
- fs = None
- for c in __field_sep:
- # there should be 3 delimiter characters (for 4 fields)
- if record.count(c) == 3:
- fs = c
- break
- if fs:
- return fs
- else:
- raise KeyError, '>> group database fields not delimited <<'
-
-# class to match the new record field name accessors.
-# the resulting object is intended to behave like a read-only tuple,
-# with each member also accessible by a field name.
-class Group:
- def __init__(self, name, passwd, gid, mem):
- self.__dict__['gr_name'] = name
- self.__dict__['gr_passwd'] = passwd
- self.__dict__['gr_gid'] = gid
- self.__dict__['gr_mem'] = mem
- self.__dict__['_record'] = (self.gr_name, self.gr_passwd,
- self.gr_gid, self.gr_mem)
-
- def __len__(self):
- return 4
-
- def __getitem__(self, key):
- return self._record[key]
-
- def __setattr__(self, name, value):
- raise AttributeError('attribute read-only: %s' % name)
-
- def __repr__(self):
- return str(self._record)
-
- def __cmp__(self, other):
- this = str(self._record)
- if this == other:
- return 0
- elif this < other:
- return -1
- else:
- return 1
-
-
-# read the whole file, parsing each entry into tuple form
-# with dictionaries to speed recall by GID or group name
-def __read_group_file():
- if group_file:
- group = open(group_file, 'r')
- else:
- raise KeyError, '>> no group database <<'
- gidx = {}
- namx = {}
- sep = None
- while 1:
- entry = group.readline().strip()
- if len(entry) > 3:
- if sep == None:
- sep = __get_field_sep(entry)
- fields = entry.split(sep)
- fields[2] = int(fields[2])
- fields[3] = [f.strip() for f in fields[3].split(',')]
- record = Group(*fields)
- if not gidx.has_key(fields[2]):
- gidx[fields[2]] = record
- if not namx.has_key(fields[0]):
- namx[fields[0]] = record
- elif len(entry) > 0:
- pass # skip empty or malformed records
- else:
- break
- group.close()
- if len(gidx) == 0:
- raise KeyError
- return (gidx, namx)
-
-# return the group database entry by GID
-def getgrgid(gid):
- g, n = __read_group_file()
- return g[gid]
-
-# return the group database entry by group name
-def getgrnam(name):
- g, n = __read_group_file()
- return n[name]
-
-# return all the group database entries
-def getgrall():
- g, n = __read_group_file()
- return g.values()
-
-# test harness
-if __name__ == '__main__':
- getgrall()
diff --git a/sys/lib/python/plat-os2emx/pwd.py b/sys/lib/python/plat-os2emx/pwd.py
deleted file mode 100644
index 95d766a79..000000000
--- a/sys/lib/python/plat-os2emx/pwd.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# this module is an OS/2 oriented replacement for the pwd standard
-# extension module.
-
-# written by Andrew MacIntyre, April 2001.
-# updated July 2003, adding field accessor support
-
-# note that this implementation checks whether ":" or ";" as used as
-# the field separator character. Path conversions are are applied when
-# the database uses ":" as the field separator character.
-
-"""Replacement for pwd standard extension module, intended for use on
-OS/2 and similar systems which don't normally have an /etc/passwd file.
-
-The standard Unix password database is an ASCII text file with 7 fields
-per record (line), separated by a colon:
- - user name (string)
- - password (encrypted string, or "*" or "")
- - user id (integer)
- - group id (integer)
- - description (usually user's name)
- - home directory (path to user's home directory)
- - shell (path to the user's login shell)
-
-(see the section 8.1 of the Python Library Reference)
-
-This implementation differs from the standard Unix implementation by
-allowing use of the platform's native path separator character - ';' on OS/2,
-DOS and MS-Windows - as the field separator in addition to the Unix
-standard ":". Additionally, when ":" is the separator path conversions
-are applied to deal with any munging of the drive letter reference.
-
-The module looks for the password database at the following locations
-(in order first to last):
- - ${ETC_PASSWD} (or %ETC_PASSWD%)
- - ${ETC}/passwd (or %ETC%/passwd)
- - ${PYTHONHOME}/Etc/passwd (or %PYTHONHOME%/Etc/passwd)
-
-Classes
--------
-
-None
-
-Functions
----------
-
-getpwuid(uid) - return the record for user-id uid as a 7-tuple
-
-getpwnam(name) - return the record for user 'name' as a 7-tuple
-
-getpwall() - return a list of 7-tuples, each tuple being one record
- (NOTE: the order is arbitrary)
-
-Attributes
-----------
-
-passwd_file - the path of the password database file
-
-"""
-
-import os
-
-# try and find the passwd file
-__passwd_path = []
-if os.environ.has_key('ETC_PASSWD'):
- __passwd_path.append(os.environ['ETC_PASSWD'])
-if os.environ.has_key('ETC'):
- __passwd_path.append('%s/passwd' % os.environ['ETC'])
-if os.environ.has_key('PYTHONHOME'):
- __passwd_path.append('%s/Etc/passwd' % os.environ['PYTHONHOME'])
-
-passwd_file = None
-for __i in __passwd_path:
- try:
- __f = open(__i, 'r')
- __f.close()
- passwd_file = __i
- break
- except:
- pass
-
-# path conversion handlers
-def __nullpathconv(path):
- return path.replace(os.altsep, os.sep)
-
-def __unixpathconv(path):
- # two known drive letter variations: "x;" and "$x"
- if path[0] == '$':
- conv = path[1] + ':' + path[2:]
- elif path[1] == ';':
- conv = path[0] + ':' + path[2:]
- else:
- conv = path
- return conv.replace(os.altsep, os.sep)
-
-# decide what field separator we can try to use - Unix standard, with
-# the platform's path separator as an option. No special field conversion
-# handler is required when using the platform's path separator as field
-# separator, but are required for the home directory and shell fields when
-# using the standard Unix (":") field separator.
-__field_sep = {':': __unixpathconv}
-if os.pathsep:
- if os.pathsep != ':':
- __field_sep[os.pathsep] = __nullpathconv
-
-# helper routine to identify which separator character is in use
-def __get_field_sep(record):
- fs = None
- for c in __field_sep.keys():
- # there should be 6 delimiter characters (for 7 fields)
- if record.count(c) == 6:
- fs = c
- break
- if fs:
- return fs
- else:
- raise KeyError, '>> passwd database fields not delimited <<'
-
-# class to match the new record field name accessors.
-# the resulting object is intended to behave like a read-only tuple,
-# with each member also accessible by a field name.
-class Passwd:
- def __init__(self, name, passwd, uid, gid, gecos, dir, shell):
- self.__dict__['pw_name'] = name
- self.__dict__['pw_passwd'] = passwd
- self.__dict__['pw_uid'] = uid
- self.__dict__['pw_gid'] = gid
- self.__dict__['pw_gecos'] = gecos
- self.__dict__['pw_dir'] = dir
- self.__dict__['pw_shell'] = shell
- self.__dict__['_record'] = (self.pw_name, self.pw_passwd,
- self.pw_uid, self.pw_gid,
- self.pw_gecos, self.pw_dir,
- self.pw_shell)
-
- def __len__(self):
- return 7
-
- def __getitem__(self, key):
- return self._record[key]
-
- def __setattr__(self, name, value):
- raise AttributeError('attribute read-only: %s' % name)
-
- def __repr__(self):
- return str(self._record)
-
- def __cmp__(self, other):
- this = str(self._record)
- if this == other:
- return 0
- elif this < other:
- return -1
- else:
- return 1
-
-
-# read the whole file, parsing each entry into tuple form
-# with dictionaries to speed recall by UID or passwd name
-def __read_passwd_file():
- if passwd_file:
- passwd = open(passwd_file, 'r')
- else:
- raise KeyError, '>> no password database <<'
- uidx = {}
- namx = {}
- sep = None
- while 1:
- entry = passwd.readline().strip()
- if len(entry) > 6:
- if sep == None:
- sep = __get_field_sep(entry)
- fields = entry.split(sep)
- for i in (2, 3):
- fields[i] = int(fields[i])
- for i in (5, 6):
- fields[i] = __field_sep[sep](fields[i])
- record = Passwd(*fields)
- if not uidx.has_key(fields[2]):
- uidx[fields[2]] = record
- if not namx.has_key(fields[0]):
- namx[fields[0]] = record
- elif len(entry) > 0:
- pass # skip empty or malformed records
- else:
- break
- passwd.close()
- if len(uidx) == 0:
- raise KeyError
- return (uidx, namx)
-
-# return the passwd database entry by UID
-def getpwuid(uid):
- u, n = __read_passwd_file()
- return u[uid]
-
-# return the passwd database entry by passwd name
-def getpwnam(name):
- u, n = __read_passwd_file()
- return n[name]
-
-# return all the passwd database entries
-def getpwall():
- u, n = __read_passwd_file()
- return n.values()
-
-# test harness
-if __name__ == '__main__':
- getpwall()
diff --git a/sys/lib/python/plat-os2emx/regen b/sys/lib/python/plat-os2emx/regen
deleted file mode 100644
index 3ecd2a860..000000000
--- a/sys/lib/python/plat-os2emx/regen
+++ /dev/null
@@ -1,7 +0,0 @@
-#! /bin/sh
-export INCLUDE=$C_INCLUDE_PATH
-set -v
-python.exe ../../Tools/scripts/h2py.py $C_INCLUDE_PATH/fcntl.h
-python.exe ../../Tools/scripts/h2py.py $C_INCLUDE_PATH/sys/socket.h
-python.exe ../../Tools/scripts/h2py.py -i '(u_long)' $C_INCLUDE_PATH/netinet/in.h
-#python.exe ../../Tools/scripts/h2py.py $C_INCLUDE_PATH/termios.h
diff --git a/sys/lib/python/plat-riscos/riscosenviron.py b/sys/lib/python/plat-riscos/riscosenviron.py
deleted file mode 100644
index 95a2ce55d..000000000
--- a/sys/lib/python/plat-riscos/riscosenviron.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""A more or less complete dictionary like interface for the RISC OS environment."""
-
-import riscos
-
-class _Environ:
- def __init__(self, initial = None):
- pass
- def __repr__(self):
- return repr(riscos.getenvdict())
- def __cmp__(self, dict):
- return cmp(riscos.getenvdict(), dict)
- def __len__(self):
- return len(riscos.getenvdict())
- def __getitem__(self, key):
- ret = riscos.getenv(key)
- if ret<>None:
- return ret
- else:
- raise KeyError
- def __setitem__(self, key, item):
- riscos.putenv(key, item)
- def __delitem__(self, key):
- riscos.delenv(key)
- def clear(self):
- # too dangerous on RISC OS
- pass
- def copy(self):
- return riscos.getenvdict()
- def keys(self): return riscos.getenvdict().keys()
- def items(self): return riscos.getenvdict().items()
- def values(self): return riscos.getenvdict().values()
- def has_key(self, key):
- value = riscos.getenv(key)
- return value<>None
- def __contains__(self, key):
- return riscos.getenv(key) is not None
- def update(self, dict):
- for k, v in dict.items():
- riscos.putenv(k, v)
- def get(self, key, failobj=None):
- value = riscos.getenv(key)
- if value<>None:
- return value
- else:
- return failobj
diff --git a/sys/lib/python/plat-riscos/riscospath.py b/sys/lib/python/plat-riscos/riscospath.py
deleted file mode 100644
index ea39e60f1..000000000
--- a/sys/lib/python/plat-riscos/riscospath.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# Module 'riscospath' -- common operations on RISC OS pathnames.
-
-# contributed by Andrew Clover ( andrew@oaktree.co.uk )
-
-# The "os.path" name is an alias for this module on RISC OS systems;
-# on other systems (e.g. Mac, Windows), os.path provides the same
-# operations in a manner specific to that platform, and is an alias
-# to another module (e.g. macpath, ntpath).
-
-"""
-Instead of importing this module directly, import os and refer to this module
-as os.path.
-"""
-
-# strings representing various path-related bits and pieces
-curdir = '@'
-pardir = '^'
-extsep = '/'
-sep = '.'
-pathsep = ','
-defpath = '<Run$Dir>'
-altsep = None
-
-# Imports - make an error-generating swi object if the swi module is not
-# available (ie. we are not running on RISC OS Python)
-
-import os, stat, string
-
-try:
- import swi
-except ImportError:
- class _swi:
- def swi(*a):
- raise AttributeError, 'This function only available under RISC OS'
- block= swi
- swi= _swi()
-
-[_false, _true]= range(2)
-
-_roots= ['$', '&', '%', '@', '\\']
-
-
-# _allowMOSFSNames
-# After importing riscospath, set _allowMOSFSNames true if you want the module
-# to understand the "-SomeFS-" notation left over from the old BBC Master MOS,
-# as well as the standard "SomeFS:" notation. Set this to be fully backwards
-# compatible but remember that "-SomeFS-" can also be a perfectly valid file
-# name so care must be taken when splitting and joining paths.
-
-_allowMOSFSNames= _false
-
-
-## Path manipulation, RISC OS stylee.
-
-def _split(p):
- """
- split filing system name (including special field) and drive specifier from rest
- of path. This is needed by many riscospath functions.
- """
- dash= _allowMOSFSNames and p[:1]=='-'
- if dash:
- q= string.find(p, '-', 1)+1
- else:
- if p[:1]==':':
- q= 0
- else:
- q= string.find(p, ':')+1 # q= index of start of non-FS portion of path
- s= string.find(p, '#')
- if s==-1 or s>q:
- s= q # find end of main FS name, not including special field
- else:
- for c in p[dash:s]:
- if c not in string.ascii_letters:
- q= 0
- break # disallow invalid non-special-field characters in FS name
- r= q
- if p[q:q+1]==':':
- r= string.find(p, '.', q+1)+1
- if r==0:
- r= len(p) # find end of drive name (if any) following FS name (if any)
- return (p[:q], p[q:r], p[r:])
-
-
-def normcase(p):
- """
- Normalize the case of a pathname. This converts to lowercase as the native RISC
- OS filesystems are case-insensitive. However, not all filesystems have to be,
- and there's no simple way to find out what type an FS is argh.
- """
- return string.lower(p)
-
-
-def isabs(p):
- """
- Return whether a path is absolute. Under RISC OS, a file system specifier does
- not make a path absolute, but a drive name or number does, and so does using the
- symbol for root, URD, library, CSD or PSD. This means it is perfectly possible
- to have an "absolute" URL dependent on the current working directory, and
- equally you can have a "relative" URL that's on a completely different device to
- the current one argh.
- """
- (fs, drive, path)= _split(p)
- return drive!='' or path[:1] in _roots
-
-
-def join(a, *p):
- """
- Join path elements with the directory separator, replacing the entire path when
- an absolute or FS-changing path part is found.
- """
- j= a
- for b in p:
- (fs, drive, path)= _split(b)
- if j=='' or fs!='' or drive!='' or path[:1] in _roots:
- j= b
- elif j[-1]==':':
- j= j+b
- else:
- j= j+'.'+b
- return j
-
-
-def split(p):
- """
- Split a path in head (everything up to the last '.') and tail (the rest). FS
- name must still be dealt with separately since special field may contain '.'.
- """
- (fs, drive, path)= _split(p)
- q= string.rfind(path, '.')
- if q!=-1:
- return (fs+drive+path[:q], path[q+1:])
- return ('', p)
-
-
-def splitext(p):
- """
- Split a path in root and extension. This assumes the 'using slash for dot and
- dot for slash with foreign files' convention common in RISC OS is in force.
- """
- (tail, head)= split(p)
- if '/' in head:
- q= len(head)-string.rfind(head, '/')
- return (p[:-q], p[-q:])
- return (p, '')
-
-
-def splitdrive(p):
- """
- Split a pathname into a drive specification (including FS name) and the rest of
- the path. The terminating dot of the drive name is included in the drive
- specification.
- """
- (fs, drive, path)= _split(p)
- return (fs+drive, p)
-
-
-def basename(p):
- """
- Return the tail (basename) part of a path.
- """
- return split(p)[1]
-
-
-def dirname(p):
- """
- Return the head (dirname) part of a path.
- """
- return split(p)[0]
-
-
-def commonprefix(m):
- "Given a list of pathnames, returns the longest common leading component"
- if not m: return ''
- s1 = min(m)
- s2 = max(m)
- n = min(len(s1), len(s2))
- for i in xrange(n):
- if s1[i] != s2[i]:
- return s1[:i]
- return s1[:n]
-
-
-## File access functions. Why are we in os.path?
-
-def getsize(p):
- """
- Return the size of a file, reported by os.stat().
- """
- st= os.stat(p)
- return st[stat.ST_SIZE]
-
-
-def getmtime(p):
- """
- Return the last modification time of a file, reported by os.stat().
- """
- st = os.stat(p)
- return st[stat.ST_MTIME]
-
-getatime= getmtime
-
-
-# RISC OS-specific file access functions
-
-def exists(p):
- """
- Test whether a path exists.
- """
- try:
- return swi.swi('OS_File', '5s;i', p)!=0
- except swi.error:
- return 0
-
-lexists = exists
-
-
-def isdir(p):
- """
- Is a path a directory? Includes image files.
- """
- try:
- return swi.swi('OS_File', '5s;i', p) in [2, 3]
- except swi.error:
- return 0
-
-
-def isfile(p):
- """
- Test whether a path is a file, including image files.
- """
- try:
- return swi.swi('OS_File', '5s;i', p) in [1, 3]
- except swi.error:
- return 0
-
-
-def islink(p):
- """
- RISC OS has no links or mounts.
- """
- return _false
-
-ismount= islink
-
-
-# Same-file testing.
-
-# samefile works on filename comparison since there is no ST_DEV and ST_INO is
-# not reliably unique (esp. directories). First it has to normalise the
-# pathnames, which it can do 'properly' using OS_FSControl since samefile can
-# assume it's running on RISC OS (unlike normpath).
-
-def samefile(fa, fb):
- """
- Test whether two pathnames reference the same actual file.
- """
- l= 512
- b= swi.block(l)
- swi.swi('OS_FSControl', 'isb..i', 37, fa, b, l)
- fa= b.ctrlstring()
- swi.swi('OS_FSControl', 'isb..i', 37, fb, b, l)
- fb= b.ctrlstring()
- return fa==fb
-
-
-def sameopenfile(a, b):
- """
- Test whether two open file objects reference the same file.
- """
- return os.fstat(a)[stat.ST_INO]==os.fstat(b)[stat.ST_INO]
-
-
-## Path canonicalisation
-
-# 'user directory' is taken as meaning the User Root Directory, which is in
-# practice never used, for anything.
-
-def expanduser(p):
- (fs, drive, path)= _split(p)
- l= 512
- b= swi.block(l)
-
- if path[:1]!='@':
- return p
- if fs=='':
- fsno= swi.swi('OS_Args', '00;i')
- swi.swi('OS_FSControl', 'iibi', 33, fsno, b, l)
- fsname= b.ctrlstring()
- else:
- if fs[:1]=='-':
- fsname= fs[1:-1]
- else:
- fsname= fs[:-1]
- fsname= string.split(fsname, '#', 1)[0] # remove special field from fs
- x= swi.swi('OS_FSControl', 'ib2s.i;.....i', 54, b, fsname, l)
- if x<l:
- urd= b.tostring(0, l-x-1)
- else: # no URD! try CSD
- x= swi.swi('OS_FSControl', 'ib0s.i;.....i', 54, b, fsname, l)
- if x<l:
- urd= b.tostring(0, l-x-1)
- else: # no CSD! use root
- urd= '$'
- return fsname+':'+urd+path[1:]
-
-# Environment variables are in angle brackets.
-
-def expandvars(p):
- """
- Expand environment variables using OS_GSTrans.
- """
- l= 512
- b= swi.block(l)
- return b.tostring(0, swi.swi('OS_GSTrans', 'sbi;..i', p, b, l))
-
-
-# Return an absolute path. RISC OS' osfscontrol_canonicalise_path does this among others
-abspath = os.expand
-
-
-# realpath is a no-op on systems without islink support
-realpath = abspath
-
-
-# Normalize a path. Only special path element under RISC OS is "^" for "..".
-
-def normpath(p):
- """
- Normalize path, eliminating up-directory ^s.
- """
- (fs, drive, path)= _split(p)
- rhs= ''
- ups= 0
- while path!='':
- (path, el)= split(path)
- if el=='^':
- ups= ups+1
- else:
- if ups>0:
- ups= ups-1
- else:
- if rhs=='':
- rhs= el
- else:
- rhs= el+'.'+rhs
- while ups>0:
- ups= ups-1
- rhs= '^.'+rhs
- return fs+drive+rhs
-
-
-# Directory tree walk.
-# Independent of host system. Why am I in os.path?
-
-def walk(top, func, arg):
- """Directory tree walk with callback function.
-
- For each directory in the directory tree rooted at top (including top
- itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
- dirname is the name of the directory, and fnames a list of the names of
- the files and subdirectories in dirname (excluding '.' and '..'). func
- may modify the fnames list in-place (e.g. via del or slice assignment),
- and walk will only recurse into the subdirectories whose names remain in
- fnames; this can be used to implement a filter, or to impose a specific
- order of visiting. No semantics are defined for, or required of, arg,
- beyond that arg is always passed to func. It can be used, e.g., to pass
- a filename pattern, or a mutable object designed to accumulate
- statistics. Passing None for arg is common."""
-
- try:
- names= os.listdir(top)
- except os.error:
- return
- func(arg, top, names)
- for name in names:
- name= join(top, name)
- if isdir(name) and not islink(name):
- walk(name, func, arg)
diff --git a/sys/lib/python/plat-riscos/rourl2path.py b/sys/lib/python/plat-riscos/rourl2path.py
deleted file mode 100644
index 7a8badf44..000000000
--- a/sys/lib/python/plat-riscos/rourl2path.py
+++ /dev/null
@@ -1,71 +0,0 @@
-"""riscos specific module for conversion between pathnames and URLs.
-Based on macurl2path.
-Do not import directly, use urllib instead."""
-
-import string
-import urllib
-import os
-
-__all__ = ["url2pathname","pathname2url"]
-
-__slash_dot = string.maketrans("/.", "./")
-
-def url2pathname(url):
- """OS-specific conversion from a relative URL of the 'file' scheme
- to a file system path; not recommended for general use."""
- tp = urllib.splittype(url)[0]
- if tp and tp <> 'file':
- raise RuntimeError, 'Cannot convert non-local URL to pathname'
- # Turn starting /// into /, an empty hostname means current host
- if url[:3] == '///':
- url = url[2:]
- elif url[:2] == '//':
- raise RuntimeError, 'Cannot convert non-local URL to pathname'
- components = string.split(url, '/')
- if not components[0]:
- if '$' in components:
- del components[0]
- else:
- components[0] = '$'
- # Remove . and embedded ..
- i = 0
- while i < len(components):
- if components[i] == '.':
- del components[i]
- elif components[i] == '..' and i > 0 and \
- components[i-1] not in ('', '..'):
- del components[i-1:i+1]
- i -= 1
- elif components[i] == '..':
- components[i] = '^'
- i += 1
- elif components[i] == '' and i > 0 and components[i-1] <> '':
- del components[i]
- else:
- i += 1
- components = map(lambda x: urllib.unquote(x).translate(__slash_dot), components)
- return '.'.join(components)
-
-def pathname2url(pathname):
- """OS-specific conversion from a file system path to a relative URL
- of the 'file' scheme; not recommended for general use."""
- return urllib.quote('///' + pathname.translate(__slash_dot), "/$:")
-
-def test():
- for url in ["index.html",
- "/SCSI::SCSI4/$/Anwendung/Comm/Apps/!Fresco/Welcome",
- "/SCSI::SCSI4/$/Anwendung/Comm/Apps/../!Fresco/Welcome",
- "../index.html",
- "bar/index.html",
- "/foo/bar/index.html",
- "/foo/bar/",
- "/"]:
- print '%r -> %r' % (url, url2pathname(url))
- print "*******************************************************"
- for path in ["SCSI::SCSI4.$.Anwendung",
- "PythonApp:Lib",
- "PythonApp:Lib.rourl2path/py"]:
- print '%r -> %r' % (path, pathname2url(path))
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/plat-sunos5/CDIO.py b/sys/lib/python/plat-sunos5/CDIO.py
deleted file mode 100644
index d766b5026..000000000
--- a/sys/lib/python/plat-sunos5/CDIO.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Generated by h2py from /usr/include/sys/cdio.h
-CDROM_LBA = 0x01
-CDROM_MSF = 0x02
-CDROM_DATA_TRACK = 0x04
-CDROM_LEADOUT = 0xAA
-CDROM_AUDIO_INVALID = 0x00
-CDROM_AUDIO_PLAY = 0x11
-CDROM_AUDIO_PAUSED = 0x12
-CDROM_AUDIO_COMPLETED = 0x13
-CDROM_AUDIO_ERROR = 0x14
-CDROM_AUDIO_NO_STATUS = 0x15
-CDROM_DA_NO_SUBCODE = 0x00
-CDROM_DA_SUBQ = 0x01
-CDROM_DA_ALL_SUBCODE = 0x02
-CDROM_DA_SUBCODE_ONLY = 0x03
-CDROM_XA_DATA = 0x00
-CDROM_XA_SECTOR_DATA = 0x01
-CDROM_XA_DATA_W_ERROR = 0x02
-CDROM_BLK_512 = 512
-CDROM_BLK_1024 = 1024
-CDROM_BLK_2048 = 2048
-CDROM_BLK_2056 = 2056
-CDROM_BLK_2336 = 2336
-CDROM_BLK_2340 = 2340
-CDROM_BLK_2352 = 2352
-CDROM_BLK_2368 = 2368
-CDROM_BLK_2448 = 2448
-CDROM_BLK_2646 = 2646
-CDROM_BLK_2647 = 2647
-CDROM_BLK_SUBCODE = 96
-CDROM_NORMAL_SPEED = 0x00
-CDROM_DOUBLE_SPEED = 0x01
-CDROM_QUAD_SPEED = 0x03
-CDROM_TWELVE_SPEED = 0x0C
-CDROM_MAXIMUM_SPEED = 0xff
-CDIOC = (0x04 << 8)
-CDROMPAUSE = (CDIOC|151)
-CDROMRESUME = (CDIOC|152)
-CDROMPLAYMSF = (CDIOC|153)
-CDROMPLAYTRKIND = (CDIOC|154)
-CDROMREADTOCHDR = (CDIOC|155)
-CDROMREADTOCENTRY = (CDIOC|156)
-CDROMSTOP = (CDIOC|157)
-CDROMSTART = (CDIOC|158)
-CDROMEJECT = (CDIOC|159)
-CDROMVOLCTRL = (CDIOC|160)
-CDROMSUBCHNL = (CDIOC|161)
-CDROMREADMODE2 = (CDIOC|162)
-CDROMREADMODE1 = (CDIOC|163)
-CDROMREADOFFSET = (CDIOC|164)
-CDROMGBLKMODE = (CDIOC|165)
-CDROMSBLKMODE = (CDIOC|166)
-CDROMCDDA = (CDIOC|167)
-CDROMCDXA = (CDIOC|168)
-CDROMSUBCODE = (CDIOC|169)
-CDROMGDRVSPEED = (CDIOC|170)
-CDROMSDRVSPEED = (CDIOC|171)
-SCMD_READ_TOC = 0x43
-SCMD_PLAYAUDIO_MSF = 0x47
-SCMD_PLAYAUDIO_TI = 0x48
-SCMD_PAUSE_RESUME = 0x4B
-SCMD_READ_SUBCHANNEL = 0x42
-SCMD_PLAYAUDIO10 = 0x45
-SCMD_PLAYTRACK_REL10 = 0x49
-SCMD_READ_HEADER = 0x44
-SCMD_PLAYAUDIO12 = 0xA5
-SCMD_PLAYTRACK_REL12 = 0xA9
-SCMD_CD_PLAYBACK_CONTROL = 0xC9
-SCMD_CD_PLAYBACK_STATUS = 0xC4
-SCMD_READ_CDDA = 0xD8
-SCMD_READ_CDXA = 0xDB
-SCMD_READ_ALL_SUBCODES = 0xDF
-CDROM_MODE2_SIZE = 2336
diff --git a/sys/lib/python/plat-sunos5/DLFCN.py b/sys/lib/python/plat-sunos5/DLFCN.py
deleted file mode 100644
index f49235067..000000000
--- a/sys/lib/python/plat-sunos5/DLFCN.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Generated by h2py from /usr/include/dlfcn.h
-from TYPES import *
-RTLD_LAZY = 0x00001
-RTLD_NOW = 0x00002
-RTLD_NOLOAD = 0x00004
-RTLD_GLOBAL = 0x00100
-RTLD_LOCAL = 0x00000
-RTLD_PARENT = 0x00200
-RTLD_GROUP = 0x00400
-RTLD_WORLD = 0x00800
-RTLD_NODELETE = 0x01000
-RTLD_CONFGEN = 0x10000
-RTLD_REL_RELATIVE = 0x00001
-RTLD_REL_EXEC = 0x00002
-RTLD_REL_DEPENDS = 0x00004
-RTLD_REL_PRELOAD = 0x00008
-RTLD_REL_SELF = 0x00010
-RTLD_REL_WEAK = 0x00020
-RTLD_REL_ALL = 0x00fff
-RTLD_MEMORY = 0x01000
-RTLD_STRIP = 0x02000
-RTLD_NOHEAP = 0x04000
-RTLD_CONFSET = 0x10000
-RTLD_DI_LMID = 1
-RTLD_DI_LINKMAP = 2
-RTLD_DI_CONFIGADDR = 3
-RTLD_DI_MAX = 3
diff --git a/sys/lib/python/plat-sunos5/IN.py b/sys/lib/python/plat-sunos5/IN.py
deleted file mode 100755
index 867b9eb5f..000000000
--- a/sys/lib/python/plat-sunos5/IN.py
+++ /dev/null
@@ -1,1421 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from sys/feature_tests.h
-
-# Included from sys/isa_defs.h
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_ALIGNMENT = 8
-_LONG_LONG_ALIGNMENT = 8
-_DOUBLE_ALIGNMENT = 8
-_LONG_DOUBLE_ALIGNMENT = 16
-_POINTER_ALIGNMENT = 8
-_MAX_ALIGNMENT = 16
-_ALIGNMENT_REQUIRED = 1
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_ALIGNMENT = 4
-_LONG_LONG_ALIGNMENT = 4
-_DOUBLE_ALIGNMENT = 4
-_LONG_DOUBLE_ALIGNMENT = 4
-_POINTER_ALIGNMENT = 4
-_MAX_ALIGNMENT = 4
-_ALIGNMENT_REQUIRED = 0
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_LONG_ALIGNMENT = 8
-_DOUBLE_ALIGNMENT = 8
-_ALIGNMENT_REQUIRED = 1
-_LONG_ALIGNMENT = 4
-_LONG_DOUBLE_ALIGNMENT = 8
-_POINTER_ALIGNMENT = 4
-_MAX_ALIGNMENT = 8
-_LONG_ALIGNMENT = 8
-_LONG_DOUBLE_ALIGNMENT = 16
-_POINTER_ALIGNMENT = 8
-_MAX_ALIGNMENT = 16
-_POSIX_C_SOURCE = 1
-_LARGEFILE64_SOURCE = 1
-_LARGEFILE_SOURCE = 1
-_FILE_OFFSET_BITS = 64
-_FILE_OFFSET_BITS = 32
-_POSIX_C_SOURCE = 199506L
-_POSIX_PTHREAD_SEMANTICS = 1
-_XOPEN_VERSION = 500
-_XOPEN_VERSION = 4
-_XOPEN_VERSION = 3
-from TYPES import *
-
-# Included from sys/stream.h
-
-# Included from sys/vnode.h
-from TYPES import *
-
-# Included from sys/t_lock.h
-
-# Included from sys/machlock.h
-from TYPES import *
-LOCK_HELD_VALUE = 0xff
-def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))
-
-def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)
-
-CLOCK_LEVEL = 10
-LOCK_LEVEL = 10
-DISP_LEVEL = (LOCK_LEVEL + 1)
-PTR24_LSB = 5
-PTR24_MSB = (PTR24_LSB + 24)
-PTR24_ALIGN = 32
-PTR24_BASE = 0xe0000000
-
-# Included from sys/param.h
-from TYPES import *
-_POSIX_VDISABLE = 0
-MAX_INPUT = 512
-MAX_CANON = 256
-UID_NOBODY = 60001
-GID_NOBODY = UID_NOBODY
-UID_NOACCESS = 60002
-MAX_TASKID = 999999
-MAX_MAXPID = 999999
-DEFAULT_MAXPID = 999999
-DEFAULT_JUMPPID = 100000
-DEFAULT_MAXPID = 30000
-DEFAULT_JUMPPID = 0
-MAXUID = 2147483647
-MAXPROJID = MAXUID
-MAXLINK = 32767
-NMOUNT = 40
-CANBSIZ = 256
-NOFILE = 20
-NGROUPS_UMIN = 0
-NGROUPS_UMAX = 32
-NGROUPS_MAX_DEFAULT = 16
-NZERO = 20
-NULL = 0L
-NULL = 0
-CMASK = 022
-CDLIMIT = (1L<<11)
-NBPS = 0x20000
-NBPSCTR = 512
-UBSIZE = 512
-SCTRSHFT = 9
-SYSNAME = 9
-PREMOTE = 39
-MAXPATHLEN = 1024
-MAXSYMLINKS = 20
-MAXNAMELEN = 256
-NADDR = 13
-PIPE_BUF = 5120
-PIPE_MAX = 5120
-NBBY = 8
-MAXBSIZE = 8192
-DEV_BSIZE = 512
-DEV_BSHIFT = 9
-MAXFRAG = 8
-MAXOFF32_T = 0x7fffffff
-MAXOFF_T = 0x7fffffffffffffffl
-MAXOFFSET_T = 0x7fffffffffffffffl
-MAXOFF_T = 0x7fffffffl
-MAXOFFSET_T = 0x7fffffff
-def btodb(bytes): return \
-
-def dbtob(db): return \
-
-def lbtodb(bytes): return \
-
-def ldbtob(db): return \
-
-NCARGS32 = 0x100000
-NCARGS64 = 0x200000
-NCARGS = NCARGS64
-NCARGS = NCARGS32
-FSHIFT = 8
-FSCALE = (1<<FSHIFT)
-def DELAY(n): return drv_usecwait(n)
-
-def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)
-
-def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)
-
-def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))
-
-def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))
-
-def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))
-
-def ptob(x): return ((x) << PAGESHIFT)
-
-def btop(x): return (((x) >> PAGESHIFT))
-
-def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))
-
-def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))
-
-def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))
-
-_AIO_LISTIO_MAX = (4096)
-_AIO_MAX = (-1)
-_MQ_OPEN_MAX = (32)
-_MQ_PRIO_MAX = (32)
-_SEM_NSEMS_MAX = INT_MAX
-_SEM_VALUE_MAX = INT_MAX
-
-# Included from sys/unistd.h
-_CS_PATH = 65
-_CS_LFS_CFLAGS = 68
-_CS_LFS_LDFLAGS = 69
-_CS_LFS_LIBS = 70
-_CS_LFS_LINTFLAGS = 71
-_CS_LFS64_CFLAGS = 72
-_CS_LFS64_LDFLAGS = 73
-_CS_LFS64_LIBS = 74
-_CS_LFS64_LINTFLAGS = 75
-_CS_XBS5_ILP32_OFF32_CFLAGS = 700
-_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
-_CS_XBS5_ILP32_OFF32_LIBS = 702
-_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
-_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
-_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
-_CS_XBS5_ILP32_OFFBIG_LIBS = 707
-_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
-_CS_XBS5_LP64_OFF64_CFLAGS = 709
-_CS_XBS5_LP64_OFF64_LDFLAGS = 710
-_CS_XBS5_LP64_OFF64_LIBS = 711
-_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
-_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
-_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
-_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
-_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
-_SC_ARG_MAX = 1
-_SC_CHILD_MAX = 2
-_SC_CLK_TCK = 3
-_SC_NGROUPS_MAX = 4
-_SC_OPEN_MAX = 5
-_SC_JOB_CONTROL = 6
-_SC_SAVED_IDS = 7
-_SC_VERSION = 8
-_SC_PASS_MAX = 9
-_SC_LOGNAME_MAX = 10
-_SC_PAGESIZE = 11
-_SC_XOPEN_VERSION = 12
-_SC_NPROCESSORS_CONF = 14
-_SC_NPROCESSORS_ONLN = 15
-_SC_STREAM_MAX = 16
-_SC_TZNAME_MAX = 17
-_SC_AIO_LISTIO_MAX = 18
-_SC_AIO_MAX = 19
-_SC_AIO_PRIO_DELTA_MAX = 20
-_SC_ASYNCHRONOUS_IO = 21
-_SC_DELAYTIMER_MAX = 22
-_SC_FSYNC = 23
-_SC_MAPPED_FILES = 24
-_SC_MEMLOCK = 25
-_SC_MEMLOCK_RANGE = 26
-_SC_MEMORY_PROTECTION = 27
-_SC_MESSAGE_PASSING = 28
-_SC_MQ_OPEN_MAX = 29
-_SC_MQ_PRIO_MAX = 30
-_SC_PRIORITIZED_IO = 31
-_SC_PRIORITY_SCHEDULING = 32
-_SC_REALTIME_SIGNALS = 33
-_SC_RTSIG_MAX = 34
-_SC_SEMAPHORES = 35
-_SC_SEM_NSEMS_MAX = 36
-_SC_SEM_VALUE_MAX = 37
-_SC_SHARED_MEMORY_OBJECTS = 38
-_SC_SIGQUEUE_MAX = 39
-_SC_SIGRT_MIN = 40
-_SC_SIGRT_MAX = 41
-_SC_SYNCHRONIZED_IO = 42
-_SC_TIMERS = 43
-_SC_TIMER_MAX = 44
-_SC_2_C_BIND = 45
-_SC_2_C_DEV = 46
-_SC_2_C_VERSION = 47
-_SC_2_FORT_DEV = 48
-_SC_2_FORT_RUN = 49
-_SC_2_LOCALEDEF = 50
-_SC_2_SW_DEV = 51
-_SC_2_UPE = 52
-_SC_2_VERSION = 53
-_SC_BC_BASE_MAX = 54
-_SC_BC_DIM_MAX = 55
-_SC_BC_SCALE_MAX = 56
-_SC_BC_STRING_MAX = 57
-_SC_COLL_WEIGHTS_MAX = 58
-_SC_EXPR_NEST_MAX = 59
-_SC_LINE_MAX = 60
-_SC_RE_DUP_MAX = 61
-_SC_XOPEN_CRYPT = 62
-_SC_XOPEN_ENH_I18N = 63
-_SC_XOPEN_SHM = 64
-_SC_2_CHAR_TERM = 66
-_SC_XOPEN_XCU_VERSION = 67
-_SC_ATEXIT_MAX = 76
-_SC_IOV_MAX = 77
-_SC_XOPEN_UNIX = 78
-_SC_PAGE_SIZE = _SC_PAGESIZE
-_SC_T_IOV_MAX = 79
-_SC_PHYS_PAGES = 500
-_SC_AVPHYS_PAGES = 501
-_SC_COHER_BLKSZ = 503
-_SC_SPLIT_CACHE = 504
-_SC_ICACHE_SZ = 505
-_SC_DCACHE_SZ = 506
-_SC_ICACHE_LINESZ = 507
-_SC_DCACHE_LINESZ = 508
-_SC_ICACHE_BLKSZ = 509
-_SC_DCACHE_BLKSZ = 510
-_SC_DCACHE_TBLKSZ = 511
-_SC_ICACHE_ASSOC = 512
-_SC_DCACHE_ASSOC = 513
-_SC_MAXPID = 514
-_SC_STACK_PROT = 515
-_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
-_SC_GETGR_R_SIZE_MAX = 569
-_SC_GETPW_R_SIZE_MAX = 570
-_SC_LOGIN_NAME_MAX = 571
-_SC_THREAD_KEYS_MAX = 572
-_SC_THREAD_STACK_MIN = 573
-_SC_THREAD_THREADS_MAX = 574
-_SC_TTY_NAME_MAX = 575
-_SC_THREADS = 576
-_SC_THREAD_ATTR_STACKADDR = 577
-_SC_THREAD_ATTR_STACKSIZE = 578
-_SC_THREAD_PRIORITY_SCHEDULING = 579
-_SC_THREAD_PRIO_INHERIT = 580
-_SC_THREAD_PRIO_PROTECT = 581
-_SC_THREAD_PROCESS_SHARED = 582
-_SC_THREAD_SAFE_FUNCTIONS = 583
-_SC_XOPEN_LEGACY = 717
-_SC_XOPEN_REALTIME = 718
-_SC_XOPEN_REALTIME_THREADS = 719
-_SC_XBS5_ILP32_OFF32 = 720
-_SC_XBS5_ILP32_OFFBIG = 721
-_SC_XBS5_LP64_OFF64 = 722
-_SC_XBS5_LPBIG_OFFBIG = 723
-_PC_LINK_MAX = 1
-_PC_MAX_CANON = 2
-_PC_MAX_INPUT = 3
-_PC_NAME_MAX = 4
-_PC_PATH_MAX = 5
-_PC_PIPE_BUF = 6
-_PC_NO_TRUNC = 7
-_PC_VDISABLE = 8
-_PC_CHOWN_RESTRICTED = 9
-_PC_ASYNC_IO = 10
-_PC_PRIO_IO = 11
-_PC_SYNC_IO = 12
-_PC_FILESIZEBITS = 67
-_PC_LAST = 67
-_POSIX_VERSION = 199506L
-_POSIX2_VERSION = 199209L
-_POSIX2_C_VERSION = 199209L
-_XOPEN_XCU_VERSION = 4
-_XOPEN_REALTIME = 1
-_XOPEN_ENH_I18N = 1
-_XOPEN_SHM = 1
-_POSIX2_C_BIND = 1
-_POSIX2_CHAR_TERM = 1
-_POSIX2_LOCALEDEF = 1
-_POSIX2_C_DEV = 1
-_POSIX2_SW_DEV = 1
-_POSIX2_UPE = 1
-
-# Included from sys/mutex.h
-from TYPES import *
-def MUTEX_HELD(x): return (mutex_owned(x))
-
-
-# Included from sys/rwlock.h
-from TYPES import *
-def RW_READ_HELD(x): return (rw_read_held((x)))
-
-def RW_WRITE_HELD(x): return (rw_write_held((x)))
-
-def RW_LOCK_HELD(x): return (rw_lock_held((x)))
-
-def RW_ISWRITER(x): return (rw_iswriter(x))
-
-
-# Included from sys/semaphore.h
-
-# Included from sys/thread.h
-from TYPES import *
-
-# Included from sys/klwp.h
-from TYPES import *
-
-# Included from sys/condvar.h
-from TYPES import *
-
-# Included from sys/time.h
-
-# Included from sys/types32.h
-
-# Included from sys/int_types.h
-TIME32_MAX = INT32_MAX
-TIME32_MIN = INT32_MIN
-def TIMEVAL_OVERFLOW(tv): return \
-
-from TYPES import *
-DST_NONE = 0
-DST_USA = 1
-DST_AUST = 2
-DST_WET = 3
-DST_MET = 4
-DST_EET = 5
-DST_CAN = 6
-DST_GB = 7
-DST_RUM = 8
-DST_TUR = 9
-DST_AUSTALT = 10
-ITIMER_REAL = 0
-ITIMER_VIRTUAL = 1
-ITIMER_PROF = 2
-ITIMER_REALPROF = 3
-def ITIMERVAL_OVERFLOW(itv): return \
-
-SEC = 1
-MILLISEC = 1000
-MICROSEC = 1000000
-NANOSEC = 1000000000
-
-# Included from sys/time_impl.h
-def TIMESPEC_OVERFLOW(ts): return \
-
-def ITIMERSPEC_OVERFLOW(it): return \
-
-__CLOCK_REALTIME0 = 0
-CLOCK_VIRTUAL = 1
-CLOCK_PROF = 2
-__CLOCK_REALTIME3 = 3
-CLOCK_HIGHRES = 4
-CLOCK_MAX = 5
-CLOCK_REALTIME = __CLOCK_REALTIME3
-CLOCK_REALTIME = __CLOCK_REALTIME0
-TIMER_RELTIME = 0x0
-TIMER_ABSTIME = 0x1
-def TICK_TO_SEC(tick): return ((tick) / hz)
-
-def SEC_TO_TICK(sec): return ((sec) * hz)
-
-def TICK_TO_MSEC(tick): return \
-
-def MSEC_TO_TICK(msec): return \
-
-def MSEC_TO_TICK_ROUNDUP(msec): return \
-
-def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
-
-def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
-
-def USEC_TO_TICK_ROUNDUP(usec): return \
-
-def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
-
-def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
-
-def NSEC_TO_TICK_ROUNDUP(nsec): return \
-
-def TIMEVAL_TO_TICK(tvp): return \
-
-def TIMESTRUC_TO_TICK(tsp): return \
-
-
-# Included from time.h
-from TYPES import *
-
-# Included from iso/time_iso.h
-NULL = 0L
-NULL = 0
-CLOCKS_PER_SEC = 1000000
-
-# Included from sys/select.h
-FD_SETSIZE = 65536
-FD_SETSIZE = 1024
-_NBBY = 8
-NBBY = _NBBY
-def FD_ZERO(p): return bzero((p), sizeof (*(p)))
-
-
-# Included from sys/signal.h
-
-# Included from sys/iso/signal_iso.h
-SIGHUP = 1
-SIGINT = 2
-SIGQUIT = 3
-SIGILL = 4
-SIGTRAP = 5
-SIGIOT = 6
-SIGABRT = 6
-SIGEMT = 7
-SIGFPE = 8
-SIGKILL = 9
-SIGBUS = 10
-SIGSEGV = 11
-SIGSYS = 12
-SIGPIPE = 13
-SIGALRM = 14
-SIGTERM = 15
-SIGUSR1 = 16
-SIGUSR2 = 17
-SIGCLD = 18
-SIGCHLD = 18
-SIGPWR = 19
-SIGWINCH = 20
-SIGURG = 21
-SIGPOLL = 22
-SIGIO = SIGPOLL
-SIGSTOP = 23
-SIGTSTP = 24
-SIGCONT = 25
-SIGTTIN = 26
-SIGTTOU = 27
-SIGVTALRM = 28
-SIGPROF = 29
-SIGXCPU = 30
-SIGXFSZ = 31
-SIGWAITING = 32
-SIGLWP = 33
-SIGFREEZE = 34
-SIGTHAW = 35
-SIGCANCEL = 36
-SIGLOST = 37
-_SIGRTMIN = 38
-_SIGRTMAX = 45
-SIG_BLOCK = 1
-SIG_UNBLOCK = 2
-SIG_SETMASK = 3
-SIGNO_MASK = 0xFF
-SIGDEFER = 0x100
-SIGHOLD = 0x200
-SIGRELSE = 0x400
-SIGIGNORE = 0x800
-SIGPAUSE = 0x1000
-
-# Included from sys/siginfo.h
-from TYPES import *
-SIGEV_NONE = 1
-SIGEV_SIGNAL = 2
-SIGEV_THREAD = 3
-SI_NOINFO = 32767
-SI_USER = 0
-SI_LWP = (-1)
-SI_QUEUE = (-2)
-SI_TIMER = (-3)
-SI_ASYNCIO = (-4)
-SI_MESGQ = (-5)
-
-# Included from sys/machsig.h
-ILL_ILLOPC = 1
-ILL_ILLOPN = 2
-ILL_ILLADR = 3
-ILL_ILLTRP = 4
-ILL_PRVOPC = 5
-ILL_PRVREG = 6
-ILL_COPROC = 7
-ILL_BADSTK = 8
-NSIGILL = 8
-EMT_TAGOVF = 1
-EMT_CPCOVF = 2
-NSIGEMT = 2
-FPE_INTDIV = 1
-FPE_INTOVF = 2
-FPE_FLTDIV = 3
-FPE_FLTOVF = 4
-FPE_FLTUND = 5
-FPE_FLTRES = 6
-FPE_FLTINV = 7
-FPE_FLTSUB = 8
-NSIGFPE = 8
-SEGV_MAPERR = 1
-SEGV_ACCERR = 2
-NSIGSEGV = 2
-BUS_ADRALN = 1
-BUS_ADRERR = 2
-BUS_OBJERR = 3
-NSIGBUS = 3
-TRAP_BRKPT = 1
-TRAP_TRACE = 2
-TRAP_RWATCH = 3
-TRAP_WWATCH = 4
-TRAP_XWATCH = 5
-NSIGTRAP = 5
-CLD_EXITED = 1
-CLD_KILLED = 2
-CLD_DUMPED = 3
-CLD_TRAPPED = 4
-CLD_STOPPED = 5
-CLD_CONTINUED = 6
-NSIGCLD = 6
-POLL_IN = 1
-POLL_OUT = 2
-POLL_MSG = 3
-POLL_ERR = 4
-POLL_PRI = 5
-POLL_HUP = 6
-NSIGPOLL = 6
-PROF_SIG = 1
-NSIGPROF = 1
-SI_MAXSZ = 256
-SI_MAXSZ = 128
-
-# Included from sys/time_std_impl.h
-from TYPES import *
-SI32_MAXSZ = 128
-def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)
-
-SA_NOCLDSTOP = 0x00020000
-SA_ONSTACK = 0x00000001
-SA_RESETHAND = 0x00000002
-SA_RESTART = 0x00000004
-SA_SIGINFO = 0x00000008
-SA_NODEFER = 0x00000010
-SA_NOCLDWAIT = 0x00010000
-SA_WAITSIG = 0x00010000
-NSIG = 46
-MAXSIG = 45
-S_SIGNAL = 1
-S_SIGSET = 2
-S_SIGACTION = 3
-S_NONE = 4
-MINSIGSTKSZ = 2048
-SIGSTKSZ = 8192
-SS_ONSTACK = 0x00000001
-SS_DISABLE = 0x00000002
-SN_PROC = 1
-SN_CANCEL = 2
-SN_SEND = 3
-
-# Included from sys/ucontext.h
-from TYPES import *
-
-# Included from sys/regset.h
-REG_CCR = (0)
-REG_PSR = (0)
-REG_PSR = (0)
-REG_PC = (1)
-REG_nPC = (2)
-REG_Y = (3)
-REG_G1 = (4)
-REG_G2 = (5)
-REG_G3 = (6)
-REG_G4 = (7)
-REG_G5 = (8)
-REG_G6 = (9)
-REG_G7 = (10)
-REG_O0 = (11)
-REG_O1 = (12)
-REG_O2 = (13)
-REG_O3 = (14)
-REG_O4 = (15)
-REG_O5 = (16)
-REG_O6 = (17)
-REG_O7 = (18)
-REG_ASI = (19)
-REG_FPRS = (20)
-REG_PS = REG_PSR
-REG_SP = REG_O6
-REG_R0 = REG_O0
-REG_R1 = REG_O1
-_NGREG = 21
-_NGREG = 19
-NGREG = _NGREG
-_NGREG32 = 19
-_NGREG64 = 21
-SPARC_MAXREGWINDOW = 31
-MAXFPQ = 16
-XRS_ID = 0x78727300
-
-# Included from v7/sys/privregs.h
-
-# Included from v7/sys/psr.h
-PSR_CWP = 0x0000001F
-PSR_ET = 0x00000020
-PSR_PS = 0x00000040
-PSR_S = 0x00000080
-PSR_PIL = 0x00000F00
-PSR_EF = 0x00001000
-PSR_EC = 0x00002000
-PSR_RSV = 0x000FC000
-PSR_ICC = 0x00F00000
-PSR_C = 0x00100000
-PSR_V = 0x00200000
-PSR_Z = 0x00400000
-PSR_N = 0x00800000
-PSR_VER = 0x0F000000
-PSR_IMPL = 0xF0000000
-PSL_ALLCC = PSR_ICC
-PSL_USER = (PSR_S)
-PSL_USERMASK = (PSR_ICC)
-PSL_UBITS = (PSR_ICC|PSR_EF)
-def USERMODE(ps): return (((ps) & PSR_PS) == 0)
-
-
-# Included from sys/fsr.h
-FSR_CEXC = 0x0000001f
-FSR_AEXC = 0x000003e0
-FSR_FCC = 0x00000c00
-FSR_PR = 0x00001000
-FSR_QNE = 0x00002000
-FSR_FTT = 0x0001c000
-FSR_VER = 0x000e0000
-FSR_TEM = 0x0f800000
-FSR_RP = 0x30000000
-FSR_RD = 0xc0000000
-FSR_VER_SHIFT = 17
-FSR_FCC1 = 0x00000003
-FSR_FCC2 = 0x0000000C
-FSR_FCC3 = 0x00000030
-FSR_CEXC_NX = 0x00000001
-FSR_CEXC_DZ = 0x00000002
-FSR_CEXC_UF = 0x00000004
-FSR_CEXC_OF = 0x00000008
-FSR_CEXC_NV = 0x00000010
-FSR_AEXC_NX = (0x1 << 5)
-FSR_AEXC_DZ = (0x2 << 5)
-FSR_AEXC_UF = (0x4 << 5)
-FSR_AEXC_OF = (0x8 << 5)
-FSR_AEXC_NV = (0x10 << 5)
-FTT_NONE = 0
-FTT_IEEE = 1
-FTT_UNFIN = 2
-FTT_UNIMP = 3
-FTT_SEQ = 4
-FTT_ALIGN = 5
-FTT_DFAULT = 6
-FSR_FTT_SHIFT = 14
-FSR_FTT_IEEE = (FTT_IEEE << FSR_FTT_SHIFT)
-FSR_FTT_UNFIN = (FTT_UNFIN << FSR_FTT_SHIFT)
-FSR_FTT_UNIMP = (FTT_UNIMP << FSR_FTT_SHIFT)
-FSR_FTT_SEQ = (FTT_SEQ << FSR_FTT_SHIFT)
-FSR_FTT_ALIGN = (FTT_ALIGN << FSR_FTT_SHIFT)
-FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT)
-FSR_TEM_NX = (0x1 << 23)
-FSR_TEM_DZ = (0x2 << 23)
-FSR_TEM_UF = (0x4 << 23)
-FSR_TEM_OF = (0x8 << 23)
-FSR_TEM_NV = (0x10 << 23)
-RP_DBLEXT = 0
-RP_SINGLE = 1
-RP_DOUBLE = 2
-RP_RESERVED = 3
-RD_NEAR = 0
-RD_ZER0 = 1
-RD_POSINF = 2
-RD_NEGINF = 3
-FPRS_DL = 0x1
-FPRS_DU = 0x2
-FPRS_FEF = 0x4
-PIL_MAX = 0xf
-def SAVE_GLOBALS(RP): return \
-
-def RESTORE_GLOBALS(RP): return \
-
-def SAVE_OUTS(RP): return \
-
-def RESTORE_OUTS(RP): return \
-
-def SAVE_WINDOW(SBP): return \
-
-def RESTORE_WINDOW(SBP): return \
-
-def STORE_FPREGS(FP): return \
-
-def LOAD_FPREGS(FP): return \
-
-_SPARC_MAXREGWINDOW = 31
-_XRS_ID = 0x78727300
-GETCONTEXT = 0
-SETCONTEXT = 1
-UC_SIGMASK = 001
-UC_STACK = 002
-UC_CPU = 004
-UC_MAU = 010
-UC_FPU = UC_MAU
-UC_INTR = 020
-UC_ASR = 040
-UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR)
-UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
-_SIGQUEUE_MAX = 32
-_SIGNOTIFY_MAX = 32
-
-# Included from sys/pcb.h
-INSTR_VALID = 0x02
-NORMAL_STEP = 0x04
-WATCH_STEP = 0x08
-CPC_OVERFLOW = 0x10
-ASYNC_HWERR = 0x20
-STEP_NONE = 0
-STEP_REQUESTED = 1
-STEP_ACTIVE = 2
-STEP_WASACTIVE = 3
-
-# Included from sys/msacct.h
-LMS_USER = 0
-LMS_SYSTEM = 1
-LMS_TRAP = 2
-LMS_TFAULT = 3
-LMS_DFAULT = 4
-LMS_KFAULT = 5
-LMS_USER_LOCK = 6
-LMS_SLEEP = 7
-LMS_WAIT_CPU = 8
-LMS_STOPPED = 9
-NMSTATES = 10
-
-# Included from sys/lwp.h
-
-# Included from sys/synch.h
-from TYPES import *
-USYNC_THREAD = 0x00
-USYNC_PROCESS = 0x01
-LOCK_NORMAL = 0x00
-LOCK_ERRORCHECK = 0x02
-LOCK_RECURSIVE = 0x04
-USYNC_PROCESS_ROBUST = 0x08
-LOCK_PRIO_NONE = 0x00
-LOCK_PRIO_INHERIT = 0x10
-LOCK_PRIO_PROTECT = 0x20
-LOCK_STALL_NP = 0x00
-LOCK_ROBUST_NP = 0x40
-LOCK_OWNERDEAD = 0x1
-LOCK_NOTRECOVERABLE = 0x2
-LOCK_INITED = 0x4
-LOCK_UNMAPPED = 0x8
-LWP_DETACHED = 0x00000040
-LWP_SUSPENDED = 0x00000080
-__LWP_ASLWP = 0x00000100
-MAXSYSARGS = 8
-NORMALRETURN = 0
-JUSTRETURN = 1
-LWP_USER = 0x01
-LWP_SYS = 0x02
-TS_FREE = 0x00
-TS_SLEEP = 0x01
-TS_RUN = 0x02
-TS_ONPROC = 0x04
-TS_ZOMB = 0x08
-TS_STOPPED = 0x10
-T_INTR_THREAD = 0x0001
-T_WAKEABLE = 0x0002
-T_TOMASK = 0x0004
-T_TALLOCSTK = 0x0008
-T_WOULDBLOCK = 0x0020
-T_DONTBLOCK = 0x0040
-T_DONTPEND = 0x0080
-T_SYS_PROF = 0x0100
-T_WAITCVSEM = 0x0200
-T_WATCHPT = 0x0400
-T_PANIC = 0x0800
-TP_HOLDLWP = 0x0002
-TP_TWAIT = 0x0004
-TP_LWPEXIT = 0x0008
-TP_PRSTOP = 0x0010
-TP_CHKPT = 0x0020
-TP_EXITLWP = 0x0040
-TP_PRVSTOP = 0x0080
-TP_MSACCT = 0x0100
-TP_STOPPING = 0x0200
-TP_WATCHPT = 0x0400
-TP_PAUSE = 0x0800
-TP_CHANGEBIND = 0x1000
-TS_LOAD = 0x0001
-TS_DONT_SWAP = 0x0002
-TS_SWAPENQ = 0x0004
-TS_ON_SWAPQ = 0x0008
-TS_CSTART = 0x0100
-TS_UNPAUSE = 0x0200
-TS_XSTART = 0x0400
-TS_PSTART = 0x0800
-TS_RESUME = 0x1000
-TS_CREATE = 0x2000
-TS_ALLSTART = \
- (TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
-def CPR_VSTOPPED(t): return \
-
-def THREAD_TRANSITION(tp): return thread_transition(tp);
-
-def THREAD_STOP(tp): return \
-
-def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL)
-
-def SEMA_HELD(x): return (sema_held((x)))
-
-NO_LOCKS_HELD = 1
-NO_COMPETING_THREADS = 1
-
-# Included from sys/cred.h
-
-# Included from sys/uio.h
-from TYPES import *
-
-# Included from sys/resource.h
-from TYPES import *
-PRIO_PROCESS = 0
-PRIO_PGRP = 1
-PRIO_USER = 2
-RLIMIT_CPU = 0
-RLIMIT_FSIZE = 1
-RLIMIT_DATA = 2
-RLIMIT_STACK = 3
-RLIMIT_CORE = 4
-RLIMIT_NOFILE = 5
-RLIMIT_VMEM = 6
-RLIMIT_AS = RLIMIT_VMEM
-RLIM_NLIMITS = 7
-RLIM_INFINITY = (-3l)
-RLIM_SAVED_MAX = (-2l)
-RLIM_SAVED_CUR = (-1l)
-RLIM_INFINITY = 0x7fffffff
-RLIM_SAVED_MAX = 0x7ffffffe
-RLIM_SAVED_CUR = 0x7ffffffd
-RLIM32_INFINITY = 0x7fffffff
-RLIM32_SAVED_MAX = 0x7ffffffe
-RLIM32_SAVED_CUR = 0x7ffffffd
-
-# Included from sys/model.h
-
-# Included from sys/debug.h
-def ASSERT64(x): return ASSERT(x)
-
-def ASSERT32(x): return ASSERT(x)
-
-DATAMODEL_MASK = 0x0FF00000
-DATAMODEL_ILP32 = 0x00100000
-DATAMODEL_LP64 = 0x00200000
-DATAMODEL_NONE = 0
-DATAMODEL_NATIVE = DATAMODEL_LP64
-DATAMODEL_NATIVE = DATAMODEL_ILP32
-def STRUCT_SIZE(handle): return \
-
-def STRUCT_BUF(handle): return ((handle).ptr.m64)
-
-def SIZEOF_PTR(umodel): return \
-
-def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr))
-
-def STRUCT_BUF(handle): return ((handle).ptr)
-
-def SIZEOF_PTR(umodel): return sizeof (caddr_t)
-
-def lwp_getdatamodel(t): return DATAMODEL_ILP32
-
-RUSAGE_SELF = 0
-RUSAGE_CHILDREN = -1
-
-# Included from vm/seg_enum.h
-
-# Included from sys/buf.h
-
-# Included from sys/kstat.h
-from TYPES import *
-KSTAT_STRLEN = 31
-def KSTAT_ENTER(k): return \
-
-def KSTAT_EXIT(k): return \
-
-KSTAT_TYPE_RAW = 0
-KSTAT_TYPE_NAMED = 1
-KSTAT_TYPE_INTR = 2
-KSTAT_TYPE_IO = 3
-KSTAT_TYPE_TIMER = 4
-KSTAT_NUM_TYPES = 5
-KSTAT_FLAG_VIRTUAL = 0x01
-KSTAT_FLAG_VAR_SIZE = 0x02
-KSTAT_FLAG_WRITABLE = 0x04
-KSTAT_FLAG_PERSISTENT = 0x08
-KSTAT_FLAG_DORMANT = 0x10
-KSTAT_FLAG_INVALID = 0x20
-KSTAT_READ = 0
-KSTAT_WRITE = 1
-KSTAT_DATA_CHAR = 0
-KSTAT_DATA_INT32 = 1
-KSTAT_DATA_UINT32 = 2
-KSTAT_DATA_INT64 = 3
-KSTAT_DATA_UINT64 = 4
-KSTAT_DATA_LONG = KSTAT_DATA_INT32
-KSTAT_DATA_ULONG = KSTAT_DATA_UINT32
-KSTAT_DATA_LONG = KSTAT_DATA_INT64
-KSTAT_DATA_ULONG = KSTAT_DATA_UINT64
-KSTAT_DATA_LONG = 7
-KSTAT_DATA_ULONG = 8
-KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64
-KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64
-KSTAT_DATA_FLOAT = 5
-KSTAT_DATA_DOUBLE = 6
-KSTAT_INTR_HARD = 0
-KSTAT_INTR_SOFT = 1
-KSTAT_INTR_WATCHDOG = 2
-KSTAT_INTR_SPURIOUS = 3
-KSTAT_INTR_MULTSVC = 4
-KSTAT_NUM_INTRS = 5
-B_BUSY = 0x0001
-B_DONE = 0x0002
-B_ERROR = 0x0004
-B_PAGEIO = 0x0010
-B_PHYS = 0x0020
-B_READ = 0x0040
-B_WRITE = 0x0100
-B_KERNBUF = 0x0008
-B_WANTED = 0x0080
-B_AGE = 0x000200
-B_ASYNC = 0x000400
-B_DELWRI = 0x000800
-B_STALE = 0x001000
-B_DONTNEED = 0x002000
-B_REMAPPED = 0x004000
-B_FREE = 0x008000
-B_INVAL = 0x010000
-B_FORCE = 0x020000
-B_HEAD = 0x040000
-B_NOCACHE = 0x080000
-B_TRUNC = 0x100000
-B_SHADOW = 0x200000
-B_RETRYWRI = 0x400000
-def notavail(bp): return \
-
-def BWRITE(bp): return \
-
-def BWRITE2(bp): return \
-
-VROOT = 0x01
-VNOCACHE = 0x02
-VNOMAP = 0x04
-VDUP = 0x08
-VNOSWAP = 0x10
-VNOMOUNT = 0x20
-VISSWAP = 0x40
-VSWAPLIKE = 0x80
-VVFSLOCK = 0x100
-VVFSWAIT = 0x200
-VVMLOCK = 0x400
-VDIROPEN = 0x800
-VVMEXEC = 0x1000
-VPXFS = 0x2000
-AT_TYPE = 0x0001
-AT_MODE = 0x0002
-AT_UID = 0x0004
-AT_GID = 0x0008
-AT_FSID = 0x0010
-AT_NODEID = 0x0020
-AT_NLINK = 0x0040
-AT_SIZE = 0x0080
-AT_ATIME = 0x0100
-AT_MTIME = 0x0200
-AT_CTIME = 0x0400
-AT_RDEV = 0x0800
-AT_BLKSIZE = 0x1000
-AT_NBLOCKS = 0x2000
-AT_VCODE = 0x4000
-AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
- AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
- AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
-AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
- AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
-AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME)
-AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\
- AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
-VSUID = 04000
-VSGID = 02000
-VSVTX = 01000
-VREAD = 00400
-VWRITE = 00200
-VEXEC = 00100
-MODEMASK = 07777
-PERMMASK = 00777
-def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID)
-
-VSA_ACL = 0x0001
-VSA_ACLCNT = 0x0002
-VSA_DFACL = 0x0004
-VSA_DFACLCNT = 0x0008
-LOOKUP_DIR = 0x01
-DUMP_ALLOC = 0
-DUMP_FREE = 1
-DUMP_SCAN = 2
-ATTR_UTIME = 0x01
-ATTR_EXEC = 0x02
-ATTR_COMM = 0x04
-ATTR_HINT = 0x08
-ATTR_REAL = 0x10
-
-# Included from sys/poll.h
-POLLIN = 0x0001
-POLLPRI = 0x0002
-POLLOUT = 0x0004
-POLLRDNORM = 0x0040
-POLLWRNORM = POLLOUT
-POLLRDBAND = 0x0080
-POLLWRBAND = 0x0100
-POLLNORM = POLLRDNORM
-POLLERR = 0x0008
-POLLHUP = 0x0010
-POLLNVAL = 0x0020
-POLLREMOVE = 0x0800
-POLLRDDATA = 0x0200
-POLLNOERR = 0x0400
-POLLCLOSED = 0x8000
-
-# Included from sys/strmdep.h
-def str_aligned(X): return (((ulong_t)(X) & (sizeof (long) - 1)) == 0)
-
-
-# Included from sys/strft.h
-tdelta_t_sz = 12
-FTEV_MASK = 0x1FFF
-FTEV_ISWR = 0x8000
-FTEV_CS = 0x4000
-FTEV_PS = 0x2000
-FTEV_QMASK = 0x1F00
-FTEV_ALLOCMASK = 0x1FF8
-FTEV_ALLOCB = 0x0000
-FTEV_ESBALLOC = 0x0001
-FTEV_DESBALLOC = 0x0002
-FTEV_ESBALLOCA = 0x0003
-FTEV_DESBALLOCA = 0x0004
-FTEV_ALLOCBIG = 0x0005
-FTEV_ALLOCBW = 0x0006
-FTEV_FREEB = 0x0008
-FTEV_DUPB = 0x0009
-FTEV_COPYB = 0x000A
-FTEV_CALLER = 0x000F
-FTEV_PUT = 0x0100
-FTEV_FSYNCQ = 0x0103
-FTEV_DSYNCQ = 0x0104
-FTEV_PUTQ = 0x0105
-FTEV_GETQ = 0x0106
-FTEV_RMVQ = 0x0107
-FTEV_INSQ = 0x0108
-FTEV_PUTBQ = 0x0109
-FTEV_FLUSHQ = 0x010A
-FTEV_REPLYQ = 0x010B
-FTEV_PUTNEXT = 0x010D
-FTEV_RWNEXT = 0x010E
-FTEV_QWINNER = 0x010F
-FTEV_GEWRITE = 0x0101
-def FTFLW_HASH(h): return (((unsigned)(h))%ftflw_hash_sz)
-
-FTBLK_EVNTS = 0x9
-QENAB = 0x00000001
-QWANTR = 0x00000002
-QWANTW = 0x00000004
-QFULL = 0x00000008
-QREADR = 0x00000010
-QUSE = 0x00000020
-QNOENB = 0x00000040
-QBACK = 0x00000100
-QHLIST = 0x00000200
-QPAIR = 0x00000800
-QPERQ = 0x00001000
-QPERMOD = 0x00002000
-QMTSAFE = 0x00004000
-QMTOUTPERIM = 0x00008000
-QMT_TYPEMASK = (QPAIR|QPERQ|QPERMOD|QMTSAFE|QMTOUTPERIM)
-QINSERVICE = 0x00010000
-QWCLOSE = 0x00020000
-QEND = 0x00040000
-QWANTWSYNC = 0x00080000
-QSYNCSTR = 0x00100000
-QISDRV = 0x00200000
-QHOT = 0x00400000
-QNEXTHOT = 0x00800000
-_QINSERTING = 0x04000000
-_QREMOVING = 0x08000000
-Q_SQQUEUED = 0x01
-Q_SQDRAINING = 0x02
-QB_FULL = 0x01
-QB_WANTW = 0x02
-QB_BACK = 0x04
-NBAND = 256
-STRUIOT_NONE = -1
-STRUIOT_DONTCARE = 0
-STRUIOT_STANDARD = 1
-STRUIOT_IP = 2
-DBLK_REFMIN = 0x01
-STRUIO_SPEC = 0x01
-STRUIO_DONE = 0x02
-STRUIO_IP = 0x04
-STRUIO_ZC = 0x08
-STRUIO_ICK = 0x10
-MSGMARK = 0x01
-MSGNOLOOP = 0x02
-MSGDELIM = 0x04
-MSGNOGET = 0x08
-MSGMARKNEXT = 0x10
-MSGNOTMARKNEXT = 0x20
-M_DATA = 0x00
-M_PROTO = 0x01
-M_BREAK = 0x08
-M_PASSFP = 0x09
-M_EVENT = 0x0a
-M_SIG = 0x0b
-M_DELAY = 0x0c
-M_CTL = 0x0d
-M_IOCTL = 0x0e
-M_SETOPTS = 0x10
-M_RSE = 0x11
-M_IOCACK = 0x81
-M_IOCNAK = 0x82
-M_PCPROTO = 0x83
-M_PCSIG = 0x84
-M_READ = 0x85
-M_FLUSH = 0x86
-M_STOP = 0x87
-M_START = 0x88
-M_HANGUP = 0x89
-M_ERROR = 0x8a
-M_COPYIN = 0x8b
-M_COPYOUT = 0x8c
-M_IOCDATA = 0x8d
-M_PCRSE = 0x8e
-M_STOPI = 0x8f
-M_STARTI = 0x90
-M_PCEVENT = 0x91
-M_UNHANGUP = 0x92
-QNORM = 0x00
-QPCTL = 0x80
-IOC_MODELS = DATAMODEL_MASK
-IOC_ILP32 = DATAMODEL_ILP32
-IOC_LP64 = DATAMODEL_LP64
-IOC_NATIVE = DATAMODEL_NATIVE
-IOC_NONE = DATAMODEL_NONE
-STRCANON = 0x01
-RECOPY = 0x02
-SO_ALL = 0x003f
-SO_READOPT = 0x0001
-SO_WROFF = 0x0002
-SO_MINPSZ = 0x0004
-SO_MAXPSZ = 0x0008
-SO_HIWAT = 0x0010
-SO_LOWAT = 0x0020
-SO_MREADON = 0x0040
-SO_MREADOFF = 0x0080
-SO_NDELON = 0x0100
-SO_NDELOFF = 0x0200
-SO_ISTTY = 0x0400
-SO_ISNTTY = 0x0800
-SO_TOSTOP = 0x1000
-SO_TONSTOP = 0x2000
-SO_BAND = 0x4000
-SO_DELIM = 0x8000
-SO_NODELIM = 0x010000
-SO_STRHOLD = 0x020000
-SO_ERROPT = 0x040000
-SO_COPYOPT = 0x080000
-SO_MAXBLK = 0x100000
-DEF_IOV_MAX = 16
-INFOD_FIRSTBYTES = 0x02
-INFOD_BYTES = 0x04
-INFOD_COUNT = 0x08
-INFOD_COPYOUT = 0x10
-MODOPEN = 0x1
-CLONEOPEN = 0x2
-CONSOPEN = 0x4
-OPENFAIL = -1
-BPRI_LO = 1
-BPRI_MED = 2
-BPRI_HI = 3
-BPRI_FT = 4
-INFPSZ = -1
-FLUSHALL = 1
-FLUSHDATA = 0
-STRHIGH = 5120
-STRLOW = 1024
-MAXIOCBSZ = 1024
-PERIM_INNER = 1
-PERIM_OUTER = 2
-def datamsg(type): return \
-
-def straln(a): return (caddr_t)((intptr_t)(a) & ~(sizeof (int)-1))
-
-
-# Included from sys/byteorder.h
-def ntohl(x): return (x)
-
-def ntohs(x): return (x)
-
-def htonl(x): return (x)
-
-def htons(x): return (x)
-
-IPPROTO_IP = 0
-IPPROTO_HOPOPTS = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_ENCAP = 4
-IPPROTO_TCP = 6
-IPPROTO_EGP = 8
-IPPROTO_PUP = 12
-IPPROTO_UDP = 17
-IPPROTO_IDP = 22
-IPPROTO_IPV6 = 41
-IPPROTO_ROUTING = 43
-IPPROTO_FRAGMENT = 44
-IPPROTO_RSVP = 46
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_ICMPV6 = 58
-IPPROTO_NONE = 59
-IPPROTO_DSTOPTS = 60
-IPPROTO_HELLO = 63
-IPPROTO_ND = 77
-IPPROTO_EON = 80
-IPPROTO_PIM = 103
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_ECHO = 7
-IPPORT_DISCARD = 9
-IPPORT_SYSTAT = 11
-IPPORT_DAYTIME = 13
-IPPORT_NETSTAT = 15
-IPPORT_FTP = 21
-IPPORT_TELNET = 23
-IPPORT_SMTP = 25
-IPPORT_TIMESERVER = 37
-IPPORT_NAMESERVER = 42
-IPPORT_WHOIS = 43
-IPPORT_MTP = 57
-IPPORT_BOOTPS = 67
-IPPORT_BOOTPC = 68
-IPPORT_TFTP = 69
-IPPORT_RJE = 77
-IPPORT_FINGER = 79
-IPPORT_TTYLINK = 87
-IPPORT_SUPDUP = 95
-IPPORT_EXECSERVER = 512
-IPPORT_LOGINSERVER = 513
-IPPORT_CMDSERVER = 514
-IPPORT_EFSSERVER = 520
-IPPORT_BIFFUDP = 512
-IPPORT_WHOSERVER = 513
-IPPORT_ROUTESERVER = 520
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 5000
-IMPLINK_IP = 155
-IMPLINK_LOWEXPER = 156
-IMPLINK_HIGHEXPER = 158
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_MAX = 128
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_MAX = 65536
-IN_CLASSC_NSHIFT = 8
-IN_CLASSD_NSHIFT = 28
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-IN_LOOPBACKNET = 127
-def IN_SET_LOOPBACK_ADDR(a): return \
-
-def IN6_IS_ADDR_UNSPECIFIED(addr): return \
-
-def IN6_IS_ADDR_LOOPBACK(addr): return \
-
-def IN6_IS_ADDR_LOOPBACK(addr): return \
-
-def IN6_IS_ADDR_MULTICAST(addr): return \
-
-def IN6_IS_ADDR_MULTICAST(addr): return \
-
-def IN6_IS_ADDR_LINKLOCAL(addr): return \
-
-def IN6_IS_ADDR_LINKLOCAL(addr): return \
-
-def IN6_IS_ADDR_SITELOCAL(addr): return \
-
-def IN6_IS_ADDR_SITELOCAL(addr): return \
-
-def IN6_IS_ADDR_V4MAPPED(addr): return \
-
-def IN6_IS_ADDR_V4MAPPED(addr): return \
-
-def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
-
-def IN6_IS_ADDR_V4MAPPED_ANY(addr): return \
-
-def IN6_IS_ADDR_V4COMPAT(addr): return \
-
-def IN6_IS_ADDR_V4COMPAT(addr): return \
-
-def IN6_IS_ADDR_MC_RESERVED(addr): return \
-
-def IN6_IS_ADDR_MC_RESERVED(addr): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
-
-def IN6_IS_ADDR_MC_NODELOCAL(addr): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(addr): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(addr): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(addr): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(addr): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(addr): return \
-
-IP_OPTIONS = 1
-IP_HDRINCL = 2
-IP_TOS = 3
-IP_TTL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 0x10
-IP_MULTICAST_TTL = 0x11
-IP_MULTICAST_LOOP = 0x12
-IP_ADD_MEMBERSHIP = 0x13
-IP_DROP_MEMBERSHIP = 0x14
-IP_SEC_OPT = 0x22
-IPSEC_PREF_NEVER = 0x01
-IPSEC_PREF_REQUIRED = 0x02
-IPSEC_PREF_UNIQUE = 0x04
-IP_ADD_PROXY_ADDR = 0x40
-IP_BOUND_IF = 0x41
-IP_UNSPEC_SRC = 0x42
-IP_REUSEADDR = 0x104
-IP_DONTROUTE = 0x105
-IP_BROADCAST = 0x106
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IPV6_RTHDR_TYPE_0 = 0
-IPV6_UNICAST_HOPS = 0x5
-IPV6_MULTICAST_IF = 0x6
-IPV6_MULTICAST_HOPS = 0x7
-IPV6_MULTICAST_LOOP = 0x8
-IPV6_JOIN_GROUP = 0x9
-IPV6_LEAVE_GROUP = 0xa
-IPV6_ADD_MEMBERSHIP = 0x9
-IPV6_DROP_MEMBERSHIP = 0xa
-IPV6_PKTINFO = 0xb
-IPV6_HOPLIMIT = 0xc
-IPV6_NEXTHOP = 0xd
-IPV6_HOPOPTS = 0xe
-IPV6_DSTOPTS = 0xf
-IPV6_RTHDR = 0x10
-IPV6_RTHDRDSTOPTS = 0x11
-IPV6_RECVPKTINFO = 0x12
-IPV6_RECVHOPLIMIT = 0x13
-IPV6_RECVHOPOPTS = 0x14
-IPV6_RECVDSTOPTS = 0x15
-IPV6_RECVRTHDR = 0x16
-IPV6_RECVRTHDRDSTOPTS = 0x17
-IPV6_CHECKSUM = 0x18
-IPV6_BOUND_IF = 0x41
-IPV6_UNSPEC_SRC = 0x42
-INET_ADDRSTRLEN = 16
-INET6_ADDRSTRLEN = 46
-IPV6_PAD1_OPT = 0
diff --git a/sys/lib/python/plat-sunos5/STROPTS.py b/sys/lib/python/plat-sunos5/STROPTS.py
deleted file mode 100644
index e95db932d..000000000
--- a/sys/lib/python/plat-sunos5/STROPTS.py
+++ /dev/null
@@ -1,1813 +0,0 @@
-# Generated by h2py from /usr/include/sys/stropts.h
-
-# Included from sys/feature_tests.h
-
-# Included from sys/isa_defs.h
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_ALIGNMENT = 8
-_LONG_LONG_ALIGNMENT = 8
-_DOUBLE_ALIGNMENT = 8
-_LONG_DOUBLE_ALIGNMENT = 16
-_POINTER_ALIGNMENT = 8
-_MAX_ALIGNMENT = 16
-_ALIGNMENT_REQUIRED = 1
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_ALIGNMENT = 4
-_LONG_LONG_ALIGNMENT = 4
-_DOUBLE_ALIGNMENT = 4
-_LONG_DOUBLE_ALIGNMENT = 4
-_POINTER_ALIGNMENT = 4
-_MAX_ALIGNMENT = 4
-_ALIGNMENT_REQUIRED = 0
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_LONG_ALIGNMENT = 8
-_DOUBLE_ALIGNMENT = 8
-_ALIGNMENT_REQUIRED = 1
-_LONG_ALIGNMENT = 4
-_LONG_DOUBLE_ALIGNMENT = 8
-_POINTER_ALIGNMENT = 4
-_MAX_ALIGNMENT = 8
-_LONG_ALIGNMENT = 8
-_LONG_DOUBLE_ALIGNMENT = 16
-_POINTER_ALIGNMENT = 8
-_MAX_ALIGNMENT = 16
-_POSIX_C_SOURCE = 1
-_LARGEFILE64_SOURCE = 1
-_LARGEFILE_SOURCE = 1
-_FILE_OFFSET_BITS = 64
-_FILE_OFFSET_BITS = 32
-_POSIX_C_SOURCE = 199506L
-_POSIX_PTHREAD_SEMANTICS = 1
-_XOPEN_VERSION = 500
-_XOPEN_VERSION = 4
-_XOPEN_VERSION = 3
-from TYPES import *
-
-# Included from sys/conf.h
-
-# Included from sys/t_lock.h
-
-# Included from sys/machlock.h
-from TYPES import *
-LOCK_HELD_VALUE = 0xff
-def SPIN_LOCK(pl): return ((pl) > ipltospl(LOCK_LEVEL))
-
-def LOCK_SAMPLE_INTERVAL(i): return (((i) & 0xff) == 0)
-
-CLOCK_LEVEL = 10
-LOCK_LEVEL = 10
-DISP_LEVEL = (LOCK_LEVEL + 1)
-PTR24_LSB = 5
-PTR24_MSB = (PTR24_LSB + 24)
-PTR24_ALIGN = 32
-PTR24_BASE = 0xe0000000
-
-# Included from sys/param.h
-from TYPES import *
-_POSIX_VDISABLE = 0
-MAX_INPUT = 512
-MAX_CANON = 256
-UID_NOBODY = 60001
-GID_NOBODY = UID_NOBODY
-UID_NOACCESS = 60002
-MAX_TASKID = 999999
-MAX_MAXPID = 999999
-DEFAULT_MAXPID = 999999
-DEFAULT_JUMPPID = 100000
-DEFAULT_MAXPID = 30000
-DEFAULT_JUMPPID = 0
-MAXUID = 2147483647
-MAXPROJID = MAXUID
-MAXLINK = 32767
-NMOUNT = 40
-CANBSIZ = 256
-NOFILE = 20
-NGROUPS_UMIN = 0
-NGROUPS_UMAX = 32
-NGROUPS_MAX_DEFAULT = 16
-NZERO = 20
-NULL = 0L
-NULL = 0
-CMASK = 022
-CDLIMIT = (1L<<11)
-NBPS = 0x20000
-NBPSCTR = 512
-UBSIZE = 512
-SCTRSHFT = 9
-SYSNAME = 9
-PREMOTE = 39
-MAXPATHLEN = 1024
-MAXSYMLINKS = 20
-MAXNAMELEN = 256
-NADDR = 13
-PIPE_BUF = 5120
-PIPE_MAX = 5120
-NBBY = 8
-MAXBSIZE = 8192
-DEV_BSIZE = 512
-DEV_BSHIFT = 9
-MAXFRAG = 8
-MAXOFF32_T = 0x7fffffff
-MAXOFF_T = 0x7fffffffffffffffl
-MAXOFFSET_T = 0x7fffffffffffffffl
-MAXOFF_T = 0x7fffffffl
-MAXOFFSET_T = 0x7fffffff
-def btodb(bytes): return \
-
-def dbtob(db): return \
-
-def lbtodb(bytes): return \
-
-def ldbtob(db): return \
-
-NCARGS32 = 0x100000
-NCARGS64 = 0x200000
-NCARGS = NCARGS64
-NCARGS = NCARGS32
-FSHIFT = 8
-FSCALE = (1<<FSHIFT)
-def DELAY(n): return drv_usecwait(n)
-
-def mmu_ptob(x): return ((x) << MMU_PAGESHIFT)
-
-def mmu_btop(x): return (((x)) >> MMU_PAGESHIFT)
-
-def mmu_btopr(x): return ((((x) + MMU_PAGEOFFSET) >> MMU_PAGESHIFT))
-
-def mmu_ptod(x): return ((x) << (MMU_PAGESHIFT - DEV_BSHIFT))
-
-def ptod(x): return ((x) << (PAGESHIFT - DEV_BSHIFT))
-
-def ptob(x): return ((x) << PAGESHIFT)
-
-def btop(x): return (((x) >> PAGESHIFT))
-
-def btopr(x): return ((((x) + PAGEOFFSET) >> PAGESHIFT))
-
-def dtop(DD): return (((DD) + NDPP - 1) >> (PAGESHIFT - DEV_BSHIFT))
-
-def dtopt(DD): return ((DD) >> (PAGESHIFT - DEV_BSHIFT))
-
-_AIO_LISTIO_MAX = (4096)
-_AIO_MAX = (-1)
-_MQ_OPEN_MAX = (32)
-_MQ_PRIO_MAX = (32)
-_SEM_NSEMS_MAX = INT_MAX
-_SEM_VALUE_MAX = INT_MAX
-
-# Included from sys/unistd.h
-_CS_PATH = 65
-_CS_LFS_CFLAGS = 68
-_CS_LFS_LDFLAGS = 69
-_CS_LFS_LIBS = 70
-_CS_LFS_LINTFLAGS = 71
-_CS_LFS64_CFLAGS = 72
-_CS_LFS64_LDFLAGS = 73
-_CS_LFS64_LIBS = 74
-_CS_LFS64_LINTFLAGS = 75
-_CS_XBS5_ILP32_OFF32_CFLAGS = 700
-_CS_XBS5_ILP32_OFF32_LDFLAGS = 701
-_CS_XBS5_ILP32_OFF32_LIBS = 702
-_CS_XBS5_ILP32_OFF32_LINTFLAGS = 703
-_CS_XBS5_ILP32_OFFBIG_CFLAGS = 705
-_CS_XBS5_ILP32_OFFBIG_LDFLAGS = 706
-_CS_XBS5_ILP32_OFFBIG_LIBS = 707
-_CS_XBS5_ILP32_OFFBIG_LINTFLAGS = 708
-_CS_XBS5_LP64_OFF64_CFLAGS = 709
-_CS_XBS5_LP64_OFF64_LDFLAGS = 710
-_CS_XBS5_LP64_OFF64_LIBS = 711
-_CS_XBS5_LP64_OFF64_LINTFLAGS = 712
-_CS_XBS5_LPBIG_OFFBIG_CFLAGS = 713
-_CS_XBS5_LPBIG_OFFBIG_LDFLAGS = 714
-_CS_XBS5_LPBIG_OFFBIG_LIBS = 715
-_CS_XBS5_LPBIG_OFFBIG_LINTFLAGS = 716
-_SC_ARG_MAX = 1
-_SC_CHILD_MAX = 2
-_SC_CLK_TCK = 3
-_SC_NGROUPS_MAX = 4
-_SC_OPEN_MAX = 5
-_SC_JOB_CONTROL = 6
-_SC_SAVED_IDS = 7
-_SC_VERSION = 8
-_SC_PASS_MAX = 9
-_SC_LOGNAME_MAX = 10
-_SC_PAGESIZE = 11
-_SC_XOPEN_VERSION = 12
-_SC_NPROCESSORS_CONF = 14
-_SC_NPROCESSORS_ONLN = 15
-_SC_STREAM_MAX = 16
-_SC_TZNAME_MAX = 17
-_SC_AIO_LISTIO_MAX = 18
-_SC_AIO_MAX = 19
-_SC_AIO_PRIO_DELTA_MAX = 20
-_SC_ASYNCHRONOUS_IO = 21
-_SC_DELAYTIMER_MAX = 22
-_SC_FSYNC = 23
-_SC_MAPPED_FILES = 24
-_SC_MEMLOCK = 25
-_SC_MEMLOCK_RANGE = 26
-_SC_MEMORY_PROTECTION = 27
-_SC_MESSAGE_PASSING = 28
-_SC_MQ_OPEN_MAX = 29
-_SC_MQ_PRIO_MAX = 30
-_SC_PRIORITIZED_IO = 31
-_SC_PRIORITY_SCHEDULING = 32
-_SC_REALTIME_SIGNALS = 33
-_SC_RTSIG_MAX = 34
-_SC_SEMAPHORES = 35
-_SC_SEM_NSEMS_MAX = 36
-_SC_SEM_VALUE_MAX = 37
-_SC_SHARED_MEMORY_OBJECTS = 38
-_SC_SIGQUEUE_MAX = 39
-_SC_SIGRT_MIN = 40
-_SC_SIGRT_MAX = 41
-_SC_SYNCHRONIZED_IO = 42
-_SC_TIMERS = 43
-_SC_TIMER_MAX = 44
-_SC_2_C_BIND = 45
-_SC_2_C_DEV = 46
-_SC_2_C_VERSION = 47
-_SC_2_FORT_DEV = 48
-_SC_2_FORT_RUN = 49
-_SC_2_LOCALEDEF = 50
-_SC_2_SW_DEV = 51
-_SC_2_UPE = 52
-_SC_2_VERSION = 53
-_SC_BC_BASE_MAX = 54
-_SC_BC_DIM_MAX = 55
-_SC_BC_SCALE_MAX = 56
-_SC_BC_STRING_MAX = 57
-_SC_COLL_WEIGHTS_MAX = 58
-_SC_EXPR_NEST_MAX = 59
-_SC_LINE_MAX = 60
-_SC_RE_DUP_MAX = 61
-_SC_XOPEN_CRYPT = 62
-_SC_XOPEN_ENH_I18N = 63
-_SC_XOPEN_SHM = 64
-_SC_2_CHAR_TERM = 66
-_SC_XOPEN_XCU_VERSION = 67
-_SC_ATEXIT_MAX = 76
-_SC_IOV_MAX = 77
-_SC_XOPEN_UNIX = 78
-_SC_PAGE_SIZE = _SC_PAGESIZE
-_SC_T_IOV_MAX = 79
-_SC_PHYS_PAGES = 500
-_SC_AVPHYS_PAGES = 501
-_SC_COHER_BLKSZ = 503
-_SC_SPLIT_CACHE = 504
-_SC_ICACHE_SZ = 505
-_SC_DCACHE_SZ = 506
-_SC_ICACHE_LINESZ = 507
-_SC_DCACHE_LINESZ = 508
-_SC_ICACHE_BLKSZ = 509
-_SC_DCACHE_BLKSZ = 510
-_SC_DCACHE_TBLKSZ = 511
-_SC_ICACHE_ASSOC = 512
-_SC_DCACHE_ASSOC = 513
-_SC_MAXPID = 514
-_SC_STACK_PROT = 515
-_SC_THREAD_DESTRUCTOR_ITERATIONS = 568
-_SC_GETGR_R_SIZE_MAX = 569
-_SC_GETPW_R_SIZE_MAX = 570
-_SC_LOGIN_NAME_MAX = 571
-_SC_THREAD_KEYS_MAX = 572
-_SC_THREAD_STACK_MIN = 573
-_SC_THREAD_THREADS_MAX = 574
-_SC_TTY_NAME_MAX = 575
-_SC_THREADS = 576
-_SC_THREAD_ATTR_STACKADDR = 577
-_SC_THREAD_ATTR_STACKSIZE = 578
-_SC_THREAD_PRIORITY_SCHEDULING = 579
-_SC_THREAD_PRIO_INHERIT = 580
-_SC_THREAD_PRIO_PROTECT = 581
-_SC_THREAD_PROCESS_SHARED = 582
-_SC_THREAD_SAFE_FUNCTIONS = 583
-_SC_XOPEN_LEGACY = 717
-_SC_XOPEN_REALTIME = 718
-_SC_XOPEN_REALTIME_THREADS = 719
-_SC_XBS5_ILP32_OFF32 = 720
-_SC_XBS5_ILP32_OFFBIG = 721
-_SC_XBS5_LP64_OFF64 = 722
-_SC_XBS5_LPBIG_OFFBIG = 723
-_PC_LINK_MAX = 1
-_PC_MAX_CANON = 2
-_PC_MAX_INPUT = 3
-_PC_NAME_MAX = 4
-_PC_PATH_MAX = 5
-_PC_PIPE_BUF = 6
-_PC_NO_TRUNC = 7
-_PC_VDISABLE = 8
-_PC_CHOWN_RESTRICTED = 9
-_PC_ASYNC_IO = 10
-_PC_PRIO_IO = 11
-_PC_SYNC_IO = 12
-_PC_FILESIZEBITS = 67
-_PC_LAST = 67
-_POSIX_VERSION = 199506L
-_POSIX2_VERSION = 199209L
-_POSIX2_C_VERSION = 199209L
-_XOPEN_XCU_VERSION = 4
-_XOPEN_REALTIME = 1
-_XOPEN_ENH_I18N = 1
-_XOPEN_SHM = 1
-_POSIX2_C_BIND = 1
-_POSIX2_CHAR_TERM = 1
-_POSIX2_LOCALEDEF = 1
-_POSIX2_C_DEV = 1
-_POSIX2_SW_DEV = 1
-_POSIX2_UPE = 1
-
-# Included from sys/mutex.h
-from TYPES import *
-def MUTEX_HELD(x): return (mutex_owned(x))
-
-
-# Included from sys/rwlock.h
-from TYPES import *
-def RW_READ_HELD(x): return (rw_read_held((x)))
-
-def RW_WRITE_HELD(x): return (rw_write_held((x)))
-
-def RW_LOCK_HELD(x): return (rw_lock_held((x)))
-
-def RW_ISWRITER(x): return (rw_iswriter(x))
-
-
-# Included from sys/semaphore.h
-
-# Included from sys/thread.h
-from TYPES import *
-
-# Included from sys/klwp.h
-from TYPES import *
-
-# Included from sys/condvar.h
-from TYPES import *
-
-# Included from sys/time.h
-
-# Included from sys/types32.h
-
-# Included from sys/int_types.h
-TIME32_MAX = INT32_MAX
-TIME32_MIN = INT32_MIN
-def TIMEVAL_OVERFLOW(tv): return \
-
-from TYPES import *
-DST_NONE = 0
-DST_USA = 1
-DST_AUST = 2
-DST_WET = 3
-DST_MET = 4
-DST_EET = 5
-DST_CAN = 6
-DST_GB = 7
-DST_RUM = 8
-DST_TUR = 9
-DST_AUSTALT = 10
-ITIMER_REAL = 0
-ITIMER_VIRTUAL = 1
-ITIMER_PROF = 2
-ITIMER_REALPROF = 3
-def ITIMERVAL_OVERFLOW(itv): return \
-
-SEC = 1
-MILLISEC = 1000
-MICROSEC = 1000000
-NANOSEC = 1000000000
-
-# Included from sys/time_impl.h
-def TIMESPEC_OVERFLOW(ts): return \
-
-def ITIMERSPEC_OVERFLOW(it): return \
-
-__CLOCK_REALTIME0 = 0
-CLOCK_VIRTUAL = 1
-CLOCK_PROF = 2
-__CLOCK_REALTIME3 = 3
-CLOCK_HIGHRES = 4
-CLOCK_MAX = 5
-CLOCK_REALTIME = __CLOCK_REALTIME3
-CLOCK_REALTIME = __CLOCK_REALTIME0
-TIMER_RELTIME = 0x0
-TIMER_ABSTIME = 0x1
-def TICK_TO_SEC(tick): return ((tick) / hz)
-
-def SEC_TO_TICK(sec): return ((sec) * hz)
-
-def TICK_TO_MSEC(tick): return \
-
-def MSEC_TO_TICK(msec): return \
-
-def MSEC_TO_TICK_ROUNDUP(msec): return \
-
-def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
-
-def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
-
-def USEC_TO_TICK_ROUNDUP(usec): return \
-
-def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
-
-def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
-
-def NSEC_TO_TICK_ROUNDUP(nsec): return \
-
-def TIMEVAL_TO_TICK(tvp): return \
-
-def TIMESTRUC_TO_TICK(tsp): return \
-
-
-# Included from time.h
-from TYPES import *
-
-# Included from iso/time_iso.h
-NULL = 0L
-NULL = 0
-CLOCKS_PER_SEC = 1000000
-
-# Included from sys/select.h
-FD_SETSIZE = 65536
-FD_SETSIZE = 1024
-_NBBY = 8
-NBBY = _NBBY
-def FD_ZERO(p): return bzero((p), sizeof (*(p)))
-
-
-# Included from sys/signal.h
-
-# Included from sys/iso/signal_iso.h
-SIGHUP = 1
-SIGINT = 2
-SIGQUIT = 3
-SIGILL = 4
-SIGTRAP = 5
-SIGIOT = 6
-SIGABRT = 6
-SIGEMT = 7
-SIGFPE = 8
-SIGKILL = 9
-SIGBUS = 10
-SIGSEGV = 11
-SIGSYS = 12
-SIGPIPE = 13
-SIGALRM = 14
-SIGTERM = 15
-SIGUSR1 = 16
-SIGUSR2 = 17
-SIGCLD = 18
-SIGCHLD = 18
-SIGPWR = 19
-SIGWINCH = 20
-SIGURG = 21
-SIGPOLL = 22
-SIGIO = SIGPOLL
-SIGSTOP = 23
-SIGTSTP = 24
-SIGCONT = 25
-SIGTTIN = 26
-SIGTTOU = 27
-SIGVTALRM = 28
-SIGPROF = 29
-SIGXCPU = 30
-SIGXFSZ = 31
-SIGWAITING = 32
-SIGLWP = 33
-SIGFREEZE = 34
-SIGTHAW = 35
-SIGCANCEL = 36
-SIGLOST = 37
-_SIGRTMIN = 38
-_SIGRTMAX = 45
-SIG_BLOCK = 1
-SIG_UNBLOCK = 2
-SIG_SETMASK = 3
-SIGNO_MASK = 0xFF
-SIGDEFER = 0x100
-SIGHOLD = 0x200
-SIGRELSE = 0x400
-SIGIGNORE = 0x800
-SIGPAUSE = 0x1000
-
-# Included from sys/siginfo.h
-from TYPES import *
-SIGEV_NONE = 1
-SIGEV_SIGNAL = 2
-SIGEV_THREAD = 3
-SI_NOINFO = 32767
-SI_USER = 0
-SI_LWP = (-1)
-SI_QUEUE = (-2)
-SI_TIMER = (-3)
-SI_ASYNCIO = (-4)
-SI_MESGQ = (-5)
-
-# Included from sys/machsig.h
-ILL_ILLOPC = 1
-ILL_ILLOPN = 2
-ILL_ILLADR = 3
-ILL_ILLTRP = 4
-ILL_PRVOPC = 5
-ILL_PRVREG = 6
-ILL_COPROC = 7
-ILL_BADSTK = 8
-NSIGILL = 8
-EMT_TAGOVF = 1
-EMT_CPCOVF = 2
-NSIGEMT = 2
-FPE_INTDIV = 1
-FPE_INTOVF = 2
-FPE_FLTDIV = 3
-FPE_FLTOVF = 4
-FPE_FLTUND = 5
-FPE_FLTRES = 6
-FPE_FLTINV = 7
-FPE_FLTSUB = 8
-NSIGFPE = 8
-SEGV_MAPERR = 1
-SEGV_ACCERR = 2
-NSIGSEGV = 2
-BUS_ADRALN = 1
-BUS_ADRERR = 2
-BUS_OBJERR = 3
-NSIGBUS = 3
-TRAP_BRKPT = 1
-TRAP_TRACE = 2
-TRAP_RWATCH = 3
-TRAP_WWATCH = 4
-TRAP_XWATCH = 5
-NSIGTRAP = 5
-CLD_EXITED = 1
-CLD_KILLED = 2
-CLD_DUMPED = 3
-CLD_TRAPPED = 4
-CLD_STOPPED = 5
-CLD_CONTINUED = 6
-NSIGCLD = 6
-POLL_IN = 1
-POLL_OUT = 2
-POLL_MSG = 3
-POLL_ERR = 4
-POLL_PRI = 5
-POLL_HUP = 6
-NSIGPOLL = 6
-PROF_SIG = 1
-NSIGPROF = 1
-SI_MAXSZ = 256
-SI_MAXSZ = 128
-
-# Included from sys/time_std_impl.h
-from TYPES import *
-SI32_MAXSZ = 128
-def SI_CANQUEUE(c): return ((c) <= SI_QUEUE)
-
-SA_NOCLDSTOP = 0x00020000
-SA_ONSTACK = 0x00000001
-SA_RESETHAND = 0x00000002
-SA_RESTART = 0x00000004
-SA_SIGINFO = 0x00000008
-SA_NODEFER = 0x00000010
-SA_NOCLDWAIT = 0x00010000
-SA_WAITSIG = 0x00010000
-NSIG = 46
-MAXSIG = 45
-S_SIGNAL = 1
-S_SIGSET = 2
-S_SIGACTION = 3
-S_NONE = 4
-MINSIGSTKSZ = 2048
-SIGSTKSZ = 8192
-SS_ONSTACK = 0x00000001
-SS_DISABLE = 0x00000002
-SN_PROC = 1
-SN_CANCEL = 2
-SN_SEND = 3
-
-# Included from sys/ucontext.h
-from TYPES import *
-
-# Included from sys/regset.h
-REG_CCR = (0)
-REG_PSR = (0)
-REG_PSR = (0)
-REG_PC = (1)
-REG_nPC = (2)
-REG_Y = (3)
-REG_G1 = (4)
-REG_G2 = (5)
-REG_G3 = (6)
-REG_G4 = (7)
-REG_G5 = (8)
-REG_G6 = (9)
-REG_G7 = (10)
-REG_O0 = (11)
-REG_O1 = (12)
-REG_O2 = (13)
-REG_O3 = (14)
-REG_O4 = (15)
-REG_O5 = (16)
-REG_O6 = (17)
-REG_O7 = (18)
-REG_ASI = (19)
-REG_FPRS = (20)
-REG_PS = REG_PSR
-REG_SP = REG_O6
-REG_R0 = REG_O0
-REG_R1 = REG_O1
-_NGREG = 21
-_NGREG = 19
-NGREG = _NGREG
-_NGREG32 = 19
-_NGREG64 = 21
-SPARC_MAXREGWINDOW = 31
-MAXFPQ = 16
-XRS_ID = 0x78727300
-
-# Included from v7/sys/privregs.h
-
-# Included from v7/sys/psr.h
-PSR_CWP = 0x0000001F
-PSR_ET = 0x00000020
-PSR_PS = 0x00000040
-PSR_S = 0x00000080
-PSR_PIL = 0x00000F00
-PSR_EF = 0x00001000
-PSR_EC = 0x00002000
-PSR_RSV = 0x000FC000
-PSR_ICC = 0x00F00000
-PSR_C = 0x00100000
-PSR_V = 0x00200000
-PSR_Z = 0x00400000
-PSR_N = 0x00800000
-PSR_VER = 0x0F000000
-PSR_IMPL = 0xF0000000
-PSL_ALLCC = PSR_ICC
-PSL_USER = (PSR_S)
-PSL_USERMASK = (PSR_ICC)
-PSL_UBITS = (PSR_ICC|PSR_EF)
-def USERMODE(ps): return (((ps) & PSR_PS) == 0)
-
-
-# Included from sys/fsr.h
-FSR_CEXC = 0x0000001f
-FSR_AEXC = 0x000003e0
-FSR_FCC = 0x00000c00
-FSR_PR = 0x00001000
-FSR_QNE = 0x00002000
-FSR_FTT = 0x0001c000
-FSR_VER = 0x000e0000
-FSR_TEM = 0x0f800000
-FSR_RP = 0x30000000
-FSR_RD = 0xc0000000
-FSR_VER_SHIFT = 17
-FSR_FCC1 = 0x00000003
-FSR_FCC2 = 0x0000000C
-FSR_FCC3 = 0x00000030
-FSR_CEXC_NX = 0x00000001
-FSR_CEXC_DZ = 0x00000002
-FSR_CEXC_UF = 0x00000004
-FSR_CEXC_OF = 0x00000008
-FSR_CEXC_NV = 0x00000010
-FSR_AEXC_NX = (0x1 << 5)
-FSR_AEXC_DZ = (0x2 << 5)
-FSR_AEXC_UF = (0x4 << 5)
-FSR_AEXC_OF = (0x8 << 5)
-FSR_AEXC_NV = (0x10 << 5)
-FTT_NONE = 0
-FTT_IEEE = 1
-FTT_UNFIN = 2
-FTT_UNIMP = 3
-FTT_SEQ = 4
-FTT_ALIGN = 5
-FTT_DFAULT = 6
-FSR_FTT_SHIFT = 14
-FSR_FTT_IEEE = (FTT_IEEE << FSR_FTT_SHIFT)
-FSR_FTT_UNFIN = (FTT_UNFIN << FSR_FTT_SHIFT)
-FSR_FTT_UNIMP = (FTT_UNIMP << FSR_FTT_SHIFT)
-FSR_FTT_SEQ = (FTT_SEQ << FSR_FTT_SHIFT)
-FSR_FTT_ALIGN = (FTT_ALIGN << FSR_FTT_SHIFT)
-FSR_FTT_DFAULT = (FTT_DFAULT << FSR_FTT_SHIFT)
-FSR_TEM_NX = (0x1 << 23)
-FSR_TEM_DZ = (0x2 << 23)
-FSR_TEM_UF = (0x4 << 23)
-FSR_TEM_OF = (0x8 << 23)
-FSR_TEM_NV = (0x10 << 23)
-RP_DBLEXT = 0
-RP_SINGLE = 1
-RP_DOUBLE = 2
-RP_RESERVED = 3
-RD_NEAR = 0
-RD_ZER0 = 1
-RD_POSINF = 2
-RD_NEGINF = 3
-FPRS_DL = 0x1
-FPRS_DU = 0x2
-FPRS_FEF = 0x4
-PIL_MAX = 0xf
-def SAVE_GLOBALS(RP): return \
-
-def RESTORE_GLOBALS(RP): return \
-
-def SAVE_OUTS(RP): return \
-
-def RESTORE_OUTS(RP): return \
-
-def SAVE_WINDOW(SBP): return \
-
-def RESTORE_WINDOW(SBP): return \
-
-def STORE_FPREGS(FP): return \
-
-def LOAD_FPREGS(FP): return \
-
-_SPARC_MAXREGWINDOW = 31
-_XRS_ID = 0x78727300
-GETCONTEXT = 0
-SETCONTEXT = 1
-UC_SIGMASK = 001
-UC_STACK = 002
-UC_CPU = 004
-UC_MAU = 010
-UC_FPU = UC_MAU
-UC_INTR = 020
-UC_ASR = 040
-UC_MCONTEXT = (UC_CPU|UC_FPU|UC_ASR)
-UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
-_SIGQUEUE_MAX = 32
-_SIGNOTIFY_MAX = 32
-
-# Included from sys/pcb.h
-INSTR_VALID = 0x02
-NORMAL_STEP = 0x04
-WATCH_STEP = 0x08
-CPC_OVERFLOW = 0x10
-ASYNC_HWERR = 0x20
-STEP_NONE = 0
-STEP_REQUESTED = 1
-STEP_ACTIVE = 2
-STEP_WASACTIVE = 3
-
-# Included from sys/msacct.h
-LMS_USER = 0
-LMS_SYSTEM = 1
-LMS_TRAP = 2
-LMS_TFAULT = 3
-LMS_DFAULT = 4
-LMS_KFAULT = 5
-LMS_USER_LOCK = 6
-LMS_SLEEP = 7
-LMS_WAIT_CPU = 8
-LMS_STOPPED = 9
-NMSTATES = 10
-
-# Included from sys/lwp.h
-
-# Included from sys/synch.h
-from TYPES import *
-USYNC_THREAD = 0x00
-USYNC_PROCESS = 0x01
-LOCK_NORMAL = 0x00
-LOCK_ERRORCHECK = 0x02
-LOCK_RECURSIVE = 0x04
-USYNC_PROCESS_ROBUST = 0x08
-LOCK_PRIO_NONE = 0x00
-LOCK_PRIO_INHERIT = 0x10
-LOCK_PRIO_PROTECT = 0x20
-LOCK_STALL_NP = 0x00
-LOCK_ROBUST_NP = 0x40
-LOCK_OWNERDEAD = 0x1
-LOCK_NOTRECOVERABLE = 0x2
-LOCK_INITED = 0x4
-LOCK_UNMAPPED = 0x8
-LWP_DETACHED = 0x00000040
-LWP_SUSPENDED = 0x00000080
-__LWP_ASLWP = 0x00000100
-MAXSYSARGS = 8
-NORMALRETURN = 0
-JUSTRETURN = 1
-LWP_USER = 0x01
-LWP_SYS = 0x02
-TS_FREE = 0x00
-TS_SLEEP = 0x01
-TS_RUN = 0x02
-TS_ONPROC = 0x04
-TS_ZOMB = 0x08
-TS_STOPPED = 0x10
-T_INTR_THREAD = 0x0001
-T_WAKEABLE = 0x0002
-T_TOMASK = 0x0004
-T_TALLOCSTK = 0x0008
-T_WOULDBLOCK = 0x0020
-T_DONTBLOCK = 0x0040
-T_DONTPEND = 0x0080
-T_SYS_PROF = 0x0100
-T_WAITCVSEM = 0x0200
-T_WATCHPT = 0x0400
-T_PANIC = 0x0800
-TP_HOLDLWP = 0x0002
-TP_TWAIT = 0x0004
-TP_LWPEXIT = 0x0008
-TP_PRSTOP = 0x0010
-TP_CHKPT = 0x0020
-TP_EXITLWP = 0x0040
-TP_PRVSTOP = 0x0080
-TP_MSACCT = 0x0100
-TP_STOPPING = 0x0200
-TP_WATCHPT = 0x0400
-TP_PAUSE = 0x0800
-TP_CHANGEBIND = 0x1000
-TS_LOAD = 0x0001
-TS_DONT_SWAP = 0x0002
-TS_SWAPENQ = 0x0004
-TS_ON_SWAPQ = 0x0008
-TS_CSTART = 0x0100
-TS_UNPAUSE = 0x0200
-TS_XSTART = 0x0400
-TS_PSTART = 0x0800
-TS_RESUME = 0x1000
-TS_CREATE = 0x2000
-TS_ALLSTART = \
- (TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
-def CPR_VSTOPPED(t): return \
-
-def THREAD_TRANSITION(tp): return thread_transition(tp);
-
-def THREAD_STOP(tp): return \
-
-def THREAD_ZOMB(tp): return THREAD_SET_STATE(tp, TS_ZOMB, NULL)
-
-def SEMA_HELD(x): return (sema_held((x)))
-
-NO_LOCKS_HELD = 1
-NO_COMPETING_THREADS = 1
-FMNAMESZ = 8
-
-# Included from sys/systm.h
-from TYPES import *
-
-# Included from sys/proc.h
-
-# Included from sys/cred.h
-
-# Included from sys/user.h
-from TYPES import *
-
-# Included from sys/resource.h
-from TYPES import *
-PRIO_PROCESS = 0
-PRIO_PGRP = 1
-PRIO_USER = 2
-RLIMIT_CPU = 0
-RLIMIT_FSIZE = 1
-RLIMIT_DATA = 2
-RLIMIT_STACK = 3
-RLIMIT_CORE = 4
-RLIMIT_NOFILE = 5
-RLIMIT_VMEM = 6
-RLIMIT_AS = RLIMIT_VMEM
-RLIM_NLIMITS = 7
-RLIM_INFINITY = (-3l)
-RLIM_SAVED_MAX = (-2l)
-RLIM_SAVED_CUR = (-1l)
-RLIM_INFINITY = 0x7fffffff
-RLIM_SAVED_MAX = 0x7ffffffe
-RLIM_SAVED_CUR = 0x7ffffffd
-RLIM32_INFINITY = 0x7fffffff
-RLIM32_SAVED_MAX = 0x7ffffffe
-RLIM32_SAVED_CUR = 0x7ffffffd
-
-# Included from sys/model.h
-
-# Included from sys/debug.h
-def ASSERT64(x): return ASSERT(x)
-
-def ASSERT32(x): return ASSERT(x)
-
-DATAMODEL_MASK = 0x0FF00000
-DATAMODEL_ILP32 = 0x00100000
-DATAMODEL_LP64 = 0x00200000
-DATAMODEL_NONE = 0
-DATAMODEL_NATIVE = DATAMODEL_LP64
-DATAMODEL_NATIVE = DATAMODEL_ILP32
-def STRUCT_SIZE(handle): return \
-
-def STRUCT_BUF(handle): return ((handle).ptr.m64)
-
-def SIZEOF_PTR(umodel): return \
-
-def STRUCT_SIZE(handle): return (sizeof (*(handle).ptr))
-
-def STRUCT_BUF(handle): return ((handle).ptr)
-
-def SIZEOF_PTR(umodel): return sizeof (caddr_t)
-
-def lwp_getdatamodel(t): return DATAMODEL_ILP32
-
-RUSAGE_SELF = 0
-RUSAGE_CHILDREN = -1
-
-# Included from sys/auxv.h
-AT_NULL = 0
-AT_IGNORE = 1
-AT_EXECFD = 2
-AT_PHDR = 3
-AT_PHENT = 4
-AT_PHNUM = 5
-AT_PAGESZ = 6
-AT_BASE = 7
-AT_FLAGS = 8
-AT_ENTRY = 9
-AT_DCACHEBSIZE = 10
-AT_ICACHEBSIZE = 11
-AT_UCACHEBSIZE = 12
-AT_SUN_UID = 2000
-AT_SUN_RUID = 2001
-AT_SUN_GID = 2002
-AT_SUN_RGID = 2003
-AT_SUN_LDELF = 2004
-AT_SUN_LDSHDR = 2005
-AT_SUN_LDNAME = 2006
-AT_SUN_LPAGESZ = 2007
-AT_SUN_PLATFORM = 2008
-AT_SUN_HWCAP = 2009
-AT_SUN_IFLUSH = 2010
-AT_SUN_CPU = 2011
-AT_SUN_EMUL_ENTRY = 2012
-AT_SUN_EMUL_EXECFD = 2013
-AT_SUN_EXECNAME = 2014
-AT_SUN_MMU = 2015
-
-# Included from sys/errno.h
-EPERM = 1
-ENOENT = 2
-ESRCH = 3
-EINTR = 4
-EIO = 5
-ENXIO = 6
-E2BIG = 7
-ENOEXEC = 8
-EBADF = 9
-ECHILD = 10
-EAGAIN = 11
-ENOMEM = 12
-EACCES = 13
-EFAULT = 14
-ENOTBLK = 15
-EBUSY = 16
-EEXIST = 17
-EXDEV = 18
-ENODEV = 19
-ENOTDIR = 20
-EISDIR = 21
-EINVAL = 22
-ENFILE = 23
-EMFILE = 24
-ENOTTY = 25
-ETXTBSY = 26
-EFBIG = 27
-ENOSPC = 28
-ESPIPE = 29
-EROFS = 30
-EMLINK = 31
-EPIPE = 32
-EDOM = 33
-ERANGE = 34
-ENOMSG = 35
-EIDRM = 36
-ECHRNG = 37
-EL2NSYNC = 38
-EL3HLT = 39
-EL3RST = 40
-ELNRNG = 41
-EUNATCH = 42
-ENOCSI = 43
-EL2HLT = 44
-EDEADLK = 45
-ENOLCK = 46
-ECANCELED = 47
-ENOTSUP = 48
-EDQUOT = 49
-EBADE = 50
-EBADR = 51
-EXFULL = 52
-ENOANO = 53
-EBADRQC = 54
-EBADSLT = 55
-EDEADLOCK = 56
-EBFONT = 57
-EOWNERDEAD = 58
-ENOTRECOVERABLE = 59
-ENOSTR = 60
-ENODATA = 61
-ETIME = 62
-ENOSR = 63
-ENONET = 64
-ENOPKG = 65
-EREMOTE = 66
-ENOLINK = 67
-EADV = 68
-ESRMNT = 69
-ECOMM = 70
-EPROTO = 71
-ELOCKUNMAPPED = 72
-ENOTACTIVE = 73
-EMULTIHOP = 74
-EBADMSG = 77
-ENAMETOOLONG = 78
-EOVERFLOW = 79
-ENOTUNIQ = 80
-EBADFD = 81
-EREMCHG = 82
-ELIBACC = 83
-ELIBBAD = 84
-ELIBSCN = 85
-ELIBMAX = 86
-ELIBEXEC = 87
-EILSEQ = 88
-ENOSYS = 89
-ELOOP = 90
-ERESTART = 91
-ESTRPIPE = 92
-ENOTEMPTY = 93
-EUSERS = 94
-ENOTSOCK = 95
-EDESTADDRREQ = 96
-EMSGSIZE = 97
-EPROTOTYPE = 98
-ENOPROTOOPT = 99
-EPROTONOSUPPORT = 120
-ESOCKTNOSUPPORT = 121
-EOPNOTSUPP = 122
-EPFNOSUPPORT = 123
-EAFNOSUPPORT = 124
-EADDRINUSE = 125
-EADDRNOTAVAIL = 126
-ENETDOWN = 127
-ENETUNREACH = 128
-ENETRESET = 129
-ECONNABORTED = 130
-ECONNRESET = 131
-ENOBUFS = 132
-EISCONN = 133
-ENOTCONN = 134
-ESHUTDOWN = 143
-ETOOMANYREFS = 144
-ETIMEDOUT = 145
-ECONNREFUSED = 146
-EHOSTDOWN = 147
-EHOSTUNREACH = 148
-EWOULDBLOCK = EAGAIN
-EALREADY = 149
-EINPROGRESS = 150
-ESTALE = 151
-PSARGSZ = 80
-PSCOMSIZ = 14
-MAXCOMLEN = 16
-__KERN_NAUXV_IMPL = 19
-__KERN_NAUXV_IMPL = 21
-__KERN_NAUXV_IMPL = 21
-PSARGSZ = 80
-
-# Included from sys/watchpoint.h
-from TYPES import *
-
-# Included from vm/seg_enum.h
-
-# Included from sys/copyops.h
-from TYPES import *
-
-# Included from sys/buf.h
-
-# Included from sys/kstat.h
-from TYPES import *
-KSTAT_STRLEN = 31
-def KSTAT_ENTER(k): return \
-
-def KSTAT_EXIT(k): return \
-
-KSTAT_TYPE_RAW = 0
-KSTAT_TYPE_NAMED = 1
-KSTAT_TYPE_INTR = 2
-KSTAT_TYPE_IO = 3
-KSTAT_TYPE_TIMER = 4
-KSTAT_NUM_TYPES = 5
-KSTAT_FLAG_VIRTUAL = 0x01
-KSTAT_FLAG_VAR_SIZE = 0x02
-KSTAT_FLAG_WRITABLE = 0x04
-KSTAT_FLAG_PERSISTENT = 0x08
-KSTAT_FLAG_DORMANT = 0x10
-KSTAT_FLAG_INVALID = 0x20
-KSTAT_READ = 0
-KSTAT_WRITE = 1
-KSTAT_DATA_CHAR = 0
-KSTAT_DATA_INT32 = 1
-KSTAT_DATA_UINT32 = 2
-KSTAT_DATA_INT64 = 3
-KSTAT_DATA_UINT64 = 4
-KSTAT_DATA_LONG = KSTAT_DATA_INT32
-KSTAT_DATA_ULONG = KSTAT_DATA_UINT32
-KSTAT_DATA_LONG = KSTAT_DATA_INT64
-KSTAT_DATA_ULONG = KSTAT_DATA_UINT64
-KSTAT_DATA_LONG = 7
-KSTAT_DATA_ULONG = 8
-KSTAT_DATA_LONGLONG = KSTAT_DATA_INT64
-KSTAT_DATA_ULONGLONG = KSTAT_DATA_UINT64
-KSTAT_DATA_FLOAT = 5
-KSTAT_DATA_DOUBLE = 6
-KSTAT_INTR_HARD = 0
-KSTAT_INTR_SOFT = 1
-KSTAT_INTR_WATCHDOG = 2
-KSTAT_INTR_SPURIOUS = 3
-KSTAT_INTR_MULTSVC = 4
-KSTAT_NUM_INTRS = 5
-B_BUSY = 0x0001
-B_DONE = 0x0002
-B_ERROR = 0x0004
-B_PAGEIO = 0x0010
-B_PHYS = 0x0020
-B_READ = 0x0040
-B_WRITE = 0x0100
-B_KERNBUF = 0x0008
-B_WANTED = 0x0080
-B_AGE = 0x000200
-B_ASYNC = 0x000400
-B_DELWRI = 0x000800
-B_STALE = 0x001000
-B_DONTNEED = 0x002000
-B_REMAPPED = 0x004000
-B_FREE = 0x008000
-B_INVAL = 0x010000
-B_FORCE = 0x020000
-B_HEAD = 0x040000
-B_NOCACHE = 0x080000
-B_TRUNC = 0x100000
-B_SHADOW = 0x200000
-B_RETRYWRI = 0x400000
-def notavail(bp): return \
-
-def BWRITE(bp): return \
-
-def BWRITE2(bp): return \
-
-
-# Included from sys/aio_req.h
-
-# Included from sys/uio.h
-from TYPES import *
-WP_NOWATCH = 0x01
-WP_SETPROT = 0x02
-
-# Included from sys/timer.h
-from TYPES import *
-_TIMER_MAX = 32
-ITLK_LOCKED = 0x01
-ITLK_WANTED = 0x02
-ITLK_REMOVE = 0x04
-IT_PERLWP = 0x01
-IT_SIGNAL = 0x02
-
-# Included from sys/utrap.h
-UT_INSTRUCTION_DISABLED = 1
-UT_INSTRUCTION_ERROR = 2
-UT_INSTRUCTION_PROTECTION = 3
-UT_ILLTRAP_INSTRUCTION = 4
-UT_ILLEGAL_INSTRUCTION = 5
-UT_PRIVILEGED_OPCODE = 6
-UT_FP_DISABLED = 7
-UT_FP_EXCEPTION_IEEE_754 = 8
-UT_FP_EXCEPTION_OTHER = 9
-UT_TAG_OVERFLOW = 10
-UT_DIVISION_BY_ZERO = 11
-UT_DATA_EXCEPTION = 12
-UT_DATA_ERROR = 13
-UT_DATA_PROTECTION = 14
-UT_MEM_ADDRESS_NOT_ALIGNED = 15
-UT_PRIVILEGED_ACTION = 16
-UT_ASYNC_DATA_ERROR = 17
-UT_TRAP_INSTRUCTION_16 = 18
-UT_TRAP_INSTRUCTION_17 = 19
-UT_TRAP_INSTRUCTION_18 = 20
-UT_TRAP_INSTRUCTION_19 = 21
-UT_TRAP_INSTRUCTION_20 = 22
-UT_TRAP_INSTRUCTION_21 = 23
-UT_TRAP_INSTRUCTION_22 = 24
-UT_TRAP_INSTRUCTION_23 = 25
-UT_TRAP_INSTRUCTION_24 = 26
-UT_TRAP_INSTRUCTION_25 = 27
-UT_TRAP_INSTRUCTION_26 = 28
-UT_TRAP_INSTRUCTION_27 = 29
-UT_TRAP_INSTRUCTION_28 = 30
-UT_TRAP_INSTRUCTION_29 = 31
-UT_TRAP_INSTRUCTION_30 = 32
-UT_TRAP_INSTRUCTION_31 = 33
-UTRAP_V8P_FP_DISABLED = UT_FP_DISABLED
-UTRAP_V8P_MEM_ADDRESS_NOT_ALIGNED = UT_MEM_ADDRESS_NOT_ALIGNED
-UT_PRECISE_MAXTRAPS = 33
-
-# Included from sys/refstr.h
-
-# Included from sys/task.h
-from TYPES import *
-TASK_NORMAL = 0x0
-TASK_FINAL = 0x1
-TASK_FINALITY = 0x1
-
-# Included from sys/id_space.h
-from TYPES import *
-
-# Included from sys/vmem.h
-from TYPES import *
-VM_SLEEP = 0x00000000
-VM_NOSLEEP = 0x00000001
-VM_PANIC = 0x00000002
-VM_KMFLAGS = 0x000000ff
-VM_BESTFIT = 0x00000100
-VMEM_ALLOC = 0x01
-VMEM_FREE = 0x02
-VMEM_SPAN = 0x10
-ISP_NORMAL = 0x0
-ISP_RESERVE = 0x1
-
-# Included from sys/exacct_impl.h
-from TYPES import *
-
-# Included from sys/kmem.h
-from TYPES import *
-KM_SLEEP = 0x0000
-KM_NOSLEEP = 0x0001
-KM_PANIC = 0x0002
-KM_VMFLAGS = 0x00ff
-KM_FLAGS = 0xffff
-KMC_NOTOUCH = 0x00010000
-KMC_NODEBUG = 0x00020000
-KMC_NOMAGAZINE = 0x00040000
-KMC_NOHASH = 0x00080000
-KMC_QCACHE = 0x00100000
-_ISA_IA32 = 0
-_ISA_IA64 = 1
-SSLEEP = 1
-SRUN = 2
-SZOMB = 3
-SSTOP = 4
-SIDL = 5
-SONPROC = 6
-CLDPEND = 0x0001
-CLDCONT = 0x0002
-SSYS = 0x00000001
-STRC = 0x00000002
-SLOAD = 0x00000008
-SLOCK = 0x00000010
-SPREXEC = 0x00000020
-SPROCTR = 0x00000040
-SPRFORK = 0x00000080
-SKILLED = 0x00000100
-SULOAD = 0x00000200
-SRUNLCL = 0x00000400
-SBPTADJ = 0x00000800
-SKILLCL = 0x00001000
-SOWEUPC = 0x00002000
-SEXECED = 0x00004000
-SPASYNC = 0x00008000
-SJCTL = 0x00010000
-SNOWAIT = 0x00020000
-SVFORK = 0x00040000
-SVFWAIT = 0x00080000
-EXITLWPS = 0x00100000
-HOLDFORK = 0x00200000
-SWAITSIG = 0x00400000
-HOLDFORK1 = 0x00800000
-COREDUMP = 0x01000000
-SMSACCT = 0x02000000
-ASLWP = 0x04000000
-SPRLOCK = 0x08000000
-NOCD = 0x10000000
-HOLDWATCH = 0x20000000
-SMSFORK = 0x40000000
-SDOCORE = 0x80000000
-FORREAL = 0
-JUSTLOOKING = 1
-SUSPEND_NORMAL = 0
-SUSPEND_PAUSE = 1
-NOCLASS = (-1)
-
-# Included from sys/dditypes.h
-DDI_DEVICE_ATTR_V0 = 0x0001
-DDI_NEVERSWAP_ACC = 0x00
-DDI_STRUCTURE_LE_ACC = 0x01
-DDI_STRUCTURE_BE_ACC = 0x02
-DDI_STRICTORDER_ACC = 0x00
-DDI_UNORDERED_OK_ACC = 0x01
-DDI_MERGING_OK_ACC = 0x02
-DDI_LOADCACHING_OK_ACC = 0x03
-DDI_STORECACHING_OK_ACC = 0x04
-DDI_DATA_SZ01_ACC = 1
-DDI_DATA_SZ02_ACC = 2
-DDI_DATA_SZ04_ACC = 4
-DDI_DATA_SZ08_ACC = 8
-VERS_ACCHDL = 0x0001
-DEVID_NONE = 0
-DEVID_SCSI3_WWN = 1
-DEVID_SCSI_SERIAL = 2
-DEVID_FAB = 3
-DEVID_ENCAP = 4
-DEVID_MAXTYPE = 4
-
-# Included from sys/varargs.h
-
-# Included from sys/va_list.h
-VA_ALIGN = 8
-def _ARGSIZEOF(t): return ((sizeof (t) + VA_ALIGN - 1) & ~(VA_ALIGN - 1))
-
-VA_ALIGN = 8
-def _ARGSIZEOF(t): return ((sizeof (t) + VA_ALIGN - 1) & ~(VA_ALIGN - 1))
-
-NSYSCALL = 256
-SE_32RVAL1 = 0x0
-SE_32RVAL2 = 0x1
-SE_64RVAL = 0x2
-SE_RVAL_MASK = 0x3
-SE_LOADABLE = 0x08
-SE_LOADED = 0x10
-SE_NOUNLOAD = 0x20
-SE_ARGC = 0x40
-
-# Included from sys/devops.h
-from TYPES import *
-
-# Included from sys/poll.h
-POLLIN = 0x0001
-POLLPRI = 0x0002
-POLLOUT = 0x0004
-POLLRDNORM = 0x0040
-POLLWRNORM = POLLOUT
-POLLRDBAND = 0x0080
-POLLWRBAND = 0x0100
-POLLNORM = POLLRDNORM
-POLLERR = 0x0008
-POLLHUP = 0x0010
-POLLNVAL = 0x0020
-POLLREMOVE = 0x0800
-POLLRDDATA = 0x0200
-POLLNOERR = 0x0400
-POLLCLOSED = 0x8000
-
-# Included from vm/as.h
-
-# Included from vm/seg.h
-
-# Included from sys/vnode.h
-from TYPES import *
-VROOT = 0x01
-VNOCACHE = 0x02
-VNOMAP = 0x04
-VDUP = 0x08
-VNOSWAP = 0x10
-VNOMOUNT = 0x20
-VISSWAP = 0x40
-VSWAPLIKE = 0x80
-VVFSLOCK = 0x100
-VVFSWAIT = 0x200
-VVMLOCK = 0x400
-VDIROPEN = 0x800
-VVMEXEC = 0x1000
-VPXFS = 0x2000
-AT_TYPE = 0x0001
-AT_MODE = 0x0002
-AT_UID = 0x0004
-AT_GID = 0x0008
-AT_FSID = 0x0010
-AT_NODEID = 0x0020
-AT_NLINK = 0x0040
-AT_SIZE = 0x0080
-AT_ATIME = 0x0100
-AT_MTIME = 0x0200
-AT_CTIME = 0x0400
-AT_RDEV = 0x0800
-AT_BLKSIZE = 0x1000
-AT_NBLOCKS = 0x2000
-AT_VCODE = 0x4000
-AT_ALL = (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\
- AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|\
- AT_RDEV|AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
-AT_STAT = (AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
- AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV)
-AT_TIMES = (AT_ATIME|AT_MTIME|AT_CTIME)
-AT_NOSET = (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\
- AT_BLKSIZE|AT_NBLOCKS|AT_VCODE)
-VSUID = 04000
-VSGID = 02000
-VSVTX = 01000
-VREAD = 00400
-VWRITE = 00200
-VEXEC = 00100
-MODEMASK = 07777
-PERMMASK = 00777
-def MANDMODE(mode): return (((mode) & (VSGID|(VEXEC>>3))) == VSGID)
-
-VSA_ACL = 0x0001
-VSA_ACLCNT = 0x0002
-VSA_DFACL = 0x0004
-VSA_DFACLCNT = 0x0008
-LOOKUP_DIR = 0x01
-DUMP_ALLOC = 0
-DUMP_FREE = 1
-DUMP_SCAN = 2
-ATTR_UTIME = 0x01
-ATTR_EXEC = 0x02
-ATTR_COMM = 0x04
-ATTR_HINT = 0x08
-ATTR_REAL = 0x10
-
-# Included from vm/faultcode.h
-FC_HWERR = 0x1
-FC_ALIGN = 0x2
-FC_OBJERR = 0x3
-FC_PROT = 0x4
-FC_NOMAP = 0x5
-FC_NOSUPPORT = 0x6
-def FC_MAKE_ERR(e): return (((e) << 8) | FC_OBJERR)
-
-def FC_CODE(fc): return ((fc) & 0xff)
-
-def FC_ERRNO(fc): return ((unsigned)(fc) >> 8)
-
-
-# Included from vm/hat.h
-from TYPES import *
-
-# Included from vm/page.h
-PAGE_HASHAVELEN = 4
-PAGE_HASHVPSHIFT = 6
-PG_EXCL = 0x0001
-PG_WAIT = 0x0002
-PG_PHYSCONTIG = 0x0004
-PG_MATCH_COLOR = 0x0008
-PG_NORELOC = 0x0010
-PG_FREE_LIST = 1
-PG_CACHE_LIST = 2
-PG_LIST_TAIL = 0
-PG_LIST_HEAD = 1
-def page_next_raw(PP): return page_nextn_raw((PP), 1)
-
-PAGE_IO_INUSE = 0x1
-PAGE_IO_WANTED = 0x2
-PGREL_NOTREL = 0x1
-PGREL_CLEAN = 0x2
-PGREL_MOD = 0x3
-P_FREE = 0x80
-P_NORELOC = 0x40
-def PP_SETAGED(pp): return ASSERT(PP_ISAGED(pp))
-
-HAT_FLAGS_RESV = 0xFF000000
-HAT_LOAD = 0x00
-HAT_LOAD_LOCK = 0x01
-HAT_LOAD_ADV = 0x04
-HAT_LOAD_CONTIG = 0x10
-HAT_LOAD_NOCONSIST = 0x20
-HAT_LOAD_SHARE = 0x40
-HAT_LOAD_REMAP = 0x80
-HAT_RELOAD_SHARE = 0x100
-HAT_PLAT_ATTR_MASK = 0xF00000
-HAT_PROT_MASK = 0x0F
-HAT_NOFAULT = 0x10
-HAT_NOSYNC = 0x20
-HAT_STRICTORDER = 0x0000
-HAT_UNORDERED_OK = 0x0100
-HAT_MERGING_OK = 0x0200
-HAT_LOADCACHING_OK = 0x0300
-HAT_STORECACHING_OK = 0x0400
-HAT_ORDER_MASK = 0x0700
-HAT_NEVERSWAP = 0x0000
-HAT_STRUCTURE_BE = 0x1000
-HAT_STRUCTURE_LE = 0x2000
-HAT_ENDIAN_MASK = 0x3000
-HAT_COW = 0x0001
-HAT_UNLOAD = 0x00
-HAT_UNLOAD_NOSYNC = 0x02
-HAT_UNLOAD_UNLOCK = 0x04
-HAT_UNLOAD_OTHER = 0x08
-HAT_UNLOAD_UNMAP = 0x10
-HAT_SYNC_DONTZERO = 0x00
-HAT_SYNC_ZERORM = 0x01
-HAT_SYNC_STOPON_REF = 0x02
-HAT_SYNC_STOPON_MOD = 0x04
-HAT_SYNC_STOPON_RM = (HAT_SYNC_STOPON_REF | HAT_SYNC_STOPON_MOD)
-HAT_DUP_ALL = 1
-HAT_DUP_COW = 2
-HAT_MAP = 0x00
-HAT_ADV_PGUNLOAD = 0x00
-HAT_FORCE_PGUNLOAD = 0x01
-P_MOD = 0x1
-P_REF = 0x2
-P_RO = 0x4
-def hat_ismod(pp): return (hat_page_getattr(pp, P_MOD))
-
-def hat_isref(pp): return (hat_page_getattr(pp, P_REF))
-
-def hat_isro(pp): return (hat_page_getattr(pp, P_RO))
-
-def hat_setmod(pp): return (hat_page_setattr(pp, P_MOD))
-
-def hat_setref(pp): return (hat_page_setattr(pp, P_REF))
-
-def hat_setrefmod(pp): return (hat_page_setattr(pp, P_REF|P_MOD))
-
-def hat_clrmod(pp): return (hat_page_clrattr(pp, P_MOD))
-
-def hat_clrref(pp): return (hat_page_clrattr(pp, P_REF))
-
-def hat_clrrefmod(pp): return (hat_page_clrattr(pp, P_REF|P_MOD))
-
-def hat_page_is_mapped(pp): return (hat_page_getshare(pp))
-
-HAT_DONTALLOC = 0
-HAT_ALLOC = 1
-HRM_SHIFT = 4
-HRM_BYTES = (1 << HRM_SHIFT)
-HRM_PAGES = ((HRM_BYTES * NBBY) / 2)
-HRM_PGPERBYTE = (NBBY/2)
-HRM_PGBYTEMASK = (HRM_PGPERBYTE-1)
-HRM_HASHSIZE = 0x200
-HRM_HASHMASK = (HRM_HASHSIZE - 1)
-HRM_BLIST_INCR = 0x200
-HRM_SWSMONID = 1
-SSL_NLEVELS = 4
-SSL_BFACTOR = 4
-SSL_LOG2BF = 2
-SEGP_ASYNC_FLUSH = 0x1
-SEGP_FORCE_WIRED = 0x2
-SEGP_SUCCESS = 0
-SEGP_FAIL = 1
-def seg_pages(seg): return \
-
-IE_NOMEM = -1
-AS_PAGLCK = 0x80
-AS_CLAIMGAP = 0x40
-AS_UNMAPWAIT = 0x20
-def AS_TYPE_64BIT(as): return \
-
-AS_LREP_LINKEDLIST = 0
-AS_LREP_SKIPLIST = 1
-AS_MUTATION_THRESH = 225
-AH_DIR = 0x1
-AH_LO = 0x0
-AH_HI = 0x1
-AH_CONTAIN = 0x2
-
-# Included from sys/ddidmareq.h
-DMA_UNIT_8 = 1
-DMA_UNIT_16 = 2
-DMA_UNIT_32 = 4
-DMALIM_VER0 = ((0x86000000) + 0)
-DDI_DMA_FORCE_PHYSICAL = 0x0100
-DMA_ATTR_V0 = 0
-DMA_ATTR_VERSION = DMA_ATTR_V0
-DDI_DMA_CALLBACK_RUNOUT = 0
-DDI_DMA_CALLBACK_DONE = 1
-DDI_DMA_WRITE = 0x0001
-DDI_DMA_READ = 0x0002
-DDI_DMA_RDWR = (DDI_DMA_READ | DDI_DMA_WRITE)
-DDI_DMA_REDZONE = 0x0004
-DDI_DMA_PARTIAL = 0x0008
-DDI_DMA_CONSISTENT = 0x0010
-DDI_DMA_EXCLUSIVE = 0x0020
-DDI_DMA_STREAMING = 0x0040
-DDI_DMA_SBUS_64BIT = 0x2000
-DDI_DMA_MAPPED = 0
-DDI_DMA_MAPOK = 0
-DDI_DMA_PARTIAL_MAP = 1
-DDI_DMA_DONE = 2
-DDI_DMA_NORESOURCES = -1
-DDI_DMA_NOMAPPING = -2
-DDI_DMA_TOOBIG = -3
-DDI_DMA_TOOSMALL = -4
-DDI_DMA_LOCKED = -5
-DDI_DMA_BADLIMITS = -6
-DDI_DMA_STALE = -7
-DDI_DMA_BADATTR = -8
-DDI_DMA_INUSE = -9
-DDI_DMA_SYNC_FORDEV = 0x0
-DDI_DMA_SYNC_FORCPU = 0x1
-DDI_DMA_SYNC_FORKERNEL = 0x2
-
-# Included from sys/ddimapreq.h
-
-# Included from sys/mman.h
-PROT_READ = 0x1
-PROT_WRITE = 0x2
-PROT_EXEC = 0x4
-PROT_USER = 0x8
-PROT_ZFOD = (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER)
-PROT_ALL = (PROT_READ | PROT_WRITE | PROT_EXEC | PROT_USER)
-PROT_NONE = 0x0
-MAP_SHARED = 1
-MAP_PRIVATE = 2
-MAP_TYPE = 0xf
-MAP_FIXED = 0x10
-MAP_NORESERVE = 0x40
-MAP_ANON = 0x100
-MAP_ANONYMOUS = MAP_ANON
-MAP_RENAME = 0x20
-PROC_TEXT = (PROT_EXEC | PROT_READ)
-PROC_DATA = (PROT_READ | PROT_WRITE | PROT_EXEC)
-SHARED = 0x10
-PRIVATE = 0x20
-VALID_ATTR = (PROT_READ|PROT_WRITE|PROT_EXEC|SHARED|PRIVATE)
-PROT_EXCL = 0x20
-_MAP_LOW32 = 0x80
-_MAP_NEW = 0x80000000
-from TYPES import *
-MADV_NORMAL = 0
-MADV_RANDOM = 1
-MADV_SEQUENTIAL = 2
-MADV_WILLNEED = 3
-MADV_DONTNEED = 4
-MADV_FREE = 5
-MS_OLDSYNC = 0x0
-MS_SYNC = 0x4
-MS_ASYNC = 0x1
-MS_INVALIDATE = 0x2
-MC_SYNC = 1
-MC_LOCK = 2
-MC_UNLOCK = 3
-MC_ADVISE = 4
-MC_LOCKAS = 5
-MC_UNLOCKAS = 6
-MCL_CURRENT = 0x1
-MCL_FUTURE = 0x2
-DDI_MAP_VERSION = 0x0001
-DDI_MF_USER_MAPPING = 0x1
-DDI_MF_KERNEL_MAPPING = 0x2
-DDI_MF_DEVICE_MAPPING = 0x4
-DDI_ME_GENERIC = (-1)
-DDI_ME_UNIMPLEMENTED = (-2)
-DDI_ME_NORESOURCES = (-3)
-DDI_ME_UNSUPPORTED = (-4)
-DDI_ME_REGSPEC_RANGE = (-5)
-DDI_ME_RNUMBER_RANGE = (-6)
-DDI_ME_INVAL = (-7)
-
-# Included from sys/ddipropdefs.h
-def CELLS_1275_TO_BYTES(n): return ((n) * PROP_1275_CELL_SIZE)
-
-def BYTES_TO_1275_CELLS(n): return ((n) / PROP_1275_CELL_SIZE)
-
-PH_FROM_PROM = 0x01
-DDI_PROP_SUCCESS = 0
-DDI_PROP_NOT_FOUND = 1
-DDI_PROP_UNDEFINED = 2
-DDI_PROP_NO_MEMORY = 3
-DDI_PROP_INVAL_ARG = 4
-DDI_PROP_BUF_TOO_SMALL = 5
-DDI_PROP_CANNOT_DECODE = 6
-DDI_PROP_CANNOT_ENCODE = 7
-DDI_PROP_END_OF_DATA = 8
-DDI_PROP_FOUND_1275 = 255
-PROP_1275_INT_SIZE = 4
-DDI_PROP_DONTPASS = 0x0001
-DDI_PROP_CANSLEEP = 0x0002
-DDI_PROP_SYSTEM_DEF = 0x0004
-DDI_PROP_NOTPROM = 0x0008
-DDI_PROP_DONTSLEEP = 0x0010
-DDI_PROP_STACK_CREATE = 0x0020
-DDI_PROP_UNDEF_IT = 0x0040
-DDI_PROP_HW_DEF = 0x0080
-DDI_PROP_TYPE_INT = 0x0100
-DDI_PROP_TYPE_STRING = 0x0200
-DDI_PROP_TYPE_BYTE = 0x0400
-DDI_PROP_TYPE_COMPOSITE = 0x0800
-DDI_PROP_TYPE_ANY = (DDI_PROP_TYPE_INT | \
- DDI_PROP_TYPE_STRING | \
- DDI_PROP_TYPE_BYTE | \
- DDI_PROP_TYPE_COMPOSITE)
-DDI_PROP_TYPE_MASK = (DDI_PROP_TYPE_INT | \
- DDI_PROP_TYPE_STRING | \
- DDI_PROP_TYPE_BYTE | \
- DDI_PROP_TYPE_COMPOSITE)
-DDI_RELATIVE_ADDRESSING = "relative-addressing"
-DDI_GENERIC_ADDRESSING = "generic-addressing"
-
-# Included from sys/ddidevmap.h
-KMEM_PAGEABLE = 0x100
-KMEM_NON_PAGEABLE = 0x200
-UMEM_LOCKED = 0x400
-UMEM_TRASH = 0x800
-DEVMAP_OPS_REV = 1
-DEVMAP_DEFAULTS = 0x00
-DEVMAP_MAPPING_INVALID = 0x01
-DEVMAP_ALLOW_REMAP = 0x02
-DEVMAP_USE_PAGESIZE = 0x04
-DEVMAP_SETUP_FLAGS = \
- (DEVMAP_MAPPING_INVALID | DEVMAP_ALLOW_REMAP | DEVMAP_USE_PAGESIZE)
-DEVMAP_SETUP_DONE = 0x100
-DEVMAP_LOCK_INITED = 0x200
-DEVMAP_FAULTING = 0x400
-DEVMAP_LOCKED = 0x800
-DEVMAP_FLAG_LARGE = 0x1000
-DDI_UMEM_SLEEP = 0x0
-DDI_UMEM_NOSLEEP = 0x01
-DDI_UMEM_PAGEABLE = 0x02
-DDI_UMEM_TRASH = 0x04
-DDI_UMEMLOCK_READ = 0x01
-DDI_UMEMLOCK_WRITE = 0x02
-
-# Included from sys/nexusdefs.h
-
-# Included from sys/nexusintr.h
-BUSO_REV = 4
-BUSO_REV_3 = 3
-BUSO_REV_4 = 4
-DEVO_REV = 3
-CB_REV = 1
-DDI_IDENTIFIED = (0)
-DDI_NOT_IDENTIFIED = (-1)
-DDI_PROBE_FAILURE = ENXIO
-DDI_PROBE_DONTCARE = 0
-DDI_PROBE_PARTIAL = 1
-DDI_PROBE_SUCCESS = 2
-MAPDEV_REV = 1
-from TYPES import *
-D_NEW = 0x00
-_D_OLD = 0x01
-D_TAPE = 0x08
-D_MTSAFE = 0x0020
-_D_QNEXTLESS = 0x0040
-_D_MTOCSHARED = 0x0080
-D_MTOCEXCL = 0x0800
-D_MTPUTSHARED = 0x1000
-D_MTPERQ = 0x2000
-D_MTQPAIR = 0x4000
-D_MTPERMOD = 0x6000
-D_MTOUTPERIM = 0x8000
-_D_MTCBSHARED = 0x10000
-D_MTINNER_MOD = (D_MTPUTSHARED|_D_MTOCSHARED|_D_MTCBSHARED)
-D_MTOUTER_MOD = (D_MTOCEXCL)
-D_MP = D_MTSAFE
-D_64BIT = 0x200
-D_SYNCSTR = 0x400
-D_DEVMAP = 0x100
-D_HOTPLUG = 0x4
-SNDZERO = 0x001
-SNDPIPE = 0x002
-RNORM = 0x000
-RMSGD = 0x001
-RMSGN = 0x002
-RMODEMASK = 0x003
-RPROTDAT = 0x004
-RPROTDIS = 0x008
-RPROTNORM = 0x010
-RPROTMASK = 0x01c
-RFLUSHMASK = 0x020
-RFLUSHPCPROT = 0x020
-RERRNORM = 0x001
-RERRNONPERSIST = 0x002
-RERRMASK = (RERRNORM|RERRNONPERSIST)
-WERRNORM = 0x004
-WERRNONPERSIST = 0x008
-WERRMASK = (WERRNORM|WERRNONPERSIST)
-FLUSHR = 0x01
-FLUSHW = 0x02
-FLUSHRW = 0x03
-FLUSHBAND = 0x04
-MAPINOK = 0x01
-NOMAPIN = 0x02
-REMAPOK = 0x04
-NOREMAP = 0x08
-S_INPUT = 0x0001
-S_HIPRI = 0x0002
-S_OUTPUT = 0x0004
-S_MSG = 0x0008
-S_ERROR = 0x0010
-S_HANGUP = 0x0020
-S_RDNORM = 0x0040
-S_WRNORM = S_OUTPUT
-S_RDBAND = 0x0080
-S_WRBAND = 0x0100
-S_BANDURG = 0x0200
-RS_HIPRI = 0x01
-STRUIO_POSTPONE = 0x08
-STRUIO_MAPIN = 0x10
-MSG_HIPRI = 0x01
-MSG_ANY = 0x02
-MSG_BAND = 0x04
-MSG_XPG4 = 0x08
-MSG_IPEEK = 0x10
-MSG_DISCARDTAIL = 0x20
-MSG_HOLDSIG = 0x40
-MSG_IGNERROR = 0x80
-MSG_DELAYERROR = 0x100
-MSG_IGNFLOW = 0x200
-MSG_NOMARK = 0x400
-MORECTL = 1
-MOREDATA = 2
-MUXID_ALL = (-1)
-ANYMARK = 0x01
-LASTMARK = 0x02
-_INFTIM = -1
-INFTIM = _INFTIM
diff --git a/sys/lib/python/plat-sunos5/SUNAUDIODEV.py b/sys/lib/python/plat-sunos5/SUNAUDIODEV.py
deleted file mode 100755
index 632139f87..000000000
--- a/sys/lib/python/plat-sunos5/SUNAUDIODEV.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Symbolic constants for use with sunaudiodev module
-# The names are the same as in audioio.h with the leading AUDIO_
-# removed.
-
-# Not all values are supported on all releases of SunOS.
-
-# Encoding types, for fields i_encoding and o_encoding
-
-ENCODING_NONE = 0 # no encoding assigned
-ENCODING_ULAW = 1 # u-law encoding
-ENCODING_ALAW = 2 # A-law encoding
-ENCODING_LINEAR = 3 # Linear PCM encoding
-
-# Gain ranges for i_gain, o_gain and monitor_gain
-
-MIN_GAIN = 0 # minimum gain value
-MAX_GAIN = 255 # maximum gain value
-
-# Balance values for i_balance and o_balance
-
-LEFT_BALANCE = 0 # left channel only
-MID_BALANCE = 32 # equal left/right channel
-RIGHT_BALANCE = 64 # right channel only
-BALANCE_SHIFT = 3
-
-# Port names for i_port and o_port
-
-PORT_A = 1
-PORT_B = 2
-PORT_C = 3
-PORT_D = 4
-
-SPEAKER = 0x01 # output to built-in speaker
-HEADPHONE = 0x02 # output to headphone jack
-LINE_OUT = 0x04 # output to line out
-
-MICROPHONE = 0x01 # input from microphone
-LINE_IN = 0x02 # input from line in
-CD = 0x04 # input from on-board CD inputs
-INTERNAL_CD_IN = CD # input from internal CDROM
diff --git a/sys/lib/python/plat-sunos5/TYPES.py b/sys/lib/python/plat-sunos5/TYPES.py
deleted file mode 100644
index e8e4a0074..000000000
--- a/sys/lib/python/plat-sunos5/TYPES.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# Generated by h2py from /usr/include/sys/types.h
-
-# Included from sys/isa_defs.h
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_ALIGNMENT = 8
-_LONG_LONG_ALIGNMENT = 8
-_DOUBLE_ALIGNMENT = 8
-_LONG_DOUBLE_ALIGNMENT = 16
-_POINTER_ALIGNMENT = 8
-_MAX_ALIGNMENT = 16
-_ALIGNMENT_REQUIRED = 1
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_ALIGNMENT = 4
-_LONG_LONG_ALIGNMENT = 4
-_DOUBLE_ALIGNMENT = 4
-_LONG_DOUBLE_ALIGNMENT = 4
-_POINTER_ALIGNMENT = 4
-_MAX_ALIGNMENT = 4
-_ALIGNMENT_REQUIRED = 0
-_CHAR_ALIGNMENT = 1
-_SHORT_ALIGNMENT = 2
-_INT_ALIGNMENT = 4
-_LONG_LONG_ALIGNMENT = 8
-_DOUBLE_ALIGNMENT = 8
-_ALIGNMENT_REQUIRED = 1
-_LONG_ALIGNMENT = 4
-_LONG_DOUBLE_ALIGNMENT = 8
-_POINTER_ALIGNMENT = 4
-_MAX_ALIGNMENT = 8
-_LONG_ALIGNMENT = 8
-_LONG_DOUBLE_ALIGNMENT = 16
-_POINTER_ALIGNMENT = 8
-_MAX_ALIGNMENT = 16
-
-# Included from sys/feature_tests.h
-_POSIX_C_SOURCE = 1
-_LARGEFILE64_SOURCE = 1
-_LARGEFILE_SOURCE = 1
-_FILE_OFFSET_BITS = 64
-_FILE_OFFSET_BITS = 32
-_POSIX_C_SOURCE = 199506L
-_POSIX_PTHREAD_SEMANTICS = 1
-_XOPEN_VERSION = 500
-_XOPEN_VERSION = 4
-_XOPEN_VERSION = 3
-
-# Included from sys/machtypes.h
-
-# Included from sys/inttypes.h
-
-# Included from sys/int_types.h
-
-# Included from sys/int_limits.h
-INT8_MAX = (127)
-INT16_MAX = (32767)
-INT32_MAX = (2147483647)
-INTMAX_MAX = INT32_MAX
-INT_LEAST8_MAX = INT8_MAX
-INT_LEAST16_MAX = INT16_MAX
-INT_LEAST32_MAX = INT32_MAX
-INT8_MIN = (-128)
-INT16_MIN = (-32767-1)
-INT32_MIN = (-2147483647-1)
-INTMAX_MIN = INT32_MIN
-INT_LEAST8_MIN = INT8_MIN
-INT_LEAST16_MIN = INT16_MIN
-INT_LEAST32_MIN = INT32_MIN
-
-# Included from sys/int_const.h
-def INT8_C(c): return (c)
-
-def INT16_C(c): return (c)
-
-def INT32_C(c): return (c)
-
-def INT64_C(c): return __CONCAT__(c,l)
-
-def INT64_C(c): return __CONCAT__(c,ll)
-
-def UINT8_C(c): return __CONCAT__(c,u)
-
-def UINT16_C(c): return __CONCAT__(c,u)
-
-def UINT32_C(c): return __CONCAT__(c,u)
-
-def UINT64_C(c): return __CONCAT__(c,ul)
-
-def UINT64_C(c): return __CONCAT__(c,ull)
-
-def INTMAX_C(c): return __CONCAT__(c,l)
-
-def UINTMAX_C(c): return __CONCAT__(c,ul)
-
-def INTMAX_C(c): return __CONCAT__(c,ll)
-
-def UINTMAX_C(c): return __CONCAT__(c,ull)
-
-def INTMAX_C(c): return (c)
-
-def UINTMAX_C(c): return (c)
-
-
-# Included from sys/int_fmtio.h
-PRId8 = "d"
-PRId16 = "d"
-PRId32 = "d"
-PRId64 = "ld"
-PRId64 = "lld"
-PRIdLEAST8 = "d"
-PRIdLEAST16 = "d"
-PRIdLEAST32 = "d"
-PRIdLEAST64 = "ld"
-PRIdLEAST64 = "lld"
-PRIi8 = "i"
-PRIi16 = "i"
-PRIi32 = "i"
-PRIi64 = "li"
-PRIi64 = "lli"
-PRIiLEAST8 = "i"
-PRIiLEAST16 = "i"
-PRIiLEAST32 = "i"
-PRIiLEAST64 = "li"
-PRIiLEAST64 = "lli"
-PRIo8 = "o"
-PRIo16 = "o"
-PRIo32 = "o"
-PRIo64 = "lo"
-PRIo64 = "llo"
-PRIoLEAST8 = "o"
-PRIoLEAST16 = "o"
-PRIoLEAST32 = "o"
-PRIoLEAST64 = "lo"
-PRIoLEAST64 = "llo"
-PRIx8 = "x"
-PRIx16 = "x"
-PRIx32 = "x"
-PRIx64 = "lx"
-PRIx64 = "llx"
-PRIxLEAST8 = "x"
-PRIxLEAST16 = "x"
-PRIxLEAST32 = "x"
-PRIxLEAST64 = "lx"
-PRIxLEAST64 = "llx"
-PRIX8 = "X"
-PRIX16 = "X"
-PRIX32 = "X"
-PRIX64 = "lX"
-PRIX64 = "llX"
-PRIXLEAST8 = "X"
-PRIXLEAST16 = "X"
-PRIXLEAST32 = "X"
-PRIXLEAST64 = "lX"
-PRIXLEAST64 = "llX"
-PRIu8 = "u"
-PRIu16 = "u"
-PRIu32 = "u"
-PRIu64 = "lu"
-PRIu64 = "llu"
-PRIuLEAST8 = "u"
-PRIuLEAST16 = "u"
-PRIuLEAST32 = "u"
-PRIuLEAST64 = "lu"
-PRIuLEAST64 = "llu"
-SCNd16 = "hd"
-SCNd32 = "d"
-SCNd64 = "ld"
-SCNd64 = "lld"
-SCNi16 = "hi"
-SCNi32 = "i"
-SCNi64 = "li"
-SCNi64 = "lli"
-SCNo16 = "ho"
-SCNo32 = "o"
-SCNo64 = "lo"
-SCNo64 = "llo"
-SCNu16 = "hu"
-SCNu32 = "u"
-SCNu64 = "lu"
-SCNu64 = "llu"
-SCNx16 = "hx"
-SCNx32 = "x"
-SCNx64 = "lx"
-SCNx64 = "llx"
-PRIdMAX = "ld"
-PRIoMAX = "lo"
-PRIxMAX = "lx"
-PRIuMAX = "lu"
-PRIdMAX = "lld"
-PRIoMAX = "llo"
-PRIxMAX = "llx"
-PRIuMAX = "llu"
-PRIdMAX = "d"
-PRIoMAX = "o"
-PRIxMAX = "x"
-PRIuMAX = "u"
-SCNiMAX = "li"
-SCNdMAX = "ld"
-SCNoMAX = "lo"
-SCNxMAX = "lx"
-SCNiMAX = "lli"
-SCNdMAX = "lld"
-SCNoMAX = "llo"
-SCNxMAX = "llx"
-SCNiMAX = "i"
-SCNdMAX = "d"
-SCNoMAX = "o"
-SCNxMAX = "x"
-
-# Included from sys/types32.h
-SHRT_MIN = (-32768)
-SHRT_MAX = 32767
-USHRT_MAX = 65535
-INT_MIN = (-2147483647-1)
-INT_MAX = 2147483647
-LONG_MIN = (-9223372036854775807L-1L)
-LONG_MAX = 9223372036854775807L
-LONG_MIN = (-2147483647L-1L)
-LONG_MAX = 2147483647L
-P_MYID = (-1)
-
-# Included from sys/select.h
-
-# Included from sys/time.h
-TIME32_MAX = INT32_MAX
-TIME32_MIN = INT32_MIN
-def TIMEVAL_OVERFLOW(tv): return \
-
-from TYPES import *
-DST_NONE = 0
-DST_USA = 1
-DST_AUST = 2
-DST_WET = 3
-DST_MET = 4
-DST_EET = 5
-DST_CAN = 6
-DST_GB = 7
-DST_RUM = 8
-DST_TUR = 9
-DST_AUSTALT = 10
-ITIMER_REAL = 0
-ITIMER_VIRTUAL = 1
-ITIMER_PROF = 2
-ITIMER_REALPROF = 3
-def ITIMERVAL_OVERFLOW(itv): return \
-
-SEC = 1
-MILLISEC = 1000
-MICROSEC = 1000000
-NANOSEC = 1000000000
-
-# Included from sys/time_impl.h
-def TIMESPEC_OVERFLOW(ts): return \
-
-def ITIMERSPEC_OVERFLOW(it): return \
-
-__CLOCK_REALTIME0 = 0
-CLOCK_VIRTUAL = 1
-CLOCK_PROF = 2
-__CLOCK_REALTIME3 = 3
-CLOCK_HIGHRES = 4
-CLOCK_MAX = 5
-CLOCK_REALTIME = __CLOCK_REALTIME3
-CLOCK_REALTIME = __CLOCK_REALTIME0
-TIMER_RELTIME = 0x0
-TIMER_ABSTIME = 0x1
-
-# Included from sys/mutex.h
-from TYPES import *
-def MUTEX_HELD(x): return (mutex_owned(x))
-
-def TICK_TO_SEC(tick): return ((tick) / hz)
-
-def SEC_TO_TICK(sec): return ((sec) * hz)
-
-def TICK_TO_MSEC(tick): return \
-
-def MSEC_TO_TICK(msec): return \
-
-def MSEC_TO_TICK_ROUNDUP(msec): return \
-
-def TICK_TO_USEC(tick): return ((tick) * usec_per_tick)
-
-def USEC_TO_TICK(usec): return ((usec) / usec_per_tick)
-
-def USEC_TO_TICK_ROUNDUP(usec): return \
-
-def TICK_TO_NSEC(tick): return ((tick) * nsec_per_tick)
-
-def NSEC_TO_TICK(nsec): return ((nsec) / nsec_per_tick)
-
-def NSEC_TO_TICK_ROUNDUP(nsec): return \
-
-def TIMEVAL_TO_TICK(tvp): return \
-
-def TIMESTRUC_TO_TICK(tsp): return \
-
-
-# Included from time.h
-from TYPES import *
-
-# Included from iso/time_iso.h
-NULL = 0L
-NULL = 0
-CLOCKS_PER_SEC = 1000000
-FD_SETSIZE = 65536
-FD_SETSIZE = 1024
-_NBBY = 8
-NBBY = _NBBY
-def FD_ZERO(p): return bzero((p), sizeof (*(p)))
diff --git a/sys/lib/python/plat-sunos5/regen b/sys/lib/python/plat-sunos5/regen
deleted file mode 100755
index 78cb7de14..000000000
--- a/sys/lib/python/plat-sunos5/regen
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/sh
-case `uname -sr` in
-'SunOS 5.'*) ;;
-*) echo Probably not on a Solaris 2 system 1>&2
- exit 1;;
-esac
-set -v
-h2py -i '(u_long)' /usr/include/sys/types.h /usr/include/netinet/in.h /usr/include/sys/stropts.h /usr/include/dlfcn.h
-
diff --git a/sys/lib/python/plat-unixware7/IN.py b/sys/lib/python/plat-unixware7/IN.py
deleted file mode 100644
index d66ae970c..000000000
--- a/sys/lib/python/plat-unixware7/IN.py
+++ /dev/null
@@ -1,836 +0,0 @@
-# Generated by h2py from /usr/include/netinet/in.h
-
-# Included from netinet/in_f.h
-def IN_CLASSA(i): return (((long)(i) & 0x80000000) == 0)
-
-IN_CLASSA_NET = 0xff000000
-IN_CLASSA_NSHIFT = 24
-IN_CLASSA_HOST = 0x00ffffff
-IN_CLASSA_MAX = 128
-def IN_CLASSB(i): return (((long)(i) & 0xc0000000) == 0x80000000)
-
-IN_CLASSB_NET = 0xffff0000
-IN_CLASSB_NSHIFT = 16
-IN_CLASSB_HOST = 0x0000ffff
-IN_CLASSB_MAX = 65536
-def IN_CLASSC(i): return (((long)(i) & 0xe0000000) == 0xc0000000)
-
-IN_CLASSC_NET = 0xffffff00
-IN_CLASSC_NSHIFT = 8
-IN_CLASSC_HOST = 0x000000ff
-def IN_CLASSD(i): return (((long)(i) & 0xf0000000) == 0xe0000000)
-
-IN_CLASSD_NET = 0xf0000000
-IN_CLASSD_NSHIFT = 28
-IN_CLASSD_HOST = 0x0fffffff
-def IN_MULTICAST(i): return IN_CLASSD(i)
-
-def IN_EXPERIMENTAL(i): return (((long)(i) & 0xe0000000) == 0xe0000000)
-
-def IN_BADCLASS(i): return (((long)(i) & 0xf0000000) == 0xf0000000)
-
-INADDR_ANY = 0x00000000
-INADDR_LOOPBACK = 0x7f000001
-INADDR_BROADCAST = 0xffffffff
-INADDR_NONE = 0xffffffff
-IN_LOOPBACKNET = 127
-INADDR_UNSPEC_GROUP = 0xe0000000
-INADDR_ALLHOSTS_GROUP = 0xe0000001
-INADDR_ALLRTRS_GROUP = 0xe0000002
-INADDR_MAX_LOCAL_GROUP = 0xe00000ff
-
-# Included from netinet/in6.h
-
-# Included from sys/types.h
-def quad_low(x): return x.val[0]
-
-ADT_EMASKSIZE = 8
-SHRT_MIN = -32768
-SHRT_MAX = 32767
-INT_MIN = (-2147483647-1)
-INT_MAX = 2147483647
-LONG_MIN = (-2147483647-1)
-LONG_MAX = 2147483647
-OFF32_MAX = LONG_MAX
-ISTAT_ASSERTED = 0
-ISTAT_ASSUMED = 1
-ISTAT_NONE = 2
-OFF_MAX = OFF32_MAX
-CLOCK_MAX = LONG_MAX
-P_MYID = (-1)
-P_MYHOSTID = (-1)
-
-# Included from sys/select.h
-FD_SETSIZE = 4096
-NBBY = 8
-NULL = 0
-
-# Included from sys/bitypes.h
-
-# Included from netinet/in6_f.h
-def IN6_IS_ADDR_UNSPECIFIED(a): return IN6_ADDR_EQUAL_L(a, 0, 0, 0, 0)
-
-def IN6_SET_ADDR_UNSPECIFIED(a): return IN6_ADDR_COPY_L(a, 0, 0, 0, 0)
-
-def IN6_IS_ADDR_ANY(a): return IN6_ADDR_EQUAL_L(a, 0, 0, 0, 0)
-
-def IN6_SET_ADDR_ANY(a): return IN6_ADDR_COPY_L(a, 0, 0, 0, 0)
-
-def IN6_IS_ADDR_LOOPBACK(a): return IN6_ADDR_EQUAL_L(a, 0, 0, 0, 0x01000000)
-
-def IN6_SET_ADDR_LOOPBACK(a): return IN6_ADDR_COPY_L(a, 0, 0, 0, 0x01000000)
-
-IN6_MC_FLAG_PERMANENT = 0x0
-IN6_MC_FLAG_TRANSIENT = 0x1
-IN6_MC_SCOPE_NODELOCAL = 0x1
-IN6_MC_SCOPE_LINKLOCAL = 0x2
-IN6_MC_SCOPE_SITELOCAL = 0x5
-IN6_MC_SCOPE_ORGLOCAL = 0x8
-IN6_MC_SCOPE_GLOBAL = 0xE
-def IN6_IS_ADDR_MC_NODELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_SITELOCAL(a): return \
-
-def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
-
-def IN6_IS_ADDR_MC_GLOBAL(a): return \
-
-
-# Included from sys/convsa.h
-__NETLIB_UW211_SVR4 = 1
-__NETLIB_UW211_XPG4 = 2
-__NETLIB_GEMINI_SVR4 = 3
-__NETLIB_GEMINI_XPG4 = 4
-__NETLIB_FP1_SVR4 = 5
-__NETLIB_FP1_XPG4 = 6
-__NETLIB_BASE_VERSION__ = __NETLIB_UW211_SVR4
-__NETLIB_VERSION__ = __NETLIB_FP1_SVR4
-__NETLIB_VERSION__ = __NETLIB_FP1_XPG4
-__NETLIB_VERSION__ = __NETLIB_GEMINI_SVR4
-__NETLIB_VERSION__ = __NETLIB_GEMINI_XPG4
-__NETLIB_VERSION__ = __NETLIB_UW211_SVR4
-__NETLIB_VERSION__ = __NETLIB_UW211_XPG4
-__NETLIB_VERSION__ = __NETLIB_FP1_XPG4
-
-# Included from sys/byteorder.h
-LITTLE_ENDIAN = 1234
-BIG_ENDIAN = 4321
-PDP_ENDIAN = 3412
-
-# Included from sys/byteorder_f.h
-BYTE_ORDER = LITTLE_ENDIAN
-def htonl(hl): return __htonl(hl)
-
-def ntohl(nl): return __ntohl(nl)
-
-def htons(hs): return __htons(hs)
-
-def ntohs(ns): return __ntohs(ns)
-
-def ntohl(x): return (x)
-
-def ntohs(x): return (x)
-
-def htonl(x): return (x)
-
-def htons(x): return (x)
-
-def __NETLIB_VERSION_IS_XPG4(version): return (((version) % 2) == 0)
-
-def __NETLIB_VERSION_HAS_SALEN(version): return ((version) >= __NETLIB_GEMINI_SVR4)
-
-def __NETLIB_VERSION_IS_IKS(version): return ((version) >= __NETLIB_FP1_SVR4)
-
-def SA_FAMILY_GET(sa): return \
-
-INET6_ADDRSTRLEN = 46
-IPV6_UNICAST_HOPS = 3
-IPV6_ADDRFORM = 24
-IPV6_MULTICAST_HOPS = 25
-IPV6_MULTICAST_IF = 26
-IPV6_MULTICAST_LOOP = 27
-IPV6_ADD_MEMBERSHIP = 28
-IPV6_DROP_MEMBERSHIP = 29
-
-# Included from sys/insrem.h
-def LIST_INIT(head): return \
-
-def LIST_INIT(head): return \
-
-def remque(a): return REMQUE(a)
-
-
-# Included from sys/socket.h
-
-# Included from sys/uio.h
-SHUT_RD = 0
-SHUT_WR = 1
-SHUT_RDWR = 2
-
-# Included from sys/netconfig.h
-
-# Included from sys/cdefs.h
-def __P(protos): return protos
-
-def __STRING(x): return #x
-
-def __P(protos): return ()
-
-def __STRING(x): return "x"
-
-NETCONFIG = "/etc/netconfig"
-NETPATH = "NETPATH"
-NC_TPI_CLTS = 1
-NC_TPI_COTS = 2
-NC_TPI_COTS_ORD = 3
-NC_TPI_RAW = 4
-NC_NOFLAG = 00
-NC_VISIBLE = 01
-NC_BROADCAST = 02
-NC_NOPROTOFMLY = "-"
-NC_LOOPBACK = "loopback"
-NC_INET = "inet"
-NC_INET6 = "inet6"
-NC_IMPLINK = "implink"
-NC_PUP = "pup"
-NC_CHAOS = "chaos"
-NC_NS = "ns"
-NC_NBS = "nbs"
-NC_ECMA = "ecma"
-NC_DATAKIT = "datakit"
-NC_CCITT = "ccitt"
-NC_SNA = "sna"
-NC_DECNET = "decnet"
-NC_DLI = "dli"
-NC_LAT = "lat"
-NC_HYLINK = "hylink"
-NC_APPLETALK = "appletalk"
-NC_NIT = "nit"
-NC_IEEE802 = "ieee802"
-NC_OSI = "osi"
-NC_X25 = "x25"
-NC_OSINET = "osinet"
-NC_GOSIP = "gosip"
-NC_NETWARE = "netware"
-NC_NOPROTO = "-"
-NC_TCP = "tcp"
-NC_UDP = "udp"
-NC_ICMP = "icmp"
-NC_IPX = "ipx"
-NC_SPX = "spx"
-NC_TPI_CLTS = 1
-NC_TPI_COTS = 2
-NC_TPI_COTS_ORD = 3
-NC_TPI_RAW = 4
-SOCK_STREAM = 2
-SOCK_DGRAM = 1
-SOCK_RAW = 4
-SOCK_RDM = 5
-SOCK_SEQPACKET = 6
-SO_DEBUG = 0x0001
-SO_ACCEPTCONN = 0x0002
-SO_REUSEADDR = 0x0004
-SO_KEEPALIVE = 0x0008
-SO_DONTROUTE = 0x0010
-SO_BROADCAST = 0x0020
-SO_USELOOPBACK = 0x0040
-SO_LINGER = 0x0080
-SO_OOBINLINE = 0x0100
-SO_ORDREL = 0x0200
-SO_IMASOCKET = 0x0400
-SO_MGMT = 0x0800
-SO_REUSEPORT = 0x1000
-SO_LISTENING = 0x2000
-SO_RDWR = 0x4000
-SO_SEMA = 0x8000
-SO_DONTLINGER = (~SO_LINGER)
-SO_SNDBUF = 0x1001
-SO_RCVBUF = 0x1002
-SO_SNDLOWAT = 0x1003
-SO_RCVLOWAT = 0x1004
-SO_SNDTIMEO = 0x1005
-SO_RCVTIMEO = 0x1006
-SO_ERROR = 0x1007
-SO_TYPE = 0x1008
-SO_PROTOTYPE = 0x1009
-SO_ALLRAW = 0x100a
-SOL_SOCKET = 0xffff
-AF_UNSPEC = 0
-AF_UNIX = 1
-AF_LOCAL = AF_UNIX
-AF_INET = 2
-AF_IMPLINK = 3
-AF_PUP = 4
-AF_CHAOS = 5
-AF_NS = 6
-AF_NBS = 7
-AF_ECMA = 8
-AF_DATAKIT = 9
-AF_CCITT = 10
-AF_SNA = 11
-AF_DECnet = 12
-AF_DLI = 13
-AF_LAT = 14
-AF_HYLINK = 15
-AF_APPLETALK = 16
-AF_NIT = 17
-AF_802 = 18
-AF_OSI = 19
-AF_ISO = AF_OSI
-AF_X25 = 20
-AF_OSINET = 21
-AF_GOSIP = 22
-AF_YNET = 23
-AF_ROUTE = 24
-AF_LINK = 25
-pseudo_AF_XTP = 26
-AF_INET6 = 27
-AF_MAX = 27
-AF_INET_BSWAP = 0x0200
-PF_UNSPEC = AF_UNSPEC
-PF_UNIX = AF_UNIX
-PF_LOCAL = AF_LOCAL
-PF_INET = AF_INET
-PF_IMPLINK = AF_IMPLINK
-PF_PUP = AF_PUP
-PF_CHAOS = AF_CHAOS
-PF_NS = AF_NS
-PF_NBS = AF_NBS
-PF_ECMA = AF_ECMA
-PF_DATAKIT = AF_DATAKIT
-PF_CCITT = AF_CCITT
-PF_SNA = AF_SNA
-PF_DECnet = AF_DECnet
-PF_DLI = AF_DLI
-PF_LAT = AF_LAT
-PF_HYLINK = AF_HYLINK
-PF_APPLETALK = AF_APPLETALK
-PF_NIT = AF_NIT
-PF_802 = AF_802
-PF_OSI = AF_OSI
-PF_ISO = PF_OSI
-PF_X25 = AF_X25
-PF_OSINET = AF_OSINET
-PF_GOSIP = AF_GOSIP
-PF_YNET = AF_YNET
-PF_ROUTE = AF_ROUTE
-PF_LINK = AF_LINK
-pseudo_PF_XTP = pseudo_AF_XTP
-PF_INET6 = AF_INET6
-PF_MAX = AF_MAX
-SOMAXCONN = 5
-SCM_RIGHTS = 1
-MSG_OOB = 0x1
-MSG_PEEK = 0x2
-MSG_DONTROUTE = 0x4
-MSG_CTRUNC = 0x8
-MSG_TRUNC = 0x10
-MSG_EOR = 0x30
-MSG_WAITALL = 0x20
-MSG_MAXIOVLEN = 16
-def OPTLEN(x): return ((((x) + sizeof(long) - 1) / sizeof(long)) * sizeof(long))
-
-GIARG = 0x1
-CONTI = 0x2
-GITAB = 0x4
-SOCKETSYS = 88
-SOCKETSYS = 83
-SO_ACCEPT = 1
-SO_BIND = 2
-SO_CONNECT = 3
-SO_GETPEERNAME = 4
-SO_GETSOCKNAME = 5
-SO_GETSOCKOPT = 6
-SO_LISTEN = 7
-SO_RECV = 8
-SO_RECVFROM = 9
-SO_SEND = 10
-SO_SENDTO = 11
-SO_SETSOCKOPT = 12
-SO_SHUTDOWN = 13
-SO_SOCKET = 14
-SO_SOCKPOLL = 15
-SO_GETIPDOMAIN = 16
-SO_SETIPDOMAIN = 17
-SO_ADJTIME = 18
-
-# Included from sys/stream.h
-
-# Included from sys/cred.h
-
-# Included from sys/ksynch.h
-
-# Included from sys/dl.h
-SIGNBIT = 0x80000000
-
-# Included from sys/ipl.h
-
-# Included from sys/disp_p.h
-
-# Included from sys/trap.h
-DIVERR = 0
-SGLSTP = 1
-NMIFLT = 2
-BPTFLT = 3
-INTOFLT = 4
-BOUNDFLT = 5
-INVOPFLT = 6
-NOEXTFLT = 7
-DBLFLT = 8
-EXTOVRFLT = 9
-INVTSSFLT = 10
-SEGNPFLT = 11
-STKFLT = 12
-GPFLT = 13
-PGFLT = 14
-EXTERRFLT = 16
-ALIGNFLT = 17
-MCEFLT = 18
-USERFLT = 0x100
-TRP_PREEMPT = 0x200
-TRP_UNUSED = 0x201
-PF_ERR_MASK = 0x01
-PF_ERR_PAGE = 0
-PF_ERR_PROT = 1
-PF_ERR_WRITE = 2
-PF_ERR_USER = 4
-EVT_STRSCHED = 0x04
-EVT_GLOBCALLOUT = 0x08
-EVT_LCLCALLOUT = 0x10
-EVT_SOFTINTMASK = (EVT_STRSCHED|EVT_GLOBCALLOUT|EVT_LCLCALLOUT)
-PL0 = 0
-PL1 = 1
-PL2 = 2
-PL3 = 3
-PL4 = 4
-PL5 = 5
-PL6 = 6
-PLHI = 8
-PL7 = PLHI
-PLBASE = PL0
-PLTIMEOUT = PL1
-PLDISK = PL5
-PLSTR = PL6
-PLTTY = PLSTR
-PLMIN = PL0
-PLMIN = PL1
-MAX_INTR_LEVELS = 10
-MAX_INTR_NESTING = 50
-STRSCHED = EVT_STRSCHED
-GLOBALSOFTINT = EVT_GLOBCALLOUT
-LOCALSOFTINT = EVT_LCLCALLOUT
-
-# Included from sys/ksynch_p.h
-def GET_TIME(timep): return \
-
-LK_THRESHOLD = 500000
-
-# Included from sys/list.h
-
-# Included from sys/listasm.h
-def remque_null(e): return \
-
-def LS_ISEMPTY(listp): return \
-
-LK_BASIC = 0x1
-LK_SLEEP = 0x2
-LK_NOSTATS = 0x4
-def CYCLES_SINCE(c): return CYCLES_BETWEEN((c), CYCLES())
-
-LSB_NLKDS = 92
-EVT_RUNRUN = 0x01
-EVT_KPRUNRUN = 0x02
-SP_UNLOCKED = 0
-SP_LOCKED = 1
-KS_LOCKTEST = 0x01
-KS_MPSTATS = 0x02
-KS_DEINITED = 0x04
-KS_NVLTTRACE = 0x08
-RWS_READ = (ord('r'))
-RWS_WRITE = (ord('w'))
-RWS_UNLOCKED = (ord('u'))
-RWS_BUSY = (ord('b'))
-def SLEEP_LOCKOWNED(lkp): return \
-
-def SLEEP_DISOWN(lkp): return \
-
-KS_NOPRMPT = 0x00000001
-__KS_LOCKTEST = KS_LOCKTEST
-__KS_LOCKTEST = 0
-__KS_MPSTATS = KS_MPSTATS
-__KS_MPSTATS = 0
-__KS_NVLTTRACE = KS_NVLTTRACE
-__KS_NVLTTRACE = 0
-KSFLAGS = (__KS_LOCKTEST|__KS_MPSTATS|__KS_NVLTTRACE)
-KSVUNIPROC = 1
-KSVMPDEBUG = 2
-KSVMPNODEBUG = 3
-KSVFLAG = KSVUNIPROC
-KSVFLAG = KSVMPDEBUG
-KSVFLAG = KSVMPNODEBUG
-
-# Included from sys/ksinline.h
-_A_SP_LOCKED = 1
-_A_SP_UNLOCKED = 0
-_A_INVPL = -1
-def _ATOMIC_INT_INCR(atomic_intp): return \
-
-def _ATOMIC_INT_DECR(atomic_intp): return \
-
-def ATOMIC_INT_READ(atomic_intp): return _ATOMIC_INT_READ(atomic_intp)
-
-def ATOMIC_INT_INCR(atomic_intp): return _ATOMIC_INT_INCR(atomic_intp)
-
-def ATOMIC_INT_DECR(atomic_intp): return _ATOMIC_INT_DECR(atomic_intp)
-
-def FSPIN_INIT(lp): return
-
-def FSPIN_LOCK(l): return DISABLE()
-
-def FSPIN_TRYLOCK(l): return (DISABLE(), B_TRUE)
-
-def FSPIN_UNLOCK(l): return ENABLE()
-
-def LOCK_DEINIT(lp): return
-
-def LOCK_DEALLOC(lp): return
-
-def LOCK_OWNED(lp): return (B_TRUE)
-
-def RW_DEINIT(lp): return
-
-def RW_DEALLOC(lp): return
-
-def RW_OWNED(lp): return (B_TRUE)
-
-def IS_LOCKED(lockp): return B_FALSE
-
-def LOCK_PLMIN(lockp): return \
-
-def TRYLOCK_PLMIN(lockp): return LOCK_PLMIN(lockp)
-
-def LOCK_SH_PLMIN(lockp): return LOCK_PLMIN(lockp)
-
-def RW_RDLOCK_PLMIN(lockp): return LOCK_PLMIN(lockp)
-
-def RW_WRLOCK_PLMIN(lockp): return LOCK_PLMIN(lockp)
-
-def LOCK_DEINIT(l): return
-
-def LOCK_PLMIN(lockp): return LOCK((lockp), PLMIN)
-
-def TRYLOCK_PLMIN(lockp): return TRYLOCK((lockp), PLMIN)
-
-def LOCK_SH_PLMIN(lockp): return LOCK_SH((lockp), PLMIN)
-
-def RW_RDLOCK_PLMIN(lockp): return RW_RDLOCK((lockp), PLMIN)
-
-def RW_WRLOCK_PLMIN(lockp): return RW_WRLOCK((lockp), PLMIN)
-
-def FSPIN_IS_LOCKED(fsp): return B_FALSE
-
-def SPIN_IS_LOCKED(lockp): return B_FALSE
-
-def FSPIN_OWNED(l): return (B_TRUE)
-
-CR_MLDREAL = 0x00000001
-CR_RDUMP = 0x00000002
-def crhold(credp): return crholdn((credp), 1)
-
-def crfree(credp): return crfreen((credp), 1)
-
-
-# Included from sys/strmdep.h
-def str_aligned(X): return (((uint)(X) & (sizeof(int) - 1)) == 0)
-
-
-# Included from sys/engine.h
-
-# Included from sys/clock.h
-
-# Included from sys/time.h
-DST_NONE = 0
-DST_USA = 1
-DST_AUST = 2
-DST_WET = 3
-DST_MET = 4
-DST_EET = 5
-DST_CAN = 6
-DST_GB = 7
-DST_RUM = 8
-DST_TUR = 9
-DST_AUSTALT = 10
-ITIMER_REAL = 0
-ITIMER_VIRTUAL = 1
-ITIMER_PROF = 2
-FD_SETSIZE = 4096
-FD_NBBY = 8
-
-# Included from time.h
-NULL = 0
-CLOCKS_PER_SEC = 1000000
-
-# Included from sys/clock_p.h
-CGBITS = 4
-IDBITS = 28
-def toid_unpackcg(idval): return (((idval) >> IDBITS) & 0xf)
-
-def toid_unpackid(idval): return ((idval) & 0xfffffff)
-
-def toid_unpackcg(idval): return 0
-
-def toid_unpackid(idval): return (idval)
-
-NCALLOUT_HASH = 1024
-CALLOUT_MAXVAL = 0x7fffffff
-TO_PERIODIC = 0x80000000
-TO_IMMEDIATE = 0x80000000
-SEC = 1
-MILLISEC = 1000
-MICROSEC = 1000000
-NANOSEC = 1000000000
-SECHR = (60*60)
-SECDAY = (24*SECHR)
-SECYR = (365*SECDAY)
-def TIME_OWNED_R(cgnum): return (B_TRUE)
-
-LOOPSECONDS = 1800
-LOOPMICROSECONDS = (LOOPSECONDS * MICROSEC)
-def TICKS_SINCE(t): return TICKS_BETWEEN(t, TICKS())
-
-MAXRQS = 2
-E_OFFLINE = 0x01
-E_BAD = 0x02
-E_SHUTDOWN = 0x04
-E_DRIVER = 0x08
-E_DEFAULTKEEP = 0x100
-E_DRIVERBOUND = 0x200
-E_EXCLUSIVE = 0x400
-E_CGLEADER = 0x800
-E_NOWAY = (E_OFFLINE|E_BAD|E_SHUTDOWN)
-E_BOUND = 0x01
-E_GLOBAL = 0x00
-E_UNAVAIL = -1
-ENGINE_ONLINE = 1
-def PROCESSOR_UNMAP(e): return ((e) - engine)
-
-BOOTENG = 0
-QMOVED = 0x0001
-QWANTR = 0x0002
-QWANTW = 0x0004
-QFULL = 0x0008
-QREADR = 0x0010
-QUSE = 0x0020
-QNOENB = 0x0040
-QUP = 0x0080
-QBACK = 0x0100
-QINTER = 0x0200
-QPROCSON = 0x0400
-QTOENAB = 0x0800
-QFREEZE = 0x1000
-QBOUND = 0x2000
-QDEFCNT = 0x4000
-QENAB = 0x0001
-QSVCBUSY = 0x0002
-STRM_PUTCNT_TABLES = 31
-def STRM_MYENG_PUTCNT(sdp): return STRM_PUTCNT(l.eng_num, sdp)
-
-QB_FULL = 0x01
-QB_WANTW = 0x02
-QB_BACK = 0x04
-NBAND = 256
-DB_WASDUPED = 0x1
-DB_2PIECE = 0x2
-STRLEAKHASHSZ = 1021
-MSGMARK = 0x01
-MSGNOLOOP = 0x02
-MSGDELIM = 0x04
-MSGNOGET = 0x08
-MSGLOG = 0x10
-M_DATA = 0x00
-M_PROTO = 0x01
-M_BREAK = 0x08
-M_PASSFP = 0x09
-M_SIG = 0x0b
-M_DELAY = 0x0c
-M_CTL = 0x0d
-M_IOCTL = 0x0e
-M_SETOPTS = 0x10
-M_RSE = 0x11
-M_TRAIL = 0x12
-M_IOCACK = 0x81
-M_IOCNAK = 0x82
-M_PCPROTO = 0x83
-M_PCSIG = 0x84
-M_READ = 0x85
-M_FLUSH = 0x86
-M_STOP = 0x87
-M_START = 0x88
-M_HANGUP = 0x89
-M_ERROR = 0x8a
-M_COPYIN = 0x8b
-M_COPYOUT = 0x8c
-M_IOCDATA = 0x8d
-M_PCRSE = 0x8e
-M_STOPI = 0x8f
-M_STARTI = 0x90
-M_PCCTL = 0x91
-M_PCSETOPTS = 0x92
-QNORM = 0x00
-QPCTL = 0x80
-STRCANON = 0x01
-RECOPY = 0x02
-SO_ALL = 0x003f
-SO_READOPT = 0x0001
-SO_WROFF = 0x0002
-SO_MINPSZ = 0x0004
-SO_MAXPSZ = 0x0008
-SO_HIWAT = 0x0010
-SO_LOWAT = 0x0020
-SO_MREADON = 0x0040
-SO_MREADOFF = 0x0080
-SO_NDELON = 0x0100
-SO_NDELOFF = 0x0200
-SO_ISTTY = 0x0400
-SO_ISNTTY = 0x0800
-SO_TOSTOP = 0x1000
-SO_TONSTOP = 0x2000
-SO_BAND = 0x4000
-SO_DELIM = 0x8000
-SO_NODELIM = 0x010000
-SO_STRHOLD = 0x020000
-SO_LOOP = 0x040000
-DRVOPEN = 0x0
-MODOPEN = 0x1
-CLONEOPEN = 0x2
-OPENFAIL = -1
-BPRI_LO = 1
-BPRI_MED = 2
-BPRI_HI = 3
-INFPSZ = -1
-FLUSHALL = 1
-FLUSHDATA = 0
-STRHIGH = 5120
-STRLOW = 1024
-MAXIOCBSZ = 1024
-def straln(a): return (caddr_t)((long)(a) & ~(sizeof(int)-1))
-
-IPM_ID = 200
-ICMPM_ID = 201
-TCPM_ID = 202
-UDPM_ID = 203
-ARPM_ID = 204
-APPM_ID = 205
-RIPM_ID = 206
-PPPM_ID = 207
-AHDLCM_ID = 208
-MHDLCRIPM_ID = 209
-HDLCM_ID = 210
-PPCID_ID = 211
-IGMPM_ID = 212
-IPIPM_ID = 213
-IPPROTO_IP = 0
-IPPROTO_HOPOPTS = 0
-IPPROTO_ICMP = 1
-IPPROTO_IGMP = 2
-IPPROTO_GGP = 3
-IPPROTO_IPIP = 4
-IPPROTO_TCP = 6
-IPPROTO_EGP = 8
-IPPROTO_PUP = 12
-IPPROTO_UDP = 17
-IPPROTO_IDP = 22
-IPPROTO_TP = 29
-IPPROTO_IPV6 = 41
-IPPROTO_ROUTING = 43
-IPPROTO_FRAGMENT = 44
-IPPROTO_ESP = 50
-IPPROTO_AH = 51
-IPPROTO_ICMPV6 = 58
-IPPROTO_NONE = 59
-IPPROTO_DSTOPTS = 60
-IPPROTO_HELLO = 63
-IPPROTO_ND = 77
-IPPROTO_EON = 80
-IPPROTO_RAW = 255
-IPPROTO_MAX = 256
-IPPORT_ECHO = 7
-IPPORT_DISCARD = 9
-IPPORT_SYSTAT = 11
-IPPORT_DAYTIME = 13
-IPPORT_NETSTAT = 15
-IPPORT_FTP = 21
-IPPORT_TELNET = 23
-IPPORT_SMTP = 25
-IPPORT_TIMESERVER = 37
-IPPORT_NAMESERVER = 42
-IPPORT_WHOIS = 43
-IPPORT_MTP = 57
-IPPORT_TFTP = 69
-IPPORT_RJE = 77
-IPPORT_FINGER = 79
-IPPORT_TTYLINK = 87
-IPPORT_SUPDUP = 95
-IPPORT_EXECSERVER = 512
-IPPORT_LOGINSERVER = 513
-IPPORT_CMDSERVER = 514
-IPPORT_EFSSERVER = 520
-IPPORT_BIFFUDP = 512
-IPPORT_WHOSERVER = 513
-IPPORT_ROUTESERVER = 520
-IPPORT_RESERVED = 1024
-IPPORT_USERRESERVED = 65535
-IPPORT_RESERVED_LOW = 512
-IPPORT_RESERVED_HIGH = 1023
-IPPORT_USERRESERVED_LOW = 32768
-IPPORT_USERRESERVED_HIGH = 65535
-INET_ADDRSTRLEN = 16
-IP_OPTIONS = 1
-IP_TOS = 2
-IP_TTL = 3
-IP_HDRINCL = 4
-IP_RECVOPTS = 5
-IP_RECVRETOPTS = 6
-IP_RECVDSTADDR = 7
-IP_RETOPTS = 8
-IP_MULTICAST_IF = 9
-IP_MULTICAST_LOOP = 10
-IP_ADD_MEMBERSHIP = 11
-IP_DROP_MEMBERSHIP = 12
-IP_BROADCAST_IF = 14
-IP_RECVIFINDEX = 15
-IP_MULTICAST_TTL = 16
-MRT_INIT = 17
-MRT_DONE = 18
-MRT_ADD_VIF = 19
-MRT_DEL_VIF = 20
-MRT_ADD_MFC = 21
-MRT_DEL_MFC = 22
-MRT_VERSION = 23
-IP_DEFAULT_MULTICAST_TTL = 1
-IP_DEFAULT_MULTICAST_LOOP = 1
-IP_MAX_MEMBERSHIPS = 20
-INADDR_UNSPEC_GROUP = 0xe0000000
-INADDR_ALLHOSTS_GROUP = 0xe0000001
-INADDR_ALLRTRS_GROUP = 0xe0000002
-INADDR_MAX_LOCAL_GROUP = 0xe00000ff
-
-# Included from netinet/in_mp.h
-
-# Included from netinet/in_mp_ddi.h
-
-# Included from sys/inline.h
-IP_HIER_BASE = (20)
-def ASSERT_LOCK(x): return
-
-def ASSERT_WRLOCK(x): return
-
-def ASSERT_UNLOCK(x): return
-
-def CANPUT(q): return canput((q))
-
-def CANPUTNEXT(q): return canputnext((q))
-
-INET_DEBUG = 1
diff --git a/sys/lib/python/plat-unixware7/STROPTS.py b/sys/lib/python/plat-unixware7/STROPTS.py
deleted file mode 100644
index 0f0cb2516..000000000
--- a/sys/lib/python/plat-unixware7/STROPTS.py
+++ /dev/null
@@ -1,328 +0,0 @@
-# Generated by h2py from /usr/include/sys/stropts.h
-
-# Included from sys/types.h
-def quad_low(x): return x.val[0]
-
-ADT_EMASKSIZE = 8
-SHRT_MIN = -32768
-SHRT_MAX = 32767
-INT_MIN = (-2147483647-1)
-INT_MAX = 2147483647
-LONG_MIN = (-2147483647-1)
-LONG_MAX = 2147483647
-OFF32_MAX = LONG_MAX
-ISTAT_ASSERTED = 0
-ISTAT_ASSUMED = 1
-ISTAT_NONE = 2
-OFF_MAX = OFF32_MAX
-CLOCK_MAX = LONG_MAX
-P_MYID = (-1)
-P_MYHOSTID = (-1)
-
-# Included from sys/select.h
-FD_SETSIZE = 4096
-NBBY = 8
-NULL = 0
-
-# Included from sys/conf.h
-D_NEW = 0x00
-D_OLD = 0x01
-D_DMA = 0x02
-D_BLKOFF = 0x400
-D_LFS = 0x8000
-D_STR = 0x0800
-D_MOD = 0x1000
-D_PSEUDO = 0x2000
-D_RANDOM = 0x4000
-D_HOT = 0x10000
-D_SEEKNEG = 0x04
-D_TAPE = 0x08
-D_NOBRKUP = 0x10
-D_INITPUB = 0x20
-D_NOSPECMACDATA = 0x40
-D_RDWEQ = 0x80
-SECMASK = (D_INITPUB|D_NOSPECMACDATA|D_RDWEQ)
-DAF_REQDMA = 0x1
-DAF_PHYSREQ = 0x2
-DAF_PRE8 = 0x4
-DAF_STATIC = 0x8
-DAF_STR = 0x10
-D_MP = 0x100
-D_UPF = 0x200
-ROOTFS_NAMESZ = 7
-FMNAMESZ = 8
-MCD_VERSION = 1
-DI_BCBP = 0
-DI_MEDIA = 1
-
-# Included from sys/secsys.h
-ES_MACOPENLID = 1
-ES_MACSYSLID = 2
-ES_MACROOTLID = 3
-ES_PRVINFO = 4
-ES_PRVSETCNT = 5
-ES_PRVSETS = 6
-ES_MACADTLID = 7
-ES_PRVID = 8
-ES_TPGETMAJOR = 9
-SA_EXEC = 001
-SA_WRITE = 002
-SA_READ = 004
-SA_SUBSIZE = 010
-
-# Included from sys/stropts_f.h
-X_STR = (ord('S')<<8)
-X_I_BASE = (X_STR|0200)
-X_I_NREAD = (X_STR|0201)
-X_I_PUSH = (X_STR|0202)
-X_I_POP = (X_STR|0203)
-X_I_LOOK = (X_STR|0204)
-X_I_FLUSH = (X_STR|0205)
-X_I_SRDOPT = (X_STR|0206)
-X_I_GRDOPT = (X_STR|0207)
-X_I_STR = (X_STR|0210)
-X_I_SETSIG = (X_STR|0211)
-X_I_GETSIG = (X_STR|0212)
-X_I_FIND = (X_STR|0213)
-X_I_LINK = (X_STR|0214)
-X_I_UNLINK = (X_STR|0215)
-X_I_PEEK = (X_STR|0217)
-X_I_FDINSERT = (X_STR|0220)
-X_I_SENDFD = (X_STR|0221)
-X_I_RECVFD = (X_STR|0222)
-
-# Included from unistd.h
-
-# Included from sys/unistd.h
-R_OK = 004
-W_OK = 002
-X_OK = 001
-F_OK = 000
-EFF_ONLY_OK = 010
-EX_OK = 020
-SEEK_SET = 0
-SEEK_CUR = 1
-SEEK_END = 2
-_SC_ARG_MAX = 1
-_SC_CHILD_MAX = 2
-_SC_CLK_TCK = 3
-_SC_NGROUPS_MAX = 4
-_SC_OPEN_MAX = 5
-_SC_JOB_CONTROL = 6
-_SC_SAVED_IDS = 7
-_SC_VERSION = 8
-_SC_PASS_MAX = 9
-_SC_LOGNAME_MAX = 10
-_SC_PAGESIZE = 11
-_SC_PAGE_SIZE = _SC_PAGESIZE
-_SC_XOPEN_VERSION = 12
-_SC_NACLS_MAX = 13
-_SC_NPROCESSORS_CONF = 14
-_SC_NPROCESSORS_ONLN = 15
-_SC_NPROCESSES = 39
-_SC_TOTAL_MEMORY = 40
-_SC_USEABLE_MEMORY = 41
-_SC_GENERAL_MEMORY = 42
-_SC_DEDICATED_MEMORY = 43
-_SC_NCGS_CONF = 44
-_SC_NCGS_ONLN = 45
-_SC_MAX_CPUS_PER_CG = 46
-_SC_CG_SIMPLE_IMPL = 47
-_SC_CACHE_LINE = 48
-_SC_SYSTEM_ID = 49
-_SC_THREADS = 51
-_SC_THREAD_ATTR_STACKADDR = 52
-_SC_THREAD_ATTR_STACKSIZE = 53
-_SC_THREAD_DESTRUCTOR_ITERATIONS = 54
-_SC_THREAD_KEYS_MAX = 55
-_SC_THREAD_PRIORITY_SCHEDULING = 56
-_SC_THREAD_PRIO_INHERIT = 57
-_SC_THREAD_PRIO_PROTECT = 58
-_SC_THREAD_STACK_MIN = 59
-_SC_THREAD_PROCESS_SHARED = 60
-_SC_THREAD_SAFE_FUNCTIONS = 61
-_SC_THREAD_THREADS_MAX = 62
-_SC_KERNEL_VM = 63
-_SC_TZNAME_MAX = 320
-_SC_STREAM_MAX = 321
-_SC_XOPEN_CRYPT = 323
-_SC_XOPEN_ENH_I18N = 324
-_SC_XOPEN_SHM = 325
-_SC_XOPEN_XCU_VERSION = 327
-_SC_AES_OS_VERSION = 330
-_SC_ATEXIT_MAX = 331
-_SC_2_C_BIND = 350
-_SC_2_C_DEV = 351
-_SC_2_C_VERSION = 352
-_SC_2_CHAR_TERM = 353
-_SC_2_FORT_DEV = 354
-_SC_2_FORT_RUN = 355
-_SC_2_LOCALEDEF = 356
-_SC_2_SW_DEV = 357
-_SC_2_UPE = 358
-_SC_2_VERSION = 359
-_SC_BC_BASE_MAX = 370
-_SC_BC_DIM_MAX = 371
-_SC_BC_SCALE_MAX = 372
-_SC_BC_STRING_MAX = 373
-_SC_COLL_WEIGHTS_MAX = 380
-_SC_EXPR_NEST_MAX = 381
-_SC_LINE_MAX = 382
-_SC_RE_DUP_MAX = 383
-_SC_IOV_MAX = 390
-_SC_NPROC_CONF = 391
-_SC_NPROC_ONLN = 392
-_SC_XOPEN_UNIX = 400
-_SC_SEMAPHORES = 440
-_CS_PATH = 1
-__O_CS_HOSTNAME = 2
-_CS_RELEASE = 3
-_CS_VERSION = 4
-__O_CS_MACHINE = 5
-__O_CS_ARCHITECTURE = 6
-_CS_HW_SERIAL = 7
-__O_CS_HW_PROVIDER = 8
-_CS_SRPC_DOMAIN = 9
-_CS_INITTAB_NAME = 10
-__O_CS_SYSNAME = 11
-_CS_LFS_CFLAGS = 20
-_CS_LFS_LDFLAGS = 21
-_CS_LFS_LIBS = 22
-_CS_LFS_LINTFLAGS = 23
-_CS_LFS64_CFLAGS = 24
-_CS_LFS64_LDFLAGS = 25
-_CS_LFS64_LIBS = 26
-_CS_LFS64_LINTFLAGS = 27
-_CS_ARCHITECTURE = 100
-_CS_BUSTYPES = 101
-_CS_HOSTNAME = 102
-_CS_HW_PROVIDER = 103
-_CS_KERNEL_STAMP = 104
-_CS_MACHINE = 105
-_CS_OS_BASE = 106
-_CS_OS_PROVIDER = 107
-_CS_SYSNAME = 108
-_CS_USER_LIMIT = 109
-_PC_LINK_MAX = 1
-_PC_MAX_CANON = 2
-_PC_MAX_INPUT = 3
-_PC_NAME_MAX = 4
-_PC_PATH_MAX = 5
-_PC_PIPE_BUF = 6
-_PC_NO_TRUNC = 7
-_PC_VDISABLE = 8
-_PC_CHOWN_RESTRICTED = 9
-_PC_FILESIZEBITS = 10
-_POSIX_VERSION = 199009L
-_XOPEN_VERSION = 4
-GF_PATH = "/etc/group"
-PF_PATH = "/etc/passwd"
-F_ULOCK = 0
-F_LOCK = 1
-F_TLOCK = 2
-F_TEST = 3
-_POSIX_JOB_CONTROL = 1
-_POSIX_SAVED_IDS = 1
-_POSIX_VDISABLE = 0
-NULL = 0
-STDIN_FILENO = 0
-STDOUT_FILENO = 1
-STDERR_FILENO = 2
-_XOPEN_UNIX = 1
-_XOPEN_ENH_I18N = 1
-_XOPEN_XPG4 = 1
-_POSIX2_C_VERSION = 199209L
-_POSIX2_VERSION = 199209L
-_XOPEN_XCU_VERSION = 4
-_POSIX_SEMAPHORES = 1
-_POSIX_THREADS = 1
-_POSIX_THREAD_ATTR_STACKADDR = 1
-_POSIX_THREAD_ATTR_STACKSIZE = 1
-_POSIX_THREAD_PRIORITY_SCHEDULING = 1
-_POSIX_THREAD_PROCESS_SHARED = 1
-_POSIX_THREAD_SAFE_FUNCTIONS = 1
-_POSIX2_C_BIND = 1
-_POSIX2_CHAR_TERM = 1
-_POSIX2_FORT_RUN = 1
-_POSIX2_LOCALEDEF = 1
-_POSIX2_UPE = 1
-_LFS_ASYNCHRONOUS_IO = 1
-_LFS_LARGEFILE = 1
-_LFS64_ASYNCHRONOUS_IO = 1
-_LFS64_LARGEFILE = 1
-_LFS64_STDIO = 1
-FMNAMESZ = 8
-SNDZERO = 0x001
-SNDPIPE = 0x002
-RNORM = 0x000
-RMSGD = 0x001
-RMSGN = 0x002
-RMODEMASK = 0x003
-RPROTDAT = 0x004
-RPROTDIS = 0x008
-RPROTNORM = 0x010
-RPROTMASK = 0x01c
-FLUSHR = 0x01
-FLUSHW = 0x02
-FLUSHRW = 0x03
-FLUSHBAND = 0x04
-S_INPUT = 0x0001
-S_HIPRI = 0x0002
-S_OUTPUT = 0x0004
-S_MSG = 0x0008
-S_ERROR = 0x0010
-S_HANGUP = 0x0020
-S_RDNORM = 0x0040
-S_WRNORM = S_OUTPUT
-S_RDBAND = 0x0080
-S_WRBAND = 0x0100
-S_BANDURG = 0x0200
-RS_HIPRI = 0x01
-MSG_HIPRI = 0x01
-MSG_ANY = 0x02
-MSG_BAND = 0x04
-MSG_DISCARD = 0x08
-MSG_PEEKIOCTL = 0x10
-MORECTL = 1
-MOREDATA = 2
-MUXID_ALL = (-1)
-ANYMARK = 0x01
-LASTMARK = 0x02
-STR = (ord('S')<<8)
-I_NREAD = (STR|01)
-I_PUSH = (STR|02)
-I_POP = (STR|03)
-I_LOOK = (STR|04)
-I_FLUSH = (STR|05)
-I_SRDOPT = (STR|06)
-I_GRDOPT = (STR|07)
-I_STR = (STR|010)
-I_SETSIG = (STR|011)
-I_GETSIG = (STR|012)
-I_FIND = (STR|013)
-I_LINK = (STR|014)
-I_UNLINK = (STR|015)
-I_PEEK = (STR|017)
-I_FDINSERT = (STR|020)
-I_SENDFD = (STR|021)
-I_RECVFD = (STR|022)
-I_E_RECVFD = (STR|016)
-I_RECVFD = (STR|016)
-I_RECVFD = (STR|022)
-I_SWROPT = (STR|023)
-I_GWROPT = (STR|024)
-I_LIST = (STR|025)
-I_PLINK = (STR|026)
-I_PUNLINK = (STR|027)
-I_FLUSHBAND = (STR|034)
-I_CKBAND = (STR|035)
-I_GETBAND = (STR|036)
-I_ATMARK = (STR|037)
-I_SETCLTIME = (STR|040)
-I_GETCLTIME = (STR|041)
-I_CANPUT = (STR|042)
-I_S_RECVFD = (STR|043)
-I_STATS = (STR|044)
-I_BIGPIPE = (STR|045)
-I_GETTP = (STR|046)
-INFTIM = -1
diff --git a/sys/lib/python/plat-unixware7/regen b/sys/lib/python/plat-unixware7/regen
deleted file mode 100755
index 68998a7a5..000000000
--- a/sys/lib/python/plat-unixware7/regen
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/sh
-case `uname -sr` in
-UnixWare*) ;;
-*) echo Probably not on a UnixWare system 1>&2
- exit 1;;
-esac
-set -v
-h2py -i '(u_long)' /usr/include/netinet/in.h
-h2py /usr/include/sys/stropts.h
diff --git a/sys/lib/python/platform.py b/sys/lib/python/platform.py
deleted file mode 100755
index 288bc9561..000000000
--- a/sys/lib/python/platform.py
+++ /dev/null
@@ -1,1254 +0,0 @@
-#!/usr/bin/env python
-
-""" This module tries to retrieve as much platform-identifying data as
- possible. It makes this information available via function APIs.
-
- If called from the command line, it prints the platform
- information concatenated as single string to stdout. The output
- format is useable as part of a filename.
-
-"""
-# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
-# If you find problems, please submit bug reports/patches via the
-# Python SourceForge Project Page and assign them to "lemburg".
-#
-# Note: Please keep this module compatible to Python 1.5.2.
-#
-# Still needed:
-# * more support for WinCE
-# * support for MS-DOS (PythonDX ?)
-# * support for Amiga and other still unsupported platforms running Python
-# * support for additional Linux distributions
-#
-# Many thanks to all those who helped adding platform-specific
-# checks (in no particular order):
-#
-# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
-# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
-# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
-# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
-# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
-# Colin Kong, Trent Mick, Guido van Rossum
-#
-# History:
-#
-# <see CVS and SVN checkin messages for history>
-#
-# 1.0.3 - added normalization of Windows system name
-# 1.0.2 - added more Windows support
-# 1.0.1 - reformatted to make doc.py happy
-# 1.0.0 - reformatted a bit and checked into Python CVS
-# 0.8.0 - added sys.version parser and various new access
-# APIs (python_version(), python_compiler(), etc.)
-# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
-# 0.7.1 - added support for Caldera OpenLinux
-# 0.7.0 - some fixes for WinCE; untabified the source file
-# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
-# vms_lib.getsyi() configured
-# 0.6.1 - added code to prevent 'uname -p' on platforms which are
-# known not to support it
-# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
-# did some cleanup of the interfaces - some APIs have changed
-# 0.5.5 - fixed another type in the MacOS code... should have
-# used more coffee today ;-)
-# 0.5.4 - fixed a few typos in the MacOS code
-# 0.5.3 - added experimental MacOS support; added better popen()
-# workarounds in _syscmd_ver() -- still not 100% elegant
-# though
-# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
-# return values (the system uname command tends to return
-# 'unknown' instead of just leaving the field emtpy)
-# 0.5.1 - included code for slackware dist; added exception handlers
-# to cover up situations where platforms don't have os.popen
-# (e.g. Mac) or fail on socket.gethostname(); fixed libc
-# detection RE
-# 0.5.0 - changed the API names referring to system commands to *syscmd*;
-# added java_ver(); made syscmd_ver() a private
-# API (was system_ver() in previous versions) -- use uname()
-# instead; extended the win32_ver() to also return processor
-# type information
-# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
-# 0.3.4 - fixed a bug in _follow_symlinks()
-# 0.3.3 - fixed popen() and "file" command invokation bugs
-# 0.3.2 - added architecture() API and support for it in platform()
-# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
-# 0.3.0 - added system alias support
-# 0.2.3 - removed 'wince' again... oh well.
-# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
-# 0.2.1 - added cache logic and changed the platform string format
-# 0.2.0 - changed the API to use functions instead of module globals
-# since some action take too long to be run on module import
-# 0.1.0 - first release
-#
-# You can always get the latest version of this module at:
-#
-# http://www.egenix.com/files/python/platform.py
-#
-# If that URL should fail, try contacting the author.
-
-__copyright__ = """
- Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
- Copyright (c) 2000-2003, eGenix.com Software GmbH; mailto:info@egenix.com
-
- Permission to use, copy, modify, and distribute this software and its
- documentation for any purpose and without fee or royalty is hereby granted,
- provided that the above copyright notice appear in all copies and that
- both that copyright notice and this permission notice appear in
- supporting documentation or portions thereof, including modifications,
- that you make.
-
- EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
- THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
- FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
- INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
- FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
- WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
-
-"""
-
-__version__ = '1.0.4'
-
-import sys,string,os,re
-
-### Platform specific APIs
-
-_libc_search = re.compile(r'(__libc_init)'
- '|'
- '(GLIBC_([0-9.]+))'
- '|'
- '(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)')
-
-def libc_ver(executable=sys.executable,lib='',version='',
-
- chunksize=2048):
-
- """ Tries to determine the libc version that the file executable
- (which defaults to the Python interpreter) is linked against.
-
- Returns a tuple of strings (lib,version) which default to the
- given parameters in case the lookup fails.
-
- Note that the function has intimate knowledge of how different
- libc versions add symbols to the executable and thus is probably
- only useable for executables compiled using gcc.
-
- The file is read and scanned in chunks of chunksize bytes.
-
- """
- f = open(executable,'rb')
- binary = f.read(chunksize)
- pos = 0
- while 1:
- m = _libc_search.search(binary,pos)
- if not m:
- binary = f.read(chunksize)
- if not binary:
- break
- pos = 0
- continue
- libcinit,glibc,glibcversion,so,threads,soversion = m.groups()
- if libcinit and not lib:
- lib = 'libc'
- elif glibc:
- if lib != 'glibc':
- lib = 'glibc'
- version = glibcversion
- elif glibcversion > version:
- version = glibcversion
- elif so:
- if lib != 'glibc':
- lib = 'libc'
- if soversion > version:
- version = soversion
- if threads and version[-len(threads):] != threads:
- version = version + threads
- pos = m.end()
- f.close()
- return lib,version
-
-def _dist_try_harder(distname,version,id):
-
- """ Tries some special tricks to get the distribution
- information in case the default method fails.
-
- Currently supports older SuSE Linux, Caldera OpenLinux and
- Slackware Linux distributions.
-
- """
- if os.path.exists('/var/adm/inst-log/info'):
- # SuSE Linux stores distribution information in that file
- info = open('/var/adm/inst-log/info').readlines()
- distname = 'SuSE'
- for line in info:
- tv = string.split(line)
- if len(tv) == 2:
- tag,value = tv
- else:
- continue
- if tag == 'MIN_DIST_VERSION':
- version = string.strip(value)
- elif tag == 'DIST_IDENT':
- values = string.split(value,'-')
- id = values[2]
- return distname,version,id
-
- if os.path.exists('/etc/.installed'):
- # Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
- info = open('/etc/.installed').readlines()
- for line in info:
- pkg = string.split(line,'-')
- if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
- # XXX does Caldera support non Intel platforms ? If yes,
- # where can we find the needed id ?
- return 'OpenLinux',pkg[1],id
-
- if os.path.isdir('/usr/lib/setup'):
- # Check for slackware verson tag file (thanks to Greg Andruk)
- verfiles = os.listdir('/usr/lib/setup')
- for n in range(len(verfiles)-1, -1, -1):
- if verfiles[n][:14] != 'slack-version-':
- del verfiles[n]
- if verfiles:
- verfiles.sort()
- distname = 'slackware'
- version = verfiles[-1][14:]
- return distname,version,id
-
- return distname,version,id
-
-_release_filename = re.compile(r'(\w+)[-_](release|version)')
-_release_version = re.compile(r'([\d.]+)[^(]*(?:\((.+)\))?')
-
-# Note:In supported_dists below we need 'fedora' before 'redhat' as in
-# Fedora redhat-release is a link to fedora-release.
-
-def dist(distname='',version='',id='',
-
- supported_dists=('SuSE', 'debian', 'fedora', 'redhat', 'mandrake')):
-
- """ Tries to determine the name of the Linux OS distribution name.
-
- The function first looks for a distribution release file in
- /etc and then reverts to _dist_try_harder() in case no
- suitable files are found.
-
- Returns a tuple (distname,version,id) which default to the
- args given as parameters.
-
- """
- try:
- etc = os.listdir('/etc')
- except os.error:
- # Probably not a Unix system
- return distname,version,id
- for file in etc:
- m = _release_filename.match(file)
- if m:
- _distname,dummy = m.groups()
- if _distname in supported_dists:
- distname = _distname
- break
- else:
- return _dist_try_harder(distname,version,id)
- f = open('/etc/'+file,'r')
- firstline = f.readline()
- f.close()
- m = _release_version.search(firstline)
- if m:
- _version,_id = m.groups()
- if _version:
- version = _version
- if _id:
- id = _id
- else:
- # Unkown format... take the first two words
- l = string.split(string.strip(firstline))
- if l:
- version = l[0]
- if len(l) > 1:
- id = l[1]
- return distname,version,id
-
-class _popen:
-
- """ Fairly portable (alternative) popen implementation.
-
- This is mostly needed in case os.popen() is not available, or
- doesn't work as advertised, e.g. in Win9X GUI programs like
- PythonWin or IDLE.
-
- Writing to the pipe is currently not supported.
-
- """
- tmpfile = ''
- pipe = None
- bufsize = None
- mode = 'r'
-
- def __init__(self,cmd,mode='r',bufsize=None):
-
- if mode != 'r':
- raise ValueError,'popen()-emulation only supports read mode'
- import tempfile
- self.tmpfile = tmpfile = tempfile.mktemp()
- os.system(cmd + ' > %s' % tmpfile)
- self.pipe = open(tmpfile,'rb')
- self.bufsize = bufsize
- self.mode = mode
-
- def read(self):
-
- return self.pipe.read()
-
- def readlines(self):
-
- if self.bufsize is not None:
- return self.pipe.readlines()
-
- def close(self,
-
- remove=os.unlink,error=os.error):
-
- if self.pipe:
- rc = self.pipe.close()
- else:
- rc = 255
- if self.tmpfile:
- try:
- remove(self.tmpfile)
- except error:
- pass
- return rc
-
- # Alias
- __del__ = close
-
-def popen(cmd, mode='r', bufsize=None):
-
- """ Portable popen() interface.
- """
- # Find a working popen implementation preferring win32pipe.popen
- # over os.popen over _popen
- popen = None
- if os.environ.get('OS','') == 'Windows_NT':
- # On NT win32pipe should work; on Win9x it hangs due to bugs
- # in the MS C lib (see MS KnowledgeBase article Q150956)
- try:
- import win32pipe
- except ImportError:
- pass
- else:
- popen = win32pipe.popen
- if popen is None:
- if hasattr(os,'popen'):
- popen = os.popen
- # Check whether it works... it doesn't in GUI programs
- # on Windows platforms
- if sys.platform == 'win32': # XXX Others too ?
- try:
- popen('')
- except os.error:
- popen = _popen
- else:
- popen = _popen
- if bufsize is None:
- return popen(cmd,mode)
- else:
- return popen(cmd,mode,bufsize)
-
-def _norm_version(version,build=''):
-
- """ Normalize the version and build strings and return a single
- version string using the format major.minor.build (or patchlevel).
- """
- l = string.split(version,'.')
- if build:
- l.append(build)
- try:
- ints = map(int,l)
- except ValueError:
- strings = l
- else:
- strings = map(str,ints)
- version = string.join(strings[:3],'.')
- return version
-
-_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
- '.*'
- 'Version ([\d.]+))')
-
-def _syscmd_ver(system='',release='',version='',
-
- supported_platforms=('win32','win16','dos','os2')):
-
- """ Tries to figure out the OS version used and returns
- a tuple (system,release,version).
-
- It uses the "ver" shell command for this which is known
- to exists on Windows, DOS and OS/2. XXX Others too ?
-
- In case this fails, the given parameters are used as
- defaults.
-
- """
- if sys.platform not in supported_platforms:
- return system,release,version
-
- # Try some common cmd strings
- for cmd in ('ver','command /c ver','cmd /c ver'):
- try:
- pipe = popen(cmd)
- info = pipe.read()
- if pipe.close():
- raise os.error,'command failed'
- # XXX How can I supress shell errors from being written
- # to stderr ?
- except os.error,why:
- #print 'Command %s failed: %s' % (cmd,why)
- continue
- except IOError,why:
- #print 'Command %s failed: %s' % (cmd,why)
- continue
- else:
- break
- else:
- return system,release,version
-
- # Parse the output
- info = string.strip(info)
- m = _ver_output.match(info)
- if m:
- system,release,version = m.groups()
- # Strip trailing dots from version and release
- if release[-1] == '.':
- release = release[:-1]
- if version[-1] == '.':
- version = version[:-1]
- # Normalize the version and build strings (eliminating additional
- # zeros)
- version = _norm_version(version)
- return system,release,version
-
-def _win32_getvalue(key,name,default=''):
-
- """ Read a value for name from the registry key.
-
- In case this fails, default is returned.
-
- """
- from win32api import RegQueryValueEx
- try:
- return RegQueryValueEx(key,name)
- except:
- return default
-
-def win32_ver(release='',version='',csd='',ptype=''):
-
- """ Get additional version information from the Windows Registry
- and return a tuple (version,csd,ptype) referring to version
- number, CSD level and OS type (multi/single
- processor).
-
- As a hint: ptype returns 'Uniprocessor Free' on single
- processor NT machines and 'Multiprocessor Free' on multi
- processor machines. The 'Free' refers to the OS version being
- free of debugging code. It could also state 'Checked' which
- means the OS version uses debugging code, i.e. code that
- checks arguments, ranges, etc. (Thomas Heller).
-
- Note: this function only works if Mark Hammond's win32
- package is installed and obviously only runs on Win32
- compatible platforms.
-
- """
- # XXX Is there any way to find out the processor type on WinXX ?
- # XXX Is win32 available on Windows CE ?
- #
- # Adapted from code posted by Karl Putland to comp.lang.python.
- #
- # The mappings between reg. values and release names can be found
- # here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
-
- # Import the needed APIs
- try:
- import win32api
- except ImportError:
- return release,version,csd,ptype
- from win32api import RegQueryValueEx,RegOpenKeyEx,RegCloseKey,GetVersionEx
- from win32con import HKEY_LOCAL_MACHINE,VER_PLATFORM_WIN32_NT,\
- VER_PLATFORM_WIN32_WINDOWS
-
- # Find out the registry key and some general version infos
- maj,min,buildno,plat,csd = GetVersionEx()
- version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
- if csd[:13] == 'Service Pack ':
- csd = 'SP' + csd[13:]
- if plat == VER_PLATFORM_WIN32_WINDOWS:
- regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
- # Try to guess the release name
- if maj == 4:
- if min == 0:
- release = '95'
- elif min == 10:
- release = '98'
- elif min == 90:
- release = 'Me'
- else:
- release = 'postMe'
- elif maj == 5:
- release = '2000'
- elif plat == VER_PLATFORM_WIN32_NT:
- regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
- if maj <= 4:
- release = 'NT'
- elif maj == 5:
- if min == 0:
- release = '2000'
- elif min == 1:
- release = 'XP'
- elif min == 2:
- release = '2003Server'
- else:
- release = 'post2003'
- else:
- if not release:
- # E.g. Win3.1 with win32s
- release = '%i.%i' % (maj,min)
- return release,version,csd,ptype
-
- # Open the registry key
- try:
- keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE,regkey)
- # Get a value to make sure the key exists...
- RegQueryValueEx(keyCurVer,'SystemRoot')
- except:
- return release,version,csd,ptype
-
- # Parse values
- #subversion = _win32_getvalue(keyCurVer,
- # 'SubVersionNumber',
- # ('',1))[0]
- #if subversion:
- # release = release + subversion # 95a, 95b, etc.
- build = _win32_getvalue(keyCurVer,
- 'CurrentBuildNumber',
- ('',1))[0]
- ptype = _win32_getvalue(keyCurVer,
- 'CurrentType',
- (ptype,1))[0]
-
- # Normalize version
- version = _norm_version(version,build)
-
- # Close key
- RegCloseKey(keyCurVer)
- return release,version,csd,ptype
-
-def _mac_ver_lookup(selectors,default=None):
-
- from gestalt import gestalt
- import MacOS
- l = []
- append = l.append
- for selector in selectors:
- try:
- append(gestalt(selector))
- except (RuntimeError, MacOS.Error):
- append(default)
- return l
-
-def _bcd2str(bcd):
-
- return hex(bcd)[2:]
-
-def mac_ver(release='',versioninfo=('','',''),machine=''):
-
- """ Get MacOS version information and return it as tuple (release,
- versioninfo, machine) with versioninfo being a tuple (version,
- dev_stage, non_release_version).
-
- Entries which cannot be determined are set to the paramter values
- which default to ''. All tuple entries are strings.
-
- Thanks to Mark R. Levinson for mailing documentation links and
- code examples for this function. Documentation for the
- gestalt() API is available online at:
-
- http://www.rgaros.nl/gestalt/
-
- """
- # Check whether the version info module is available
- try:
- import gestalt
- import MacOS
- except ImportError:
- return release,versioninfo,machine
- # Get the infos
- sysv,sysu,sysa = _mac_ver_lookup(('sysv','sysu','sysa'))
- # Decode the infos
- if sysv:
- major = (sysv & 0xFF00) >> 8
- minor = (sysv & 0x00F0) >> 4
- patch = (sysv & 0x000F)
- release = '%s.%i.%i' % (_bcd2str(major),minor,patch)
- if sysu:
- major = int((sysu & 0xFF000000L) >> 24)
- minor = (sysu & 0x00F00000) >> 20
- bugfix = (sysu & 0x000F0000) >> 16
- stage = (sysu & 0x0000FF00) >> 8
- nonrel = (sysu & 0x000000FF)
- version = '%s.%i.%i' % (_bcd2str(major),minor,bugfix)
- nonrel = _bcd2str(nonrel)
- stage = {0x20:'development',
- 0x40:'alpha',
- 0x60:'beta',
- 0x80:'final'}.get(stage,'')
- versioninfo = (version,stage,nonrel)
- if sysa:
- machine = {0x1: '68k',
- 0x2: 'PowerPC',
- 0xa: 'i386'}.get(sysa,'')
- return release,versioninfo,machine
-
-def _java_getprop(name,default):
-
- from java.lang import System
- try:
- return System.getProperty(name)
- except:
- return default
-
-def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
-
- """ Version interface for Jython.
-
- Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
- a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
- tuple (os_name,os_version,os_arch).
-
- Values which cannot be determined are set to the defaults
- given as parameters (which all default to '').
-
- """
- # Import the needed APIs
- try:
- import java.lang
- except ImportError:
- return release,vendor,vminfo,osinfo
-
- vendor = _java_getprop('java.vendor',vendor)
- release = _java_getprop('java.version',release)
- vm_name,vm_release,vm_vendor = vminfo
- vm_name = _java_getprop('java.vm.name',vm_name)
- vm_vendor = _java_getprop('java.vm.vendor',vm_vendor)
- vm_release = _java_getprop('java.vm.version',vm_release)
- vminfo = vm_name,vm_release,vm_vendor
- os_name,os_version,os_arch = osinfo
- os_arch = _java_getprop('java.os.arch',os_arch)
- os_name = _java_getprop('java.os.name',os_name)
- os_version = _java_getprop('java.os.version',os_version)
- osinfo = os_name,os_version,os_arch
-
- return release,vendor,vminfo,osinfo
-
-### System name aliasing
-
-def system_alias(system,release,version):
-
- """ Returns (system,release,version) aliased to common
- marketing names used for some systems.
-
- It also does some reordering of the information in some cases
- where it would otherwise cause confusion.
-
- """
- if system == 'Rhapsody':
- # Apple's BSD derivative
- # XXX How can we determine the marketing release number ?
- return 'MacOS X Server',system+release,version
-
- elif system == 'SunOS':
- # Sun's OS
- if release < '5':
- # These releases use the old name SunOS
- return system,release,version
- # Modify release (marketing release = SunOS release - 3)
- l = string.split(release,'.')
- if l:
- try:
- major = int(l[0])
- except ValueError:
- pass
- else:
- major = major - 3
- l[0] = str(major)
- release = string.join(l,'.')
- if release < '6':
- system = 'Solaris'
- else:
- # XXX Whatever the new SunOS marketing name is...
- system = 'Solaris'
-
- elif system == 'IRIX64':
- # IRIX reports IRIX64 on platforms with 64-bit support; yet it
- # is really a version and not a different platform, since 32-bit
- # apps are also supported..
- system = 'IRIX'
- if version:
- version = version + ' (64bit)'
- else:
- version = '64bit'
-
- elif system in ('win32','win16'):
- # In case one of the other tricks
- system = 'Windows'
-
- return system,release,version
-
-### Various internal helpers
-
-def _platform(*args):
-
- """ Helper to format the platform string in a filename
- compatible format e.g. "system-version-machine".
- """
- # Format the platform string
- platform = string.join(
- map(string.strip,
- filter(len,args)),
- '-')
-
- # Cleanup some possible filename obstacles...
- replace = string.replace
- platform = replace(platform,' ','_')
- platform = replace(platform,'/','-')
- platform = replace(platform,'\\','-')
- platform = replace(platform,':','-')
- platform = replace(platform,';','-')
- platform = replace(platform,'"','-')
- platform = replace(platform,'(','-')
- platform = replace(platform,')','-')
-
- # No need to report 'unknown' information...
- platform = replace(platform,'unknown','')
-
- # Fold '--'s and remove trailing '-'
- while 1:
- cleaned = replace(platform,'--','-')
- if cleaned == platform:
- break
- platform = cleaned
- while platform[-1] == '-':
- platform = platform[:-1]
-
- return platform
-
-def _node(default=''):
-
- """ Helper to determine the node name of this machine.
- """
- try:
- import socket
- except ImportError:
- # No sockets...
- return default
- try:
- return socket.gethostname()
- except socket.error:
- # Still not working...
- return default
-
-# os.path.abspath is new in Python 1.5.2:
-if not hasattr(os.path,'abspath'):
-
- def _abspath(path,
-
- isabs=os.path.isabs,join=os.path.join,getcwd=os.getcwd,
- normpath=os.path.normpath):
-
- if not isabs(path):
- path = join(getcwd(), path)
- return normpath(path)
-
-else:
-
- _abspath = os.path.abspath
-
-def _follow_symlinks(filepath):
-
- """ In case filepath is a symlink, follow it until a
- real file is reached.
- """
- filepath = _abspath(filepath)
- while os.path.islink(filepath):
- filepath = os.path.normpath(
- os.path.join(filepath,os.readlink(filepath)))
- return filepath
-
-def _syscmd_uname(option,default=''):
-
- """ Interface to the system's uname command.
- """
- if sys.platform in ('dos','win32','win16','os2'):
- # XXX Others too ?
- return default
- try:
- f = os.popen('uname %s 2> /dev/null' % option)
- except (AttributeError,os.error):
- return default
- output = string.strip(f.read())
- rc = f.close()
- if not output or rc:
- return default
- else:
- return output
-
-def _syscmd_file(target,default=''):
-
- """ Interface to the system's file command.
-
- The function uses the -b option of the file command to have it
- ommit the filename in its output and if possible the -L option
- to have the command follow symlinks. It returns default in
- case the command should fail.
-
- """
- target = _follow_symlinks(target)
- try:
- f = os.popen('file %s 2> /dev/null' % target)
- except (AttributeError,os.error):
- return default
- output = string.strip(f.read())
- rc = f.close()
- if not output or rc:
- return default
- else:
- return output
-
-### Information about the used architecture
-
-# Default values for architecture; non-empty strings override the
-# defaults given as parameters
-_default_architecture = {
- 'win32': ('','WindowsPE'),
- 'win16': ('','Windows'),
- 'dos': ('','MSDOS'),
-}
-
-_architecture_split = re.compile(r'[\s,]').split
-
-def architecture(executable=sys.executable,bits='',linkage=''):
-
- """ Queries the given executable (defaults to the Python interpreter
- binary) for various architecture information.
-
- Returns a tuple (bits,linkage) which contains information about
- the bit architecture and the linkage format used for the
- executable. Both values are returned as strings.
-
- Values that cannot be determined are returned as given by the
- parameter presets. If bits is given as '', the sizeof(pointer)
- (or sizeof(long) on Python version < 1.5.2) is used as
- indicator for the supported pointer size.
-
- The function relies on the system's "file" command to do the
- actual work. This is available on most if not all Unix
- platforms. On some non-Unix platforms where the "file" command
- does not exist and the executable is set to the Python interpreter
- binary defaults from _default_architecture are used.
-
- """
- # Use the sizeof(pointer) as default number of bits if nothing
- # else is given as default.
- if not bits:
- import struct
- try:
- size = struct.calcsize('P')
- except struct.error:
- # Older installations can only query longs
- size = struct.calcsize('l')
- bits = str(size*8) + 'bit'
-
- # Get data from the 'file' system command
- output = _syscmd_file(executable,'')
-
- if not output and \
- executable == sys.executable:
- # "file" command did not return anything; we'll try to provide
- # some sensible defaults then...
- if _default_architecture.has_key(sys.platform):
- b,l = _default_architecture[sys.platform]
- if b:
- bits = b
- if l:
- linkage = l
- return bits,linkage
-
- # Split the output into a list of strings omitting the filename
- fileout = _architecture_split(output)[1:]
-
- if 'executable' not in fileout:
- # Format not supported
- return bits,linkage
-
- # Bits
- if '32-bit' in fileout:
- bits = '32bit'
- elif 'N32' in fileout:
- # On Irix only
- bits = 'n32bit'
- elif '64-bit' in fileout:
- bits = '64bit'
-
- # Linkage
- if 'ELF' in fileout:
- linkage = 'ELF'
- elif 'PE' in fileout:
- # E.g. Windows uses this format
- if 'Windows' in fileout:
- linkage = 'WindowsPE'
- else:
- linkage = 'PE'
- elif 'COFF' in fileout:
- linkage = 'COFF'
- elif 'MS-DOS' in fileout:
- linkage = 'MSDOS'
- else:
- # XXX the A.OUT format also falls under this class...
- pass
-
- return bits,linkage
-
-### Portable uname() interface
-
-_uname_cache = None
-
-def uname():
-
- """ Fairly portable uname interface. Returns a tuple
- of strings (system,node,release,version,machine,processor)
- identifying the underlying platform.
-
- Note that unlike the os.uname function this also returns
- possible processor information as an additional tuple entry.
-
- Entries which cannot be determined are set to ''.
-
- """
- global _uname_cache
-
- if _uname_cache is not None:
- return _uname_cache
-
- # Get some infos from the builtin os.uname API...
- try:
- system,node,release,version,machine = os.uname()
-
- except AttributeError:
- # Hmm, no uname... we'll have to poke around the system then.
- system = sys.platform
- release = ''
- version = ''
- node = _node()
- machine = ''
- processor = ''
- use_syscmd_ver = 1
-
- # Try win32_ver() on win32 platforms
- if system == 'win32':
- release,version,csd,ptype = win32_ver()
- if release and version:
- use_syscmd_ver = 0
-
- # Try the 'ver' system command available on some
- # platforms
- if use_syscmd_ver:
- system,release,version = _syscmd_ver(system)
- # Normalize system to what win32_ver() normally returns
- # (_syscmd_ver() tends to return the vendor name as well)
- if system == 'Microsoft Windows':
- system = 'Windows'
-
- # In case we still don't know anything useful, we'll try to
- # help ourselves
- if system in ('win32','win16'):
- if not version:
- if system == 'win32':
- version = '32bit'
- else:
- version = '16bit'
- system = 'Windows'
-
- elif system[:4] == 'java':
- release,vendor,vminfo,osinfo = java_ver()
- system = 'Java'
- version = string.join(vminfo,', ')
- if not version:
- version = vendor
-
- elif os.name == 'mac':
- release,(version,stage,nonrel),machine = mac_ver()
- system = 'MacOS'
-
- else:
- # System specific extensions
- if system == 'OpenVMS':
- # OpenVMS seems to have release and version mixed up
- if not release or release == '0':
- release = version
- version = ''
- # Get processor information
- try:
- import vms_lib
- except ImportError:
- pass
- else:
- csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
- if (cpu_number >= 128):
- processor = 'Alpha'
- else:
- processor = 'VAX'
- else:
- # Get processor information from the uname system command
- processor = _syscmd_uname('-p','')
-
- # 'unknown' is not really any useful as information; we'll convert
- # it to '' which is more portable
- if system == 'unknown':
- system = ''
- if node == 'unknown':
- node = ''
- if release == 'unknown':
- release = ''
- if version == 'unknown':
- version = ''
- if machine == 'unknown':
- machine = ''
- if processor == 'unknown':
- processor = ''
- _uname_cache = system,node,release,version,machine,processor
- return _uname_cache
-
-### Direct interfaces to some of the uname() return values
-
-def system():
-
- """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
-
- An empty string is returned if the value cannot be determined.
-
- """
- return uname()[0]
-
-def node():
-
- """ Returns the computer's network name (which may not be fully
- qualified)
-
- An empty string is returned if the value cannot be determined.
-
- """
- return uname()[1]
-
-def release():
-
- """ Returns the system's release, e.g. '2.2.0' or 'NT'
-
- An empty string is returned if the value cannot be determined.
-
- """
- return uname()[2]
-
-def version():
-
- """ Returns the system's release version, e.g. '#3 on degas'
-
- An empty string is returned if the value cannot be determined.
-
- """
- return uname()[3]
-
-def machine():
-
- """ Returns the machine type, e.g. 'i386'
-
- An empty string is returned if the value cannot be determined.
-
- """
- return uname()[4]
-
-def processor():
-
- """ Returns the (true) processor name, e.g. 'amdk6'
-
- An empty string is returned if the value cannot be
- determined. Note that many platforms do not provide this
- information or simply return the same value as for machine(),
- e.g. NetBSD does this.
-
- """
- return uname()[5]
-
-### Various APIs for extracting information from sys.version
-
-_sys_version_parser = re.compile(r'([\w.+]+)\s*'
- '\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
- '\[([^\]]+)\]?')
-_sys_version_cache = None
-
-def _sys_version():
-
- """ Returns a parsed version of Python's sys.version as tuple
- (version, buildno, builddate, compiler) referring to the Python
- version, build number, build date/time as string and the compiler
- identification string.
-
- Note that unlike the Python sys.version, the returned value
- for the Python version will always include the patchlevel (it
- defaults to '.0').
-
- """
- global _sys_version_cache
-
- if _sys_version_cache is not None:
- return _sys_version_cache
- version, buildno, builddate, buildtime, compiler = \
- _sys_version_parser.match(sys.version).groups()
- builddate = builddate + ' ' + buildtime
- l = string.split(version, '.')
- if len(l) == 2:
- l.append('0')
- version = string.join(l, '.')
- _sys_version_cache = (version, buildno, builddate, compiler)
- return _sys_version_cache
-
-def python_version():
-
- """ Returns the Python version as string 'major.minor.patchlevel'
-
- Note that unlike the Python sys.version, the returned value
- will always include the patchlevel (it defaults to 0).
-
- """
- return _sys_version()[0]
-
-def python_version_tuple():
-
- """ Returns the Python version as tuple (major, minor, patchlevel)
- of strings.
-
- Note that unlike the Python sys.version, the returned value
- will always include the patchlevel (it defaults to 0).
-
- """
- return string.split(_sys_version()[0], '.')
-
-def python_build():
-
- """ Returns a tuple (buildno, builddate) stating the Python
- build number and date as strings.
-
- """
- return _sys_version()[1:3]
-
-def python_compiler():
-
- """ Returns a string identifying the compiler used for compiling
- Python.
-
- """
- return _sys_version()[3]
-
-### The Opus Magnum of platform strings :-)
-
-_platform_cache = {}
-
-def platform(aliased=0, terse=0):
-
- """ Returns a single string identifying the underlying platform
- with as much useful information as possible (but no more :).
-
- The output is intended to be human readable rather than
- machine parseable. It may look different on different
- platforms and this is intended.
-
- If "aliased" is true, the function will use aliases for
- various platforms that report system names which differ from
- their common names, e.g. SunOS will be reported as
- Solaris. The system_alias() function is used to implement
- this.
-
- Setting terse to true causes the function to return only the
- absolute minimum information needed to identify the platform.
-
- """
- result = _platform_cache.get((aliased, terse), None)
- if result is not None:
- return result
-
- # Get uname information and then apply platform specific cosmetics
- # to it...
- system,node,release,version,machine,processor = uname()
- if machine == processor:
- processor = ''
- if aliased:
- system,release,version = system_alias(system,release,version)
-
- if system == 'Windows':
- # MS platforms
- rel,vers,csd,ptype = win32_ver(version)
- if terse:
- platform = _platform(system,release)
- else:
- platform = _platform(system,release,version,csd)
-
- elif system in ('Linux',):
- # Linux based systems
- distname,distversion,distid = dist('')
- if distname and not terse:
- platform = _platform(system,release,machine,processor,
- 'with',
- distname,distversion,distid)
- else:
- # If the distribution name is unknown check for libc vs. glibc
- libcname,libcversion = libc_ver(sys.executable)
- platform = _platform(system,release,machine,processor,
- 'with',
- libcname+libcversion)
- elif system == 'Java':
- # Java platforms
- r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
- if terse:
- platform = _platform(system,release,version)
- else:
- platform = _platform(system,release,version,
- 'on',
- os_name,os_version,os_arch)
-
- elif system == 'MacOS':
- # MacOS platforms
- if terse:
- platform = _platform(system,release)
- else:
- platform = _platform(system,release,machine)
-
- else:
- # Generic handler
- if terse:
- platform = _platform(system,release)
- else:
- bits,linkage = architecture(sys.executable)
- platform = _platform(system,release,machine,processor,bits,linkage)
-
- _platform_cache[(aliased, terse)] = platform
- return platform
-
-### Command line interface
-
-if __name__ == '__main__':
- # Default is to print the aliased verbose platform string
- terse = ('terse' in sys.argv or '--terse' in sys.argv)
- aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
- print platform(aliased,terse)
- sys.exit(0)
diff --git a/sys/lib/python/popen2.py b/sys/lib/python/popen2.py
deleted file mode 100644
index 694979e6d..000000000
--- a/sys/lib/python/popen2.py
+++ /dev/null
@@ -1,244 +0,0 @@
-"""Spawn a command with pipes to its stdin, stdout, and optionally stderr.
-
-The normal os.popen(cmd, mode) call spawns a shell command and provides a
-file interface to just the input or output of the process depending on
-whether mode is 'r' or 'w'. This module provides the functions popen2(cmd)
-and popen3(cmd) which return two or three pipes to the spawned command.
-"""
-
-import os
-import sys
-
-__all__ = ["popen2", "popen3", "popen4"]
-
-try:
- MAXFD = os.sysconf('SC_OPEN_MAX')
-except (AttributeError, ValueError):
- MAXFD = 256
-
-_active = []
-
-def _cleanup():
- for inst in _active[:]:
- if inst.poll(_deadstate=sys.maxint) >= 0:
- try:
- _active.remove(inst)
- except ValueError:
- # This can happen if two threads create a new Popen instance.
- # It's harmless that it was already removed, so ignore.
- pass
-
-class Popen3:
- """Class representing a child process. Normally instances are created
- by the factory functions popen2() and popen3()."""
-
- sts = -1 # Child not completed yet
-
- def __init__(self, cmd, capturestderr=False, bufsize=-1):
- """The parameter 'cmd' is the shell command to execute in a
- sub-process. On UNIX, 'cmd' may be a sequence, in which case arguments
- will be passed directly to the program without shell intervention (as
- with os.spawnv()). If 'cmd' is a string it will be passed to the shell
- (as with os.system()). The 'capturestderr' flag, if true, specifies
- that the object should capture standard error output of the child
- process. The default is false. If the 'bufsize' parameter is
- specified, it specifies the size of the I/O buffers to/from the child
- process."""
- _cleanup()
- self.cmd = cmd
- p2cread, p2cwrite = os.pipe()
- c2pread, c2pwrite = os.pipe()
- if capturestderr:
- errout, errin = os.pipe()
- self.pid = os.fork()
- if self.pid == 0:
- # Child
- os.dup2(p2cread, 0)
- os.dup2(c2pwrite, 1)
- if capturestderr:
- os.dup2(errin, 2)
- self._run_child(cmd)
- os.close(p2cread)
- self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
- os.close(c2pwrite)
- self.fromchild = os.fdopen(c2pread, 'r', bufsize)
- if capturestderr:
- os.close(errin)
- self.childerr = os.fdopen(errout, 'r', bufsize)
- else:
- self.childerr = None
-
- def __del__(self):
- # In case the child hasn't been waited on, check if it's done.
- self.poll(_deadstate=sys.maxint)
- if self.sts < 0:
- if _active is not None:
- # Child is still running, keep us alive until we can wait on it.
- _active.append(self)
-
- def _run_child(self, cmd):
- if isinstance(cmd, basestring):
- cmd = ['/bin/sh', '-c', cmd]
- for i in xrange(3, MAXFD):
- try:
- os.close(i)
- except OSError:
- pass
- try:
- os.execvp(cmd[0], cmd)
- finally:
- os._exit(1)
-
- def poll(self, _deadstate=None):
- """Return the exit status of the child process if it has finished,
- or -1 if it hasn't finished yet."""
- if self.sts < 0:
- try:
- pid, sts = os.waitpid(self.pid, os.WNOHANG)
- # pid will be 0 if self.pid hasn't terminated
- if pid == self.pid:
- self.sts = sts
- except os.error:
- if _deadstate is not None:
- self.sts = _deadstate
- return self.sts
-
- def wait(self):
- """Wait for and return the exit status of the child process."""
- if self.sts < 0:
- pid, sts = os.waitpid(self.pid, 0)
- # This used to be a test, but it is believed to be
- # always true, so I changed it to an assertion - mvl
- assert pid == self.pid
- self.sts = sts
- return self.sts
-
-
-class Popen4(Popen3):
- childerr = None
-
- def __init__(self, cmd, bufsize=-1):
- _cleanup()
- self.cmd = cmd
- p2cread, p2cwrite = os.pipe()
- c2pread, c2pwrite = os.pipe()
- self.pid = os.fork()
- if self.pid == 0:
- # Child
- os.dup2(p2cread, 0)
- os.dup2(c2pwrite, 1)
- os.dup2(c2pwrite, 2)
- self._run_child(cmd)
- os.close(p2cread)
- self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
- os.close(c2pwrite)
- self.fromchild = os.fdopen(c2pread, 'r', bufsize)
-
-
-if sys.platform[:3] == "win" or sys.platform == "os2emx":
- # Some things don't make sense on non-Unix platforms.
- del Popen3, Popen4
-
- def popen2(cmd, bufsize=-1, mode='t'):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
- be a sequence, in which case arguments will be passed directly to the
- program without shell intervention (as with os.spawnv()). If 'cmd' is a
- string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdout, child_stdin) are returned."""
- w, r = os.popen2(cmd, mode, bufsize)
- return r, w
-
- def popen3(cmd, bufsize=-1, mode='t'):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
- be a sequence, in which case arguments will be passed directly to the
- program without shell intervention (as with os.spawnv()). If 'cmd' is a
- string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdout, child_stdin, child_stderr) are returned."""
- w, r, e = os.popen3(cmd, mode, bufsize)
- return r, w, e
-
- def popen4(cmd, bufsize=-1, mode='t'):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
- be a sequence, in which case arguments will be passed directly to the
- program without shell intervention (as with os.spawnv()). If 'cmd' is a
- string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdout_stderr, child_stdin) are returned."""
- w, r = os.popen4(cmd, mode, bufsize)
- return r, w
-else:
- def popen2(cmd, bufsize=-1, mode='t'):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
- be a sequence, in which case arguments will be passed directly to the
- program without shell intervention (as with os.spawnv()). If 'cmd' is a
- string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdout, child_stdin) are returned."""
- inst = Popen3(cmd, False, bufsize)
- return inst.fromchild, inst.tochild
-
- def popen3(cmd, bufsize=-1, mode='t'):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
- be a sequence, in which case arguments will be passed directly to the
- program without shell intervention (as with os.spawnv()). If 'cmd' is a
- string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdout, child_stdin, child_stderr) are returned."""
- inst = Popen3(cmd, True, bufsize)
- return inst.fromchild, inst.tochild, inst.childerr
-
- def popen4(cmd, bufsize=-1, mode='t'):
- """Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd' may
- be a sequence, in which case arguments will be passed directly to the
- program without shell intervention (as with os.spawnv()). If 'cmd' is a
- string it will be passed to the shell (as with os.system()). If
- 'bufsize' is specified, it sets the buffer size for the I/O pipes. The
- file objects (child_stdout_stderr, child_stdin) are returned."""
- inst = Popen4(cmd, bufsize)
- return inst.fromchild, inst.tochild
-
- __all__.extend(["Popen3", "Popen4"])
-
-def _test():
- # When the test runs, there shouldn't be any open pipes
- _cleanup()
- assert not _active, "Active pipes when test starts " + repr([c.cmd for c in _active])
- cmd = "cat"
- teststr = "ab cd\n"
- if os.name == "nt":
- cmd = "more"
- # "more" doesn't act the same way across Windows flavors,
- # sometimes adding an extra newline at the start or the
- # end. So we strip whitespace off both ends for comparison.
- expected = teststr.strip()
- print "testing popen2..."
- r, w = popen2(cmd)
- w.write(teststr)
- w.close()
- got = r.read()
- if got.strip() != expected:
- raise ValueError("wrote %r read %r" % (teststr, got))
- print "testing popen3..."
- try:
- r, w, e = popen3([cmd])
- except:
- r, w, e = popen3(cmd)
- w.write(teststr)
- w.close()
- got = r.read()
- if got.strip() != expected:
- raise ValueError("wrote %r read %r" % (teststr, got))
- got = e.read()
- if got:
- raise ValueError("unexpected %r on stderr" % (got,))
- for inst in _active[:]:
- inst.wait()
- _cleanup()
- if _active:
- raise ValueError("_active not empty")
- print "All OK"
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/poplib.py b/sys/lib/python/poplib.py
deleted file mode 100644
index 1cf114abc..000000000
--- a/sys/lib/python/poplib.py
+++ /dev/null
@@ -1,423 +0,0 @@
-"""A POP3 client class.
-
-Based on the J. Myers POP3 draft, Jan. 96
-"""
-
-# Author: David Ascher <david_ascher@brown.edu>
-# [heavily stealing from nntplib.py]
-# Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97]
-# String method conversion and test jig improvements by ESR, February 2001.
-# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003
-
-# Example (see the test function at the end of this file)
-
-# Imports
-
-import re, socket
-
-__all__ = ["POP3","error_proto","POP3_SSL"]
-
-# Exception raised when an error or invalid response is received:
-
-class error_proto(Exception): pass
-
-# Standard Port
-POP3_PORT = 110
-
-# POP SSL PORT
-POP3_SSL_PORT = 995
-
-# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
-CR = '\r'
-LF = '\n'
-CRLF = CR+LF
-
-
-class POP3:
-
- """This class supports both the minimal and optional command sets.
- Arguments can be strings or integers (where appropriate)
- (e.g.: retr(1) and retr('1') both work equally well.
-
- Minimal Command Set:
- USER name user(name)
- PASS string pass_(string)
- STAT stat()
- LIST [msg] list(msg = None)
- RETR msg retr(msg)
- DELE msg dele(msg)
- NOOP noop()
- RSET rset()
- QUIT quit()
-
- Optional Commands (some servers support these):
- RPOP name rpop(name)
- APOP name digest apop(name, digest)
- TOP msg n top(msg, n)
- UIDL [msg] uidl(msg = None)
-
- Raises one exception: 'error_proto'.
-
- Instantiate with:
- POP3(hostname, port=110)
-
- NB: the POP protocol locks the mailbox from user
- authorization until QUIT, so be sure to get in, suck
- the messages, and quit, each time you access the
- mailbox.
-
- POP is a line-based protocol, which means large mail
- messages consume lots of python cycles reading them
- line-by-line.
-
- If it's available on your mail server, use IMAP4
- instead, it doesn't suffer from the two problems
- above.
- """
-
-
- def __init__(self, host, port = POP3_PORT):
- self.host = host
- self.port = port
- msg = "getaddrinfo returns an empty list"
- self.sock = None
- for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- try:
- self.sock = socket.socket(af, socktype, proto)
- self.sock.connect(sa)
- except socket.error, msg:
- if self.sock:
- self.sock.close()
- self.sock = None
- continue
- break
- if not self.sock:
- raise socket.error, msg
- self.file = self.sock.makefile('rb')
- self._debugging = 0
- self.welcome = self._getresp()
-
-
- def _putline(self, line):
- if self._debugging > 1: print '*put*', repr(line)
- self.sock.sendall('%s%s' % (line, CRLF))
-
-
- # Internal: send one command to the server (through _putline())
-
- def _putcmd(self, line):
- if self._debugging: print '*cmd*', repr(line)
- self._putline(line)
-
-
- # Internal: return one line from the server, stripping CRLF.
- # This is where all the CPU time of this module is consumed.
- # Raise error_proto('-ERR EOF') if the connection is closed.
-
- def _getline(self):
- line = self.file.readline()
- if self._debugging > 1: print '*get*', repr(line)
- if not line: raise error_proto('-ERR EOF')
- octets = len(line)
- # server can send any combination of CR & LF
- # however, 'readline()' returns lines ending in LF
- # so only possibilities are ...LF, ...CRLF, CR...LF
- if line[-2:] == CRLF:
- return line[:-2], octets
- if line[0] == CR:
- return line[1:-1], octets
- return line[:-1], octets
-
-
- # Internal: get a response from the server.
- # Raise 'error_proto' if the response doesn't start with '+'.
-
- def _getresp(self):
- resp, o = self._getline()
- if self._debugging > 1: print '*resp*', repr(resp)
- c = resp[:1]
- if c != '+':
- raise error_proto(resp)
- return resp
-
-
- # Internal: get a response plus following text from the server.
-
- def _getlongresp(self):
- resp = self._getresp()
- list = []; octets = 0
- line, o = self._getline()
- while line != '.':
- if line[:2] == '..':
- o = o-1
- line = line[1:]
- octets = octets + o
- list.append(line)
- line, o = self._getline()
- return resp, list, octets
-
-
- # Internal: send a command and get the response
-
- def _shortcmd(self, line):
- self._putcmd(line)
- return self._getresp()
-
-
- # Internal: send a command and get the response plus following text
-
- def _longcmd(self, line):
- self._putcmd(line)
- return self._getlongresp()
-
-
- # These can be useful:
-
- def getwelcome(self):
- return self.welcome
-
-
- def set_debuglevel(self, level):
- self._debugging = level
-
-
- # Here are all the POP commands:
-
- def user(self, user):
- """Send user name, return response
-
- (should indicate password required).
- """
- return self._shortcmd('USER %s' % user)
-
-
- def pass_(self, pswd):
- """Send password, return response
-
- (response includes message count, mailbox size).
-
- NB: mailbox is locked by server from here to 'quit()'
- """
- return self._shortcmd('PASS %s' % pswd)
-
-
- def stat(self):
- """Get mailbox status.
-
- Result is tuple of 2 ints (message count, mailbox size)
- """
- retval = self._shortcmd('STAT')
- rets = retval.split()
- if self._debugging: print '*stat*', repr(rets)
- numMessages = int(rets[1])
- sizeMessages = int(rets[2])
- return (numMessages, sizeMessages)
-
-
- def list(self, which=None):
- """Request listing, return result.
-
- Result without a message number argument is in form
- ['response', ['mesg_num octets', ...], octets].
-
- Result when a message number argument is given is a
- single response: the "scan listing" for that message.
- """
- if which is not None:
- return self._shortcmd('LIST %s' % which)
- return self._longcmd('LIST')
-
-
- def retr(self, which):
- """Retrieve whole message number 'which'.
-
- Result is in form ['response', ['line', ...], octets].
- """
- return self._longcmd('RETR %s' % which)
-
-
- def dele(self, which):
- """Delete message number 'which'.
-
- Result is 'response'.
- """
- return self._shortcmd('DELE %s' % which)
-
-
- def noop(self):
- """Does nothing.
-
- One supposes the response indicates the server is alive.
- """
- return self._shortcmd('NOOP')
-
-
- def rset(self):
- """Not sure what this does."""
- return self._shortcmd('RSET')
-
-
- def quit(self):
- """Signoff: commit changes on server, unlock mailbox, close connection."""
- try:
- resp = self._shortcmd('QUIT')
- except error_proto, val:
- resp = val
- self.file.close()
- self.sock.close()
- del self.file, self.sock
- return resp
-
- #__del__ = quit
-
-
- # optional commands:
-
- def rpop(self, user):
- """Not sure what this does."""
- return self._shortcmd('RPOP %s' % user)
-
-
- timestamp = re.compile(r'\+OK.*(<[^>]+>)')
-
- def apop(self, user, secret):
- """Authorisation
-
- - only possible if server has supplied a timestamp in initial greeting.
-
- Args:
- user - mailbox user;
- secret - secret shared between client and server.
-
- NB: mailbox is locked by server from here to 'quit()'
- """
- m = self.timestamp.match(self.welcome)
- if not m:
- raise error_proto('-ERR APOP not supported by server')
- import hashlib
- digest = hashlib.md5(m.group(1)+secret).digest()
- digest = ''.join(map(lambda x:'%02x'%ord(x), digest))
- return self._shortcmd('APOP %s %s' % (user, digest))
-
-
- def top(self, which, howmuch):
- """Retrieve message header of message number 'which'
- and first 'howmuch' lines of message body.
-
- Result is in form ['response', ['line', ...], octets].
- """
- return self._longcmd('TOP %s %s' % (which, howmuch))
-
-
- def uidl(self, which=None):
- """Return message digest (unique id) list.
-
- If 'which', result contains unique id for that message
- in the form 'response mesgnum uid', otherwise result is
- the list ['response', ['mesgnum uid', ...], octets]
- """
- if which is not None:
- return self._shortcmd('UIDL %s' % which)
- return self._longcmd('UIDL')
-
-class POP3_SSL(POP3):
- """POP3 client class over SSL connection
-
- Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None)
-
- hostname - the hostname of the pop3 over ssl server
- port - port number
- keyfile - PEM formatted file that countains your private key
- certfile - PEM formatted certificate chain file
-
- See the methods of the parent class POP3 for more documentation.
- """
-
- def __init__(self, host, port = POP3_SSL_PORT, keyfile = None, certfile = None):
- self.host = host
- self.port = port
- self.keyfile = keyfile
- self.certfile = certfile
- self.buffer = ""
- msg = "getaddrinfo returns an empty list"
- self.sock = None
- for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- try:
- self.sock = socket.socket(af, socktype, proto)
- self.sock.connect(sa)
- except socket.error, msg:
- if self.sock:
- self.sock.close()
- self.sock = None
- continue
- break
- if not self.sock:
- raise socket.error, msg
- self.file = self.sock.makefile('rb')
- self.sslobj = socket.ssl(self.sock, self.keyfile, self.certfile)
- self._debugging = 0
- self.welcome = self._getresp()
-
- def _fillBuffer(self):
- localbuf = self.sslobj.read()
- if len(localbuf) == 0:
- raise error_proto('-ERR EOF')
- self.buffer += localbuf
-
- def _getline(self):
- line = ""
- renewline = re.compile(r'.*?\n')
- match = renewline.match(self.buffer)
- while not match:
- self._fillBuffer()
- match = renewline.match(self.buffer)
- line = match.group(0)
- self.buffer = renewline.sub('' ,self.buffer, 1)
- if self._debugging > 1: print '*get*', repr(line)
-
- octets = len(line)
- if line[-2:] == CRLF:
- return line[:-2], octets
- if line[0] == CR:
- return line[1:-1], octets
- return line[:-1], octets
-
- def _putline(self, line):
- if self._debugging > 1: print '*put*', repr(line)
- line += CRLF
- bytes = len(line)
- while bytes > 0:
- sent = self.sslobj.write(line)
- if sent == bytes:
- break # avoid copy
- line = line[sent:]
- bytes = bytes - sent
-
- def quit(self):
- """Signoff: commit changes on server, unlock mailbox, close connection."""
- try:
- resp = self._shortcmd('QUIT')
- except error_proto, val:
- resp = val
- self.sock.close()
- del self.sslobj, self.sock
- return resp
-
-
-if __name__ == "__main__":
- import sys
- a = POP3(sys.argv[1])
- print a.getwelcome()
- a.user(sys.argv[2])
- a.pass_(sys.argv[3])
- a.list()
- (numMsgs, totalSize) = a.stat()
- for i in range(1, numMsgs + 1):
- (header, msg, octets) = a.retr(i)
- print "Message %d:" % i
- for line in msg:
- print ' ' + line
- print '-----------------------'
- a.quit()
diff --git a/sys/lib/python/posixfile.py b/sys/lib/python/posixfile.py
deleted file mode 100644
index 763c605c9..000000000
--- a/sys/lib/python/posixfile.py
+++ /dev/null
@@ -1,235 +0,0 @@
-"""Extended file operations available in POSIX.
-
-f = posixfile.open(filename, [mode, [bufsize]])
- will create a new posixfile object
-
-f = posixfile.fileopen(fileobject)
- will create a posixfile object from a builtin file object
-
-f.file()
- will return the original builtin file object
-
-f.dup()
- will return a new file object based on a new filedescriptor
-
-f.dup2(fd)
- will return a new file object based on the given filedescriptor
-
-f.flags(mode)
- will turn on the associated flag (merge)
- mode can contain the following characters:
-
- (character representing a flag)
- a append only flag
- c close on exec flag
- n no delay flag
- s synchronization flag
- (modifiers)
- ! turn flags 'off' instead of default 'on'
- = copy flags 'as is' instead of default 'merge'
- ? return a string in which the characters represent the flags
- that are set
-
- note: - the '!' and '=' modifiers are mutually exclusive.
- - the '?' modifier will return the status of the flags after they
- have been changed by other characters in the mode string
-
-f.lock(mode [, len [, start [, whence]]])
- will (un)lock a region
- mode can contain the following characters:
-
- (character representing type of lock)
- u unlock
- r read lock
- w write lock
- (modifiers)
- | wait until the lock can be granted
- ? return the first lock conflicting with the requested lock
- or 'None' if there is no conflict. The lock returned is in the
- format (mode, len, start, whence, pid) where mode is a
- character representing the type of lock ('r' or 'w')
-
- note: - the '?' modifier prevents a region from being locked; it is
- query only
-"""
-
-
class _posixfile_:
    """File wrapper class that provides extra POSIX file routines."""

    # Indexed by the wrapped file's `closed` flag in __repr__:
    # 0 -> 'open', 1 -> 'closed'.
    states = ['open', 'closed']

    #
    # Internal routines
    #
    def __repr__(self):
        """Debug representation: state, name, mode and object id."""
        file = self._file_
        return "<%s posixfile '%s', mode '%s' at %s>" % \
            (self.states[file.closed], file.name, file.mode, \
             hex(id(self))[2:])

    #
    # Initialization routines
    #
    def open(self, name, mode='r', bufsize=-1):
        """Open the named file and wrap it; returns self."""
        import __builtin__
        return self.fileopen(__builtin__.open(name, mode, bufsize))

    def fileopen(self, file):
        """Wrap an existing builtin file object; returns self."""
        import types
        # repr()-based type check: only a real builtin file is accepted.
        if repr(type(file)) != "<type 'file'>":
            raise TypeError, 'posixfile.fileopen() arg must be file object'
        self._file_  = file
        # Copy basic file methods
        for maybemethod in dir(file):
            if not maybemethod.startswith('_'):
                attr = getattr(file, maybemethod)
                if isinstance(attr, types.BuiltinMethodType):
                    setattr(self, maybemethod, attr)
        return self

    #
    # New methods
    #
    def file(self):
        """Return the underlying builtin file object."""
        return self._file_

    def dup(self):
        """Return a new builtin file object on a dup()'ed descriptor."""
        import posix

        if not hasattr(posix, 'fdopen'):
            raise AttributeError, 'dup() method unavailable'

        return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode)

    def dup2(self, fd):
        """Duplicate this file's descriptor onto fd; return a file on fd."""
        import posix

        if not hasattr(posix, 'fdopen'):
            # NOTE(review): message says dup() but this is dup2().
            raise AttributeError, 'dup() method unavailable'

        posix.dup2(self._file_.fileno(), fd)
        return posix.fdopen(fd, self._file_.mode)

    def flags(self, *which):
        """Get/set fcntl status flags; see the module docstring for codes."""
        import fcntl, os

        if which:
            if len(which) > 1:
                raise TypeError, 'Too many arguments'
            which = which[0]
        else: which = '?'                   # default: just query flags

        # Translate flag characters into O_* bits.
        l_flags = 0
        if 'n' in which: l_flags = l_flags | os.O_NDELAY
        if 'a' in which: l_flags = l_flags | os.O_APPEND
        if 's' in which: l_flags = l_flags | os.O_SYNC

        file = self._file_

        if '=' not in which:
            # Merge with ('!': clear from) the currently-set flags.
            cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
            if '!' in which: l_flags = cur_fl & ~ l_flags
            else: l_flags = cur_fl | l_flags

        l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags)

        if 'c' in which:
            # Close-on-exec lives under F_SETFD, not F_SETFL.
            arg = ('!' not in which) # 0 is don't, 1 is do close on exec
            l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg)

        if '?' in which:
            which = ''                  # Return current flags
            l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0)
            if os.O_APPEND & l_flags: which = which + 'a'
            if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1:
                which = which + 'c'
            if os.O_NDELAY & l_flags: which = which + 'n'
            if os.O_SYNC & l_flags: which = which + 's'
            return which

    def lock(self, how, *args):
        """(Un)lock a region; how is 'r'/'w'/'u' plus optional '|', '?'."""
        import struct, fcntl

        if 'w' in how: l_type = fcntl.F_WRLCK
        elif 'r' in how: l_type = fcntl.F_RDLCK
        elif 'u' in how: l_type = fcntl.F_UNLCK
        else: raise TypeError, 'no type of lock specified'

        if '|' in how: cmd = fcntl.F_SETLKW   # block until granted
        elif '?' in how: cmd = fcntl.F_GETLK  # query only, no locking
        else: cmd = fcntl.F_SETLK

        l_whence = 0
        l_start = 0
        l_len = 0

        # Optional positional args: (len[, start[, whence]]).
        if len(args) == 1:
            l_len = args[0]
        elif len(args) == 2:
            l_len, l_start = args
        elif len(args) == 3:
            l_len, l_start, l_whence = args
        elif len(args) > 3:
            raise TypeError, 'too many arguments'

        # Hack by davem@magnet.com to get locking to go on freebsd;
        # additions for AIX by Vladimir.Marangozov@imag.fr
        # The struct flock layout differs per platform, so pack per-OS.
        import sys, os
        if sys.platform in ('netbsd1',
                            'openbsd2',
                            'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
                            'freebsd6', 'freebsd7',
                            'bsdos2', 'bsdos3', 'bsdos4'):
            flock = struct.pack('lxxxxlxxxxlhh', \
                  l_start, l_len, os.getpid(), l_type, l_whence)
        elif sys.platform in ('aix3', 'aix4'):
            flock = struct.pack('hhlllii', \
                  l_type, l_whence, l_start, l_len, 0, 0, 0)
        else:
            flock = struct.pack('hhllhh', \
                  l_type, l_whence, l_start, l_len, 0, 0)

        flock = fcntl.fcntl(self._file_.fileno(), cmd, flock)

        if '?' in how:
            # Decode the conflicting lock returned by F_GETLK.
            # NOTE(review): freebsd6/freebsd7 appear in the pack table
            # above but are missing here -- presumably an oversight.
            if sys.platform in ('netbsd1',
                                'openbsd2',
                                'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
                                'bsdos2', 'bsdos3', 'bsdos4'):
                l_start, l_len, l_pid, l_type, l_whence = \
                    struct.unpack('lxxxxlxxxxlhh', flock)
            elif sys.platform in ('aix3', 'aix4'):
                l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \
                    struct.unpack('hhlllii', flock)
            elif sys.platform == "linux2":
                l_type, l_whence, l_start, l_len, l_pid, l_sysid = \
                    struct.unpack('hhllhh', flock)
            else:
                l_type, l_whence, l_start, l_len, l_sysid, l_pid = \
                    struct.unpack('hhllhh', flock)

            if l_type != fcntl.F_UNLCK:
                if l_type == fcntl.F_RDLCK:
                    return 'r', l_len, l_start, l_whence, l_pid
                else:
                    return 'w', l_len, l_start, l_whence, l_pid
-
def open(name, mode='r', bufsize=-1):
    """Public routine to open a file as a posixfile object."""
    wrapper = _posixfile_()
    return wrapper.open(name, mode, bufsize)
-
def fileopen(file):
    """Public routine to get a posixfile object from a Python file object."""
    wrapper = _posixfile_()
    return wrapper.fileopen(file)
-
#
# Constants
#
# Whence values for seek()/lock() offsets, mirroring os.SEEK_*.
SEEK_SET = 0    # offset relative to start of file
SEEK_CUR = 1    # offset relative to current position
SEEK_END = 2    # offset relative to end of file

#
# End of posixfile.py
#
diff --git a/sys/lib/python/posixpath.py b/sys/lib/python/posixpath.py
deleted file mode 100644
index b396f0ace..000000000
--- a/sys/lib/python/posixpath.py
+++ /dev/null
@@ -1,453 +0,0 @@
-"""Common operations on Posix pathnames.
-
-Instead of importing this module directly, import os and refer to
-this module as os.path. The "os.path" name is an alias for this
-module on Posix systems; on other systems (e.g. Mac, Windows),
-os.path provides the same operations in a manner specific to that
-platform, and is an alias to another module (e.g. macpath, ntpath).
-
-Some of this can actually be useful on non-Posix systems too, e.g.
-for manipulation of the pathname component of URLs.
-"""
-
-import os
-import stat
-
# Public API of the module (what `from posixpath import *` exposes).
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
           "basename","dirname","commonprefix","getsize","getmtime",
           "getatime","getctime","islink","exists","lexists","isdir","isfile",
           "ismount","walk","expanduser","expandvars","normpath","abspath",
           "samefile","sameopenfile","samestat",
           "curdir","pardir","sep","pathsep","defpath","altsep","extsep",
           "devnull","realpath","supports_unicode_filenames"]

# strings representing various path-related bits and pieces
curdir = '.'                 # current directory component
pardir = '..'                # parent directory component
extsep = '.'                 # extension separator
sep = '/'                    # path component separator
pathsep = ':'                # separator in $PATH-style lists
defpath = ':/bin:/usr/bin'   # default search path when $PATH is unset
altsep = None                # POSIX has no alternative separator
devnull = '/dev/null'        # the null device
-
-# Normalize the case of a pathname. Trivial in Posix, string.lower on Mac.
-# On MS-DOS this may also turn slashes into backslashes; however, other
-# normalizations (such as optimizing '../' away) are not allowed
-# (another function should be defined to do that).
-
def normcase(s):
    """Normalize the case of a pathname; a no-op on POSIX filesystems."""
    return s
-
-
-# Return whether a path is absolute.
-# Trivial in Posix, harder on the Mac or MS-DOS.
-
def isabs(s):
    """Return True if *s* is an absolute path (begins with '/')."""
    return s[:1] == '/'
-
-
-# Join pathnames.
-# Ignore the previous parts if a part is absolute.
-# Insert a '/' unless the first part is empty or already ends in '/'.
-
def join(a, *p):
    """Join pathname components, inserting '/' between parts as needed.

    An absolute component discards everything accumulated before it.
    """
    result = a
    for part in p:
        if part.startswith('/'):
            # Absolute component: restart from here.
            result = part
        elif not result or result.endswith('/'):
            result = result + part
        else:
            result = result + '/' + part
    return result
-
-
-# Split a path in head (everything up to the last '/') and tail (the
-# rest). If the path ends in '/', tail will be empty. If there is no
-# '/' in the path, head will be empty.
-# Trailing '/'es are stripped from head unless it is the root.
-
def split(p):
    """Split *p* into (head, tail) around the final slash.

    Trailing slashes are stripped from head unless head is all slashes
    (i.e. the root).  Either part may be empty.
    """
    cut = p.rfind('/') + 1
    head = p[:cut]
    tail = p[cut:]
    if head and head != '/' * len(head):
        head = head.rstrip('/')
    return head, tail
-
-
-# Split a path in root and extension.
-# The extension is everything starting at the last dot in the last
-# pathname component; the root is everything before that.
-# It is always true that root + ext == p.
-
def splitext(p):
    """Split *p* into (root, ext); ext starts at the final dot.

    Keeps the historical behaviour: a dot inside the last component
    always splits, so '.cshrc' yields ('', '.cshrc').
    """
    dot = p.rfind('.')
    slash = p.rfind('/')
    if dot <= slash:
        # No dot inside the last component.
        return p, ''
    return p[:dot], p[dot:]
-
-
-# Split a pathname into a drive specification and the rest of the
-# path. Useful on DOS/Windows/NT; on Unix, the drive is always empty.
-
def splitdrive(p):
    """Split *p* into (drive, path); the drive is always '' on POSIX."""
    return '', p
-
-
-# Return the tail (basename) part of a path.
-
def basename(p):
    """Return the final pathname component of *p*."""
    head, tail = split(p)
    return tail
-
-
-# Return the head (dirname) part of a path.
-
def dirname(p):
    """Return the directory portion of *p*."""
    head, tail = split(p)
    return head
-
-
-# Return the longest prefix of all list elements.
-
def commonprefix(m):
    "Given a list of pathnames, returns the longest common leading component"
    if not m: return ''
    # min() and max() give the lexicographic extremes of the list; any
    # prefix shared by those two is shared by every element.
    # (Was `for i in xrange(n)`; xrange does not exist on Python 3 --
    # zip stops at the shorter string and is portable.)
    s1 = min(m)
    s2 = max(m)
    n = 0
    for c1, c2 in zip(s1, s2):
        if c1 != c2:
            break
        n += 1
    return s1[:n]
-
-# Get size, mtime, atime of files.
-
def getsize(filename):
    """Return the size of a file in bytes, as reported by os.stat()."""
    st = os.stat(filename)
    return st.st_size
-
def getmtime(filename):
    """Return the last-modification time of a file, via os.stat()."""
    st = os.stat(filename)
    return st.st_mtime
-
def getatime(filename):
    """Return the last-access time of a file, via os.stat()."""
    st = os.stat(filename)
    return st.st_atime
-
def getctime(filename):
    """Return the metadata-change time of a file, via os.stat()."""
    st = os.stat(filename)
    return st.st_ctime
-
-# Is a path a symbolic link?
-# This will always return false on systems where os.lstat doesn't exist.
-
def islink(path):
    """Return True if *path* is a symbolic link.

    Always False where os.lstat is unavailable or the path cannot be
    stat'ed.
    """
    try:
        mode = os.lstat(path).st_mode
    except (os.error, AttributeError):
        return False
    return stat.S_ISLNK(mode)
-
-
-# Does a path exist?
-# This is false for dangling symbolic links.
-
def exists(path):
    """Test whether a path exists.  Returns False for broken symbolic links

    Follows symlinks, so a dangling link reports False.
    """
    try:
        # Result unused -- only the success/failure of stat() matters
        # (the original bound it to an unused local).
        os.stat(path)
    except os.error:
        return False
    return True
-
-
-# Being true for dangling symbolic links is also useful.
-
def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links

    Uses lstat(), so the link itself is tested, not its target.
    """
    try:
        # Result unused -- only the success/failure of lstat() matters
        # (the original bound it to an unused local).
        os.lstat(path)
    except os.error:
        return False
    return True
-
-
-# Is a path a directory?
-# This follows symbolic links, so both islink() and isdir() can be true
-# for the same path.
-
def isdir(path):
    """Return True if *path* refers to an existing directory.

    Follows symlinks, so islink() and isdir() can both be true.
    """
    try:
        mode = os.stat(path).st_mode
    except os.error:
        return False
    return stat.S_ISDIR(mode)
-
-
-# Is a path a regular file?
-# This follows symbolic links, so both islink() and isfile() can be true
-# for the same path.
-
def isfile(path):
    """Return True if *path* refers to an existing regular file.

    Follows symlinks, so islink() and isfile() can both be true.
    """
    try:
        mode = os.stat(path).st_mode
    except os.error:
        return False
    return stat.S_ISREG(mode)
-
-
-# Are two filenames really pointing to the same file?
-
def samefile(f1, f2):
    """Return True if two pathnames reference the same actual file."""
    return samestat(os.stat(f1), os.stat(f2))
-
-
-# Are two open files really referencing the same file?
-# (Not necessarily the same file descriptor!)
-
def sameopenfile(fp1, fp2):
    """Return True if two open file descriptors reference the same file."""
    return samestat(os.fstat(fp1), os.fstat(fp2))
-
-
-# Are two stat buffers (obtained from stat, fstat or lstat)
-# describing the same file?
-
def samestat(s1, s2):
    """Return True if two stat buffers describe the same file (dev+inode)."""
    return (s1.st_ino, s1.st_dev) == (s2.st_ino, s2.st_dev)
-
-
-# Is a path a mount point?
-# (Does this work for all UNIXes? Is it even guaranteed to work by Posix?)
-
def ismount(path):
    """Return True if *path* is a mount point.

    A path is a mount point when its parent lives on a different device,
    or when path/.. is the very same inode (a filesystem root).
    """
    try:
        here = os.stat(path)
        parent = os.stat(join(path, '..'))
    except os.error:
        # Nonexistent paths are not mount points.
        return False
    if here.st_dev != parent.st_dev:
        return True       # path/.. on a different device as path
    if here.st_ino == parent.st_ino:
        return True       # path/.. is the same i-node as path
    return False
-
-
-# Directory tree walk.
-# For each directory under top (including top itself, but excluding
-# '.' and '..'), func(arg, dirname, filenames) is called, where
-# dirname is the name of the directory and filenames is the list
-# of files (and subdirectories etc.) in the directory.
-# The func may modify the filenames list, to implement a filter,
-# or to impose a different order of visiting.
-
def walk(top, func, arg):
    """Directory tree walk with callback function.

    For each directory in the tree rooted at top (including top itself,
    but excluding '.' and '..'), call func(arg, dirname, fnames), where
    fnames lists the names in dirname.  func may modify fnames in place
    to prune or reorder the recursion.  arg is passed through untouched
    (commonly None).
    """
    try:
        entries = os.listdir(top)
    except os.error:
        # Unreadable directory: silently skip, matching os.path.walk.
        return
    # The callback runs before recursion so it can edit `entries`.
    func(arg, top, entries)
    for entry in entries:
        child = join(top, entry)
        try:
            st = os.lstat(child)
        except os.error:
            continue
        if stat.S_ISDIR(st.st_mode):
            walk(child, func, arg)
-
-
-# Expand paths beginning with '~' or '~user'.
-# '~' means $HOME; '~user' means that user's home directory.
-# If the path doesn't begin with '~', or if the user or $HOME is unknown,
-# the path is returned unchanged (leaving error reporting to whatever
-# function is called with the expanded path as argument).
-# See also module 'glob' for expansion of *, ? and [...] in pathnames.
-# (A function should also be defined to do full *sh-style environment
-# variable expansion.)
-
def expanduser(path):
    """Expand a leading '~' or '~user' to a home directory.

    If the user or $HOME is unknown, the path is returned unchanged.
    """
    if not path.startswith('~'):
        return path
    slash = path.find('/', 1)
    if slash < 0:
        slash = len(path)
    if slash == 1:
        # Bare '~': prefer $HOME, fall back to the password database.
        if 'HOME' in os.environ:
            userhome = os.environ['HOME']
        else:
            import pwd
            userhome = pwd.getpwuid(os.getuid()).pw_dir
    else:
        # '~user': look the user up; unknown users leave path untouched.
        import pwd
        try:
            pwent = pwd.getpwnam(path[1:slash])
        except KeyError:
            return path
        userhome = pwent.pw_dir
    return userhome.rstrip('/') + path[slash:]
-
-
-# Expand paths containing shell variable substitutions.
-# This expands the forms $variable and ${variable} only.
-# Non-existent variables are left unchanged.
-
# Lazily-compiled regex matching $name or ${name}; created on first use.
_varprog = None

def expandvars(path):
    """Expand shell variables of form $var and ${var}.  Unknown variables
    are left unchanged."""
    global _varprog
    if '$' not in path:
        return path
    if not _varprog:
        import re
        _varprog = re.compile(r'\$(\w+|\{[^}]*\})')
    i = 0
    while True:
        m = _varprog.search(path, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        # Strip the braces of the ${name} form.
        if name.startswith('{') and name.endswith('}'):
            name = name[1:-1]
        if name in os.environ:
            # Splice in the value, then continue scanning *after* it so
            # substituted text is not itself re-expanded.
            tail = path[j:]
            path = path[:i] + os.environ[name]
            i = len(path)
            path += tail
        else:
            # Unknown variable: leave it in place and skip past it.
            i = j
    return path
-
-
-# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
-# It should be understood that this may change the meaning of the path
-# if it contains symbolic links!
-
def normpath(path):
    """Normalize a path: collapse '//' and '/./', resolve 'a/..' pairs.

    Purely textual -- the result may denote a different file when the
    path contains symbolic links.
    """
    if path == '':
        return '.'
    # POSIX: exactly two leading slashes are significant; one, or three
    # or more, all mean a single '/'.
    leading = 0
    if path.startswith('/'):
        leading = 1
        if path.startswith('//') and not path.startswith('///'):
            leading = 2
    kept = []
    for comp in path.split('/'):
        if comp in ('', '.'):
            continue
        if comp != '..' or (not leading and not kept) or \
           (kept and kept[-1] == '..'):
            kept.append(comp)
        elif kept:
            kept.pop()
    result = '/' * leading + '/'.join(kept)
    return result or '.'
-
-
def abspath(path):
    """Return a normalized, absolute version of *path*."""
    if isabs(path):
        return normpath(path)
    return normpath(join(os.getcwd(), path))
-
-
-# Return a canonical path (i.e. the absolute location of a file on the
-# filesystem).
-
def realpath(filename):
    """Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path."""
    # Build the component list, with a '/' marker for absolute paths so
    # join() below reconstructs the right prefix.
    if isabs(filename):
        bits = ['/'] + filename.split('/')[1:]
    else:
        bits = [''] + filename.split('/')

    # Check each prefix of the path for being a symlink.
    for i in range(2, len(bits)+1):
        component = join(*bits[0:i])
        # Resolve symbolic links.
        if islink(component):
            resolved = _resolve_link(component)
            if resolved is None:
                # Infinite loop -- return original component + rest of the path
                return abspath(join(*([component] + bits[i:])))
            else:
                # Restart resolution from the resolved prefix plus the
                # untouched remainder of the path.
                newpath = join(*([resolved] + bits[i:]))
                return realpath(newpath)

    # No symlinks anywhere in the path.
    return abspath(filename)
-
-
def _resolve_link(path):
    """Follow *path* through successive symlinks to a non-link target.

    Returns None when a symlink loop is detected (a link already visited
    is reached again).
    """
    visited = []
    while islink(path):
        if path in visited:
            # Already seen: symlink loop.
            return None
        visited.append(path)
        target = os.readlink(path)
        if isabs(target):
            path = normpath(target)
        else:
            # Relative target: resolve against the link's directory.
            path = normpath(join(dirname(path), target))
    return path
-
# POSIX treats filenames as byte strings; no OS-level unicode support.
supports_unicode_filenames = False
diff --git a/sys/lib/python/pprint.py b/sys/lib/python/pprint.py
deleted file mode 100644
index 19a3dc249..000000000
--- a/sys/lib/python/pprint.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# Author: Fred L. Drake, Jr.
-# fdrake@acm.org
-#
-# This is a simple little module I wrote to make life easier. I didn't
-# see anything quite like it in the library, though I may have overlooked
-# something. I wrote this when I was trying to read some heavily nested
-# tuples with fairly non-descriptive content. This is modeled very much
-# after Lisp/Scheme - style pretty-printing of lists. If you find it
-# useful, thank small children who sleep at night.
-
-"""Support to pretty-print lists, tuples, & dictionaries recursively.
-
-Very simple, but useful, especially in debugging data structures.
-
-Classes
--------
-
-PrettyPrinter()
- Handle pretty-printing operations onto a stream using a configured
- set of formatting parameters.
-
-Functions
----------
-
-pformat()
- Format a Python object into a pretty-printed representation.
-
-pprint()
- Pretty-print a Python object to a stream [default is sys.stdout].
-
-saferepr()
- Generate a 'standard' repr()-like value, but protect against recursive
- data structures.
-
-"""
-
-import sys as _sys
-
-from cStringIO import StringIO as _StringIO
-
-__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
- "PrettyPrinter"]
-
# cache these for faster access:
# (bound once at import time so the recursive formatter below uses a
# cheap global alias instead of repeated builtin lookups)
_commajoin = ", ".join
_id = id
_len = len
_type = type
-
-
def pprint(object, stream=None, indent=1, width=80, depth=None):
    """Pretty-print a Python object to a stream [default is sys.stdout]."""
    PrettyPrinter(stream=stream, indent=indent, width=width,
                  depth=depth).pprint(object)
-
def pformat(object, indent=1, width=80, depth=None):
    """Format a Python object into a pretty-printed representation."""
    printer = PrettyPrinter(indent=indent, width=width, depth=depth)
    return printer.pformat(object)
-
def saferepr(object):
    """Version of repr() which can handle recursive data structures."""
    rep, readable, recursive = _safe_repr(object, {}, None, 0)
    return rep
-
def isreadable(object):
    """Determine if saferepr(object) is readable by eval()."""
    rep, readable, recursive = _safe_repr(object, {}, None, 0)
    return readable
-
def isrecursive(object):
    """Determine if object requires a recursive representation."""
    rep, readable, recursive = _safe_repr(object, {}, None, 0)
    return recursive
-
class PrettyPrinter:
    def __init__(self, indent=1, width=80, depth=None, stream=None):
        """Handle pretty printing operations onto a stream using a set of
        configured parameters.

        indent
            Number of spaces to indent for each level of nesting.

        width
            Attempted maximum number of columns in the output.

        depth
            The maximum depth to print out nested structures.

        stream
            The desired output stream.  If omitted (or false), the standard
            output stream available at construction will be used.

        """
        indent = int(indent)
        width = int(width)
        assert indent >= 0, "indent must be >= 0"
        assert depth is None or depth > 0, "depth must be > 0"
        assert width, "width must be != 0"
        self._depth = depth
        self._indent_per_level = indent
        self._width = width
        if stream is not None:
            self._stream = stream
        else:
            self._stream = _sys.stdout

    def pprint(self, object):
        """Write the formatted object plus a trailing newline to the stream."""
        self._format(object, self._stream, 0, 0, {}, 0)
        self._stream.write("\n")

    def pformat(self, object):
        """Return the pretty-printed representation of object as a string."""
        sio = _StringIO()
        self._format(object, sio, 0, 0, {}, 0)
        return sio.getvalue()

    def isrecursive(self, object):
        """True if object contains a self-referencing structure."""
        return self.format(object, {}, 0, 0)[2]

    def isreadable(self, object):
        """True if the representation is eval()-able and non-recursive."""
        s, readable, recursive = self.format(object, {}, 0, 0)
        return readable and not recursive

    def _format(self, object, stream, indent, allowance, context, level):
        # context maps id() -> 1 for every container currently being
        # formatted; presence there means we have hit a cycle.
        level = level + 1
        objid = _id(object)
        if objid in context:
            stream.write(_recursion(object))
            self._recursive = True
            self._readable = False
            return
        rep = self._repr(object, context, level - 1)
        typ = _type(object)
        # Spread over multiple lines only if the one-line repr would not
        # fit (allowance = columns reserved for enclosing closers).
        sepLines = _len(rep) > (self._width - 1 - indent - allowance)
        write = stream.write

        if sepLines:
            # Only containers using the *builtin* repr are recursed into;
            # subclasses with a custom __repr__ keep their own output.
            r = getattr(typ, "__repr__", None)
            if issubclass(typ, dict) and r is dict.__repr__:
                write('{')
                if self._indent_per_level > 1:
                    write((self._indent_per_level - 1) * ' ')
                length = _len(object)
                if length:
                    context[objid] = 1
                    indent = indent + self._indent_per_level
                    # Keys are emitted in sorted order.
                    items = object.items()
                    items.sort()
                    key, ent = items[0]
                    rep = self._repr(key, context, level)
                    write(rep)
                    write(': ')
                    self._format(ent, stream, indent + _len(rep) + 2,
                                  allowance + 1, context, level)
                    if length > 1:
                        for key, ent in items[1:]:
                            rep = self._repr(key, context, level)
                            write(',\n%s%s: ' % (' '*indent, rep))
                            self._format(ent, stream, indent + _len(rep) + 2,
                                          allowance + 1, context, level)
                    indent = indent - self._indent_per_level
                    del context[objid]
                write('}')
                return

            if (issubclass(typ, list) and r is list.__repr__) or \
               (issubclass(typ, tuple) and r is tuple.__repr__):
                if issubclass(typ, list):
                    write('[')
                    endchar = ']'
                else:
                    write('(')
                    endchar = ')'
                if self._indent_per_level > 1:
                    write((self._indent_per_level - 1) * ' ')
                length = _len(object)
                if length:
                    context[objid] = 1
                    indent = indent + self._indent_per_level
                    self._format(object[0], stream, indent, allowance + 1,
                                 context, level)
                    if length > 1:
                        for ent in object[1:]:
                            write(',\n' + ' '*indent)
                            self._format(ent, stream, indent,
                                          allowance + 1, context, level)
                    indent = indent - self._indent_per_level
                    del context[objid]
                if issubclass(typ, tuple) and length == 1:
                    # One-element tuple needs its trailing comma.
                    write(',')
                write(endchar)
                return

        write(rep)

    def _repr(self, object, context, level):
        # Wraps format(), folding its flags into the _readable/_recursive
        # attributes used during a pprint()/pformat() run.
        repr, readable, recursive = self.format(object, context.copy(),
                                                self._depth, level)
        if not readable:
            self._readable = False
        if recursive:
            self._recursive = True
        return repr

    def format(self, object, context, maxlevels, level):
        """Format object for a specific context, returning a string
        and flags indicating whether the representation is 'readable'
        and whether the object represents a recursive construct.
        """
        return _safe_repr(object, context, maxlevels, level)
-
-
-# Return triple (repr_string, isreadable, isrecursive).
-
def _safe_repr(object, context, maxlevels, level):
    """Return (repr_string, isreadable, isrecursive) for object.

    context maps id() of containers currently being formatted to 1 and
    is used to detect cycles; maxlevels (may be None) caps recursion.
    """
    typ = _type(object)
    if typ is str:
        # Fast path: plain repr is fine unless the locale module has been
        # loaded (locale-aware isalpha may disagree with repr's escaping).
        if 'locale' not in _sys.modules:
            return repr(object), True, False
        # Pick the quote style that avoids escaping, like repr() does.
        if "'" in object and '"' not in object:
            closure = '"'
            quotes = {'"': '\\"'}
        else:
            closure = "'"
            quotes = {"'": "\\'"}
        qget = quotes.get
        sio = _StringIO()
        write = sio.write
        for char in object:
            if char.isalpha():
                write(char)
            else:
                # Non-alpha chars are escaped via their own repr().
                write(qget(char, repr(char)[1:-1]))
        return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False

    # Only containers that use the *builtin* repr are recursed into.
    r = getattr(typ, "__repr__", None)
    if issubclass(typ, dict) and r is dict.__repr__:
        if not object:
            return "{}", True, False
        objid = _id(object)
        if maxlevels and level > maxlevels:
            # Depth limit reached: elide the contents.
            return "{...}", False, objid in context
        if objid in context:
            return _recursion(object), False, True
        context[objid] = 1
        readable = True
        recursive = False
        components = []
        append = components.append
        level += 1
        saferepr = _safe_repr
        # Keys emitted in sorted order; flags accumulate over all entries.
        for k, v in sorted(object.items()):
            krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
            vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
            append("%s: %s" % (krepr, vrepr))
            readable = readable and kreadable and vreadable
            if krecur or vrecur:
                recursive = True
        del context[objid]
        return "{%s}" % _commajoin(components), readable, recursive

    if (issubclass(typ, list) and r is list.__repr__) or \
       (issubclass(typ, tuple) and r is tuple.__repr__):
        if issubclass(typ, list):
            if not object:
                return "[]", True, False
            format = "[%s]"
        elif _len(object) == 1:
            # One-element tuple keeps its trailing comma.
            format = "(%s,)"
        else:
            if not object:
                return "()", True, False
            format = "(%s)"
        objid = _id(object)
        if maxlevels and level > maxlevels:
            return format % "...", False, objid in context
        if objid in context:
            return _recursion(object), False, True
        context[objid] = 1
        readable = True
        recursive = False
        components = []
        append = components.append
        level += 1
        for o in object:
            orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
            append(orepr)
            if not oreadable:
                readable = False
            if orecur:
                recursive = True
        del context[objid]
        return format % _commajoin(components), readable, recursive

    # Anything else: fall back to plain repr(); '<...>' forms (default
    # object reprs) are not eval()-able, hence not 'readable'.
    rep = repr(object)
    return rep, (rep and not rep.startswith('<')), False
-
-
def _recursion(object):
    """Return the placeholder emitted in place of a cyclic reference."""
    template = "<Recursion on %s with id=%s>"
    return template % (_type(object).__name__, _id(object))
-
-
def _perfcheck(object=None):
    """Crude timing comparison of _safe_repr() vs. PrettyPrinter.pformat()."""
    import time
    if object is None:
        # Default workload: a large list of small mixed containers.
        object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
    p = PrettyPrinter()
    t1 = time.time()
    _safe_repr(object, {}, None, 0)
    t2 = time.time()
    p.pformat(object)
    t3 = time.time()
    print "_safe_repr:", t2 - t1
    print "pformat:", t3 - t2
-
if __name__ == "__main__":
    # Running the module directly executes the timing check above.
    _perfcheck()
diff --git a/sys/lib/python/profile.py b/sys/lib/python/profile.py
deleted file mode 100755
index b6048aa0c..000000000
--- a/sys/lib/python/profile.py
+++ /dev/null
@@ -1,619 +0,0 @@
-#! /usr/bin/env python
-#
-# Class for profiling python code. rev 1.0 6/2/94
-#
-# Based on prior profile module by Sjoerd Mullender...
-# which was hacked somewhat by: Guido van Rossum
-
-"""Class for profiling Python code."""
-
-# Copyright 1994, by InfoSeek Corporation, all rights reserved.
-# Written by James Roskind
-#
-# Permission to use, copy, modify, and distribute this Python software
-# and its associated documentation for any purpose (subject to the
-# restriction in the following sentence) without fee is hereby granted,
-# provided that the above copyright notice appears in all copies, and
-# that both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of InfoSeek not be used in
-# advertising or publicity pertaining to distribution of the software
-# without specific, written prior permission. This permission is
-# explicitly restricted to the copying and modification of the software
-# to remain in Python, compiled Python, or other languages (such as C)
-# wherein the modified or derived code is exclusively imported into a
-# Python module.
-#
-# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
-# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
-# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
-# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
-# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-
-
-import sys
-import os
-import time
-import marshal
-from optparse import OptionParser
-
-__all__ = ["run", "runctx", "help", "Profile"]
-
-# Sample timer for use with
-#i_count = 0
-#def integer_timer():
-# global i_count
-# i_count = i_count + 1
-# return i_count
-#itimes = integer_timer # replace with C coded timer returning integers
-
-#**************************************************************************
-# The following are the static member functions for the profiler class
-# Note that an instance of Profile() is *not* needed to call them.
-#**************************************************************************
-
-def run(statement, filename=None, sort=-1):
- """Run statement under profiler optionally saving results in filename
-
- This function takes a single argument that can be passed to the
- "exec" statement, and an optional file name. In all cases this
- routine attempts to "exec" its first argument and gather profiling
- statistics from the execution. If no file name is present, then this
- function automatically prints a simple profiling report, sorted by the
- standard name string (file/line/function-name) that is presented in
- each line.
- """
- prof = Profile()
- try:
- prof = prof.run(statement)
- except SystemExit:
- pass
- if filename is not None:
- prof.dump_stats(filename)
- else:
- return prof.print_stats(sort)
-
-def runctx(statement, globals, locals, filename=None):
- """Run statement under profiler, supplying your own globals and locals,
- optionally saving results in filename.
-
- statement and filename have the same semantics as profile.run
- """
- prof = Profile()
- try:
- prof = prof.runctx(statement, globals, locals)
- except SystemExit:
- pass
-
- if filename is not None:
- prof.dump_stats(filename)
- else:
- return prof.print_stats()
-
-# Backwards compatibility.
-def help():
- print "Documentation for the profile module can be found "
- print "in the Python Library Reference, section 'The Python Profiler'."
-
-if os.name == "mac":
- import MacOS
- def _get_time_mac(timer=MacOS.GetTicks):
- return timer() / 60.0
-
-if hasattr(os, "times"):
- def _get_time_times(timer=os.times):
- t = timer()
- return t[0] + t[1]
-
-# Using getrusage(3) is better than clock(3) if available:
-# on some systems (e.g. FreeBSD), getrusage has a higher resolution
-# Furthermore, on a POSIX system, returns microseconds, which
-# wrap around after 36min.
-_has_res = 0
-try:
- import resource
- resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF)
- def _get_time_resource(timer=resgetrusage):
- t = timer()
- return t[0] + t[1]
- _has_res = 1
-except ImportError:
- pass
-
-class Profile:
- """Profiler class.
-
- self.cur is always a tuple. Each such tuple corresponds to a stack
- frame that is currently active (self.cur[-2]). The following are the
- definitions of its members. We use this external "parallel stack" to
- avoid contaminating the program that we are profiling. (old profiler
- used to write into the frames local dictionary!!) Derived classes
- can change the definition of some entries, as long as they leave
- [-2:] intact (frame and previous tuple). In case an internal error is
- detected, the -3 element is used as the function name.
-
- [ 0] = Time that needs to be charged to the parent frame's function.
- It is used so that a function call will not have to access the
- timing data for the parent frame.
- [ 1] = Total time spent in this frame's function, excluding time in
- subfunctions (this latter is tallied in cur[2]).
- [ 2] = Total time spent in subfunctions, excluding time executing the
- frame's function (this latter is tallied in cur[1]).
- [-3] = Name of the function that corresponds to this frame.
- [-2] = Actual frame that we correspond to (used to sync exception handling).
- [-1] = Our parent 6-tuple (corresponds to frame.f_back).
-
- Timing data for each function is stored as a 5-tuple in the dictionary
- self.timings[]. The index is always the name stored in self.cur[-3].
- The following are the definitions of the members:
-
- [0] = The number of times this function was called, not counting direct
- or indirect recursion,
- [1] = Number of times this function appears on the stack, minus one
- [2] = Total time spent internal to this function
- [3] = Cumulative time that this function was present on the stack. In
- non-recursive functions, this is the total execution time from start
- to finish of each invocation of a function, including time spent in
- all subfunctions.
- [4] = A dictionary indicating for each function name, the number of times
- it was called by us.
- """
-
- bias = 0 # calibration constant
-
- def __init__(self, timer=None, bias=None):
- self.timings = {}
- self.cur = None
- self.cmd = ""
- self.c_func_name = ""
-
- if bias is None:
- bias = self.bias
- self.bias = bias # Materialize in local dict for lookup speed.
-
- if not timer:
- if _has_res:
- self.timer = resgetrusage
- self.dispatcher = self.trace_dispatch
- self.get_time = _get_time_resource
- elif os.name == 'mac':
- self.timer = MacOS.GetTicks
- self.dispatcher = self.trace_dispatch_mac
- self.get_time = _get_time_mac
- elif hasattr(time, 'clock'):
- self.timer = self.get_time = time.clock
- self.dispatcher = self.trace_dispatch_i
- elif hasattr(os, 'times'):
- self.timer = os.times
- self.dispatcher = self.trace_dispatch
- self.get_time = _get_time_times
- else:
- self.timer = self.get_time = time.time
- self.dispatcher = self.trace_dispatch_i
- else:
- self.timer = timer
- t = self.timer() # test out timer function
- try:
- length = len(t)
- except TypeError:
- self.get_time = timer
- self.dispatcher = self.trace_dispatch_i
- else:
- if length == 2:
- self.dispatcher = self.trace_dispatch
- else:
- self.dispatcher = self.trace_dispatch_l
- # This get_time() implementation needs to be defined
- # here to capture the passed-in timer in the parameter
- # list (for performance). Note that we can't assume
- # the timer() result contains two values in all
- # cases.
- def get_time_timer(timer=timer, sum=sum):
- return sum(timer())
- self.get_time = get_time_timer
- self.t = self.get_time()
- self.simulate_call('profiler')
-
- # Heavily optimized dispatch routine for os.times() timer
-
- def trace_dispatch(self, frame, event, arg):
- timer = self.timer
- t = timer()
- t = t[0] + t[1] - self.t - self.bias
-
- if event == "c_call":
- self.c_func_name = arg.__name__
-
- if self.dispatch[event](self, frame,t):
- t = timer()
- self.t = t[0] + t[1]
- else:
- r = timer()
- self.t = r[0] + r[1] - t # put back unrecorded delta
-
- # Dispatch routine for best timer program (return = scalar, fastest if
- # an integer but float works too -- and time.clock() relies on that).
-
- def trace_dispatch_i(self, frame, event, arg):
- timer = self.timer
- t = timer() - self.t - self.bias
-
- if event == "c_call":
- self.c_func_name = arg.__name__
-
- if self.dispatch[event](self, frame, t):
- self.t = timer()
- else:
- self.t = timer() - t # put back unrecorded delta
-
- # Dispatch routine for macintosh (timer returns time in ticks of
- # 1/60th second)
-
- def trace_dispatch_mac(self, frame, event, arg):
- timer = self.timer
- t = timer()/60.0 - self.t - self.bias
-
- if event == "c_call":
- self.c_func_name = arg.__name__
-
- if self.dispatch[event](self, frame, t):
- self.t = timer()/60.0
- else:
- self.t = timer()/60.0 - t # put back unrecorded delta
-
- # SLOW generic dispatch routine for timer returning lists of numbers
-
- def trace_dispatch_l(self, frame, event, arg):
- get_time = self.get_time
- t = get_time() - self.t - self.bias
-
- if event == "c_call":
- self.c_func_name = arg.__name__
-
- if self.dispatch[event](self, frame, t):
- self.t = get_time()
- else:
- self.t = get_time() - t # put back unrecorded delta
-
- # In the event handlers, the first 3 elements of self.cur are unpacked
- # into vrbls w/ 3-letter names. The last two characters are meant to be
- # mnemonic:
- # _pt self.cur[0] "parent time" time to be charged to parent frame
- # _it self.cur[1] "internal time" time spent directly in the function
- # _et self.cur[2] "external time" time spent in subfunctions
-
- def trace_dispatch_exception(self, frame, t):
- rpt, rit, ret, rfn, rframe, rcur = self.cur
- if (rframe is not frame) and rcur:
- return self.trace_dispatch_return(rframe, t)
- self.cur = rpt, rit+t, ret, rfn, rframe, rcur
- return 1
-
-
- def trace_dispatch_call(self, frame, t):
- if self.cur and frame.f_back is not self.cur[-2]:
- rpt, rit, ret, rfn, rframe, rcur = self.cur
- if not isinstance(rframe, Profile.fake_frame):
- assert rframe.f_back is frame.f_back, ("Bad call", rfn,
- rframe, rframe.f_back,
- frame, frame.f_back)
- self.trace_dispatch_return(rframe, 0)
- assert (self.cur is None or \
- frame.f_back is self.cur[-2]), ("Bad call",
- self.cur[-3])
- fcode = frame.f_code
- fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
- self.cur = (t, 0, 0, fn, frame, self.cur)
- timings = self.timings
- if fn in timings:
- cc, ns, tt, ct, callers = timings[fn]
- timings[fn] = cc, ns + 1, tt, ct, callers
- else:
- timings[fn] = 0, 0, 0, 0, {}
- return 1
-
- def trace_dispatch_c_call (self, frame, t):
- fn = ("", 0, self.c_func_name)
- self.cur = (t, 0, 0, fn, frame, self.cur)
- timings = self.timings
- if timings.has_key(fn):
- cc, ns, tt, ct, callers = timings[fn]
- timings[fn] = cc, ns+1, tt, ct, callers
- else:
- timings[fn] = 0, 0, 0, 0, {}
- return 1
-
- def trace_dispatch_return(self, frame, t):
- if frame is not self.cur[-2]:
- assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
- self.trace_dispatch_return(self.cur[-2], 0)
-
- # Prefix "r" means part of the Returning or exiting frame.
- # Prefix "p" means part of the Previous or Parent or older frame.
-
- rpt, rit, ret, rfn, frame, rcur = self.cur
- rit = rit + t
- frame_total = rit + ret
-
- ppt, pit, pet, pfn, pframe, pcur = rcur
- self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
-
- timings = self.timings
- cc, ns, tt, ct, callers = timings[rfn]
- if not ns:
- # This is the only occurrence of the function on the stack.
- # Else this is a (directly or indirectly) recursive call, and
- # its cumulative time will get updated when the topmost call to
- # it returns.
- ct = ct + frame_total
- cc = cc + 1
-
- if pfn in callers:
- callers[pfn] = callers[pfn] + 1 # hack: gather more
- # stats such as the amount of time added to ct courtesy
- # of this specific call, and the contribution to cc
- # courtesy of this call.
- else:
- callers[pfn] = 1
-
- timings[rfn] = cc, ns - 1, tt + rit, ct, callers
-
- return 1
-
-
- dispatch = {
- "call": trace_dispatch_call,
- "exception": trace_dispatch_exception,
- "return": trace_dispatch_return,
- "c_call": trace_dispatch_c_call,
- "c_exception": trace_dispatch_return, # the C function returned
- "c_return": trace_dispatch_return,
- }
-
-
- # The next few functions play with self.cmd. By carefully preloading
- # our parallel stack, we can force the profiled result to include
- # an arbitrary string as the name of the calling function.
- # We use self.cmd as that string, and the resulting stats look
- # very nice :-).
-
- def set_cmd(self, cmd):
- if self.cur[-1]: return # already set
- self.cmd = cmd
- self.simulate_call(cmd)
-
- class fake_code:
- def __init__(self, filename, line, name):
- self.co_filename = filename
- self.co_line = line
- self.co_name = name
- self.co_firstlineno = 0
-
- def __repr__(self):
- return repr((self.co_filename, self.co_line, self.co_name))
-
- class fake_frame:
- def __init__(self, code, prior):
- self.f_code = code
- self.f_back = prior
-
- def simulate_call(self, name):
- code = self.fake_code('profile', 0, name)
- if self.cur:
- pframe = self.cur[-2]
- else:
- pframe = None
- frame = self.fake_frame(code, pframe)
- self.dispatch['call'](self, frame, 0)
-
- # collect stats from pending stack, including getting final
- # timings for self.cmd frame.
-
- def simulate_cmd_complete(self):
- get_time = self.get_time
- t = get_time() - self.t
- while self.cur[-1]:
- # We *can* cause assertion errors here if
- # dispatch_trace_return checks for a frame match!
- self.dispatch['return'](self, self.cur[-2], t)
- t = 0
- self.t = get_time() - t
-
-
- def print_stats(self, sort=-1):
- import pstats
- pstats.Stats(self).strip_dirs().sort_stats(sort). \
- print_stats()
-
- def dump_stats(self, file):
- f = open(file, 'wb')
- self.create_stats()
- marshal.dump(self.stats, f)
- f.close()
-
- def create_stats(self):
- self.simulate_cmd_complete()
- self.snapshot_stats()
-
- def snapshot_stats(self):
- self.stats = {}
- for func, (cc, ns, tt, ct, callers) in self.timings.iteritems():
- callers = callers.copy()
- nc = 0
- for callcnt in callers.itervalues():
- nc += callcnt
- self.stats[func] = cc, nc, tt, ct, callers
-
-
- # The following two methods can be called by clients to use
- # a profiler to profile a statement, given as a string.
-
- def run(self, cmd):
- import __main__
- dict = __main__.__dict__
- return self.runctx(cmd, dict, dict)
-
- def runctx(self, cmd, globals, locals):
- self.set_cmd(cmd)
- sys.setprofile(self.dispatcher)
- try:
- exec cmd in globals, locals
- finally:
- sys.setprofile(None)
- return self
-
- # This method is more useful to profile a single function call.
- def runcall(self, func, *args, **kw):
- self.set_cmd(repr(func))
- sys.setprofile(self.dispatcher)
- try:
- return func(*args, **kw)
- finally:
- sys.setprofile(None)
-
-
- #******************************************************************
- # The following calculates the overhead for using a profiler. The
- # problem is that it takes a fair amount of time for the profiler
- # to stop the stopwatch (from the time it receives an event).
- # Similarly, there is a delay from the time that the profiler
- # re-starts the stopwatch before the user's code really gets to
- # continue. The following code tries to measure the difference on
- # a per-event basis.
- #
- # Note that this difference is only significant if there are a lot of
- # events, and relatively little user code per event. For example,
- # code with small functions will typically benefit from having the
- # profiler calibrated for the current platform. This *could* be
- # done on the fly during init() time, but it is not worth the
- # effort. Also note that if too large a value specified, then
- # execution time on some functions will actually appear as a
- # negative number. It is *normal* for some functions (with very
- # low call counts) to have such negative stats, even if the
- # calibration figure is "correct."
- #
- # One alternative to profile-time calibration adjustments (i.e.,
- # adding in the magic little delta during each event) is to track
- # more carefully the number of events (and cumulatively, the number
- # of events during sub functions) that are seen. If this were
- # done, then the arithmetic could be done after the fact (i.e., at
- # display time). Currently, we track only call/return events.
- # These values can be deduced by examining the callees and callers
- # vectors for each functions. Hence we *can* almost correct the
- # internal time figure at print time (note that we currently don't
- # track exception event processing counts). Unfortunately, there
- # is currently no similar information for cumulative sub-function
- # time. It would not be hard to "get all this info" at profiler
- # time. Specifically, we would have to extend the tuples to keep
- # counts of this in each frame, and then extend the defs of timing
- # tuples to include the significant two figures. I'm a bit fearful
- # that this additional feature will slow the heavily optimized
- # event/time ratio (i.e., the profiler would run slower, fur a very
- # low "value added" feature.)
- #**************************************************************
-
- def calibrate(self, m, verbose=0):
- if self.__class__ is not Profile:
- raise TypeError("Subclasses must override .calibrate().")
-
- saved_bias = self.bias
- self.bias = 0
- try:
- return self._calibrate_inner(m, verbose)
- finally:
- self.bias = saved_bias
-
- def _calibrate_inner(self, m, verbose):
- get_time = self.get_time
-
- # Set up a test case to be run with and without profiling. Include
- # lots of calls, because we're trying to quantify stopwatch overhead.
- # Do not raise any exceptions, though, because we want to know
- # exactly how many profile events are generated (one call event, +
- # one return event, per Python-level call).
-
- def f1(n):
- for i in range(n):
- x = 1
-
- def f(m, f1=f1):
- for i in range(m):
- f1(100)
-
- f(m) # warm up the cache
-
- # elapsed_noprofile <- time f(m) takes without profiling.
- t0 = get_time()
- f(m)
- t1 = get_time()
- elapsed_noprofile = t1 - t0
- if verbose:
- print "elapsed time without profiling =", elapsed_noprofile
-
- # elapsed_profile <- time f(m) takes with profiling. The difference
- # is profiling overhead, only some of which the profiler subtracts
- # out on its own.
- p = Profile()
- t0 = get_time()
- p.runctx('f(m)', globals(), locals())
- t1 = get_time()
- elapsed_profile = t1 - t0
- if verbose:
- print "elapsed time with profiling =", elapsed_profile
-
- # reported_time <- "CPU seconds" the profiler charged to f and f1.
- total_calls = 0.0
- reported_time = 0.0
- for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
- p.timings.items():
- if funcname in ("f", "f1"):
- total_calls += cc
- reported_time += tt
-
- if verbose:
- print "'CPU seconds' profiler reported =", reported_time
- print "total # calls =", total_calls
- if total_calls != m + 1:
- raise ValueError("internal error: total calls = %d" % total_calls)
-
- # reported_time - elapsed_noprofile = overhead the profiler wasn't
- # able to measure. Divide by twice the number of calls (since there
- # are two profiler events per call in this test) to get the hidden
- # overhead per event.
- mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
- if verbose:
- print "mean stopwatch overhead per profile event =", mean
- return mean
-
-#****************************************************************************
-def Stats(*args):
- print 'Report generating functions are in the "pstats" module\a'
-
-def main():
- usage = "profile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
- parser = OptionParser(usage=usage)
- parser.allow_interspersed_args = False
- parser.add_option('-o', '--outfile', dest="outfile",
- help="Save stats to <outfile>", default=None)
- parser.add_option('-s', '--sort', dest="sort",
- help="Sort order when printing to stdout, based on pstats.Stats class", default=-1)
-
- if not sys.argv[1:]:
- parser.print_usage()
- sys.exit(2)
-
- (options, args) = parser.parse_args()
- sys.argv[:] = args
-
- if (len(sys.argv) > 0):
- sys.path.insert(0, os.path.dirname(sys.argv[0]))
- run('execfile(%r)' % (sys.argv[0],), options.outfile, options.sort)
- else:
- parser.print_usage()
- return parser
-
-# When invoked as main program, invoke the profiler on a script
-if __name__ == '__main__':
- main()
diff --git a/sys/lib/python/pstats.py b/sys/lib/python/pstats.py
deleted file mode 100644
index bdbb27e04..000000000
--- a/sys/lib/python/pstats.py
+++ /dev/null
@@ -1,684 +0,0 @@
-"""Class for printing reports on profiled python code."""
-
-# Class for printing reports on profiled python code. rev 1.0 4/1/94
-#
-# Based on prior profile module by Sjoerd Mullender...
-# which was hacked somewhat by: Guido van Rossum
-#
-# see profile.doc and profile.py for more info.
-
-# Copyright 1994, by InfoSeek Corporation, all rights reserved.
-# Written by James Roskind
-#
-# Permission to use, copy, modify, and distribute this Python software
-# and its associated documentation for any purpose (subject to the
-# restriction in the following sentence) without fee is hereby granted,
-# provided that the above copyright notice appears in all copies, and
-# that both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of InfoSeek not be used in
-# advertising or publicity pertaining to distribution of the software
-# without specific, written prior permission. This permission is
-# explicitly restricted to the copying and modification of the software
-# to remain in Python, compiled Python, or other languages (such as C)
-# wherein the modified or derived code is exclusively imported into a
-# Python module.
-#
-# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
-# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
-# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
-# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
-# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
-# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-
-import sys
-import os
-import time
-import marshal
-import re
-
-__all__ = ["Stats"]
-
-class Stats:
- """This class is used for creating reports from data generated by the
- Profile class. It is a "friend" of that class, and imports data either
- by direct access to members of Profile class, or by reading in a dictionary
- that was emitted (via marshal) from the Profile class.
-
- The big change from the previous Profiler (in terms of raw functionality)
- is that an "add()" method has been provided to combine Stats from
- several distinct profile runs. Both the constructor and the add()
- method now take arbitrarily many file names as arguments.
-
- All the print methods now take an argument that indicates how many lines
- to print. If the arg is a floating point number between 0 and 1.0, then
- it is taken as a decimal percentage of the available lines to be printed
- (e.g., .1 means print 10% of all available lines). If it is an integer,
- it is taken to mean the number of lines of data that you wish to have
- printed.
-
- The sort_stats() method now processes some additional options (i.e., in
- addition to the old -1, 0, 1, or 2). It takes an arbitrary number of
- quoted strings to select the sort order. For example sort_stats('time',
- 'name') sorts on the major key of 'internal function time', and on the
- minor key of 'the name of the function'. Look at the two tables in
- sort_stats() and get_sort_arg_defs(self) for more examples.
-
- All methods return self, so you can string together commands like:
- Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
- print_stats(5).print_callers(5)
- """
-
- def __init__(self, *args, **kwds):
- # I can't figure out how to explictly specify a stream keyword arg
- # with *args:
- # def __init__(self, *args, stream=sys.stdout): ...
- # so I use **kwds and sqauwk if something unexpected is passed in.
- self.stream = sys.stdout
- if "stream" in kwds:
- self.stream = kwds["stream"]
- del kwds["stream"]
- if kwds:
- keys = kwds.keys()
- keys.sort()
- extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
- raise ValueError, "unrecognized keyword args: %s" % extras
- if not len(args):
- arg = None
- else:
- arg = args[0]
- args = args[1:]
- self.init(arg)
- self.add(*args)
-
- def init(self, arg):
- self.all_callees = None # calc only if needed
- self.files = []
- self.fcn_list = None
- self.total_tt = 0
- self.total_calls = 0
- self.prim_calls = 0
- self.max_name_len = 0
- self.top_level = {}
- self.stats = {}
- self.sort_arg_dict = {}
- self.load_stats(arg)
- trouble = 1
- try:
- self.get_top_level_stats()
- trouble = 0
- finally:
- if trouble:
- print >> self.stream, "Invalid timing data",
- if self.files: print >> self.stream, self.files[-1],
- print >> self.stream
-
- def load_stats(self, arg):
- if not arg: self.stats = {}
- elif isinstance(arg, basestring):
- f = open(arg, 'rb')
- self.stats = marshal.load(f)
- f.close()
- try:
- file_stats = os.stat(arg)
- arg = time.ctime(file_stats.st_mtime) + " " + arg
- except: # in case this is not unix
- pass
- self.files = [ arg ]
- elif hasattr(arg, 'create_stats'):
- arg.create_stats()
- self.stats = arg.stats
- arg.stats = {}
- if not self.stats:
- raise TypeError, "Cannot create or construct a %r object from '%r''" % (
- self.__class__, arg)
- return
-
- def get_top_level_stats(self):
- for func, (cc, nc, tt, ct, callers) in self.stats.items():
- self.total_calls += nc
- self.prim_calls += cc
- self.total_tt += tt
- if callers.has_key(("jprofile", 0, "profiler")):
- self.top_level[func] = None
- if len(func_std_string(func)) > self.max_name_len:
- self.max_name_len = len(func_std_string(func))
-
- def add(self, *arg_list):
- if not arg_list: return self
- if len(arg_list) > 1: self.add(*arg_list[1:])
- other = arg_list[0]
- if type(self) != type(other) or self.__class__ != other.__class__:
- other = Stats(other)
- self.files += other.files
- self.total_calls += other.total_calls
- self.prim_calls += other.prim_calls
- self.total_tt += other.total_tt
- for func in other.top_level:
- self.top_level[func] = None
-
- if self.max_name_len < other.max_name_len:
- self.max_name_len = other.max_name_len
-
- self.fcn_list = None
-
- for func, stat in other.stats.iteritems():
- if func in self.stats:
- old_func_stat = self.stats[func]
- else:
- old_func_stat = (0, 0, 0, 0, {},)
- self.stats[func] = add_func_stats(old_func_stat, stat)
- return self
-
- def dump_stats(self, filename):
- """Write the profile data to a file we know how to load back."""
- f = file(filename, 'wb')
- try:
- marshal.dump(self.stats, f)
- finally:
- f.close()
-
- # list the tuple indices and directions for sorting,
- # along with some printable description
- sort_arg_dict_default = {
- "calls" : (((1,-1), ), "call count"),
- "cumulative": (((3,-1), ), "cumulative time"),
- "file" : (((4, 1), ), "file name"),
- "line" : (((5, 1), ), "line number"),
- "module" : (((4, 1), ), "file name"),
- "name" : (((6, 1), ), "function name"),
- "nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
- "pcalls" : (((0,-1), ), "call count"),
- "stdname" : (((7, 1), ), "standard name"),
- "time" : (((2,-1), ), "internal time"),
- }
-
- def get_sort_arg_defs(self):
- """Expand all abbreviations that are unique."""
- if not self.sort_arg_dict:
- self.sort_arg_dict = dict = {}
- bad_list = {}
- for word, tup in self.sort_arg_dict_default.iteritems():
- fragment = word
- while fragment:
- if not fragment:
- break
- if fragment in dict:
- bad_list[fragment] = 0
- break
- dict[fragment] = tup
- fragment = fragment[:-1]
- for word in bad_list:
- del dict[word]
- return self.sort_arg_dict
-
- def sort_stats(self, *field):
- if not field:
- self.fcn_list = 0
- return self
- if len(field) == 1 and type(field[0]) == type(1):
- # Be compatible with old profiler
- field = [ {-1: "stdname",
- 0:"calls",
- 1:"time",
- 2: "cumulative" } [ field[0] ] ]
-
- sort_arg_defs = self.get_sort_arg_defs()
- sort_tuple = ()
- self.sort_type = ""
- connector = ""
- for word in field:
- sort_tuple = sort_tuple + sort_arg_defs[word][0]
- self.sort_type += connector + sort_arg_defs[word][1]
- connector = ", "
-
- stats_list = []
- for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
- stats_list.append((cc, nc, tt, ct) + func +
- (func_std_string(func), func))
-
- stats_list.sort(TupleComp(sort_tuple).compare)
-
- self.fcn_list = fcn_list = []
- for tuple in stats_list:
- fcn_list.append(tuple[-1])
- return self
-
- def reverse_order(self):
- if self.fcn_list:
- self.fcn_list.reverse()
- return self
-
- def strip_dirs(self):
- oldstats = self.stats
- self.stats = newstats = {}
- max_name_len = 0
- for func, (cc, nc, tt, ct, callers) in oldstats.iteritems():
- newfunc = func_strip_path(func)
- if len(func_std_string(newfunc)) > max_name_len:
- max_name_len = len(func_std_string(newfunc))
- newcallers = {}
- for func2, caller in callers.iteritems():
- newcallers[func_strip_path(func2)] = caller
-
- if newfunc in newstats:
- newstats[newfunc] = add_func_stats(
- newstats[newfunc],
- (cc, nc, tt, ct, newcallers))
- else:
- newstats[newfunc] = (cc, nc, tt, ct, newcallers)
- old_top = self.top_level
- self.top_level = new_top = {}
- for func in old_top:
- new_top[func_strip_path(func)] = None
-
- self.max_name_len = max_name_len
-
- self.fcn_list = None
- self.all_callees = None
- return self
-
- def calc_callees(self):
- if self.all_callees: return
- self.all_callees = all_callees = {}
- for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
- if not func in all_callees:
- all_callees[func] = {}
- for func2, caller in callers.iteritems():
- if not func2 in all_callees:
- all_callees[func2] = {}
- all_callees[func2][func] = caller
- return
-
- #******************************************************************
- # The following functions support actual printing of reports
- #******************************************************************
-
- # Optional "amount" is either a line count, or a percentage of lines.
-
- def eval_print_amount(self, sel, list, msg):
- new_list = list
- if type(sel) == type(""):
- new_list = []
- for func in list:
- if re.search(sel, func_std_string(func)):
- new_list.append(func)
- else:
- count = len(list)
- if type(sel) == type(1.0) and 0.0 <= sel < 1.0:
- count = int(count * sel + .5)
- new_list = list[:count]
- elif type(sel) == type(1) and 0 <= sel < count:
- count = sel
- new_list = list[:count]
- if len(list) != len(new_list):
- msg = msg + " List reduced from %r to %r due to restriction <%r>\n" % (
- len(list), len(new_list), sel)
-
- return new_list, msg
-
- def get_print_list(self, sel_list):
- width = self.max_name_len
- if self.fcn_list:
- list = self.fcn_list[:]
- msg = " Ordered by: " + self.sort_type + '\n'
- else:
- list = self.stats.keys()
- msg = " Random listing order was used\n"
-
- for selection in sel_list:
- list, msg = self.eval_print_amount(selection, list, msg)
-
- count = len(list)
-
- if not list:
- return 0, list
- print >> self.stream, msg
- if count < len(self.stats):
- width = 0
- for func in list:
- if len(func_std_string(func)) > width:
- width = len(func_std_string(func))
- return width+2, list
-
- def print_stats(self, *amount):
- for filename in self.files:
- print >> self.stream, filename
- if self.files: print >> self.stream
- indent = ' ' * 8
- for func in self.top_level:
- print >> self.stream, indent, func_get_function_name(func)
-
- print >> self.stream, indent, self.total_calls, "function calls",
- if self.total_calls != self.prim_calls:
- print >> self.stream, "(%d primitive calls)" % self.prim_calls,
- print >> self.stream, "in %.3f CPU seconds" % self.total_tt
- print >> self.stream
- width, list = self.get_print_list(amount)
- if list:
- self.print_title()
- for func in list:
- self.print_line(func)
- print >> self.stream
- print >> self.stream
- return self
-
- def print_callees(self, *amount):
- width, list = self.get_print_list(amount)
- if list:
- self.calc_callees()
-
- self.print_call_heading(width, "called...")
- for func in list:
- if func in self.all_callees:
- self.print_call_line(width, func, self.all_callees[func])
- else:
- self.print_call_line(width, func, {})
- print >> self.stream
- print >> self.stream
- return self
-
- def print_callers(self, *amount):
- width, list = self.get_print_list(amount)
- if list:
- self.print_call_heading(width, "was called by...")
- for func in list:
- cc, nc, tt, ct, callers = self.stats[func]
- self.print_call_line(width, func, callers, "<-")
- print >> self.stream
- print >> self.stream
- return self
-
- def print_call_heading(self, name_size, column_title):
- print >> self.stream, "Function ".ljust(name_size) + column_title
- # print sub-header only if we have new-style callers
- subheader = False
- for cc, nc, tt, ct, callers in self.stats.itervalues():
- if callers:
- value = callers.itervalues().next()
- subheader = isinstance(value, tuple)
- break
- if subheader:
- print >> self.stream, " "*name_size + " ncalls tottime cumtime"
-
- def print_call_line(self, name_size, source, call_dict, arrow="->"):
- print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
- if not call_dict:
- print >> self.stream
- return
- clist = call_dict.keys()
- clist.sort()
- indent = ""
- for func in clist:
- name = func_std_string(func)
- value = call_dict[func]
- if isinstance(value, tuple):
- nc, cc, tt, ct = value
- if nc != cc:
- substats = '%d/%d' % (nc, cc)
- else:
- substats = '%d' % (nc,)
- substats = '%s %s %s %s' % (substats.rjust(7+2*len(indent)),
- f8(tt), f8(ct), name)
- left_width = name_size + 1
- else:
- substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
- left_width = name_size + 3
- print >> self.stream, indent*left_width + substats
- indent = " "
-
- def print_title(self):
- print >> self.stream, ' ncalls tottime percall cumtime percall',
- print >> self.stream, 'filename:lineno(function)'
-
- def print_line(self, func): # hack : should print percentages
- cc, nc, tt, ct, callers = self.stats[func]
- c = str(nc)
- if nc != cc:
- c = c + '/' + str(cc)
- print >> self.stream, c.rjust(9),
- print >> self.stream, f8(tt),
- if nc == 0:
- print >> self.stream, ' '*8,
- else:
- print >> self.stream, f8(tt/nc),
- print >> self.stream, f8(ct),
- if cc == 0:
- print >> self.stream, ' '*8,
- else:
- print >> self.stream, f8(ct/cc),
- print >> self.stream, func_std_string(func)
-
-class TupleComp:
- """This class provides a generic function for comparing any two tuples.
- Each instance records a list of tuple-indices (from most significant
- to least significant), and sort direction (ascending or decending) for
- each tuple-index. The compare functions can then be used as the function
- argument to the system sort() function when a list of tuples need to be
- sorted in the instances order."""
-
- def __init__(self, comp_select_list):
- self.comp_select_list = comp_select_list
-
- def compare (self, left, right):
- for index, direction in self.comp_select_list:
- l = left[index]
- r = right[index]
- if l < r:
- return -direction
- if l > r:
- return direction
- return 0
-
-#**************************************************************************
-# func_name is a triple (file:string, line:int, name:string)
-
-def func_strip_path(func_name):
- filename, line, name = func_name
- return os.path.basename(filename), line, name
-
-def func_get_function_name(func):
- return func[2]
-
-def func_std_string(func_name): # match what old profile produced
- if func_name[:2] == ('~', 0):
- # special case for built-in functions
- name = func_name[2]
- if name.startswith('<') and name.endswith('>'):
- return '{%s}' % name[1:-1]
- else:
- return name
- else:
- return "%s:%d(%s)" % func_name
-
-#**************************************************************************
-# The following functions combine statists for pairs functions.
-# The bulk of the processing involves correctly handling "call" lists,
-# such as callers and callees.
-#**************************************************************************
-
-def add_func_stats(target, source):
- """Add together all the stats for two profile entries."""
- cc, nc, tt, ct, callers = source
- t_cc, t_nc, t_tt, t_ct, t_callers = target
- return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct,
- add_callers(t_callers, callers))
-
-def add_callers(target, source):
- """Combine two caller lists in a single list."""
- new_callers = {}
- for func, caller in target.iteritems():
- new_callers[func] = caller
- for func, caller in source.iteritems():
- if func in new_callers:
- new_callers[func] = caller + new_callers[func]
- else:
- new_callers[func] = caller
- return new_callers
-
-def count_calls(callers):
- """Sum the caller statistics to get total number of calls received."""
- nc = 0
- for calls in callers.itervalues():
- nc += calls
- return nc
-
-#**************************************************************************
-# The following functions support printing of reports
-#**************************************************************************
-
-def f8(x):
- return "%8.3f" % x
-
-#**************************************************************************
-# Statistics browser added by ESR, April 2001
-#**************************************************************************
-
-if __name__ == '__main__':
- import cmd
- try:
- import readline
- except ImportError:
- pass
-
- class ProfileBrowser(cmd.Cmd):
- def __init__(self, profile=None):
- cmd.Cmd.__init__(self)
- self.prompt = "% "
- if profile is not None:
- self.stats = Stats(profile)
- self.stream = self.stats.stream
- else:
- self.stats = None
- self.stream = sys.stdout
-
- def generic(self, fn, line):
- args = line.split()
- processed = []
- for term in args:
- try:
- processed.append(int(term))
- continue
- except ValueError:
- pass
- try:
- frac = float(term)
- if frac > 1 or frac < 0:
- print >> self.stream, "Fraction argument must be in [0, 1]"
- continue
- processed.append(frac)
- continue
- except ValueError:
- pass
- processed.append(term)
- if self.stats:
- getattr(self.stats, fn)(*processed)
- else:
- print >> self.stream, "No statistics object is loaded."
- return 0
- def generic_help(self):
- print >> self.stream, "Arguments may be:"
- print >> self.stream, "* An integer maximum number of entries to print."
- print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
- print >> self.stream, " what fraction of selected entries to print."
- print >> self.stream, "* A regular expression; only entries with function names"
- print >> self.stream, " that match it are printed."
-
- def do_add(self, line):
- self.stats.add(line)
- return 0
- def help_add(self):
- print >> self.stream, "Add profile info from given file to current statistics object."
-
- def do_callees(self, line):
- return self.generic('print_callees', line)
- def help_callees(self):
- print >> self.stream, "Print callees statistics from the current stat object."
- self.generic_help()
-
- def do_callers(self, line):
- return self.generic('print_callers', line)
- def help_callers(self):
- print >> self.stream, "Print callers statistics from the current stat object."
- self.generic_help()
-
- def do_EOF(self, line):
- print >> self.stream, ""
- return 1
- def help_EOF(self):
- print >> self.stream, "Leave the profile brower."
-
- def do_quit(self, line):
- return 1
- def help_quit(self):
- print >> self.stream, "Leave the profile brower."
-
- def do_read(self, line):
- if line:
- try:
- self.stats = Stats(line)
- except IOError, args:
- print >> self.stream, args[1]
- return
- self.prompt = line + "% "
- elif len(self.prompt) > 2:
- line = self.prompt[-2:]
- else:
- print >> self.stream, "No statistics object is current -- cannot reload."
- return 0
- def help_read(self):
- print >> self.stream, "Read in profile data from a specified file."
-
- def do_reverse(self, line):
- self.stats.reverse_order()
- return 0
- def help_reverse(self):
- print >> self.stream, "Reverse the sort order of the profiling report."
-
- def do_sort(self, line):
- abbrevs = self.stats.get_sort_arg_defs()
- if line and not filter(lambda x,a=abbrevs: x not in a,line.split()):
- self.stats.sort_stats(*line.split())
- else:
- print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
- for (key, value) in Stats.sort_arg_dict_default.iteritems():
- print >> self.stream, "%s -- %s" % (key, value[1])
- return 0
- def help_sort(self):
- print >> self.stream, "Sort profile data according to specified keys."
- print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
- def complete_sort(self, text, *args):
- return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]
-
- def do_stats(self, line):
- return self.generic('print_stats', line)
- def help_stats(self):
- print >> self.stream, "Print statistics from the current stat object."
- self.generic_help()
-
- def do_strip(self, line):
- self.stats.strip_dirs()
- return 0
- def help_strip(self):
- print >> self.stream, "Strip leading path information from filenames in the report."
-
- def postcmd(self, stop, line):
- if stop:
- return stop
- return None
-
- import sys
- if len(sys.argv) > 1:
- initprofile = sys.argv[1]
- else:
- initprofile = None
- try:
- browser = ProfileBrowser(initprofile)
- print >> browser.stream, "Welcome to the profile statistics browser."
- browser.cmdloop()
- print >> browser.stream, "Goodbye."
- except KeyboardInterrupt:
- pass
-
-# That's all, folks.
diff --git a/sys/lib/python/pty.py b/sys/lib/python/pty.py
deleted file mode 100644
index 9fd470942..000000000
--- a/sys/lib/python/pty.py
+++ /dev/null
@@ -1,174 +0,0 @@
-"""Pseudo terminal utilities."""
-
-# Bugs: No signal handling. Doesn't set slave termios and window size.
-# Only tested on Linux.
-# See: W. Richard Stevens. 1992. Advanced Programming in the
-# UNIX Environment. Chapter 19.
-# Author: Steen Lumholt -- with additions by Guido.
-
-from select import select
-import os
-import tty
-
-__all__ = ["openpty","fork","spawn"]
-
-STDIN_FILENO = 0
-STDOUT_FILENO = 1
-STDERR_FILENO = 2
-
-CHILD = 0
-
-def openpty():
- """openpty() -> (master_fd, slave_fd)
- Open a pty master/slave pair, using os.openpty() if possible."""
-
- try:
- return os.openpty()
- except (AttributeError, OSError):
- pass
- master_fd, slave_name = _open_terminal()
- slave_fd = slave_open(slave_name)
- return master_fd, slave_fd
-
-def master_open():
- """master_open() -> (master_fd, slave_name)
- Open a pty master and return the fd, and the filename of the slave end.
- Deprecated, use openpty() instead."""
-
- try:
- master_fd, slave_fd = os.openpty()
- except (AttributeError, OSError):
- pass
- else:
- slave_name = os.ttyname(slave_fd)
- os.close(slave_fd)
- return master_fd, slave_name
-
- return _open_terminal()
-
-def _open_terminal():
- """Open pty master and return (master_fd, tty_name).
- SGI and generic BSD version, for when openpty() fails."""
- try:
- import sgi
- except ImportError:
- pass
- else:
- try:
- tty_name, master_fd = sgi._getpty(os.O_RDWR, 0666, 0)
- except IOError, msg:
- raise os.error, msg
- return master_fd, tty_name
- for x in 'pqrstuvwxyzPQRST':
- for y in '0123456789abcdef':
- pty_name = '/dev/pty' + x + y
- try:
- fd = os.open(pty_name, os.O_RDWR)
- except os.error:
- continue
- return (fd, '/dev/tty' + x + y)
- raise os.error, 'out of pty devices'
-
-def slave_open(tty_name):
- """slave_open(tty_name) -> slave_fd
- Open the pty slave and acquire the controlling terminal, returning
- opened filedescriptor.
- Deprecated, use openpty() instead."""
-
- result = os.open(tty_name, os.O_RDWR)
- try:
- from fcntl import ioctl, I_PUSH
- except ImportError:
- return result
- try:
- ioctl(result, I_PUSH, "ptem")
- ioctl(result, I_PUSH, "ldterm")
- except IOError:
- pass
- return result
-
-def fork():
- """fork() -> (pid, master_fd)
- Fork and make the child a session leader with a controlling terminal."""
-
- try:
- pid, fd = os.forkpty()
- except (AttributeError, OSError):
- pass
- else:
- if pid == CHILD:
- try:
- os.setsid()
- except OSError:
- # os.forkpty() already set us session leader
- pass
- return pid, fd
-
- master_fd, slave_fd = openpty()
- pid = os.fork()
- if pid == CHILD:
- # Establish a new session.
- os.setsid()
- os.close(master_fd)
-
- # Slave becomes stdin/stdout/stderr of child.
- os.dup2(slave_fd, STDIN_FILENO)
- os.dup2(slave_fd, STDOUT_FILENO)
- os.dup2(slave_fd, STDERR_FILENO)
- if (slave_fd > STDERR_FILENO):
- os.close (slave_fd)
-
- # Explicitly open the tty to make it become a controlling tty.
- tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
- os.close(tmp_fd)
- else:
- os.close(slave_fd)
-
- # Parent and child process.
- return pid, master_fd
-
-def _writen(fd, data):
- """Write all the data to a descriptor."""
- while data != '':
- n = os.write(fd, data)
- data = data[n:]
-
-def _read(fd):
- """Default read function."""
- return os.read(fd, 1024)
-
-def _copy(master_fd, master_read=_read, stdin_read=_read):
- """Parent copy loop.
- Copies
- pty master -> standard output (master_read)
- standard input -> pty master (stdin_read)"""
- while 1:
- rfds, wfds, xfds = select(
- [master_fd, STDIN_FILENO], [], [])
- if master_fd in rfds:
- data = master_read(master_fd)
- os.write(STDOUT_FILENO, data)
- if STDIN_FILENO in rfds:
- data = stdin_read(STDIN_FILENO)
- _writen(master_fd, data)
-
-def spawn(argv, master_read=_read, stdin_read=_read):
- """Create a spawned process."""
- if type(argv) == type(''):
- argv = (argv,)
- pid, master_fd = fork()
- if pid == CHILD:
- os.execlp(argv[0], *argv)
- try:
- mode = tty.tcgetattr(STDIN_FILENO)
- tty.setraw(STDIN_FILENO)
- restore = 1
- except tty.error: # This is the same as termios.error
- restore = 0
- try:
- _copy(master_fd, master_read, stdin_read)
- except (IOError, OSError):
- if restore:
- tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)
-
- os.close(master_fd)
diff --git a/sys/lib/python/py_compile.py b/sys/lib/python/py_compile.py
deleted file mode 100644
index 1cb41f1e0..000000000
--- a/sys/lib/python/py_compile.py
+++ /dev/null
@@ -1,164 +0,0 @@
-"""Routine to "compile" a .py file to a .pyc (or .pyo) file.
-
-This module has intimate knowledge of the format of .pyc files.
-"""
-
-import __builtin__
-import imp
-import marshal
-import os
-import sys
-import traceback
-
-MAGIC = imp.get_magic()
-
-__all__ = ["compile", "main", "PyCompileError"]
-
-
-class PyCompileError(Exception):
- """Exception raised when an error occurs while attempting to
- compile the file.
-
- To raise this exception, use
-
- raise PyCompileError(exc_type,exc_value,file[,msg])
-
- where
-
- exc_type: exception type to be used in error message
- type name can be accesses as class variable
- 'exc_type_name'
-
- exc_value: exception value to be used in error message
- can be accesses as class variable 'exc_value'
-
- file: name of file being compiled to be used in error message
- can be accesses as class variable 'file'
-
- msg: string message to be written as error message
- If no value is given, a default exception message will be given,
- consistent with 'standard' py_compile output.
- message (or default) can be accesses as class variable 'msg'
-
- """
-
- def __init__(self, exc_type, exc_value, file, msg=''):
- exc_type_name = exc_type.__name__
- if exc_type is SyntaxError:
- tbtext = ''.join(traceback.format_exception_only(exc_type, exc_value))
- errmsg = tbtext.replace('File "<string>"', 'File "%s"' % file)
- else:
- errmsg = "Sorry: %s: %s" % (exc_type_name,exc_value)
-
- Exception.__init__(self,msg or errmsg,exc_type_name,exc_value,file)
-
- self.exc_type_name = exc_type_name
- self.exc_value = exc_value
- self.file = file
- self.msg = msg or errmsg
-
- def __str__(self):
- return self.msg
-
-
-# Define an internal helper according to the platform
-if os.name == "mac":
- import MacOS
- def set_creator_type(file):
- MacOS.SetCreatorAndType(file, 'Pyth', 'PYC ')
-else:
- def set_creator_type(file):
- pass
-
-def wr_long(f, x):
- """Internal; write a 32-bit int to a file in little-endian order."""
- f.write(chr( x & 0xff))
- f.write(chr((x >> 8) & 0xff))
- f.write(chr((x >> 16) & 0xff))
- f.write(chr((x >> 24) & 0xff))
-
-def compile(file, cfile=None, dfile=None, doraise=False):
- """Byte-compile one Python source file to Python bytecode.
-
- Arguments:
-
- file: source filename
- cfile: target filename; defaults to source with 'c' or 'o' appended
- ('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
- dfile: purported filename; defaults to source (this is the filename
- that will show up in error messages)
- doraise: flag indicating whether or not an exception should be
- raised when a compile error is found. If an exception
- occurs and this flag is set to False, a string
- indicating the nature of the exception will be printed,
- and the function will return to the caller. If an
- exception occurs and this flag is set to True, a
- PyCompileError exception will be raised.
-
- Note that it isn't necessary to byte-compile Python modules for
- execution efficiency -- Python itself byte-compiles a module when
- it is loaded, and if it can, writes out the bytecode to the
- corresponding .pyc (or .pyo) file.
-
- However, if a Python installation is shared between users, it is a
- good idea to byte-compile all modules upon installation, since
- other users may not be able to write in the source directories,
- and thus they won't be able to write the .pyc/.pyo file, and then
- they would be byte-compiling every module each time it is loaded.
- This can slow down program start-up considerably.
-
- See compileall.py for a script/module that uses this module to
- byte-compile all installed files (or all files in selected
- directories).
-
- """
- f = open(file, 'U')
- try:
- timestamp = long(os.fstat(f.fileno()).st_mtime)
- except AttributeError:
- timestamp = long(os.stat(file).st_mtime)
- codestring = f.read()
- f.close()
- if codestring and codestring[-1] != '\n':
- codestring = codestring + '\n'
- try:
- codeobject = __builtin__.compile(codestring, dfile or file,'exec')
- except Exception,err:
- py_exc = PyCompileError(err.__class__,err.args,dfile or file)
- if doraise:
- raise py_exc
- else:
- sys.stderr.write(py_exc.msg + '\n')
- return
- if cfile is None:
- cfile = file + (__debug__ and 'c' or 'o')
- fc = open(cfile, 'wb')
- fc.write('\0\0\0\0')
- wr_long(fc, timestamp)
- marshal.dump(codeobject, fc)
- fc.flush()
- fc.seek(0, 0)
- fc.write(MAGIC)
- fc.close()
- set_creator_type(cfile)
-
-def main(args=None):
- """Compile several source files.
-
- The files named in 'args' (or on the command line, if 'args' is
- not specified) are compiled and the resulting bytecode is cached
- in the normal manner. This function does not search a directory
- structure to locate source files; it only compiles files named
- explicitly.
-
- """
- if args is None:
- args = sys.argv[1:]
- for filename in args:
- try:
- compile(filename, doraise=True)
- except PyCompileError,err:
- sys.stderr.write(err.msg)
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/pyclbr.py b/sys/lib/python/pyclbr.py
deleted file mode 100644
index 079b38c41..000000000
--- a/sys/lib/python/pyclbr.py
+++ /dev/null
@@ -1,340 +0,0 @@
-"""Parse a Python module and describe its classes and methods.
-
-Parse enough of a Python file to recognize imports and class and
-method definitions, and to find out the superclasses of a class.
-
-The interface consists of a single function:
- readmodule_ex(module [, path])
-where module is the name of a Python module, and path is an optional
-list of directories where the module is to be searched. If present,
-path is prepended to the system search path sys.path. The return
-value is a dictionary. The keys of the dictionary are the names of
-the classes defined in the module (including classes that are defined
-via the from XXX import YYY construct). The values are class
-instances of the class Class defined here. One special key/value pair
-is present for packages: the key '__path__' has a list as its value
-which contains the package search path.
-
-A class is described by the class Class in this module. Instances
-of this class have the following instance variables:
- module -- the module name
- name -- the name of the class
- super -- a list of super classes (Class instances)
- methods -- a dictionary of methods
- file -- the file in which the class was defined
- lineno -- the line in the file on which the class statement occurred
-The dictionary of methods uses the method names as keys and the line
-numbers on which the method was defined as values.
-If the name of a super class is not recognized, the corresponding
-entry in the list of super classes is not a class instance but a
-string giving the name of the super class. Since import statements
-are recognized and imported modules are scanned as well, this
-shouldn't happen often.
-
-A function is described by the class Function in this module.
-Instances of this class have the following instance variables:
- module -- the module name
- name -- the name of the class
- file -- the file in which the class was defined
- lineno -- the line in the file on which the class statement occurred
-"""
-
-import sys
-import imp
-import tokenize # Python tokenizer
-from token import NAME, DEDENT, NEWLINE, OP
-from operator import itemgetter
-
-__all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
-
-_modules = {} # cache of modules we've seen
-
-# each Python class is represented by an instance of this class
-class Class:
- '''Class to represent a Python class.'''
- def __init__(self, module, name, super, file, lineno):
- self.module = module
- self.name = name
- if super is None:
- super = []
- self.super = super
- self.methods = {}
- self.file = file
- self.lineno = lineno
-
- def _addmethod(self, name, lineno):
- self.methods[name] = lineno
-
-class Function:
- '''Class to represent a top-level Python function'''
- def __init__(self, module, name, file, lineno):
- self.module = module
- self.name = name
- self.file = file
- self.lineno = lineno
-
-def readmodule(module, path=[]):
- '''Backwards compatible interface.
-
- Call readmodule_ex() and then only keep Class objects from the
- resulting dictionary.'''
-
- dict = _readmodule(module, path)
- res = {}
- for key, value in dict.items():
- if isinstance(value, Class):
- res[key] = value
- return res
-
-def readmodule_ex(module, path=[]):
- '''Read a module file and return a dictionary of classes.
-
- Search for MODULE in PATH and sys.path, read and parse the
- module and return a dictionary with one entry for each class
- found in the module.
-
- If INPACKAGE is true, it must be the dotted name of the package in
- which we are searching for a submodule, and then PATH must be the
- package search path; otherwise, we are searching for a top-level
- module, and PATH is combined with sys.path.
- '''
- return _readmodule(module, path)
-
-def _readmodule(module, path, inpackage=None):
- '''Do the hard work for readmodule[_ex].'''
- # Compute the full module name (prepending inpackage if set)
- if inpackage:
- fullmodule = "%s.%s" % (inpackage, module)
- else:
- fullmodule = module
-
- # Check in the cache
- if fullmodule in _modules:
- return _modules[fullmodule]
-
- # Initialize the dict for this module's contents
- dict = {}
-
- # Check if it is a built-in module; we don't do much for these
- if module in sys.builtin_module_names and not inpackage:
- _modules[module] = dict
- return dict
-
- # Check for a dotted module name
- i = module.rfind('.')
- if i >= 0:
- package = module[:i]
- submodule = module[i+1:]
- parent = _readmodule(package, path, inpackage)
- if inpackage:
- package = "%s.%s" % (inpackage, package)
- return _readmodule(submodule, parent['__path__'], package)
-
- # Search the path for the module
- f = None
- if inpackage:
- f, file, (suff, mode, type) = imp.find_module(module, path)
- else:
- f, file, (suff, mode, type) = imp.find_module(module, path + sys.path)
- if type == imp.PKG_DIRECTORY:
- dict['__path__'] = [file]
- path = [file] + path
- f, file, (suff, mode, type) = imp.find_module('__init__', [file])
- _modules[fullmodule] = dict
- if type != imp.PY_SOURCE:
- # not Python source, can't do anything with this module
- f.close()
- return dict
-
- stack = [] # stack of (class, indent) pairs
-
- g = tokenize.generate_tokens(f.readline)
- try:
- for tokentype, token, start, end, line in g:
- if tokentype == DEDENT:
- lineno, thisindent = start
- # close nested classes and defs
- while stack and stack[-1][1] >= thisindent:
- del stack[-1]
- elif token == 'def':
- lineno, thisindent = start
- # close previous nested classes and defs
- while stack and stack[-1][1] >= thisindent:
- del stack[-1]
- tokentype, meth_name, start, end, line = g.next()
- if tokentype != NAME:
- continue # Syntax error
- if stack:
- cur_class = stack[-1][0]
- if isinstance(cur_class, Class):
- # it's a method
- cur_class._addmethod(meth_name, lineno)
- # else it's a nested def
- else:
- # it's a function
- dict[meth_name] = Function(fullmodule, meth_name, file, lineno)
- stack.append((None, thisindent)) # Marker for nested fns
- elif token == 'class':
- lineno, thisindent = start
- # close previous nested classes and defs
- while stack and stack[-1][1] >= thisindent:
- del stack[-1]
- tokentype, class_name, start, end, line = g.next()
- if tokentype != NAME:
- continue # Syntax error
- # parse what follows the class name
- tokentype, token, start, end, line = g.next()
- inherit = None
- if token == '(':
- names = [] # List of superclasses
- # there's a list of superclasses
- level = 1
- super = [] # Tokens making up current superclass
- while True:
- tokentype, token, start, end, line = g.next()
- if token in (')', ',') and level == 1:
- n = "".join(super)
- if n in dict:
- # we know this super class
- n = dict[n]
- else:
- c = n.split('.')
- if len(c) > 1:
- # super class is of the form
- # module.class: look in module for
- # class
- m = c[-2]
- c = c[-1]
- if m in _modules:
- d = _modules[m]
- if c in d:
- n = d[c]
- names.append(n)
- super = []
- if token == '(':
- level += 1
- elif token == ')':
- level -= 1
- if level == 0:
- break
- elif token == ',' and level == 1:
- pass
- # only use NAME and OP (== dot) tokens for type name
- elif tokentype in (NAME, OP) and level == 1:
- super.append(token)
- # expressions in the base list are not supported
- inherit = names
- cur_class = Class(fullmodule, class_name, inherit, file, lineno)
- if not stack:
- dict[class_name] = cur_class
- stack.append((cur_class, thisindent))
- elif token == 'import' and start[1] == 0:
- modules = _getnamelist(g)
- for mod, mod2 in modules:
- try:
- # Recursively read the imported module
- if not inpackage:
- _readmodule(mod, path)
- else:
- try:
- _readmodule(mod, path, inpackage)
- except ImportError:
- _readmodule(mod, [])
- except:
- # If we can't find or parse the imported module,
- # too bad -- don't die here.
- pass
- elif token == 'from' and start[1] == 0:
- mod, token = _getname(g)
- if not mod or token != "import":
- continue
- names = _getnamelist(g)
- try:
- # Recursively read the imported module
- d = _readmodule(mod, path, inpackage)
- except:
- # If we can't find or parse the imported module,
- # too bad -- don't die here.
- continue
- # add any classes that were defined in the imported module
- # to our name space if they were mentioned in the list
- for n, n2 in names:
- if n in d:
- dict[n2 or n] = d[n]
- elif n == '*':
- # don't add names that start with _
- for n in d:
- if n[0] != '_':
- dict[n] = d[n]
- except StopIteration:
- pass
-
- f.close()
- return dict
-
-def _getnamelist(g):
- # Helper to get a comma-separated list of dotted names plus 'as'
- # clauses. Return a list of pairs (name, name2) where name2 is
- # the 'as' name, or None if there is no 'as' clause.
- names = []
- while True:
- name, token = _getname(g)
- if not name:
- break
- if token == 'as':
- name2, token = _getname(g)
- else:
- name2 = None
- names.append((name, name2))
- while token != "," and "\n" not in token:
- tokentype, token, start, end, line = g.next()
- if token != ",":
- break
- return names
-
-def _getname(g):
- # Helper to get a dotted name, return a pair (name, token) where
- # name is the dotted name, or None if there was no dotted name,
- # and token is the next input token.
- parts = []
- tokentype, token, start, end, line = g.next()
- if tokentype != NAME and token != '*':
- return (None, token)
- parts.append(token)
- while True:
- tokentype, token, start, end, line = g.next()
- if token != '.':
- break
- tokentype, token, start, end, line = g.next()
- if tokentype != NAME:
- break
- parts.append(token)
- return (".".join(parts), token)
-
-def _main():
- # Main program for testing.
- import os
- mod = sys.argv[1]
- if os.path.exists(mod):
- path = [os.path.dirname(mod)]
- mod = os.path.basename(mod)
- if mod.lower().endswith(".py"):
- mod = mod[:-3]
- else:
- path = []
- dict = readmodule_ex(mod, path)
- objs = dict.values()
- objs.sort(lambda a, b: cmp(getattr(a, 'lineno', 0),
- getattr(b, 'lineno', 0)))
- for obj in objs:
- if isinstance(obj, Class):
- print "class", obj.name, obj.super, obj.lineno
- methods = sorted(obj.methods.iteritems(), key=itemgetter(1))
- for name, lineno in methods:
- if name != "__path__":
- print " def", name, lineno
- elif isinstance(obj, Function):
- print "def", obj.name, obj.lineno
-
-if __name__ == "__main__":
- _main()
diff --git a/sys/lib/python/pydoc.py b/sys/lib/python/pydoc.py
deleted file mode 100755
index 4acf00d71..000000000
--- a/sys/lib/python/pydoc.py
+++ /dev/null
@@ -1,2255 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: Latin-1 -*-
-"""Generate Python documentation in HTML or text for interactive use.
-
-In the Python interpreter, do "from pydoc import help" to provide online
-help. Calling help(thing) on a Python object documents the object.
-
-Or, at the shell command line outside of Python:
-
-Run "pydoc <name>" to show documentation on something. <name> may be
-the name of a function, module, package, or a dotted reference to a
-class or function within a module or module in a package. If the
-argument contains a path segment delimiter (e.g. slash on Unix,
-backslash on Windows) it is treated as the path to a Python source file.
-
-Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
-of all available modules.
-
-Run "pydoc -p <port>" to start an HTTP server on a given port on the
-local machine to generate documentation web pages.
-
-For platforms without a command line, "pydoc -g" starts the HTTP server
-and also pops up a little window for controlling it.
-
-Run "pydoc -w <name>" to write out the HTML documentation for a module
-to a file named "<name>.html".
-
-Module docs for core modules are assumed to be in
-
- http://www.python.org/doc/current/lib/
-
-This can be overridden by setting the PYTHONDOCS environment variable
-to a different URL or to a local directory containing the Library
-Reference Manual pages.
-"""
-
-__author__ = "Ka-Ping Yee <ping@lfw.org>"
-__date__ = "26 February 2001"
-
-__version__ = "$Revision: 54366 $"
-__credits__ = """Guido van Rossum, for an excellent programming language.
-Tommy Burnette, the original creator of manpy.
-Paul Prescod, for all his work on onlinehelp.
-Richard Chamberlain, for the first implementation of textdoc.
-"""
-
-# Known bugs that can't be fixed here:
-# - imp.load_module() cannot be prevented from clobbering existing
-# loaded modules, so calling synopsis() on a binary module file
-# changes the contents of any existing module with the same name.
-# - If the __file__ attribute on a module is a relative path and
-# the current directory is changed with os.chdir(), an incorrect
-# path will be displayed.
-
-import sys, imp, os, re, types, inspect, __builtin__, pkgutil
-from repr import Repr
-from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
-try:
- from collections import deque
-except ImportError:
- # Python 2.3 compatibility
- class deque(list):
- def popleft(self):
- return self.pop(0)
-
-# --------------------------------------------------------- common routines
-
-def pathdirs():
- """Convert sys.path into a list of absolute, existing, unique paths."""
- dirs = []
- normdirs = []
- for dir in sys.path:
- dir = os.path.abspath(dir or '.')
- normdir = os.path.normcase(dir)
- if normdir not in normdirs and os.path.isdir(dir):
- dirs.append(dir)
- normdirs.append(normdir)
- return dirs
-
-def getdoc(object):
- """Get the doc string or comments for an object."""
- result = inspect.getdoc(object) or inspect.getcomments(object)
- return result and re.sub('^ *\n', '', rstrip(result)) or ''
-
-def splitdoc(doc):
- """Split a doc string into a synopsis line (if any) and the rest."""
- lines = split(strip(doc), '\n')
- if len(lines) == 1:
- return lines[0], ''
- elif len(lines) >= 2 and not rstrip(lines[1]):
- return lines[0], join(lines[2:], '\n')
- return '', join(lines, '\n')
-
-def classname(object, modname):
- """Get a class name and qualify it with a module name if necessary."""
- name = object.__name__
- if object.__module__ != modname:
- name = object.__module__ + '.' + name
- return name
-
-def isdata(object):
- """Check if an object is of a type that probably means it's data."""
- return not (inspect.ismodule(object) or inspect.isclass(object) or
- inspect.isroutine(object) or inspect.isframe(object) or
- inspect.istraceback(object) or inspect.iscode(object))
-
-def replace(text, *pairs):
- """Do a series of global replacements on a string."""
- while pairs:
- text = join(split(text, pairs[0]), pairs[1])
- pairs = pairs[2:]
- return text
-
-def cram(text, maxlen):
- """Omit part of a string if needed to make it fit in a maximum length."""
- if len(text) > maxlen:
- pre = max(0, (maxlen-3)//2)
- post = max(0, maxlen-3-pre)
- return text[:pre] + '...' + text[len(text)-post:]
- return text
-
-_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
-def stripid(text):
- """Remove the hexadecimal id from a Python object representation."""
- # The behaviour of %p is implementation-dependent in terms of case.
- if _re_stripid.search(repr(Exception)):
- return _re_stripid.sub(r'\1', text)
- return text
-
-def _is_some_method(obj):
- return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
-
-def allmethods(cl):
- methods = {}
- for key, value in inspect.getmembers(cl, _is_some_method):
- methods[key] = 1
- for base in cl.__bases__:
- methods.update(allmethods(base)) # all your base are belong to us
- for key in methods.keys():
- methods[key] = getattr(cl, key)
- return methods
-
-def _split_list(s, predicate):
- """Split sequence s via predicate, and return pair ([true], [false]).
-
- The return value is a 2-tuple of lists,
- ([x for x in s if predicate(x)],
- [x for x in s if not predicate(x)])
- """
-
- yes = []
- no = []
- for x in s:
- if predicate(x):
- yes.append(x)
- else:
- no.append(x)
- return yes, no
-
-def visiblename(name, all=None):
- """Decide whether to show documentation on a variable."""
- # Certain special names are redundant.
- if name in ('__builtins__', '__doc__', '__file__', '__path__',
- '__module__', '__name__', '__slots__'): return 0
- # Private names are hidden, but special names are displayed.
- if name.startswith('__') and name.endswith('__'): return 1
- if all is not None:
- # only document that which the programmer exported in __all__
- return name in all
- else:
- return not name.startswith('_')
-
-def classify_class_attrs(object):
- """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
- def fixup((name, kind, cls, value)):
- if inspect.isdatadescriptor(value):
- kind = 'data descriptor'
- return name, kind, cls, value
- return map(fixup, inspect.classify_class_attrs(object))
-
-# ----------------------------------------------------- module manipulation
-
-def ispackage(path):
- """Guess whether a path refers to a package directory."""
- if os.path.isdir(path):
- for ext in ('.py', '.pyc', '.pyo'):
- if os.path.isfile(os.path.join(path, '__init__' + ext)):
- return True
- return False
-
-def source_synopsis(file):
- line = file.readline()
- while line[:1] == '#' or not strip(line):
- line = file.readline()
- if not line: break
- line = strip(line)
- if line[:4] == 'r"""': line = line[1:]
- if line[:3] == '"""':
- line = line[3:]
- if line[-1:] == '\\': line = line[:-1]
- while not strip(line):
- line = file.readline()
- if not line: break
- result = strip(split(line, '"""')[0])
- else: result = None
- return result
-
-def synopsis(filename, cache={}):
- """Get the one-line summary out of a module file."""
- mtime = os.stat(filename).st_mtime
- lastupdate, result = cache.get(filename, (0, None))
- if lastupdate < mtime:
- info = inspect.getmoduleinfo(filename)
- try:
- file = open(filename)
- except IOError:
- # module can't be opened, so skip it
- return None
- if info and 'b' in info[2]: # binary modules have to be imported
- try: module = imp.load_module('__temp__', file, filename, info[1:])
- except: return None
- result = (module.__doc__ or '').splitlines()[0]
- del sys.modules['__temp__']
- else: # text modules can be directly examined
- result = source_synopsis(file)
- file.close()
- cache[filename] = (mtime, result)
- return result
-
-class ErrorDuringImport(Exception):
- """Errors that occurred while trying to import something to document it."""
- def __init__(self, filename, (exc, value, tb)):
- self.filename = filename
- self.exc = exc
- self.value = value
- self.tb = tb
-
- def __str__(self):
- exc = self.exc
- if type(exc) is types.ClassType:
- exc = exc.__name__
- return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
-
-def importfile(path):
- """Import a Python source file or compiled file given its path."""
- magic = imp.get_magic()
- file = open(path, 'r')
- if file.read(len(magic)) == magic:
- kind = imp.PY_COMPILED
- else:
- kind = imp.PY_SOURCE
- file.close()
- filename = os.path.basename(path)
- name, ext = os.path.splitext(filename)
- file = open(path, 'r')
- try:
- module = imp.load_module(name, file, path, (ext, 'r', kind))
- except:
- raise ErrorDuringImport(path, sys.exc_info())
- file.close()
- return module
-
-def safeimport(path, forceload=0, cache={}):
- """Import a module; handle errors; return None if the module isn't found.
-
- If the module *is* found but an exception occurs, it's wrapped in an
- ErrorDuringImport exception and reraised. Unlike __import__, if a
- package path is specified, the module at the end of the path is returned,
- not the package at the beginning. If the optional 'forceload' argument
- is 1, we reload the module from disk (unless it's a dynamic extension)."""
- try:
- # If forceload is 1 and the module has been previously loaded from
- # disk, we always have to reload the module. Checking the file's
- # mtime isn't good enough (e.g. the module could contain a class
- # that inherits from another module that has changed).
- if forceload and path in sys.modules:
- if path not in sys.builtin_module_names:
- # Avoid simply calling reload() because it leaves names in
- # the currently loaded module lying around if they're not
- # defined in the new source file. Instead, remove the
- # module from sys.modules and re-import. Also remove any
- # submodules because they won't appear in the newly loaded
- # module's namespace if they're already in sys.modules.
- subs = [m for m in sys.modules if m.startswith(path + '.')]
- for key in [path] + subs:
- # Prevent garbage collection.
- cache[key] = sys.modules[key]
- del sys.modules[key]
- module = __import__(path)
- except:
- # Did the error occur before or after the module was found?
- (exc, value, tb) = info = sys.exc_info()
- if path in sys.modules:
- # An error occurred while executing the imported module.
- raise ErrorDuringImport(sys.modules[path].__file__, info)
- elif exc is SyntaxError:
- # A SyntaxError occurred before we could execute the module.
- raise ErrorDuringImport(value.filename, info)
- elif exc is ImportError and \
- split(lower(str(value)))[:2] == ['no', 'module']:
- # The module was not found.
- return None
- else:
- # Some other error occurred during the importing process.
- raise ErrorDuringImport(path, sys.exc_info())
- for part in split(path, '.')[1:]:
- try: module = getattr(module, part)
- except AttributeError: return None
- return module
-
-# ---------------------------------------------------- formatter base class
-
-class Doc:
- def document(self, object, name=None, *args):
- """Generate documentation for an object."""
- args = (object, name) + args
- # 'try' clause is to attempt to handle the possibility that inspect
- # identifies something in a way that pydoc itself has issues handling;
- # think 'super' and how it is a descriptor (which raises the exception
- # by lacking a __name__ attribute) and an instance.
- if inspect.isgetsetdescriptor(object): return self.docdata(*args)
- if inspect.ismemberdescriptor(object): return self.docdata(*args)
- try:
- if inspect.ismodule(object): return self.docmodule(*args)
- if inspect.isclass(object): return self.docclass(*args)
- if inspect.isroutine(object): return self.docroutine(*args)
- except AttributeError:
- pass
- if isinstance(object, property): return self.docproperty(*args)
- return self.docother(*args)
-
- def fail(self, object, name=None, *args):
- """Raise an exception for unimplemented types."""
- message = "don't know how to document object%s of type %s" % (
- name and ' ' + repr(name), type(object).__name__)
- raise TypeError, message
-
- docmodule = docclass = docroutine = docother = docproperty = docdata = fail
-
- def getdocloc(self, object):
- """Return the location of module docs or None"""
-
- try:
- file = inspect.getabsfile(object)
- except TypeError:
- file = '(built-in)'
-
- docloc = os.environ.get("PYTHONDOCS",
- "http://www.python.org/doc/current/lib")
- basedir = os.path.join(sys.exec_prefix, "lib",
- "python"+sys.version[0:3])
- if (isinstance(object, type(os)) and
- (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
- 'marshal', 'posix', 'signal', 'sys',
- 'thread', 'zipimport') or
- (file.startswith(basedir) and
- not file.startswith(os.path.join(basedir, 'site-packages'))))):
- htmlfile = "module-%s.html" % object.__name__
- if docloc.startswith("http://"):
- docloc = "%s/%s" % (docloc.rstrip("/"), htmlfile)
- else:
- docloc = os.path.join(docloc, htmlfile)
- else:
- docloc = None
- return docloc
-
-# -------------------------------------------- HTML documentation generator
-
-class HTMLRepr(Repr):
- """Class for safely making an HTML representation of a Python object."""
- def __init__(self):
- Repr.__init__(self)
- self.maxlist = self.maxtuple = 20
- self.maxdict = 10
- self.maxstring = self.maxother = 100
-
- def escape(self, text):
- return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
-
- def repr(self, object):
- return Repr.repr(self, object)
-
- def repr1(self, x, level):
- if hasattr(type(x), '__name__'):
- methodname = 'repr_' + join(split(type(x).__name__), '_')
- if hasattr(self, methodname):
- return getattr(self, methodname)(x, level)
- return self.escape(cram(stripid(repr(x)), self.maxother))
-
- def repr_string(self, x, level):
- test = cram(x, self.maxstring)
- testrepr = repr(test)
- if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
- # Backslashes are only literal in the string and are never
- # needed to make any special characters, so show a raw string.
- return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
- return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
- r'<font color="#c040c0">\1</font>',
- self.escape(testrepr))
-
- repr_str = repr_string
-
- def repr_instance(self, x, level):
- try:
- return self.escape(cram(stripid(repr(x)), self.maxstring))
- except:
- return self.escape('<%s instance>' % x.__class__.__name__)
-
- repr_unicode = repr_string
-
-class HTMLDoc(Doc):
- """Formatter class for HTML documentation."""
-
- # ------------------------------------------- HTML formatting utilities
-
- _repr_instance = HTMLRepr()
- repr = _repr_instance.repr
- escape = _repr_instance.escape
-
- def page(self, title, contents):
- """Format an HTML page."""
- return '''
-<!doctype html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
-<html><head><title>Python: %s</title>
-</head><body bgcolor="#f0f0f8">
-%s
-</body></html>''' % (title, contents)
-
- def heading(self, title, fgcol, bgcol, extras=''):
- """Format a page heading."""
- return '''
-<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
-<tr bgcolor="%s">
-<td valign=bottom>&nbsp;<br>
-<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
-><td align=right valign=bottom
-><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
- ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
-
- def section(self, title, fgcol, bgcol, contents, width=6,
- prelude='', marginalia=None, gap='&nbsp;'):
- """Format a section with a heading."""
- if marginalia is None:
- marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
- result = '''<p>
-<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
-<tr bgcolor="%s">
-<td colspan=3 valign=bottom>&nbsp;<br>
-<font color="%s" face="helvetica, arial">%s</font></td></tr>
- ''' % (bgcol, fgcol, title)
- if prelude:
- result = result + '''
-<tr bgcolor="%s"><td rowspan=2>%s</td>
-<td colspan=2>%s</td></tr>
-<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
- else:
- result = result + '''
-<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
-
- return result + '\n<td width="100%%">%s</td></tr></table>' % contents
-
- def bigsection(self, title, *args):
- """Format a section with a big heading."""
- title = '<big><strong>%s</strong></big>' % title
- return self.section(title, *args)
-
- def preformat(self, text):
- """Format literal preformatted text."""
- text = self.escape(expandtabs(text))
- return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
- ' ', '&nbsp;', '\n', '<br>\n')
-
- def multicolumn(self, list, format, cols=4):
- """Format a list of items into a multi-column list."""
- result = ''
- rows = (len(list)+cols-1)/cols
- for col in range(cols):
- result = result + '<td width="%d%%" valign=top>' % (100/cols)
- for i in range(rows*col, rows*col+rows):
- if i < len(list):
- result = result + format(list[i]) + '<br>\n'
- result = result + '</td>'
- return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
-
- def grey(self, text): return '<font color="#909090">%s</font>' % text
-
- def namelink(self, name, *dicts):
- """Make a link for an identifier, given name-to-URL mappings."""
- for dict in dicts:
- if name in dict:
- return '<a href="%s">%s</a>' % (dict[name], name)
- return name
-
- def classlink(self, object, modname):
- """Make a link for a class."""
- name, module = object.__name__, sys.modules.get(object.__module__)
- if hasattr(module, name) and getattr(module, name) is object:
- return '<a href="%s.html#%s">%s</a>' % (
- module.__name__, name, classname(object, modname))
- return classname(object, modname)
-
- def modulelink(self, object):
- """Make a link for a module."""
- return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
-
- def modpkglink(self, (name, path, ispackage, shadowed)):
- """Make a link for a module or package to display in an index."""
- if shadowed:
- return self.grey(name)
- if path:
- url = '%s.%s.html' % (path, name)
- else:
- url = '%s.html' % name
- if ispackage:
- text = '<strong>%s</strong>&nbsp;(package)' % name
- else:
- text = name
- return '<a href="%s">%s</a>' % (url, text)
-
- def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
- """Mark up some plain text, given a context of symbols to look for.
- Each context dictionary maps object names to anchor names."""
- escape = escape or self.escape
- results = []
- here = 0
- pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
- r'RFC[- ]?(\d+)|'
- r'PEP[- ]?(\d+)|'
- r'(self\.)?(\w+))')
- while True:
- match = pattern.search(text, here)
- if not match: break
- start, end = match.span()
- results.append(escape(text[here:start]))
-
- all, scheme, rfc, pep, selfdot, name = match.groups()
- if scheme:
- url = escape(all).replace('"', '&quot;')
- results.append('<a href="%s">%s</a>' % (url, url))
- elif rfc:
- url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
- results.append('<a href="%s">%s</a>' % (url, escape(all)))
- elif pep:
- url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
- results.append('<a href="%s">%s</a>' % (url, escape(all)))
- elif text[end:end+1] == '(':
- results.append(self.namelink(name, methods, funcs, classes))
- elif selfdot:
- results.append('self.<strong>%s</strong>' % name)
- else:
- results.append(self.namelink(name, classes))
- here = end
- results.append(escape(text[here:]))
- return join(results, '')
-
- # ---------------------------------------------- type-specific routines
-
- def formattree(self, tree, modname, parent=None):
- """Produce HTML for a class tree as given by inspect.getclasstree()."""
- result = ''
- for entry in tree:
- if type(entry) is type(()):
- c, bases = entry
- result = result + '<dt><font face="helvetica, arial">'
- result = result + self.classlink(c, modname)
- if bases and bases != (parent,):
- parents = []
- for base in bases:
- parents.append(self.classlink(base, modname))
- result = result + '(' + join(parents, ', ') + ')'
- result = result + '\n</font></dt>'
- elif type(entry) is type([]):
- result = result + '<dd>\n%s</dd>\n' % self.formattree(
- entry, modname, c)
- return '<dl>\n%s</dl>\n' % result
-
- def docmodule(self, object, name=None, mod=None, *ignored):
- """Produce HTML documentation for a module object."""
- name = object.__name__ # ignore the passed-in name
- try:
- all = object.__all__
- except AttributeError:
- all = None
- parts = split(name, '.')
- links = []
- for i in range(len(parts)-1):
- links.append(
- '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
- (join(parts[:i+1], '.'), parts[i]))
- linkedname = join(links + parts[-1:], '.')
- head = '<big><big><strong>%s</strong></big></big>' % linkedname
- try:
- path = inspect.getabsfile(object)
- url = path
- if sys.platform == 'win32':
- import nturl2path
- url = nturl2path.pathname2url(path)
- filelink = '<a href="file:%s">%s</a>' % (url, path)
- except TypeError:
- filelink = '(built-in)'
- info = []
- if hasattr(object, '__version__'):
- version = str(object.__version__)
- if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
- version = strip(version[11:-1])
- info.append('version %s' % self.escape(version))
- if hasattr(object, '__date__'):
- info.append(self.escape(str(object.__date__)))
- if info:
- head = head + ' (%s)' % join(info, ', ')
- docloc = self.getdocloc(object)
- if docloc is not None:
- docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
- else:
- docloc = ''
- result = self.heading(
- head, '#ffffff', '#7799ee',
- '<a href=".">index</a><br>' + filelink + docloc)
-
- modules = inspect.getmembers(object, inspect.ismodule)
-
- classes, cdict = [], {}
- for key, value in inspect.getmembers(object, inspect.isclass):
- # if __all__ exists, believe it. Otherwise use old heuristic.
- if (all is not None or
- (inspect.getmodule(value) or object) is object):
- if visiblename(key, all):
- classes.append((key, value))
- cdict[key] = cdict[value] = '#' + key
- for key, value in classes:
- for base in value.__bases__:
- key, modname = base.__name__, base.__module__
- module = sys.modules.get(modname)
- if modname != name and module and hasattr(module, key):
- if getattr(module, key) is base:
- if not key in cdict:
- cdict[key] = cdict[base] = modname + '.html#' + key
- funcs, fdict = [], {}
- for key, value in inspect.getmembers(object, inspect.isroutine):
- # if __all__ exists, believe it. Otherwise use old heuristic.
- if (all is not None or
- inspect.isbuiltin(value) or inspect.getmodule(value) is object):
- if visiblename(key, all):
- funcs.append((key, value))
- fdict[key] = '#-' + key
- if inspect.isfunction(value): fdict[value] = fdict[key]
- data = []
- for key, value in inspect.getmembers(object, isdata):
- if visiblename(key, all):
- data.append((key, value))
-
- doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
- doc = doc and '<tt>%s</tt>' % doc
- result = result + '<p>%s</p>\n' % doc
-
- if hasattr(object, '__path__'):
- modpkgs = []
- for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
- modpkgs.append((modname, name, ispkg, 0))
- modpkgs.sort()
- contents = self.multicolumn(modpkgs, self.modpkglink)
- result = result + self.bigsection(
- 'Package Contents', '#ffffff', '#aa55cc', contents)
- elif modules:
- contents = self.multicolumn(
- modules, lambda (key, value), s=self: s.modulelink(value))
- result = result + self.bigsection(
- 'Modules', '#fffff', '#aa55cc', contents)
-
- if classes:
- classlist = map(lambda (key, value): value, classes)
- contents = [
- self.formattree(inspect.getclasstree(classlist, 1), name)]
- for key, value in classes:
- contents.append(self.document(value, key, name, fdict, cdict))
- result = result + self.bigsection(
- 'Classes', '#ffffff', '#ee77aa', join(contents))
- if funcs:
- contents = []
- for key, value in funcs:
- contents.append(self.document(value, key, name, fdict, cdict))
- result = result + self.bigsection(
- 'Functions', '#ffffff', '#eeaa77', join(contents))
- if data:
- contents = []
- for key, value in data:
- contents.append(self.document(value, key))
- result = result + self.bigsection(
- 'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
- if hasattr(object, '__author__'):
- contents = self.markup(str(object.__author__), self.preformat)
- result = result + self.bigsection(
- 'Author', '#ffffff', '#7799ee', contents)
- if hasattr(object, '__credits__'):
- contents = self.markup(str(object.__credits__), self.preformat)
- result = result + self.bigsection(
- 'Credits', '#ffffff', '#7799ee', contents)
-
- return result
-
- def docclass(self, object, name=None, mod=None, funcs={}, classes={},
- *ignored):
- """Produce HTML documentation for a class object."""
- realname = object.__name__
- name = name or realname
- bases = object.__bases__
-
- contents = []
- push = contents.append
-
- # Cute little class to pump out a horizontal rule between sections.
- class HorizontalRule:
- def __init__(self):
- self.needone = 0
- def maybe(self):
- if self.needone:
- push('<hr>\n')
- self.needone = 1
- hr = HorizontalRule()
-
- # List the mro, if non-trivial.
- mro = deque(inspect.getmro(object))
- if len(mro) > 2:
- hr.maybe()
- push('<dl><dt>Method resolution order:</dt>\n')
- for base in mro:
- push('<dd>%s</dd>\n' % self.classlink(base,
- object.__module__))
- push('</dl>\n')
-
- def spill(msg, attrs, predicate):
- ok, attrs = _split_list(attrs, predicate)
- if ok:
- hr.maybe()
- push(msg)
- for name, kind, homecls, value in ok:
- push(self.document(getattr(object, name), name, mod,
- funcs, classes, mdict, object))
- push('\n')
- return attrs
-
- def spilldescriptors(msg, attrs, predicate):
- ok, attrs = _split_list(attrs, predicate)
- if ok:
- hr.maybe()
- push(msg)
- for name, kind, homecls, value in ok:
- push(self._docdescriptor(name, value, mod))
- return attrs
-
- def spilldata(msg, attrs, predicate):
- ok, attrs = _split_list(attrs, predicate)
- if ok:
- hr.maybe()
- push(msg)
- for name, kind, homecls, value in ok:
- base = self.docother(getattr(object, name), name, mod)
- if callable(value) or inspect.isdatadescriptor(value):
- doc = getattr(value, "__doc__", None)
- else:
- doc = None
- if doc is None:
- push('<dl><dt>%s</dl>\n' % base)
- else:
- doc = self.markup(getdoc(value), self.preformat,
- funcs, classes, mdict)
- doc = '<dd><tt>%s</tt>' % doc
- push('<dl><dt>%s%s</dl>\n' % (base, doc))
- push('\n')
- return attrs
-
- attrs = filter(lambda (name, kind, cls, value): visiblename(name),
- classify_class_attrs(object))
- mdict = {}
- for key, kind, homecls, value in attrs:
- mdict[key] = anchor = '#' + name + '-' + key
- value = getattr(object, key)
- try:
- # The value may not be hashable (e.g., a data attr with
- # a dict or list value).
- mdict[value] = anchor
- except TypeError:
- pass
-
- while attrs:
- if mro:
- thisclass = mro.popleft()
- else:
- thisclass = attrs[0][2]
- attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
-
- if thisclass is __builtin__.object:
- attrs = inherited
- continue
- elif thisclass is object:
- tag = 'defined here'
- else:
- tag = 'inherited from %s' % self.classlink(thisclass,
- object.__module__)
- tag += ':<br>\n'
-
- # Sort attrs by name.
- try:
- attrs.sort(key=lambda t: t[0])
- except TypeError:
- attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
-
- # Pump out the attrs, segregated by kind.
- attrs = spill('Methods %s' % tag, attrs,
- lambda t: t[1] == 'method')
- attrs = spill('Class methods %s' % tag, attrs,
- lambda t: t[1] == 'class method')
- attrs = spill('Static methods %s' % tag, attrs,
- lambda t: t[1] == 'static method')
- attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
- lambda t: t[1] == 'data descriptor')
- attrs = spilldata('Data and other attributes %s' % tag, attrs,
- lambda t: t[1] == 'data')
- assert attrs == []
- attrs = inherited
-
- contents = ''.join(contents)
-
- if name == realname:
- title = '<a name="%s">class <strong>%s</strong></a>' % (
- name, realname)
- else:
- title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
- name, name, realname)
- if bases:
- parents = []
- for base in bases:
- parents.append(self.classlink(base, object.__module__))
- title = title + '(%s)' % join(parents, ', ')
- doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
- doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
-
- return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
-
- def formatvalue(self, object):
- """Format an argument default value as text."""
- return self.grey('=' + self.repr(object))
-
- def docroutine(self, object, name=None, mod=None,
- funcs={}, classes={}, methods={}, cl=None):
- """Produce HTML documentation for a function or method object."""
- realname = object.__name__
- name = name or realname
- anchor = (cl and cl.__name__ or '') + '-' + name
- note = ''
- skipdocs = 0
- if inspect.ismethod(object):
- imclass = object.im_class
- if cl:
- if imclass is not cl:
- note = ' from ' + self.classlink(imclass, mod)
- else:
- if object.im_self is not None:
- note = ' method of %s instance' % self.classlink(
- object.im_self.__class__, mod)
- else:
- note = ' unbound %s method' % self.classlink(imclass,mod)
- object = object.im_func
-
- if name == realname:
- title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
- else:
- if (cl and realname in cl.__dict__ and
- cl.__dict__[realname] is object):
- reallink = '<a href="#%s">%s</a>' % (
- cl.__name__ + '-' + realname, realname)
- skipdocs = 1
- else:
- reallink = realname
- title = '<a name="%s"><strong>%s</strong></a> = %s' % (
- anchor, name, reallink)
- if inspect.isfunction(object):
- args, varargs, varkw, defaults = inspect.getargspec(object)
- argspec = inspect.formatargspec(
- args, varargs, varkw, defaults, formatvalue=self.formatvalue)
- if realname == '<lambda>':
- title = '<strong>%s</strong> <em>lambda</em> ' % name
- argspec = argspec[1:-1] # remove parentheses
- else:
- argspec = '(...)'
-
- decl = title + argspec + (note and self.grey(
- '<font face="helvetica, arial">%s</font>' % note))
-
- if skipdocs:
- return '<dl><dt>%s</dt></dl>\n' % decl
- else:
- doc = self.markup(
- getdoc(object), self.preformat, funcs, classes, methods)
- doc = doc and '<dd><tt>%s</tt></dd>' % doc
- return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
-
- def _docdescriptor(self, name, value, mod):
- results = []
- push = results.append
-
- if name:
- push('<dl><dt><strong>%s</strong></dt>\n' % name)
- if value.__doc__ is not None:
- doc = self.markup(getdoc(value), self.preformat)
- push('<dd><tt>%s</tt></dd>\n' % doc)
- push('</dl>\n')
-
- return ''.join(results)
-
- def docproperty(self, object, name=None, mod=None, cl=None):
- """Produce html documentation for a property."""
- return self._docdescriptor(name, object, mod)
-
- def docother(self, object, name=None, mod=None, *ignored):
- """Produce HTML documentation for a data object."""
- lhs = name and '<strong>%s</strong> = ' % name or ''
- return lhs + self.repr(object)
-
- def docdata(self, object, name=None, mod=None, cl=None):
- """Produce html documentation for a data descriptor."""
- return self._docdescriptor(name, object, mod)
-
- def index(self, dir, shadowed=None):
- """Generate an HTML index for a directory of modules."""
- modpkgs = []
- if shadowed is None: shadowed = {}
- for importer, name, ispkg in pkgutil.iter_modules([dir]):
- modpkgs.append((name, '', ispkg, name in shadowed))
- shadowed[name] = 1
-
- modpkgs.sort()
- contents = self.multicolumn(modpkgs, self.modpkglink)
- return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
-
-# -------------------------------------------- text documentation generator
-
-class TextRepr(Repr):
- """Class for safely making a text representation of a Python object."""
- def __init__(self):
- Repr.__init__(self)
- self.maxlist = self.maxtuple = 20
- self.maxdict = 10
- self.maxstring = self.maxother = 100
-
- def repr1(self, x, level):
- if hasattr(type(x), '__name__'):
- methodname = 'repr_' + join(split(type(x).__name__), '_')
- if hasattr(self, methodname):
- return getattr(self, methodname)(x, level)
- return cram(stripid(repr(x)), self.maxother)
-
- def repr_string(self, x, level):
- test = cram(x, self.maxstring)
- testrepr = repr(test)
- if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
- # Backslashes are only literal in the string and are never
- # needed to make any special characters, so show a raw string.
- return 'r' + testrepr[0] + test + testrepr[0]
- return testrepr
-
- repr_str = repr_string
-
- def repr_instance(self, x, level):
- try:
- return cram(stripid(repr(x)), self.maxstring)
- except:
- return '<%s instance>' % x.__class__.__name__
-
-class TextDoc(Doc):
- """Formatter class for text documentation."""
-
- # ------------------------------------------- text formatting utilities
-
- _repr_instance = TextRepr()
- repr = _repr_instance.repr
-
- def bold(self, text):
- """Format a string in bold by overstriking."""
- return join(map(lambda ch: ch + '\b' + ch, text), '')
-
- def indent(self, text, prefix=' '):
- """Indent text by prepending a given prefix to each line."""
- if not text: return ''
- lines = split(text, '\n')
- lines = map(lambda line, prefix=prefix: prefix + line, lines)
- if lines: lines[-1] = rstrip(lines[-1])
- return join(lines, '\n')
-
- def section(self, title, contents):
- """Format a section with a given heading."""
- return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
-
- # ---------------------------------------------- type-specific routines
-
- def formattree(self, tree, modname, parent=None, prefix=''):
- """Render in text a class tree as returned by inspect.getclasstree()."""
- result = ''
- for entry in tree:
- if type(entry) is type(()):
- c, bases = entry
- result = result + prefix + classname(c, modname)
- if bases and bases != (parent,):
- parents = map(lambda c, m=modname: classname(c, m), bases)
- result = result + '(%s)' % join(parents, ', ')
- result = result + '\n'
- elif type(entry) is type([]):
- result = result + self.formattree(
- entry, modname, c, prefix + ' ')
- return result
-
- def docmodule(self, object, name=None, mod=None):
- """Produce text documentation for a given module object."""
- name = object.__name__ # ignore the passed-in name
- synop, desc = splitdoc(getdoc(object))
- result = self.section('NAME', name + (synop and ' - ' + synop))
-
- try:
- all = object.__all__
- except AttributeError:
- all = None
-
- try:
- file = inspect.getabsfile(object)
- except TypeError:
- file = '(built-in)'
- result = result + self.section('FILE', file)
-
- docloc = self.getdocloc(object)
- if docloc is not None:
- result = result + self.section('MODULE DOCS', docloc)
-
- if desc:
- result = result + self.section('DESCRIPTION', desc)
-
- classes = []
- for key, value in inspect.getmembers(object, inspect.isclass):
- # if __all__ exists, believe it. Otherwise use old heuristic.
- if (all is not None
- or (inspect.getmodule(value) or object) is object):
- if visiblename(key, all):
- classes.append((key, value))
- funcs = []
- for key, value in inspect.getmembers(object, inspect.isroutine):
- # if __all__ exists, believe it. Otherwise use old heuristic.
- if (all is not None or
- inspect.isbuiltin(value) or inspect.getmodule(value) is object):
- if visiblename(key, all):
- funcs.append((key, value))
- data = []
- for key, value in inspect.getmembers(object, isdata):
- if visiblename(key, all):
- data.append((key, value))
-
- if hasattr(object, '__path__'):
- modpkgs = []
- for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
- if ispkg:
- modpkgs.append(modname + ' (package)')
- else:
- modpkgs.append(modname)
-
- modpkgs.sort()
- result = result + self.section(
- 'PACKAGE CONTENTS', join(modpkgs, '\n'))
-
- if classes:
- classlist = map(lambda (key, value): value, classes)
- contents = [self.formattree(
- inspect.getclasstree(classlist, 1), name)]
- for key, value in classes:
- contents.append(self.document(value, key, name))
- result = result + self.section('CLASSES', join(contents, '\n'))
-
- if funcs:
- contents = []
- for key, value in funcs:
- contents.append(self.document(value, key, name))
- result = result + self.section('FUNCTIONS', join(contents, '\n'))
-
- if data:
- contents = []
- for key, value in data:
- contents.append(self.docother(value, key, name, maxlen=70))
- result = result + self.section('DATA', join(contents, '\n'))
-
- if hasattr(object, '__version__'):
- version = str(object.__version__)
- if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
- version = strip(version[11:-1])
- result = result + self.section('VERSION', version)
- if hasattr(object, '__date__'):
- result = result + self.section('DATE', str(object.__date__))
- if hasattr(object, '__author__'):
- result = result + self.section('AUTHOR', str(object.__author__))
- if hasattr(object, '__credits__'):
- result = result + self.section('CREDITS', str(object.__credits__))
- return result
-
- def docclass(self, object, name=None, mod=None):
- """Produce text documentation for a given class object."""
- realname = object.__name__
- name = name or realname
- bases = object.__bases__
-
- def makename(c, m=object.__module__):
- return classname(c, m)
-
- if name == realname:
- title = 'class ' + self.bold(realname)
- else:
- title = self.bold(name) + ' = class ' + realname
- if bases:
- parents = map(makename, bases)
- title = title + '(%s)' % join(parents, ', ')
-
- doc = getdoc(object)
- contents = doc and [doc + '\n'] or []
- push = contents.append
-
- # List the mro, if non-trivial.
- mro = deque(inspect.getmro(object))
- if len(mro) > 2:
- push("Method resolution order:")
- for base in mro:
- push(' ' + makename(base))
- push('')
-
- # Cute little class to pump out a horizontal rule between sections.
- class HorizontalRule:
- def __init__(self):
- self.needone = 0
- def maybe(self):
- if self.needone:
- push('-' * 70)
- self.needone = 1
- hr = HorizontalRule()
-
- def spill(msg, attrs, predicate):
- ok, attrs = _split_list(attrs, predicate)
- if ok:
- hr.maybe()
- push(msg)
- for name, kind, homecls, value in ok:
- push(self.document(getattr(object, name),
- name, mod, object))
- return attrs
-
- def spilldescriptors(msg, attrs, predicate):
- ok, attrs = _split_list(attrs, predicate)
- if ok:
- hr.maybe()
- push(msg)
- for name, kind, homecls, value in ok:
- push(self._docdescriptor(name, value, mod))
- return attrs
-
- def spilldata(msg, attrs, predicate):
- ok, attrs = _split_list(attrs, predicate)
- if ok:
- hr.maybe()
- push(msg)
- for name, kind, homecls, value in ok:
- if callable(value) or inspect.isdatadescriptor(value):
- doc = getdoc(value)
- else:
- doc = None
- push(self.docother(getattr(object, name),
- name, mod, maxlen=70, doc=doc) + '\n')
- return attrs
-
- attrs = filter(lambda (name, kind, cls, value): visiblename(name),
- classify_class_attrs(object))
- while attrs:
- if mro:
- thisclass = mro.popleft()
- else:
- thisclass = attrs[0][2]
- attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
-
- if thisclass is __builtin__.object:
- attrs = inherited
- continue
- elif thisclass is object:
- tag = "defined here"
- else:
- tag = "inherited from %s" % classname(thisclass,
- object.__module__)
- filter(lambda t: not t[0].startswith('_'), attrs)
-
- # Sort attrs by name.
- attrs.sort()
-
- # Pump out the attrs, segregated by kind.
- attrs = spill("Methods %s:\n" % tag, attrs,
- lambda t: t[1] == 'method')
- attrs = spill("Class methods %s:\n" % tag, attrs,
- lambda t: t[1] == 'class method')
- attrs = spill("Static methods %s:\n" % tag, attrs,
- lambda t: t[1] == 'static method')
- attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
- lambda t: t[1] == 'data descriptor')
- attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
- lambda t: t[1] == 'data')
- assert attrs == []
- attrs = inherited
-
- contents = '\n'.join(contents)
- if not contents:
- return title + '\n'
- return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
-
- def formatvalue(self, object):
- """Format an argument default value as text."""
- return '=' + self.repr(object)
-
- def docroutine(self, object, name=None, mod=None, cl=None):
- """Produce text documentation for a function or method object."""
- realname = object.__name__
- name = name or realname
- note = ''
- skipdocs = 0
- if inspect.ismethod(object):
- imclass = object.im_class
- if cl:
- if imclass is not cl:
- note = ' from ' + classname(imclass, mod)
- else:
- if object.im_self is not None:
- note = ' method of %s instance' % classname(
- object.im_self.__class__, mod)
- else:
- note = ' unbound %s method' % classname(imclass,mod)
- object = object.im_func
-
- if name == realname:
- title = self.bold(realname)
- else:
- if (cl and realname in cl.__dict__ and
- cl.__dict__[realname] is object):
- skipdocs = 1
- title = self.bold(name) + ' = ' + realname
- if inspect.isfunction(object):
- args, varargs, varkw, defaults = inspect.getargspec(object)
- argspec = inspect.formatargspec(
- args, varargs, varkw, defaults, formatvalue=self.formatvalue)
- if realname == '<lambda>':
- title = self.bold(name) + ' lambda '
- argspec = argspec[1:-1] # remove parentheses
- else:
- argspec = '(...)'
- decl = title + argspec + note
-
- if skipdocs:
- return decl + '\n'
- else:
- doc = getdoc(object) or ''
- return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
-
- def _docdescriptor(self, name, value, mod):
- results = []
- push = results.append
-
- if name:
- push(self.bold(name))
- push('\n')
- doc = getdoc(value) or ''
- if doc:
- push(self.indent(doc))
- push('\n')
- return ''.join(results)
-
- def docproperty(self, object, name=None, mod=None, cl=None):
- """Produce text documentation for a property."""
- return self._docdescriptor(name, object, mod)
-
- def docdata(self, object, name=None, mod=None, cl=None):
- """Produce text documentation for a data descriptor."""
- return self._docdescriptor(name, object, mod)
-
- def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
- """Produce text documentation for a data object."""
- repr = self.repr(object)
- if maxlen:
- line = (name and name + ' = ' or '') + repr
- chop = maxlen - len(line)
- if chop < 0: repr = repr[:chop] + '...'
- line = (name and self.bold(name) + ' = ' or '') + repr
- if doc is not None:
- line += '\n' + self.indent(str(doc))
- return line
-
-# --------------------------------------------------------- user interfaces
-
-def pager(text):
- """The first time this is called, determine what kind of pager to use."""
- global pager
- pager = getpager()
- pager(text)
-
-def getpager():
- """Decide what method to use for paging through text."""
- if type(sys.stdout) is not types.FileType:
- return plainpager
- if not sys.stdin.isatty() or not sys.stdout.isatty():
- return plainpager
- if 'PAGER' in os.environ:
- if sys.platform == 'win32': # pipes completely broken in Windows
- return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
- elif os.environ.get('TERM') in ('dumb', 'emacs'):
- return lambda text: pipepager(plain(text), os.environ['PAGER'])
- else:
- return lambda text: pipepager(text, os.environ['PAGER'])
- if os.environ.get('TERM') in ('dumb', 'emacs'):
- return plainpager
- if sys.platform == 'win32' or sys.platform.startswith('os2'):
- return lambda text: tempfilepager(plain(text), 'more <')
- if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
- return lambda text: pipepager(text, 'less')
-
- import tempfile
- (fd, filename) = tempfile.mkstemp()
- os.close(fd)
- try:
- if hasattr(os, 'system') and os.system('more %s' % filename) == 0:
- return lambda text: pipepager(text, 'more')
- else:
- return ttypager
- finally:
- os.unlink(filename)
-
-def plain(text):
- """Remove boldface formatting from text."""
- return re.sub('.\b', '', text)
-
-def pipepager(text, cmd):
- """Page through text by feeding it to another program."""
- pipe = os.popen(cmd, 'w')
- try:
- pipe.write(text)
- pipe.close()
- except IOError:
- pass # Ignore broken pipes caused by quitting the pager program.
-
-def tempfilepager(text, cmd):
- """Page through text by invoking a program on a temporary file."""
- import tempfile
- filename = tempfile.mktemp()
- file = open(filename, 'w')
- file.write(text)
- file.close()
- try:
- os.system(cmd + ' ' + filename)
- finally:
- os.unlink(filename)
-
-def ttypager(text):
- """Page through text on a text terminal."""
- lines = split(plain(text), '\n')
- try:
- import tty
- fd = sys.stdin.fileno()
- old = tty.tcgetattr(fd)
- tty.setcbreak(fd)
- getchar = lambda: sys.stdin.read(1)
- except (ImportError, AttributeError):
- tty = None
- getchar = lambda: sys.stdin.readline()[:-1][:1]
-
- try:
- r = inc = os.environ.get('LINES', 25) - 1
- sys.stdout.write(join(lines[:inc], '\n') + '\n')
- while lines[r:]:
- sys.stdout.write('-- more --')
- sys.stdout.flush()
- c = getchar()
-
- if c in ('q', 'Q'):
- sys.stdout.write('\r \r')
- break
- elif c in ('\r', '\n'):
- sys.stdout.write('\r \r' + lines[r] + '\n')
- r = r + 1
- continue
- if c in ('b', 'B', '\x1b'):
- r = r - inc - inc
- if r < 0: r = 0
- sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
- r = r + inc
-
- finally:
- if tty:
- tty.tcsetattr(fd, tty.TCSAFLUSH, old)
-
-def plainpager(text):
- """Simply print unformatted text. This is the ultimate fallback."""
- sys.stdout.write(plain(text))
-
-def describe(thing):
- """Produce a short description of the given thing."""
- if inspect.ismodule(thing):
- if thing.__name__ in sys.builtin_module_names:
- return 'built-in module ' + thing.__name__
- if hasattr(thing, '__path__'):
- return 'package ' + thing.__name__
- else:
- return 'module ' + thing.__name__
- if inspect.isbuiltin(thing):
- return 'built-in function ' + thing.__name__
- if inspect.isgetsetdescriptor(thing):
- return 'getset descriptor %s.%s.%s' % (
- thing.__objclass__.__module__, thing.__objclass__.__name__,
- thing.__name__)
- if inspect.ismemberdescriptor(thing):
- return 'member descriptor %s.%s.%s' % (
- thing.__objclass__.__module__, thing.__objclass__.__name__,
- thing.__name__)
- if inspect.isclass(thing):
- return 'class ' + thing.__name__
- if inspect.isfunction(thing):
- return 'function ' + thing.__name__
- if inspect.ismethod(thing):
- return 'method ' + thing.__name__
- if type(thing) is types.InstanceType:
- return 'instance of ' + thing.__class__.__name__
- return type(thing).__name__
-
-def locate(path, forceload=0):
- """Locate an object by name or dotted path, importing as necessary."""
- parts = [part for part in split(path, '.') if part]
- module, n = None, 0
- while n < len(parts):
- nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
- if nextmodule: module, n = nextmodule, n + 1
- else: break
- if module:
- object = module
- for part in parts[n:]:
- try: object = getattr(object, part)
- except AttributeError: return None
- return object
- else:
- if hasattr(__builtin__, path):
- return getattr(__builtin__, path)
-
-# --------------------------------------- interactive interpreter interface
-
-text = TextDoc()
-html = HTMLDoc()
-
-def resolve(thing, forceload=0):
- """Given an object or a path to an object, get the object and its name."""
- if isinstance(thing, str):
- object = locate(thing, forceload)
- if not object:
- raise ImportError, 'no Python documentation found for %r' % thing
- return object, thing
- else:
- return thing, getattr(thing, '__name__', None)
-
-def doc(thing, title='Python Library Documentation: %s', forceload=0):
- """Display text documentation, given an object or a path to an object."""
- try:
- object, name = resolve(thing, forceload)
- desc = describe(object)
- module = inspect.getmodule(object)
- if name and '.' in name:
- desc += ' in ' + name[:name.rfind('.')]
- elif module and module is not object:
- desc += ' in module ' + module.__name__
- if not (inspect.ismodule(object) or
- inspect.isclass(object) or
- inspect.isroutine(object) or
- inspect.isgetsetdescriptor(object) or
- inspect.ismemberdescriptor(object) or
- isinstance(object, property)):
- # If the passed object is a piece of data or an instance,
- # document its available methods instead of its value.
- object = type(object)
- desc += ' object'
- pager(title % desc + '\n\n' + text.document(object, name))
- except (ImportError, ErrorDuringImport), value:
- print value
-
-def writedoc(thing, forceload=0):
- """Write HTML documentation to a file in the current directory."""
- try:
- object, name = resolve(thing, forceload)
- page = html.page(describe(object), html.document(object, name))
- file = open(name + '.html', 'w')
- file.write(page)
- file.close()
- print 'wrote', name + '.html'
- except (ImportError, ErrorDuringImport), value:
- print value
-
-def writedocs(dir, pkgpath='', done=None):
- """Write out HTML documentation for all modules in a directory tree."""
- if done is None: done = {}
- for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
- writedoc(modname)
- return
-
-class Helper:
- keywords = {
- 'and': 'BOOLEAN',
- 'as': 'with',
- 'assert': ('ref/assert', ''),
- 'break': ('ref/break', 'while for'),
- 'class': ('ref/class', 'CLASSES SPECIALMETHODS'),
- 'continue': ('ref/continue', 'while for'),
- 'def': ('ref/function', ''),
- 'del': ('ref/del', 'BASICMETHODS'),
- 'elif': 'if',
- 'else': ('ref/if', 'while for'),
- 'except': 'try',
- 'exec': ('ref/exec', ''),
- 'finally': 'try',
- 'for': ('ref/for', 'break continue while'),
- 'from': 'import',
- 'global': ('ref/global', 'NAMESPACES'),
- 'if': ('ref/if', 'TRUTHVALUE'),
- 'import': ('ref/import', 'MODULES'),
- 'in': ('ref/comparisons', 'SEQUENCEMETHODS2'),
- 'is': 'COMPARISON',
- 'lambda': ('ref/lambdas', 'FUNCTIONS'),
- 'not': 'BOOLEAN',
- 'or': 'BOOLEAN',
- 'pass': ('ref/pass', ''),
- 'print': ('ref/print', ''),
- 'raise': ('ref/raise', 'EXCEPTIONS'),
- 'return': ('ref/return', 'FUNCTIONS'),
- 'try': ('ref/try', 'EXCEPTIONS'),
- 'while': ('ref/while', 'break continue if TRUTHVALUE'),
- 'with': ('ref/with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
- 'yield': ('ref/yield', ''),
- }
-
- topics = {
- 'TYPES': ('ref/types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS FUNCTIONS CLASSES MODULES FILES inspect'),
- 'STRINGS': ('ref/strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING TYPES'),
- 'STRINGMETHODS': ('lib/string-methods', 'STRINGS FORMATTING'),
- 'FORMATTING': ('lib/typesseq-strings', 'OPERATORS'),
- 'UNICODE': ('ref/strings', 'encodings unicode SEQUENCES STRINGMETHODS FORMATTING TYPES'),
- 'NUMBERS': ('ref/numbers', 'INTEGER FLOAT COMPLEX TYPES'),
- 'INTEGER': ('ref/integers', 'int range'),
- 'FLOAT': ('ref/floating', 'float math'),
- 'COMPLEX': ('ref/imaginary', 'complex cmath'),
- 'SEQUENCES': ('lib/typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
- 'MAPPINGS': 'DICTIONARIES',
- 'FUNCTIONS': ('lib/typesfunctions', 'def TYPES'),
- 'METHODS': ('lib/typesmethods', 'class def CLASSES TYPES'),
- 'CODEOBJECTS': ('lib/bltin-code-objects', 'compile FUNCTIONS TYPES'),
- 'TYPEOBJECTS': ('lib/bltin-type-objects', 'types TYPES'),
- 'FRAMEOBJECTS': 'TYPES',
- 'TRACEBACKS': 'TYPES',
- 'NONE': ('lib/bltin-null-object', ''),
- 'ELLIPSIS': ('lib/bltin-ellipsis-object', 'SLICINGS'),
- 'FILES': ('lib/bltin-file-objects', ''),
- 'SPECIALATTRIBUTES': ('lib/specialattrs', ''),
- 'CLASSES': ('ref/types', 'class SPECIALMETHODS PRIVATENAMES'),
- 'MODULES': ('lib/typesmodules', 'import'),
- 'PACKAGES': 'import',
- 'EXPRESSIONS': ('ref/summary', 'lambda or and not in is BOOLEAN COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES LISTS DICTIONARIES BACKQUOTES'),
- 'OPERATORS': 'EXPRESSIONS',
- 'PRECEDENCE': 'EXPRESSIONS',
- 'OBJECTS': ('ref/objects', 'TYPES'),
- 'SPECIALMETHODS': ('ref/specialnames', 'BASICMETHODS ATTRIBUTEMETHODS CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
- 'BASICMETHODS': ('ref/customization', 'cmp hash repr str SPECIALMETHODS'),
- 'ATTRIBUTEMETHODS': ('ref/attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
- 'CALLABLEMETHODS': ('ref/callable-types', 'CALLS SPECIALMETHODS'),
- 'SEQUENCEMETHODS1': ('ref/sequence-types', 'SEQUENCES SEQUENCEMETHODS2 SPECIALMETHODS'),
- 'SEQUENCEMETHODS2': ('ref/sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 SPECIALMETHODS'),
- 'MAPPINGMETHODS': ('ref/sequence-types', 'MAPPINGS SPECIALMETHODS'),
- 'NUMBERMETHODS': ('ref/numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT SPECIALMETHODS'),
- 'EXECUTION': ('ref/execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
- 'NAMESPACES': ('ref/naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
- 'DYNAMICFEATURES': ('ref/dynamic-features', ''),
- 'SCOPING': 'NAMESPACES',
- 'FRAMES': 'NAMESPACES',
- 'EXCEPTIONS': ('ref/exceptions', 'try except finally raise'),
- 'COERCIONS': ('ref/coercion-rules','CONVERSIONS'),
- 'CONVERSIONS': ('ref/conversions', 'COERCIONS'),
- 'IDENTIFIERS': ('ref/identifiers', 'keywords SPECIALIDENTIFIERS'),
- 'SPECIALIDENTIFIERS': ('ref/id-classes', ''),
- 'PRIVATENAMES': ('ref/atom-identifiers', ''),
- 'LITERALS': ('ref/atom-literals', 'STRINGS BACKQUOTES NUMBERS TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
- 'TUPLES': 'SEQUENCES',
- 'TUPLELITERALS': ('ref/exprlists', 'TUPLES LITERALS'),
- 'LISTS': ('lib/typesseq-mutable', 'LISTLITERALS'),
- 'LISTLITERALS': ('ref/lists', 'LISTS LITERALS'),
- 'DICTIONARIES': ('lib/typesmapping', 'DICTIONARYLITERALS'),
- 'DICTIONARYLITERALS': ('ref/dict', 'DICTIONARIES LITERALS'),
- 'BACKQUOTES': ('ref/string-conversions', 'repr str STRINGS LITERALS'),
- 'ATTRIBUTES': ('ref/attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
- 'SUBSCRIPTS': ('ref/subscriptions', 'SEQUENCEMETHODS1'),
- 'SLICINGS': ('ref/slicings', 'SEQUENCEMETHODS2'),
- 'CALLS': ('ref/calls', 'EXPRESSIONS'),
- 'POWER': ('ref/power', 'EXPRESSIONS'),
- 'UNARY': ('ref/unary', 'EXPRESSIONS'),
- 'BINARY': ('ref/binary', 'EXPRESSIONS'),
- 'SHIFTING': ('ref/shifting', 'EXPRESSIONS'),
- 'BITWISE': ('ref/bitwise', 'EXPRESSIONS'),
- 'COMPARISON': ('ref/comparisons', 'EXPRESSIONS BASICMETHODS'),
- 'BOOLEAN': ('ref/Booleans', 'EXPRESSIONS TRUTHVALUE'),
- 'ASSERTION': 'assert',
- 'ASSIGNMENT': ('ref/assignment', 'AUGMENTEDASSIGNMENT'),
- 'AUGMENTEDASSIGNMENT': ('ref/augassign', 'NUMBERMETHODS'),
- 'DELETION': 'del',
- 'PRINTING': 'print',
- 'RETURNING': 'return',
- 'IMPORTING': 'import',
- 'CONDITIONAL': 'if',
- 'LOOPING': ('ref/compound', 'for while break continue'),
- 'TRUTHVALUE': ('lib/truth', 'if while and or not BASICMETHODS'),
- 'DEBUGGING': ('lib/module-pdb', 'pdb'),
- 'CONTEXTMANAGERS': ('ref/context-managers', 'with'),
- }
-
- def __init__(self, input, output):
- self.input = input
- self.output = output
- self.docdir = None
- execdir = os.path.dirname(sys.executable)
- homedir = os.environ.get('PYTHONHOME')
- for dir in [os.environ.get('PYTHONDOCS'),
- homedir and os.path.join(homedir, 'doc'),
- os.path.join(execdir, 'doc'),
- '/usr/doc/python-docs-' + split(sys.version)[0],
- '/usr/doc/python-' + split(sys.version)[0],
- '/usr/doc/python-docs-' + sys.version[:3],
- '/usr/doc/python-' + sys.version[:3],
- os.path.join(sys.prefix, 'Resources/English.lproj/Documentation')]:
- if dir and os.path.isdir(os.path.join(dir, 'lib')):
- self.docdir = dir
-
- def __repr__(self):
- if inspect.stack()[1][3] == '?':
- self()
- return ''
- return '<pydoc.Helper instance>'
-
- def __call__(self, request=None):
- if request is not None:
- self.help(request)
- else:
- self.intro()
- self.interact()
- self.output.write('''
-You are now leaving help and returning to the Python interpreter.
-If you want to ask for help on a particular object directly from the
-interpreter, you can type "help(object)". Executing "help('string')"
-has the same effect as typing a particular string at the help> prompt.
-''')
-
- def interact(self):
- self.output.write('\n')
- while True:
- try:
- request = self.getline('help> ')
- if not request: break
- except (KeyboardInterrupt, EOFError):
- break
- request = strip(replace(request, '"', '', "'", ''))
- if lower(request) in ('q', 'quit'): break
- self.help(request)
-
- def getline(self, prompt):
- """Read one line, using raw_input when available."""
- if self.input is sys.stdin:
- return raw_input(prompt)
- else:
- self.output.write(prompt)
- self.output.flush()
- return self.input.readline()
-
- def help(self, request):
- if type(request) is type(''):
- if request == 'help': self.intro()
- elif request == 'keywords': self.listkeywords()
- elif request == 'topics': self.listtopics()
- elif request == 'modules': self.listmodules()
- elif request[:8] == 'modules ':
- self.listmodules(split(request)[1])
- elif request in self.keywords: self.showtopic(request)
- elif request in self.topics: self.showtopic(request)
- elif request: doc(request, 'Help on %s:')
- elif isinstance(request, Helper): self()
- else: doc(request, 'Help on %s:')
- self.output.write('\n')
-
- def intro(self):
- self.output.write('''
-Welcome to Python %s! This is the online help utility.
-
-If this is your first time using Python, you should definitely check out
-the tutorial on the Internet at http://www.python.org/doc/tut/.
-
-Enter the name of any module, keyword, or topic to get help on writing
-Python programs and using Python modules. To quit this help utility and
-return to the interpreter, just type "quit".
-
-To get a list of available modules, keywords, or topics, type "modules",
-"keywords", or "topics". Each module also comes with a one-line summary
-of what it does; to list the modules whose summaries contain a given word
-such as "spam", type "modules spam".
-''' % sys.version[:3])
-
- def list(self, items, columns=4, width=80):
- items = items[:]
- items.sort()
- colw = width / columns
- rows = (len(items) + columns - 1) / columns
- for row in range(rows):
- for col in range(columns):
- i = col * rows + row
- if i < len(items):
- self.output.write(items[i])
- if col < columns - 1:
- self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
- self.output.write('\n')
-
- def listkeywords(self):
- self.output.write('''
-Here is a list of the Python keywords. Enter any keyword to get more help.
-
-''')
- self.list(self.keywords.keys())
-
- def listtopics(self):
- self.output.write('''
-Here is a list of available topics. Enter any topic name to get more help.
-
-''')
- self.list(self.topics.keys())
-
- def showtopic(self, topic):
- if not self.docdir:
- self.output.write('''
-Sorry, topic and keyword documentation is not available because the Python
-HTML documentation files could not be found. If you have installed them,
-please set the environment variable PYTHONDOCS to indicate their location.
-
-On the Microsoft Windows operating system, the files can be built by
-running "hh -decompile . PythonNN.chm" in the C:\PythonNN\Doc> directory.
-''')
- return
- target = self.topics.get(topic, self.keywords.get(topic))
- if not target:
- self.output.write('no documentation found for %s\n' % repr(topic))
- return
- if type(target) is type(''):
- return self.showtopic(target)
-
- filename, xrefs = target
- filename = self.docdir + '/' + filename + '.html'
- try:
- file = open(filename)
- except:
- self.output.write('could not read docs from %s\n' % filename)
- return
-
- divpat = re.compile('<div[^>]*navigat.*?</div.*?>', re.I | re.S)
- addrpat = re.compile('<address.*?>.*?</address.*?>', re.I | re.S)
- document = re.sub(addrpat, '', re.sub(divpat, '', file.read()))
- file.close()
-
- import htmllib, formatter, StringIO
- buffer = StringIO.StringIO()
- parser = htmllib.HTMLParser(
- formatter.AbstractFormatter(formatter.DumbWriter(buffer)))
- parser.start_table = parser.do_p
- parser.end_table = lambda parser=parser: parser.do_p({})
- parser.start_tr = parser.do_br
- parser.start_td = parser.start_th = lambda a, b=buffer: b.write('\t')
- parser.feed(document)
- buffer = replace(buffer.getvalue(), '\xa0', ' ', '\n', '\n ')
- pager(' ' + strip(buffer) + '\n')
- if xrefs:
- buffer = StringIO.StringIO()
- formatter.DumbWriter(buffer).send_flowing_data(
- 'Related help topics: ' + join(split(xrefs), ', ') + '\n')
- self.output.write('\n%s\n' % buffer.getvalue())
-
- def listmodules(self, key=''):
- if key:
- self.output.write('''
-Here is a list of matching modules. Enter any module name to get more help.
-
-''')
- apropos(key)
- else:
- self.output.write('''
-Please wait a moment while I gather a list of all available modules...
-
-''')
- modules = {}
- def callback(path, modname, desc, modules=modules):
- if modname and modname[-9:] == '.__init__':
- modname = modname[:-9] + ' (package)'
- if find(modname, '.') < 0:
- modules[modname] = 1
- ModuleScanner().run(callback)
- self.list(modules.keys())
- self.output.write('''
-Enter any module name to get more help. Or, type "modules spam" to search
-for modules whose descriptions contain the word "spam".
-''')
-
-help = Helper(sys.stdin, sys.stdout)
-
-class Scanner:
- """A generic tree iterator."""
- def __init__(self, roots, children, descendp):
- self.roots = roots[:]
- self.state = []
- self.children = children
- self.descendp = descendp
-
- def next(self):
- if not self.state:
- if not self.roots:
- return None
- root = self.roots.pop(0)
- self.state = [(root, self.children(root))]
- node, children = self.state[-1]
- if not children:
- self.state.pop()
- return self.next()
- child = children.pop(0)
- if self.descendp(child):
- self.state.append((child, self.children(child)))
- return child
-
-
-class ModuleScanner:
- """An interruptible scanner that searches module synopses."""
-
- def run(self, callback, key=None, completer=None):
- if key: key = lower(key)
- self.quit = False
- seen = {}
-
- for modname in sys.builtin_module_names:
- if modname != '__main__':
- seen[modname] = 1
- if key is None:
- callback(None, modname, '')
- else:
- desc = split(__import__(modname).__doc__ or '', '\n')[0]
- if find(lower(modname + ' - ' + desc), key) >= 0:
- callback(None, modname, desc)
-
- for importer, modname, ispkg in pkgutil.walk_packages():
- if self.quit:
- break
- if key is None:
- callback(None, modname, '')
- else:
- loader = importer.find_module(modname)
- if hasattr(loader,'get_source'):
- import StringIO
- desc = source_synopsis(
- StringIO.StringIO(loader.get_source(modname))
- ) or ''
- if hasattr(loader,'get_filename'):
- path = loader.get_filename(modname)
- else:
- path = None
- else:
- module = loader.load_module(modname)
- desc = (module.__doc__ or '').splitlines()[0]
- path = getattr(module,'__file__',None)
- if find(lower(modname + ' - ' + desc), key) >= 0:
- callback(path, modname, desc)
-
- if completer:
- completer()
-
-def apropos(key):
- """Print all the one-line module summaries that contain a substring."""
- def callback(path, modname, desc):
- if modname[-9:] == '.__init__':
- modname = modname[:-9] + ' (package)'
- print modname, desc and '- ' + desc
- try: import warnings
- except ImportError: pass
- else: warnings.filterwarnings('ignore') # ignore problems during import
- ModuleScanner().run(callback, key)
-
-# --------------------------------------------------- web browser interface
-
-def serve(port, callback=None, completer=None):
- import BaseHTTPServer, mimetools, select
-
- # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
- class Message(mimetools.Message):
- def __init__(self, fp, seekable=1):
- Message = self.__class__
- Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
- self.encodingheader = self.getheader('content-transfer-encoding')
- self.typeheader = self.getheader('content-type')
- self.parsetype()
- self.parseplist()
-
- class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
- def send_document(self, title, contents):
- try:
- self.send_response(200)
- self.send_header('Content-Type', 'text/html')
- self.end_headers()
- self.wfile.write(html.page(title, contents))
- except IOError: pass
-
- def do_GET(self):
- path = self.path
- if path[-5:] == '.html': path = path[:-5]
- if path[:1] == '/': path = path[1:]
- if path and path != '.':
- try:
- obj = locate(path, forceload=1)
- except ErrorDuringImport, value:
- self.send_document(path, html.escape(str(value)))
- return
- if obj:
- self.send_document(describe(obj), html.document(obj, path))
- else:
- self.send_document(path,
-'no Python documentation found for %s' % repr(path))
- else:
- heading = html.heading(
-'<big><big><strong>Python: Index of Modules</strong></big></big>',
-'#ffffff', '#7799ee')
- def bltinlink(name):
- return '<a href="%s.html">%s</a>' % (name, name)
- names = filter(lambda x: x != '__main__',
- sys.builtin_module_names)
- contents = html.multicolumn(names, bltinlink)
- indices = ['<p>' + html.bigsection(
- 'Built-in Modules', '#ffffff', '#ee77aa', contents)]
-
- seen = {}
- for dir in sys.path:
- indices.append(html.index(dir, seen))
- contents = heading + join(indices) + '''<p align=right>
-<font color="#909090" face="helvetica, arial"><strong>
-pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
- self.send_document('Index of Modules', contents)
-
- def log_message(self, *args): pass
-
- class DocServer(BaseHTTPServer.HTTPServer):
- def __init__(self, port, callback):
- host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
- self.address = ('', port)
- self.url = 'http://%s:%d/' % (host, port)
- self.callback = callback
- self.base.__init__(self, self.address, self.handler)
-
- def serve_until_quit(self):
- import select
- self.quit = False
- while not self.quit:
- rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
- if rd: self.handle_request()
-
- def server_activate(self):
- self.base.server_activate(self)
- if self.callback: self.callback(self)
-
- DocServer.base = BaseHTTPServer.HTTPServer
- DocServer.handler = DocHandler
- DocHandler.MessageClass = Message
- try:
- try:
- DocServer(port, callback).serve_until_quit()
- except (KeyboardInterrupt, select.error):
- pass
- finally:
- if completer: completer()
-
-# ----------------------------------------------------- graphical interface
-
-def gui():
- """Graphical interface (starts web server and pops up a control window)."""
- class GUI:
- def __init__(self, window, port=7464):
- self.window = window
- self.server = None
- self.scanner = None
-
- import Tkinter
- self.server_frm = Tkinter.Frame(window)
- self.title_lbl = Tkinter.Label(self.server_frm,
- text='Starting server...\n ')
- self.open_btn = Tkinter.Button(self.server_frm,
- text='open browser', command=self.open, state='disabled')
- self.quit_btn = Tkinter.Button(self.server_frm,
- text='quit serving', command=self.quit, state='disabled')
-
- self.search_frm = Tkinter.Frame(window)
- self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
- self.search_ent = Tkinter.Entry(self.search_frm)
- self.search_ent.bind('<Return>', self.search)
- self.stop_btn = Tkinter.Button(self.search_frm,
- text='stop', pady=0, command=self.stop, state='disabled')
- if sys.platform == 'win32':
- # Trying to hide and show this button crashes under Windows.
- self.stop_btn.pack(side='right')
-
- self.window.title('pydoc')
- self.window.protocol('WM_DELETE_WINDOW', self.quit)
- self.title_lbl.pack(side='top', fill='x')
- self.open_btn.pack(side='left', fill='x', expand=1)
- self.quit_btn.pack(side='right', fill='x', expand=1)
- self.server_frm.pack(side='top', fill='x')
-
- self.search_lbl.pack(side='left')
- self.search_ent.pack(side='right', fill='x', expand=1)
- self.search_frm.pack(side='top', fill='x')
- self.search_ent.focus_set()
-
- font = ('helvetica', sys.platform == 'win32' and 8 or 10)
- self.result_lst = Tkinter.Listbox(window, font=font, height=6)
- self.result_lst.bind('<Button-1>', self.select)
- self.result_lst.bind('<Double-Button-1>', self.goto)
- self.result_scr = Tkinter.Scrollbar(window,
- orient='vertical', command=self.result_lst.yview)
- self.result_lst.config(yscrollcommand=self.result_scr.set)
-
- self.result_frm = Tkinter.Frame(window)
- self.goto_btn = Tkinter.Button(self.result_frm,
- text='go to selected', command=self.goto)
- self.hide_btn = Tkinter.Button(self.result_frm,
- text='hide results', command=self.hide)
- self.goto_btn.pack(side='left', fill='x', expand=1)
- self.hide_btn.pack(side='right', fill='x', expand=1)
-
- self.window.update()
- self.minwidth = self.window.winfo_width()
- self.minheight = self.window.winfo_height()
- self.bigminheight = (self.server_frm.winfo_reqheight() +
- self.search_frm.winfo_reqheight() +
- self.result_lst.winfo_reqheight() +
- self.result_frm.winfo_reqheight())
- self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
- self.expanded = 0
- self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
- self.window.wm_minsize(self.minwidth, self.minheight)
- self.window.tk.willdispatch()
-
- import threading
- threading.Thread(
- target=serve, args=(port, self.ready, self.quit)).start()
-
- def ready(self, server):
- self.server = server
- self.title_lbl.config(
- text='Python documentation server at\n' + server.url)
- self.open_btn.config(state='normal')
- self.quit_btn.config(state='normal')
-
- def open(self, event=None, url=None):
- url = url or self.server.url
- try:
- import webbrowser
- webbrowser.open(url)
- except ImportError: # pre-webbrowser.py compatibility
- if sys.platform == 'win32':
- os.system('start "%s"' % url)
- elif sys.platform == 'mac':
- try: import ic
- except ImportError: pass
- else: ic.launchurl(url)
- else:
- rc = os.system('netscape -remote "openURL(%s)" &' % url)
- if rc: os.system('netscape "%s" &' % url)
-
- def quit(self, event=None):
- if self.server:
- self.server.quit = 1
- self.window.quit()
-
- def search(self, event=None):
- key = self.search_ent.get()
- self.stop_btn.pack(side='right')
- self.stop_btn.config(state='normal')
- self.search_lbl.config(text='Searching for "%s"...' % key)
- self.search_ent.forget()
- self.search_lbl.pack(side='left')
- self.result_lst.delete(0, 'end')
- self.goto_btn.config(state='disabled')
- self.expand()
-
- import threading
- if self.scanner:
- self.scanner.quit = 1
- self.scanner = ModuleScanner()
- threading.Thread(target=self.scanner.run,
- args=(self.update, key, self.done)).start()
-
- def update(self, path, modname, desc):
- if modname[-9:] == '.__init__':
- modname = modname[:-9] + ' (package)'
- self.result_lst.insert('end',
- modname + ' - ' + (desc or '(no description)'))
-
- def stop(self, event=None):
- if self.scanner:
- self.scanner.quit = 1
- self.scanner = None
-
- def done(self):
- self.scanner = None
- self.search_lbl.config(text='Search for')
- self.search_lbl.pack(side='left')
- self.search_ent.pack(side='right', fill='x', expand=1)
- if sys.platform != 'win32': self.stop_btn.forget()
- self.stop_btn.config(state='disabled')
-
- def select(self, event=None):
- self.goto_btn.config(state='normal')
-
- def goto(self, event=None):
- selection = self.result_lst.curselection()
- if selection:
- modname = split(self.result_lst.get(selection[0]))[0]
- self.open(url=self.server.url + modname + '.html')
-
- def collapse(self):
- if not self.expanded: return
- self.result_frm.forget()
- self.result_scr.forget()
- self.result_lst.forget()
- self.bigwidth = self.window.winfo_width()
- self.bigheight = self.window.winfo_height()
- self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
- self.window.wm_minsize(self.minwidth, self.minheight)
- self.expanded = 0
-
- def expand(self):
- if self.expanded: return
- self.result_frm.pack(side='bottom', fill='x')
- self.result_scr.pack(side='right', fill='y')
- self.result_lst.pack(side='top', fill='both', expand=1)
- self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
- self.window.wm_minsize(self.minwidth, self.bigminheight)
- self.expanded = 1
-
- def hide(self, event=None):
- self.stop()
- self.collapse()
-
- import Tkinter
- try:
- root = Tkinter.Tk()
- # Tk will crash if pythonw.exe has an XP .manifest
- # file and the root has is not destroyed explicitly.
- # If the problem is ever fixed in Tk, the explicit
- # destroy can go.
- try:
- gui = GUI(root)
- root.mainloop()
- finally:
- root.destroy()
- except KeyboardInterrupt:
- pass
-
-# -------------------------------------------------- command-line interface
-
-def ispath(x):
- return isinstance(x, str) and find(x, os.sep) >= 0
-
-def cli():
- """Command-line interface (looks at sys.argv to decide what to do)."""
- import getopt
- class BadUsage: pass
-
- # Scripts don't get the current directory in their path by default.
- scriptdir = os.path.dirname(sys.argv[0])
- if scriptdir in sys.path:
- sys.path.remove(scriptdir)
- sys.path.insert(0, '.')
-
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
- writing = 0
-
- for opt, val in opts:
- if opt == '-g':
- gui()
- return
- if opt == '-k':
- apropos(val)
- return
- if opt == '-p':
- try:
- port = int(val)
- except ValueError:
- raise BadUsage
- def ready(server):
- print 'pydoc server ready at %s' % server.url
- def stopped():
- print 'pydoc server stopped'
- serve(port, ready, stopped)
- return
- if opt == '-w':
- writing = 1
-
- if not args: raise BadUsage
- for arg in args:
- if ispath(arg) and not os.path.exists(arg):
- print 'file %r does not exist' % arg
- break
- try:
- if ispath(arg) and os.path.isfile(arg):
- arg = importfile(arg)
- if writing:
- if ispath(arg) and os.path.isdir(arg):
- writedocs(arg)
- else:
- writedoc(arg)
- else:
- help.help(arg)
- except ErrorDuringImport, value:
- print value
-
- except (getopt.error, BadUsage):
- cmd = os.path.basename(sys.argv[0])
- print """pydoc - the Python documentation tool
-
-%s <name> ...
- Show text documentation on something. <name> may be the name of a
- Python keyword, topic, function, module, or package, or a dotted
- reference to a class or function within a module or module in a
- package. If <name> contains a '%s', it is used as the path to a
- Python source file to document. If name is 'keywords', 'topics',
- or 'modules', a listing of these things is displayed.
-
-%s -k <keyword>
- Search for a keyword in the synopsis lines of all available modules.
-
-%s -p <port>
- Start an HTTP server on the given port on the local machine.
-
-%s -g
- Pop up a graphical interface for finding and serving documentation.
-
-%s -w <name> ...
- Write out the HTML documentation for a module to a file in the current
- directory. If <name> contains a '%s', it is treated as a filename; if
- it names a directory, documentation is written for all the contents.
-""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
-
-if __name__ == '__main__': cli()
diff --git a/sys/lib/python/quopri.py b/sys/lib/python/quopri.py
deleted file mode 100755
index 8788afc2f..000000000
--- a/sys/lib/python/quopri.py
+++ /dev/null
@@ -1,237 +0,0 @@
-#! /usr/bin/env python
-
-"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
-
-# (Dec 1991 version).
-
-__all__ = ["encode", "decode", "encodestring", "decodestring"]
-
-ESCAPE = '='
-MAXLINESIZE = 76
-HEX = '0123456789ABCDEF'
-EMPTYSTRING = ''
-
-try:
- from binascii import a2b_qp, b2a_qp
-except ImportError:
- a2b_qp = None
- b2a_qp = None
-
-
-def needsquoting(c, quotetabs, header):
- """Decide whether a particular character needs to be quoted.
-
- The 'quotetabs' flag indicates whether embedded tabs and spaces should be
- quoted. Note that line-ending tabs and spaces are always encoded, as per
- RFC 1521.
- """
- if c in ' \t':
- return quotetabs
- # if header, we have to escape _ because _ is used to escape space
- if c == '_':
- return header
- return c == ESCAPE or not (' ' <= c <= '~')
-
-def quote(c):
- """Quote a single character."""
- i = ord(c)
- return ESCAPE + HEX[i//16] + HEX[i%16]
-
-
-
-def encode(input, output, quotetabs, header = 0):
- """Read 'input', apply quoted-printable encoding, and write to 'output'.
-
- 'input' and 'output' are files with readline() and write() methods.
- The 'quotetabs' flag indicates whether embedded tabs and spaces should be
- quoted. Note that line-ending tabs and spaces are always encoded, as per
- RFC 1521.
- The 'header' flag indicates whether we are encoding spaces as _ as per
- RFC 1522.
- """
-
- if b2a_qp is not None:
- data = input.read()
- odata = b2a_qp(data, quotetabs = quotetabs, header = header)
- output.write(odata)
- return
-
- def write(s, output=output, lineEnd='\n'):
- # RFC 1521 requires that the line ending in a space or tab must have
- # that trailing character encoded.
- if s and s[-1:] in ' \t':
- output.write(s[:-1] + quote(s[-1]) + lineEnd)
- elif s == '.':
- output.write(quote(s) + lineEnd)
- else:
- output.write(s + lineEnd)
-
- prevline = None
- while 1:
- line = input.readline()
- if not line:
- break
- outline = []
- # Strip off any readline induced trailing newline
- stripped = ''
- if line[-1:] == '\n':
- line = line[:-1]
- stripped = '\n'
- # Calculate the un-length-limited encoded line
- for c in line:
- if needsquoting(c, quotetabs, header):
- c = quote(c)
- if header and c == ' ':
- outline.append('_')
- else:
- outline.append(c)
- # First, write out the previous line
- if prevline is not None:
- write(prevline)
- # Now see if we need any soft line breaks because of RFC-imposed
- # length limitations. Then do the thisline->prevline dance.
- thisline = EMPTYSTRING.join(outline)
- while len(thisline) > MAXLINESIZE:
- # Don't forget to include the soft line break `=' sign in the
- # length calculation!
- write(thisline[:MAXLINESIZE-1], lineEnd='=\n')
- thisline = thisline[MAXLINESIZE-1:]
- # Write out the current line
- prevline = thisline
- # Write out the last line, without a trailing newline
- if prevline is not None:
- write(prevline, lineEnd=stripped)
-
-def encodestring(s, quotetabs = 0, header = 0):
- if b2a_qp is not None:
- return b2a_qp(s, quotetabs = quotetabs, header = header)
- from cStringIO import StringIO
- infp = StringIO(s)
- outfp = StringIO()
- encode(infp, outfp, quotetabs, header)
- return outfp.getvalue()
-
-
-
-def decode(input, output, header = 0):
- """Read 'input', apply quoted-printable decoding, and write to 'output'.
- 'input' and 'output' are files with readline() and write() methods.
- If 'header' is true, decode underscore as space (per RFC 1522)."""
-
- if a2b_qp is not None:
- data = input.read()
- odata = a2b_qp(data, header = header)
- output.write(odata)
- return
-
- new = ''
- while 1:
- line = input.readline()
- if not line: break
- i, n = 0, len(line)
- if n > 0 and line[n-1] == '\n':
- partial = 0; n = n-1
- # Strip trailing whitespace
- while n > 0 and line[n-1] in " \t\r":
- n = n-1
- else:
- partial = 1
- while i < n:
- c = line[i]
- if c == '_' and header:
- new = new + ' '; i = i+1
- elif c != ESCAPE:
- new = new + c; i = i+1
- elif i+1 == n and not partial:
- partial = 1; break
- elif i+1 < n and line[i+1] == ESCAPE:
- new = new + ESCAPE; i = i+2
- elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]):
- new = new + chr(unhex(line[i+1:i+3])); i = i+3
- else: # Bad escape sequence -- leave it in
- new = new + c; i = i+1
- if not partial:
- output.write(new + '\n')
- new = ''
- if new:
- output.write(new)
-
-def decodestring(s, header = 0):
- if a2b_qp is not None:
- return a2b_qp(s, header = header)
- from cStringIO import StringIO
- infp = StringIO(s)
- outfp = StringIO()
- decode(infp, outfp, header = header)
- return outfp.getvalue()
-
-
-
-# Other helper functions
-def ishex(c):
- """Return true if the character 'c' is a hexadecimal digit."""
- return '0' <= c <= '9' or 'a' <= c <= 'f' or 'A' <= c <= 'F'
-
-def unhex(s):
- """Get the integer value of a hexadecimal number."""
- bits = 0
- for c in s:
- if '0' <= c <= '9':
- i = ord('0')
- elif 'a' <= c <= 'f':
- i = ord('a')-10
- elif 'A' <= c <= 'F':
- i = ord('A')-10
- else:
- break
- bits = bits*16 + (ord(c) - i)
- return bits
-
-
-
-def main():
- import sys
- import getopt
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'td')
- except getopt.error, msg:
- sys.stdout = sys.stderr
- print msg
- print "usage: quopri [-t | -d] [file] ..."
- print "-t: quote tabs"
- print "-d: decode; default encode"
- sys.exit(2)
- deco = 0
- tabs = 0
- for o, a in opts:
- if o == '-t': tabs = 1
- if o == '-d': deco = 1
- if tabs and deco:
- sys.stdout = sys.stderr
- print "-t and -d are mutually exclusive"
- sys.exit(2)
- if not args: args = ['-']
- sts = 0
- for file in args:
- if file == '-':
- fp = sys.stdin
- else:
- try:
- fp = open(file)
- except IOError, msg:
- sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
- sts = 1
- continue
- if deco:
- decode(fp, sys.stdout)
- else:
- encode(fp, sys.stdout, tabs)
- if fp is not sys.stdin:
- fp.close()
- if sts:
- sys.exit(sts)
-
-
-
-if __name__ == '__main__':
- main()
diff --git a/sys/lib/python/random.py b/sys/lib/python/random.py
deleted file mode 100644
index ed87ddd64..000000000
--- a/sys/lib/python/random.py
+++ /dev/null
@@ -1,862 +0,0 @@
-"""Random variable generators.
-
- integers
- --------
- uniform within range
-
- sequences
- ---------
- pick random element
- pick random sample
- generate random permutation
-
- distributions on the real line:
- ------------------------------
- uniform
- normal (Gaussian)
- lognormal
- negative exponential
- gamma
- beta
- pareto
- Weibull
-
- distributions on the circle (angles 0 to 2pi)
- ---------------------------------------------
- circular uniform
- von Mises
-
-General notes on the underlying Mersenne Twister core generator:
-
-* The period is 2**19937-1.
-* It is one of the most extensively tested generators in existence.
-* Without a direct way to compute N steps forward, the semantics of
- jumpahead(n) are weakened to simply jump to another distant state and rely
- on the large period to avoid overlapping sequences.
-* The random() method is implemented in C, executes in a single Python step,
- and is, therefore, threadsafe.
-
-"""
-
-from warnings import warn as _warn
-from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
-from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
-from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
-from os import urandom as _urandom
-from binascii import hexlify as _hexlify
-
-__all__ = ["Random","seed","random","uniform","randint","choice","sample",
- "randrange","shuffle","normalvariate","lognormvariate",
- "expovariate","vonmisesvariate","gammavariate",
- "gauss","betavariate","paretovariate","weibullvariate",
- "getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
- "SystemRandom"]
-
-NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
-TWOPI = 2.0*_pi
-LOG4 = _log(4.0)
-SG_MAGICCONST = 1.0 + _log(4.5)
-BPF = 53 # Number of bits in a float
-RECIP_BPF = 2**-BPF
-
-
-# Translated by Guido van Rossum from C source provided by
-# Adrian Baddeley. Adapted by Raymond Hettinger for use with
-# the Mersenne Twister and os.urandom() core generators.
-
-import _random
-
-class Random(_random.Random):
- """Random number generator base class used by bound module functions.
-
- Used to instantiate instances of Random to get generators that don't
- share state. Especially useful for multi-threaded programs, creating
- a different instance of Random for each thread, and using the jumpahead()
- method to ensure that the generated sequences seen by each thread don't
- overlap.
-
- Class Random can also be subclassed if you want to use a different basic
- generator of your own devising: in that case, override the following
- methods: random(), seed(), getstate(), setstate() and jumpahead().
- Optionally, implement a getrandombits() method so that randrange()
- can cover arbitrarily large ranges.
-
- """
-
- VERSION = 2 # used by getstate/setstate
-
- def __init__(self, x=None):
- """Initialize an instance.
-
- Optional argument x controls seeding, as for Random.seed().
- """
-
- self.seed(x)
- self.gauss_next = None
-
- def seed(self, a=None):
- """Initialize internal state from hashable object.
-
- None or no argument seeds from current time or from an operating
- system specific randomness source if available.
-
- If a is not None or an int or long, hash(a) is used instead.
- """
-
- if a is None:
- try:
- a = long(_hexlify(_urandom(16)), 16)
- except NotImplementedError:
- import time
- a = long(time.time() * 256) # use fractional seconds
-
- super(Random, self).seed(a)
- self.gauss_next = None
-
- def getstate(self):
- """Return internal state; can be passed to setstate() later."""
- return self.VERSION, super(Random, self).getstate(), self.gauss_next
-
- def setstate(self, state):
- """Restore internal state from object returned by getstate()."""
- version = state[0]
- if version == 2:
- version, internalstate, self.gauss_next = state
- super(Random, self).setstate(internalstate)
- else:
- raise ValueError("state with version %s passed to "
- "Random.setstate() of version %s" %
- (version, self.VERSION))
-
-## ---- Methods below this point do not need to be overridden when
-## ---- subclassing for the purpose of using a different core generator.
-
-## -------------------- pickle support -------------------
-
- def __getstate__(self): # for pickle
- return self.getstate()
-
- def __setstate__(self, state): # for pickle
- self.setstate(state)
-
- def __reduce__(self):
- return self.__class__, (), self.getstate()
-
-## -------------------- integer methods -------------------
-
- def randrange(self, start, stop=None, step=1, int=int, default=None,
- maxwidth=1L<<BPF):
- """Choose a random item from range(start, stop[, step]).
-
- This fixes the problem with randint() which includes the
- endpoint; in Python this is usually not what you want.
- Do not supply the 'int', 'default', and 'maxwidth' arguments.
- """
-
- # This code is a bit messy to make it fast for the
- # common case while still doing adequate error checking.
- istart = int(start)
- if istart != start:
- raise ValueError, "non-integer arg 1 for randrange()"
- if stop is default:
- if istart > 0:
- if istart >= maxwidth:
- return self._randbelow(istart)
- return int(self.random() * istart)
- raise ValueError, "empty range for randrange()"
-
- # stop argument supplied.
- istop = int(stop)
- if istop != stop:
- raise ValueError, "non-integer stop for randrange()"
- width = istop - istart
- if step == 1 and width > 0:
- # Note that
- # int(istart + self.random()*width)
- # instead would be incorrect. For example, consider istart
- # = -2 and istop = 0. Then the guts would be in
- # -2.0 to 0.0 exclusive on both ends (ignoring that random()
- # might return 0.0), and because int() truncates toward 0, the
- # final result would be -1 or 0 (instead of -2 or -1).
- # istart + int(self.random()*width)
- # would also be incorrect, for a subtler reason: the RHS
- # can return a long, and then randrange() would also return
- # a long, but we're supposed to return an int (for backward
- # compatibility).
-
- if width >= maxwidth:
- return int(istart + self._randbelow(width))
- return int(istart + int(self.random()*width))
- if step == 1:
- raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)
-
- # Non-unit step argument supplied.
- istep = int(step)
- if istep != step:
- raise ValueError, "non-integer step for randrange()"
- if istep > 0:
- n = (width + istep - 1) // istep
- elif istep < 0:
- n = (width + istep + 1) // istep
- else:
- raise ValueError, "zero step for randrange()"
-
- if n <= 0:
- raise ValueError, "empty range for randrange()"
-
- if n >= maxwidth:
- return istart + istep*self._randbelow(n)
- return istart + istep*int(self.random() * n)
-
- def randint(self, a, b):
- """Return random integer in range [a, b], including both end points.
- """
-
- return self.randrange(a, b+1)
-
- def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF,
- _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
- """Return a random int in the range [0,n)
-
- Handles the case where n has more bits than returned
- by a single call to the underlying generator.
- """
-
- try:
- getrandbits = self.getrandbits
- except AttributeError:
- pass
- else:
- # Only call self.getrandbits if the original random() builtin method
- # has not been overridden or if a new getrandbits() was supplied.
- # This assures that the two methods correspond.
- if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
- k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2)
- r = getrandbits(k)
- while r >= n:
- r = getrandbits(k)
- return r
- if n >= _maxwidth:
- _warn("Underlying random() generator does not supply \n"
- "enough bits to choose from a population range this large")
- return int(self.random() * n)
-
-## -------------------- sequence methods -------------------
-
- def choice(self, seq):
- """Choose a random element from a non-empty sequence."""
- return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
-
- def shuffle(self, x, random=None, int=int):
- """x, random=random.random -> shuffle list x in place; return None.
-
- Optional arg random is a 0-argument function returning a random
- float in [0.0, 1.0); by default, the standard random.random.
- """
-
- if random is None:
- random = self.random
- for i in reversed(xrange(1, len(x))):
- # pick an element in x[:i+1] with which to exchange x[i]
- j = int(random() * (i+1))
- x[i], x[j] = x[j], x[i]
-
- def sample(self, population, k):
- """Chooses k unique random elements from a population sequence.
-
- Returns a new list containing elements from the population while
- leaving the original population unchanged. The resulting list is
- in selection order so that all sub-slices will also be valid random
- samples. This allows raffle winners (the sample) to be partitioned
- into grand prize and second place winners (the subslices).
-
- Members of the population need not be hashable or unique. If the
- population contains repeats, then each occurrence is a possible
- selection in the sample.
-
- To choose a sample in a range of integers, use xrange as an argument.
- This is especially fast and space efficient for sampling from a
- large population: sample(xrange(10000000), 60)
- """
-
- # XXX Although the documentation says `population` is "a sequence",
- # XXX attempts are made to cater to any iterable with a __len__
- # XXX method. This has had mixed success. Examples from both
- # XXX sides: sets work fine, and should become officially supported;
- # XXX dicts are much harder, and have failed in various subtle
- # XXX ways across attempts. Support for mapping types should probably
- # XXX be dropped (and users should pass mapping.keys() or .values()
- # XXX explicitly).
-
- # Sampling without replacement entails tracking either potential
- # selections (the pool) in a list or previous selections in a set.
-
- # When the number of selections is small compared to the
- # population, then tracking selections is efficient, requiring
- # only a small set and an occasional reselection. For
- # a larger number of selections, the pool tracking method is
- # preferred since the list takes less space than the
- # set and it doesn't suffer from frequent reselections.
-
- n = len(population)
- if not 0 <= k <= n:
- raise ValueError, "sample larger than population"
- random = self.random
- _int = int
- result = [None] * k
- setsize = 21 # size of a small set minus size of an empty list
- if k > 5:
- setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
- if n <= setsize or hasattr(population, "keys"):
- # An n-length list is smaller than a k-length set, or this is a
- # mapping type so the other algorithm wouldn't work.
- pool = list(population)
- for i in xrange(k): # invariant: non-selected at [0,n-i)
- j = _int(random() * (n-i))
- result[i] = pool[j]
- pool[j] = pool[n-i-1] # move non-selected item into vacancy
- else:
- try:
- selected = set()
- selected_add = selected.add
- for i in xrange(k):
- j = _int(random() * n)
- while j in selected:
- j = _int(random() * n)
- selected_add(j)
- result[i] = population[j]
- except (TypeError, KeyError): # handle (at least) sets
- if isinstance(population, list):
- raise
- return self.sample(tuple(population), k)
- return result
-
-## -------------------- real-valued distributions -------------------
-
-## -------------------- uniform distribution -------------------
-
- def uniform(self, a, b):
- """Get a random number in the range [a, b)."""
- return a + (b-a) * self.random()
-
-## -------------------- normal distribution --------------------
-
- def normalvariate(self, mu, sigma):
- """Normal distribution.
-
- mu is the mean, and sigma is the standard deviation.
-
- """
- # mu = mean, sigma = standard deviation
-
- # Uses Kinderman and Monahan method. Reference: Kinderman,
- # A.J. and Monahan, J.F., "Computer generation of random
- # variables using the ratio of uniform deviates", ACM Trans
- # Math Software, 3, (1977), pp257-260.
-
- random = self.random
- while 1:
- u1 = random()
- u2 = 1.0 - random()
- z = NV_MAGICCONST*(u1-0.5)/u2
- zz = z*z/4.0
- if zz <= -_log(u2):
- break
- return mu + z*sigma
-
-## -------------------- lognormal distribution --------------------
-
- def lognormvariate(self, mu, sigma):
- """Log normal distribution.
-
- If you take the natural logarithm of this distribution, you'll get a
- normal distribution with mean mu and standard deviation sigma.
- mu can have any value, and sigma must be greater than zero.
-
- """
- return _exp(self.normalvariate(mu, sigma))
-
-## -------------------- exponential distribution --------------------
-
- def expovariate(self, lambd):
- """Exponential distribution.
-
- lambd is 1.0 divided by the desired mean. (The parameter would be
- called "lambda", but that is a reserved word in Python.) Returned
- values range from 0 to positive infinity.
-
- """
- # lambd: rate lambd = 1/mean
- # ('lambda' is a Python reserved word)
-
- random = self.random
- u = random()
- while u <= 1e-7:
- u = random()
- return -_log(u)/lambd
-
-## -------------------- von Mises distribution --------------------
-
- def vonmisesvariate(self, mu, kappa):
- """Circular data distribution.
-
- mu is the mean angle, expressed in radians between 0 and 2*pi, and
- kappa is the concentration parameter, which must be greater than or
- equal to zero. If kappa is equal to zero, this distribution reduces
- to a uniform random angle over the range 0 to 2*pi.
-
- """
- # mu: mean angle (in radians between 0 and 2*pi)
- # kappa: concentration parameter kappa (>= 0)
- # if kappa = 0 generate uniform random angle
-
- # Based upon an algorithm published in: Fisher, N.I.,
- # "Statistical Analysis of Circular Data", Cambridge
- # University Press, 1993.
-
- # Thanks to Magnus Kessler for a correction to the
- # implementation of step 4.
-
- random = self.random
- if kappa <= 1e-6:
- return TWOPI * random()
-
- a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
- b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
- r = (1.0 + b * b)/(2.0 * b)
-
- while 1:
- u1 = random()
-
- z = _cos(_pi * u1)
- f = (1.0 + r * z)/(r + z)
- c = kappa * (r - f)
-
- u2 = random()
-
- if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
- break
-
- u3 = random()
- if u3 > 0.5:
- theta = (mu % TWOPI) + _acos(f)
- else:
- theta = (mu % TWOPI) - _acos(f)
-
- return theta
-
-## -------------------- gamma distribution --------------------
-
- def gammavariate(self, alpha, beta):
- """Gamma distribution. Not the gamma function!
-
- Conditions on the parameters are alpha > 0 and beta > 0.
-
- """
-
- # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
-
- # Warning: a few older sources define the gamma distribution in terms
- # of alpha > -1.0
- if alpha <= 0.0 or beta <= 0.0:
- raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
-
- random = self.random
- if alpha > 1.0:
-
- # Uses R.C.H. Cheng, "The generation of Gamma
- # variables with non-integral shape parameters",
- # Applied Statistics, (1977), 26, No. 1, p71-74
-
- ainv = _sqrt(2.0 * alpha - 1.0)
- bbb = alpha - LOG4
- ccc = alpha + ainv
-
- while 1:
- u1 = random()
- if not 1e-7 < u1 < .9999999:
- continue
- u2 = 1.0 - random()
- v = _log(u1/(1.0-u1))/ainv
- x = alpha*_exp(v)
- z = u1*u1*u2
- r = bbb+ccc*v-x
- if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
- return x * beta
-
- elif alpha == 1.0:
- # expovariate(1)
- u = random()
- while u <= 1e-7:
- u = random()
- return -_log(u) * beta
-
- else: # alpha is between 0 and 1 (exclusive)
-
- # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
-
- while 1:
- u = random()
- b = (_e + alpha)/_e
- p = b*u
- if p <= 1.0:
- x = p ** (1.0/alpha)
- else:
- x = -_log((b-p)/alpha)
- u1 = random()
- if p > 1.0:
- if u1 <= x ** (alpha - 1.0):
- break
- elif u1 <= _exp(-x):
- break
- return x * beta
-
-## -------------------- Gauss (faster alternative) --------------------
-
- def gauss(self, mu, sigma):
- """Gaussian distribution.
-
- mu is the mean, and sigma is the standard deviation. This is
- slightly faster than the normalvariate() function.
-
- Not thread-safe without a lock around calls.
-
- """
-
- # When x and y are two variables from [0, 1), uniformly
- # distributed, then
- #
- # cos(2*pi*x)*sqrt(-2*log(1-y))
- # sin(2*pi*x)*sqrt(-2*log(1-y))
- #
- # are two *independent* variables with normal distribution
- # (mu = 0, sigma = 1).
- # (Lambert Meertens)
- # (corrected version; bug discovered by Mike Miller, fixed by LM)
-
- # Multithreading note: When two threads call this function
- # simultaneously, it is possible that they will receive the
- # same return value. The window is very small though. To
- # avoid this, you have to use a lock around all calls. (I
- # didn't want to slow this down in the serial case by using a
- # lock here.)
-
- random = self.random
- z = self.gauss_next
- self.gauss_next = None
- if z is None:
- x2pi = random() * TWOPI
- g2rad = _sqrt(-2.0 * _log(1.0 - random()))
- z = _cos(x2pi) * g2rad
- self.gauss_next = _sin(x2pi) * g2rad
-
- return mu + z*sigma
-
-## -------------------- beta --------------------
-## See
-## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
-## for Ivan Frohne's insightful analysis of why the original implementation:
-##
-## def betavariate(self, alpha, beta):
-## # Discrete Event Simulation in C, pp 87-88.
-##
-## y = self.expovariate(alpha)
-## z = self.expovariate(1.0/beta)
-## return z/(y+z)
-##
-## was dead wrong, and how it probably got that way.
-
- def betavariate(self, alpha, beta):
- """Beta distribution.
-
- Conditions on the parameters are alpha > 0 and beta > 0.
- Returned values range between 0 and 1.
-
- """
-
- # This version due to Janne Sinkkonen, and matches all the std
- # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
- y = self.gammavariate(alpha, 1.)
- if y == 0:
- return 0.0
- else:
- return y / (y + self.gammavariate(beta, 1.))
-
-## -------------------- Pareto --------------------
-
- def paretovariate(self, alpha):
- """Pareto distribution. alpha is the shape parameter."""
- # Jain, pg. 495
-
- u = 1.0 - self.random()
- return 1.0 / pow(u, 1.0/alpha)
-
-## -------------------- Weibull --------------------
-
- def weibullvariate(self, alpha, beta):
- """Weibull distribution.
-
- alpha is the scale parameter and beta is the shape parameter.
-
- """
- # Jain, pg. 499; bug fix courtesy Bill Arms
-
- u = 1.0 - self.random()
- return alpha * pow(-_log(u), 1.0/beta)
-
-## -------------------- Wichmann-Hill -------------------
-
-class WichmannHill(Random):
-
- VERSION = 1 # used by getstate/setstate
-
- def seed(self, a=None):
- """Initialize internal state from hashable object.
-
- None or no argument seeds from current time or from an operating
- system specific randomness source if available.
-
- If a is not None or an int or long, hash(a) is used instead.
-
- If a is an int or long, a is used directly. Distinct values between
- 0 and 27814431486575L inclusive are guaranteed to yield distinct
- internal states (this guarantee is specific to the default
- Wichmann-Hill generator).
- """
-
- if a is None:
- try:
- a = long(_hexlify(_urandom(16)), 16)
- except NotImplementedError:
- import time
- a = long(time.time() * 256) # use fractional seconds
-
- if not isinstance(a, (int, long)):
- a = hash(a)
-
- a, x = divmod(a, 30268)
- a, y = divmod(a, 30306)
- a, z = divmod(a, 30322)
- self._seed = int(x)+1, int(y)+1, int(z)+1
-
- self.gauss_next = None
-
- def random(self):
- """Get the next random number in the range [0.0, 1.0)."""
-
- # Wichman-Hill random number generator.
- #
- # Wichmann, B. A. & Hill, I. D. (1982)
- # Algorithm AS 183:
- # An efficient and portable pseudo-random number generator
- # Applied Statistics 31 (1982) 188-190
- #
- # see also:
- # Correction to Algorithm AS 183
- # Applied Statistics 33 (1984) 123
- #
- # McLeod, A. I. (1985)
- # A remark on Algorithm AS 183
- # Applied Statistics 34 (1985),198-200
-
- # This part is thread-unsafe:
- # BEGIN CRITICAL SECTION
- x, y, z = self._seed
- x = (171 * x) % 30269
- y = (172 * y) % 30307
- z = (170 * z) % 30323
- self._seed = x, y, z
- # END CRITICAL SECTION
-
- # Note: on a platform using IEEE-754 double arithmetic, this can
- # never return 0.0 (asserted by Tim; proof too long for a comment).
- return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
-
- def getstate(self):
- """Return internal state; can be passed to setstate() later."""
- return self.VERSION, self._seed, self.gauss_next
-
- def setstate(self, state):
- """Restore internal state from object returned by getstate()."""
- version = state[0]
- if version == 1:
- version, self._seed, self.gauss_next = state
- else:
- raise ValueError("state with version %s passed to "
- "Random.setstate() of version %s" %
- (version, self.VERSION))
-
- def jumpahead(self, n):
- """Act as if n calls to random() were made, but quickly.
-
- n is an int, greater than or equal to 0.
-
- Example use: If you have 2 threads and know that each will
- consume no more than a million random numbers, create two Random
- objects r1 and r2, then do
- r2.setstate(r1.getstate())
- r2.jumpahead(1000000)
- Then r1 and r2 will use guaranteed-disjoint segments of the full
- period.
- """
-
- if not n >= 0:
- raise ValueError("n must be >= 0")
- x, y, z = self._seed
- x = int(x * pow(171, n, 30269)) % 30269
- y = int(y * pow(172, n, 30307)) % 30307
- z = int(z * pow(170, n, 30323)) % 30323
- self._seed = x, y, z
-
- def __whseed(self, x=0, y=0, z=0):
- """Set the Wichmann-Hill seed from (x, y, z).
-
- These must be integers in the range [0, 256).
- """
-
- if not type(x) == type(y) == type(z) == int:
- raise TypeError('seeds must be integers')
- if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
- raise ValueError('seeds must be in range(0, 256)')
- if 0 == x == y == z:
- # Initialize from current time
- import time
- t = long(time.time() * 256)
- t = int((t&0xffffff) ^ (t>>24))
- t, x = divmod(t, 256)
- t, y = divmod(t, 256)
- t, z = divmod(t, 256)
- # Zero is a poor seed, so substitute 1
- self._seed = (x or 1, y or 1, z or 1)
-
- self.gauss_next = None
-
- def whseed(self, a=None):
- """Seed from hashable object's hash code.
-
- None or no argument seeds from current time. It is not guaranteed
- that objects with distinct hash codes lead to distinct internal
- states.
-
- This is obsolete, provided for compatibility with the seed routine
- used prior to Python 2.1. Use the .seed() method instead.
- """
-
- if a is None:
- self.__whseed()
- return
- a = hash(a)
- a, x = divmod(a, 256)
- a, y = divmod(a, 256)
- a, z = divmod(a, 256)
- x = (x + a) % 256 or 1
- y = (y + a) % 256 or 1
- z = (z + a) % 256 or 1
- self.__whseed(x, y, z)
-
-## --------------- Operating System Random Source ------------------
-
-class SystemRandom(Random):
- """Alternate random number generator using sources provided
- by the operating system (such as /dev/urandom on Unix or
- CryptGenRandom on Windows).
-
- Not available on all systems (see os.urandom() for details).
- """
-
- def random(self):
- """Get the next random number in the range [0.0, 1.0)."""
- return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF
-
- def getrandbits(self, k):
- """getrandbits(k) -> x. Generates a long int with k random bits."""
- if k <= 0:
- raise ValueError('number of bits must be greater than zero')
- if k != int(k):
- raise TypeError('number of bits should be an integer')
- bytes = (k + 7) // 8 # bits / 8 and rounded up
- x = long(_hexlify(_urandom(bytes)), 16)
- return x >> (bytes * 8 - k) # trim excess bits
-
- def _stub(self, *args, **kwds):
- "Stub method. Not used for a system random number generator."
- return None
- seed = jumpahead = _stub
-
- def _notimplemented(self, *args, **kwds):
- "Method should not be called for a system random number generator."
- raise NotImplementedError('System entropy source does not have state.')
- getstate = setstate = _notimplemented
-
-## -------------------- test program --------------------
-
-def _test_generator(n, func, args):
- import time
- print n, 'times', func.__name__
- total = 0.0
- sqsum = 0.0
- smallest = 1e10
- largest = -1e10
- t0 = time.time()
- for i in range(n):
- x = func(*args)
- total += x
- sqsum = sqsum + x*x
- smallest = min(x, smallest)
- largest = max(x, largest)
- t1 = time.time()
- print round(t1-t0, 3), 'sec,',
- avg = total/n
- stddev = _sqrt(sqsum/n - avg*avg)
- print 'avg %g, stddev %g, min %g, max %g' % \
- (avg, stddev, smallest, largest)
-
-
-def _test(N=2000):
- _test_generator(N, random, ())
- _test_generator(N, normalvariate, (0.0, 1.0))
- _test_generator(N, lognormvariate, (0.0, 1.0))
- _test_generator(N, vonmisesvariate, (0.0, 1.0))
- _test_generator(N, gammavariate, (0.01, 1.0))
- _test_generator(N, gammavariate, (0.1, 1.0))
- _test_generator(N, gammavariate, (0.1, 2.0))
- _test_generator(N, gammavariate, (0.5, 1.0))
- _test_generator(N, gammavariate, (0.9, 1.0))
- _test_generator(N, gammavariate, (1.0, 1.0))
- _test_generator(N, gammavariate, (2.0, 1.0))
- _test_generator(N, gammavariate, (20.0, 1.0))
- _test_generator(N, gammavariate, (200.0, 1.0))
- _test_generator(N, gauss, (0.0, 1.0))
- _test_generator(N, betavariate, (3.0, 3.0))
-
-# Create one instance, seeded from current time, and export its methods
-# as module-level functions. The functions share state across all uses
-#(both in the user's code and in the Python libraries), but that's fine
-# for most programs and is easier for the casual user than making them
-# instantiate their own Random() instance.
-
-_inst = Random()
-seed = _inst.seed
-random = _inst.random
-uniform = _inst.uniform
-randint = _inst.randint
-choice = _inst.choice
-randrange = _inst.randrange
-sample = _inst.sample
-shuffle = _inst.shuffle
-normalvariate = _inst.normalvariate
-lognormvariate = _inst.lognormvariate
-expovariate = _inst.expovariate
-vonmisesvariate = _inst.vonmisesvariate
-gammavariate = _inst.gammavariate
-gauss = _inst.gauss
-betavariate = _inst.betavariate
-paretovariate = _inst.paretovariate
-weibullvariate = _inst.weibullvariate
-getstate = _inst.getstate
-setstate = _inst.setstate
-jumpahead = _inst.jumpahead
-getrandbits = _inst.getrandbits
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/re.py b/sys/lib/python/re.py
deleted file mode 100644
index a33e34e4e..000000000
--- a/sys/lib/python/re.py
+++ /dev/null
@@ -1,315 +0,0 @@
-#
-# Secret Labs' Regular Expression Engine
-#
-# re-compatible interface for the sre matching engine
-#
-# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
-#
-# This version of the SRE library can be redistributed under CNRI's
-# Python 1.6 license. For any other use, please contact Secret Labs
-# AB (info@pythonware.com).
-#
-# Portions of this engine have been developed in cooperation with
-# CNRI. Hewlett-Packard provided funding for 1.6 integration and
-# other compatibility work.
-#
-
-r"""Support for regular expressions (RE).
-
-This module provides regular expression matching operations similar to
-those found in Perl. It supports both 8-bit and Unicode strings; both
-the pattern and the strings being processed can contain null bytes and
-characters outside the US ASCII range.
-
-Regular expressions can contain both special and ordinary characters.
-Most ordinary characters, like "A", "a", or "0", are the simplest
-regular expressions; they simply match themselves. You can
-concatenate ordinary characters, so last matches the string 'last'.
-
-The special characters are:
- "." Matches any character except a newline.
- "^" Matches the start of the string.
- "$" Matches the end of the string.
- "*" Matches 0 or more (greedy) repetitions of the preceding RE.
- Greedy means that it will match as many repetitions as possible.
- "+" Matches 1 or more (greedy) repetitions of the preceding RE.
- "?" Matches 0 or 1 (greedy) of the preceding RE.
- *?,+?,?? Non-greedy versions of the previous three special characters.
- {m,n} Matches from m to n repetitions of the preceding RE.
- {m,n}? Non-greedy version of the above.
- "\\" Either escapes special characters or signals a special sequence.
- [] Indicates a set of characters.
- A "^" as the first character indicates a complementing set.
- "|" A|B, creates an RE that will match either A or B.
- (...) Matches the RE inside the parentheses.
- The contents can be retrieved or matched later in the string.
- (?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
- (?:...) Non-grouping version of regular parentheses.
- (?P<name>...) The substring matched by the group is accessible by name.
- (?P=name) Matches the text matched earlier by the group named name.
- (?#...) A comment; ignored.
- (?=...) Matches if ... matches next, but doesn't consume the string.
- (?!...) Matches if ... doesn't match next.
-
-The special sequences consist of "\\" and a character from the list
-below. If the ordinary character is not on the list, then the
-resulting RE will match the second character.
- \number Matches the contents of the group of the same number.
- \A Matches only at the start of the string.
- \Z Matches only at the end of the string.
- \b Matches the empty string, but only at the start or end of a word.
- \B Matches the empty string, but not at the start or end of a word.
- \d Matches any decimal digit; equivalent to the set [0-9].
- \D Matches any non-digit character; equivalent to the set [^0-9].
- \s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
- \S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
- \w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
- With LOCALE, it will match the set [0-9_] plus characters defined
- as letters for the current locale.
- \W Matches the complement of \w.
- \\ Matches a literal backslash.
-
-This module exports the following functions:
- match Match a regular expression pattern to the beginning of a string.
- search Search a string for the presence of a pattern.
- sub Substitute occurrences of a pattern found in a string.
- subn Same as sub, but also return the number of substitutions made.
- split Split a string by the occurrences of a pattern.
- findall Find all occurrences of a pattern in a string.
- compile Compile a pattern into a RegexObject.
- purge Clear the regular expression cache.
- escape Backslash all non-alphanumerics in a string.
-
-Some of the functions in this module takes flags as optional parameters:
- I IGNORECASE Perform case-insensitive matching.
- L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
- M MULTILINE "^" matches the beginning of lines as well as the string.
- "$" matches the end of lines as well as the string.
- S DOTALL "." matches any character at all, including the newline.
- X VERBOSE Ignore whitespace and comments for nicer looking RE's.
- U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
-
-This module also defines an exception 'error'.
-
-"""
-
-import sys
-import sre_compile
-import sre_parse
-
-# public symbols
-__all__ = [ "match", "search", "sub", "subn", "split", "findall",
- "compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
- "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
- "UNICODE", "error" ]
-
-__version__ = "2.2.1"
-
-# flags
-I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
-L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
-U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
-M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
-S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
-X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments
-
-# sre extensions (experimental, don't rely on these)
-T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
-DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation
-
-# sre exception
-error = sre_compile.error
-
-# --------------------------------------------------------------------
-# public interface
-
-def match(pattern, string, flags=0):
- """Try to apply the pattern at the start of the string, returning
- a match object, or None if no match was found."""
- return _compile(pattern, flags).match(string)
-
-def search(pattern, string, flags=0):
- """Scan through string looking for a match to the pattern, returning
- a match object, or None if no match was found."""
- return _compile(pattern, flags).search(string)
-
-def sub(pattern, repl, string, count=0):
- """Return the string obtained by replacing the leftmost
- non-overlapping occurrences of the pattern in string by the
- replacement repl. repl can be either a string or a callable;
- if a callable, it's passed the match object and must return
- a replacement string to be used."""
- return _compile(pattern, 0).sub(repl, string, count)
-
-def subn(pattern, repl, string, count=0):
- """Return a 2-tuple containing (new_string, number).
- new_string is the string obtained by replacing the leftmost
- non-overlapping occurrences of the pattern in the source
- string by the replacement repl. number is the number of
- substitutions that were made. repl can be either a string or a
- callable; if a callable, it's passed the match object and must
- return a replacement string to be used."""
- return _compile(pattern, 0).subn(repl, string, count)
-
-def split(pattern, string, maxsplit=0):
- """Split the source string by the occurrences of the pattern,
- returning a list containing the resulting substrings."""
- return _compile(pattern, 0).split(string, maxsplit)
-
-def findall(pattern, string, flags=0):
- """Return a list of all non-overlapping matches in the string.
-
- If one or more groups are present in the pattern, return a
- list of groups; this will be a list of tuples if the pattern
- has more than one group.
-
- Empty matches are included in the result."""
- return _compile(pattern, flags).findall(string)
-
-if sys.hexversion >= 0x02020000:
- __all__.append("finditer")
- def finditer(pattern, string, flags=0):
- """Return an iterator over all non-overlapping matches in the
- string. For each match, the iterator returns a match object.
-
- Empty matches are included in the result."""
- return _compile(pattern, flags).finditer(string)
-
-def compile(pattern, flags=0):
- "Compile a regular expression pattern, returning a pattern object."
- return _compile(pattern, flags)
-
-def purge():
- "Clear the regular expression cache"
- _cache.clear()
- _cache_repl.clear()
-
-def template(pattern, flags=0):
- "Compile a template pattern, returning a pattern object"
- return _compile(pattern, flags|T)
-
-_alphanum = {}
-for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890':
- _alphanum[c] = 1
-del c
-
-def escape(pattern):
- "Escape all non-alphanumeric characters in pattern."
- s = list(pattern)
- alphanum = _alphanum
- for i in range(len(pattern)):
- c = pattern[i]
- if c not in alphanum:
- if c == "\000":
- s[i] = "\\000"
- else:
- s[i] = "\\" + c
- return pattern[:0].join(s)
-
-# --------------------------------------------------------------------
-# internals
-
-_cache = {}
-_cache_repl = {}
-
-_pattern_type = type(sre_compile.compile("", 0))
-
-_MAXCACHE = 100
-
-def _compile(*key):
- # internal: compile pattern
- cachekey = (type(key[0]),) + key
- p = _cache.get(cachekey)
- if p is not None:
- return p
- pattern, flags = key
- if isinstance(pattern, _pattern_type):
- return pattern
- if not sre_compile.isstring(pattern):
- raise TypeError, "first argument must be string or compiled pattern"
- try:
- p = sre_compile.compile(pattern, flags)
- except error, v:
- raise error, v # invalid expression
- if len(_cache) >= _MAXCACHE:
- _cache.clear()
- _cache[cachekey] = p
- return p
-
-def _compile_repl(*key):
- # internal: compile replacement pattern
- p = _cache_repl.get(key)
- if p is not None:
- return p
- repl, pattern = key
- try:
- p = sre_parse.parse_template(repl, pattern)
- except error, v:
- raise error, v # invalid expression
- if len(_cache_repl) >= _MAXCACHE:
- _cache_repl.clear()
- _cache_repl[key] = p
- return p
-
-def _expand(pattern, match, template):
- # internal: match.expand implementation hook
- template = sre_parse.parse_template(template, pattern)
- return sre_parse.expand_template(template, match)
-
-def _subx(pattern, template):
- # internal: pattern.sub/subn implementation helper
- template = _compile_repl(template, pattern)
- if not template[0] and len(template[1]) == 1:
- # literal replacement
- return template[1][0]
- def filter(match, template=template):
- return sre_parse.expand_template(template, match)
- return filter
-
-# register myself for pickling
-
-import copy_reg
-
-def _pickle(p):
- return _compile, (p.pattern, p.flags)
-
-copy_reg.pickle(_pattern_type, _pickle, _compile)
-
-# --------------------------------------------------------------------
-# experimental stuff (see python-dev discussions for details)
-
-class Scanner:
- def __init__(self, lexicon, flags=0):
- from sre_constants import BRANCH, SUBPATTERN
- self.lexicon = lexicon
- # combine phrases into a compound pattern
- p = []
- s = sre_parse.Pattern()
- s.flags = flags
- for phrase, action in lexicon:
- p.append(sre_parse.SubPattern(s, [
- (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
- ]))
- p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
- s.groups = len(p)
- self.scanner = sre_compile.compile(p)
- def scan(self, string):
- result = []
- append = result.append
- match = self.scanner.scanner(string).match
- i = 0
- while 1:
- m = match()
- if not m:
- break
- j = m.end()
- if i == j:
- break
- action = self.lexicon[m.lastindex-1][1]
- if callable(action):
- self.match = m
- action = action(self, m.group())
- if action is not None:
- append(action)
- i = j
- return result, string[i:]
diff --git a/sys/lib/python/repr.py b/sys/lib/python/repr.py
deleted file mode 100644
index 53b5207e5..000000000
--- a/sys/lib/python/repr.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""Redo the `...` (representation) but with limits on most sizes."""
-
-__all__ = ["Repr","repr"]
-
-import __builtin__
-from itertools import islice
-
-class Repr:
-
- def __init__(self):
- self.maxlevel = 6
- self.maxtuple = 6
- self.maxlist = 6
- self.maxarray = 5
- self.maxdict = 4
- self.maxset = 6
- self.maxfrozenset = 6
- self.maxdeque = 6
- self.maxstring = 30
- self.maxlong = 40
- self.maxother = 20
-
- def repr(self, x):
- return self.repr1(x, self.maxlevel)
-
- def repr1(self, x, level):
- typename = type(x).__name__
- if ' ' in typename:
- parts = typename.split()
- typename = '_'.join(parts)
- if hasattr(self, 'repr_' + typename):
- return getattr(self, 'repr_' + typename)(x, level)
- else:
- s = __builtin__.repr(x)
- if len(s) > self.maxother:
- i = max(0, (self.maxother-3)//2)
- j = max(0, self.maxother-3-i)
- s = s[:i] + '...' + s[len(s)-j:]
- return s
-
- def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
- n = len(x)
- if level <= 0 and n:
- s = '...'
- else:
- newlevel = level - 1
- repr1 = self.repr1
- pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
- if n > maxiter: pieces.append('...')
- s = ', '.join(pieces)
- if n == 1 and trail: right = trail + right
- return '%s%s%s' % (left, s, right)
-
- def repr_tuple(self, x, level):
- return self._repr_iterable(x, level, '(', ')', self.maxlist, ',')
-
- def repr_list(self, x, level):
- return self._repr_iterable(x, level, '[', ']', self.maxlist)
-
- def repr_array(self, x, level):
- header = "array('%s', [" % x.typecode
- return self._repr_iterable(x, level, header, '])', self.maxarray)
-
- def repr_set(self, x, level):
- x = sorted(x)
- return self._repr_iterable(x, level, 'set([', '])', self.maxset)
-
- def repr_frozenset(self, x, level):
- x = sorted(x)
- return self._repr_iterable(x, level, 'frozenset([', '])',
- self.maxfrozenset)
-
- def repr_deque(self, x, level):
- return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
-
- def repr_dict(self, x, level):
- n = len(x)
- if n == 0: return '{}'
- if level <= 0: return '{...}'
- newlevel = level - 1
- repr1 = self.repr1
- pieces = []
- for key in islice(sorted(x), self.maxdict):
- keyrepr = repr1(key, newlevel)
- valrepr = repr1(x[key], newlevel)
- pieces.append('%s: %s' % (keyrepr, valrepr))
- if n > self.maxdict: pieces.append('...')
- s = ', '.join(pieces)
- return '{%s}' % (s,)
-
- def repr_str(self, x, level):
- s = __builtin__.repr(x[:self.maxstring])
- if len(s) > self.maxstring:
- i = max(0, (self.maxstring-3)//2)
- j = max(0, self.maxstring-3-i)
- s = __builtin__.repr(x[:i] + x[len(x)-j:])
- s = s[:i] + '...' + s[len(s)-j:]
- return s
-
- def repr_long(self, x, level):
- s = __builtin__.repr(x) # XXX Hope this isn't too slow...
- if len(s) > self.maxlong:
- i = max(0, (self.maxlong-3)//2)
- j = max(0, self.maxlong-3-i)
- s = s[:i] + '...' + s[len(s)-j:]
- return s
-
- def repr_instance(self, x, level):
- try:
- s = __builtin__.repr(x)
- # Bugs in x.__repr__() can cause arbitrary
- # exceptions -- then make up something
- except:
- return '<%s instance at %x>' % (x.__class__.__name__, id(x))
- if len(s) > self.maxstring:
- i = max(0, (self.maxstring-3)//2)
- j = max(0, self.maxstring-3-i)
- s = s[:i] + '...' + s[len(s)-j:]
- return s
-
-aRepr = Repr()
-repr = aRepr.repr
diff --git a/sys/lib/python/rexec.py b/sys/lib/python/rexec.py
deleted file mode 100644
index d289d6aa1..000000000
--- a/sys/lib/python/rexec.py
+++ /dev/null
@@ -1,585 +0,0 @@
-"""Restricted execution facilities.
-
-The class RExec exports methods r_exec(), r_eval(), r_execfile(), and
-r_import(), which correspond roughly to the built-in operations
-exec, eval(), execfile() and import, but executing the code in an
-environment that only exposes those built-in operations that are
-deemed safe. To this end, a modest collection of 'fake' modules is
-created which mimics the standard modules by the same names. It is a
-policy decision which built-in modules and operations are made
-available; this module provides a reasonable default, but derived
-classes can change the policies e.g. by overriding or extending class
-variables like ok_builtin_modules or methods like make_sys().
-
-XXX To do:
-- r_open should allow writing tmp dir
-- r_exec etc. with explicit globals/locals? (Use rexec("exec ... in ...")?)
-
-"""
-
-
-import sys
-import __builtin__
-import os
-import ihooks
-import imp
-
-__all__ = ["RExec"]
-
-class FileBase:
-
- ok_file_methods = ('fileno', 'flush', 'isatty', 'read', 'readline',
- 'readlines', 'seek', 'tell', 'write', 'writelines', 'xreadlines',
- '__iter__')
-
-
-class FileWrapper(FileBase):
-
- # XXX This is just like a Bastion -- should use that!
-
- def __init__(self, f):
- for m in self.ok_file_methods:
- if not hasattr(self, m) and hasattr(f, m):
- setattr(self, m, getattr(f, m))
-
- def close(self):
- self.flush()
-
-
-TEMPLATE = """
-def %s(self, *args):
- return getattr(self.mod, self.name).%s(*args)
-"""
-
-class FileDelegate(FileBase):
-
- def __init__(self, mod, name):
- self.mod = mod
- self.name = name
-
- for m in FileBase.ok_file_methods + ('close',):
- exec TEMPLATE % (m, m)
-
-
-class RHooks(ihooks.Hooks):
-
- def __init__(self, *args):
- # Hacks to support both old and new interfaces:
- # old interface was RHooks(rexec[, verbose])
- # new interface is RHooks([verbose])
- verbose = 0
- rexec = None
- if args and type(args[-1]) == type(0):
- verbose = args[-1]
- args = args[:-1]
- if args and hasattr(args[0], '__class__'):
- rexec = args[0]
- args = args[1:]
- if args:
- raise TypeError, "too many arguments"
- ihooks.Hooks.__init__(self, verbose)
- self.rexec = rexec
-
- def set_rexec(self, rexec):
- # Called by RExec instance to complete initialization
- self.rexec = rexec
-
- def get_suffixes(self):
- return self.rexec.get_suffixes()
-
- def is_builtin(self, name):
- return self.rexec.is_builtin(name)
-
- def init_builtin(self, name):
- m = __import__(name)
- return self.rexec.copy_except(m, ())
-
- def init_frozen(self, name): raise SystemError, "don't use this"
- def load_source(self, *args): raise SystemError, "don't use this"
- def load_compiled(self, *args): raise SystemError, "don't use this"
- def load_package(self, *args): raise SystemError, "don't use this"
-
- def load_dynamic(self, name, filename, file):
- return self.rexec.load_dynamic(name, filename, file)
-
- def add_module(self, name):
- return self.rexec.add_module(name)
-
- def modules_dict(self):
- return self.rexec.modules
-
- def default_path(self):
- return self.rexec.modules['sys'].path
-
-
-# XXX Backwards compatibility
-RModuleLoader = ihooks.FancyModuleLoader
-RModuleImporter = ihooks.ModuleImporter
-
-
-class RExec(ihooks._Verbose):
- """Basic restricted execution framework.
-
- Code executed in this restricted environment will only have access to
- modules and functions that are deemed safe; you can subclass RExec to
- add or remove capabilities as desired.
-
- The RExec class can prevent code from performing unsafe operations like
- reading or writing disk files, or using TCP/IP sockets. However, it does
- not protect against code using extremely large amounts of memory or
- processor time.
-
- """
-
- ok_path = tuple(sys.path) # That's a policy decision
-
- ok_builtin_modules = ('audioop', 'array', 'binascii',
- 'cmath', 'errno', 'imageop',
- 'marshal', 'math', 'md5', 'operator',
- 'parser', 'select',
- 'sha', '_sre', 'strop', 'struct', 'time',
- '_weakref')
-
- ok_posix_names = ('error', 'fstat', 'listdir', 'lstat', 'readlink',
- 'stat', 'times', 'uname', 'getpid', 'getppid',
- 'getcwd', 'getuid', 'getgid', 'geteuid', 'getegid')
-
- ok_sys_names = ('byteorder', 'copyright', 'exit', 'getdefaultencoding',
- 'getrefcount', 'hexversion', 'maxint', 'maxunicode',
- 'platform', 'ps1', 'ps2', 'version', 'version_info')
-
- nok_builtin_names = ('open', 'file', 'reload', '__import__')
-
- ok_file_types = (imp.C_EXTENSION, imp.PY_SOURCE)
-
- def __init__(self, hooks = None, verbose = 0):
- """Returns an instance of the RExec class.
-
- The hooks parameter is an instance of the RHooks class or a subclass
- of it. If it is omitted or None, the default RHooks class is
- instantiated.
-
- Whenever the RExec module searches for a module (even a built-in one)
- or reads a module's code, it doesn't actually go out to the file
- system itself. Rather, it calls methods of an RHooks instance that
- was passed to or created by its constructor. (Actually, the RExec
- object doesn't make these calls --- they are made by a module loader
- object that's part of the RExec object. This allows another level of
- flexibility, which can be useful when changing the mechanics of
- import within the restricted environment.)
-
- By providing an alternate RHooks object, we can control the file
- system accesses made to import a module, without changing the
- actual algorithm that controls the order in which those accesses are
- made. For instance, we could substitute an RHooks object that
- passes all filesystem requests to a file server elsewhere, via some
- RPC mechanism such as ILU. Grail's applet loader uses this to support
- importing applets from a URL for a directory.
-
- If the verbose parameter is true, additional debugging output may be
- sent to standard output.
-
- """
-
- raise RuntimeError, "This code is not secure in Python 2.2 and 2.3"
-
- ihooks._Verbose.__init__(self, verbose)
- # XXX There's a circular reference here:
- self.hooks = hooks or RHooks(verbose)
- self.hooks.set_rexec(self)
- self.modules = {}
- self.ok_dynamic_modules = self.ok_builtin_modules
- list = []
- for mname in self.ok_builtin_modules:
- if mname in sys.builtin_module_names:
- list.append(mname)
- self.ok_builtin_modules = tuple(list)
- self.set_trusted_path()
- self.make_builtin()
- self.make_initial_modules()
- # make_sys must be last because it adds the already created
- # modules to its builtin_module_names
- self.make_sys()
- self.loader = RModuleLoader(self.hooks, verbose)
- self.importer = RModuleImporter(self.loader, verbose)
-
- def set_trusted_path(self):
- # Set the path from which dynamic modules may be loaded.
- # Those dynamic modules must also occur in ok_builtin_modules
- self.trusted_path = filter(os.path.isabs, sys.path)
-
- def load_dynamic(self, name, filename, file):
- if name not in self.ok_dynamic_modules:
- raise ImportError, "untrusted dynamic module: %s" % name
- if name in sys.modules:
- src = sys.modules[name]
- else:
- src = imp.load_dynamic(name, filename, file)
- dst = self.copy_except(src, [])
- return dst
-
- def make_initial_modules(self):
- self.make_main()
- self.make_osname()
-
- # Helpers for RHooks
-
- def get_suffixes(self):
- return [item # (suff, mode, type)
- for item in imp.get_suffixes()
- if item[2] in self.ok_file_types]
-
- def is_builtin(self, mname):
- return mname in self.ok_builtin_modules
-
- # The make_* methods create specific built-in modules
-
- def make_builtin(self):
- m = self.copy_except(__builtin__, self.nok_builtin_names)
- m.__import__ = self.r_import
- m.reload = self.r_reload
- m.open = m.file = self.r_open
-
- def make_main(self):
- m = self.add_module('__main__')
-
- def make_osname(self):
- osname = os.name
- src = __import__(osname)
- dst = self.copy_only(src, self.ok_posix_names)
- dst.environ = e = {}
- for key, value in os.environ.items():
- e[key] = value
-
- def make_sys(self):
- m = self.copy_only(sys, self.ok_sys_names)
- m.modules = self.modules
- m.argv = ['RESTRICTED']
- m.path = map(None, self.ok_path)
- m.exc_info = self.r_exc_info
- m = self.modules['sys']
- l = self.modules.keys() + list(self.ok_builtin_modules)
- l.sort()
- m.builtin_module_names = tuple(l)
-
- # The copy_* methods copy existing modules with some changes
-
- def copy_except(self, src, exceptions):
- dst = self.copy_none(src)
- for name in dir(src):
- setattr(dst, name, getattr(src, name))
- for name in exceptions:
- try:
- delattr(dst, name)
- except AttributeError:
- pass
- return dst
-
- def copy_only(self, src, names):
- dst = self.copy_none(src)
- for name in names:
- try:
- value = getattr(src, name)
- except AttributeError:
- continue
- setattr(dst, name, value)
- return dst
-
- def copy_none(self, src):
- m = self.add_module(src.__name__)
- m.__doc__ = src.__doc__
- return m
-
- # Add a module -- return an existing module or create one
-
- def add_module(self, mname):
- m = self.modules.get(mname)
- if m is None:
- self.modules[mname] = m = self.hooks.new_module(mname)
- m.__builtins__ = self.modules['__builtin__']
- return m
-
- # The r* methods are public interfaces
-
- def r_exec(self, code):
- """Execute code within a restricted environment.
-
- The code parameter must either be a string containing one or more
- lines of Python code, or a compiled code object, which will be
- executed in the restricted environment's __main__ module.
-
- """
- m = self.add_module('__main__')
- exec code in m.__dict__
-
- def r_eval(self, code):
- """Evaluate code within a restricted environment.
-
- The code parameter must either be a string containing a Python
- expression, or a compiled code object, which will be evaluated in
- the restricted environment's __main__ module. The value of the
- expression or code object will be returned.
-
- """
- m = self.add_module('__main__')
- return eval(code, m.__dict__)
-
- def r_execfile(self, file):
- """Execute the Python code in the file in the restricted
- environment's __main__ module.
-
- """
- m = self.add_module('__main__')
- execfile(file, m.__dict__)
-
- def r_import(self, mname, globals={}, locals={}, fromlist=[]):
- """Import a module, raising an ImportError exception if the module
- is considered unsafe.
-
- This method is implicitly called by code executing in the
- restricted environment. Overriding this method in a subclass is
- used to change the policies enforced by a restricted environment.
-
- """
- return self.importer.import_module(mname, globals, locals, fromlist)
-
- def r_reload(self, m):
- """Reload the module object, re-parsing and re-initializing it.
-
- This method is implicitly called by code executing in the
- restricted environment. Overriding this method in a subclass is
- used to change the policies enforced by a restricted environment.
-
- """
- return self.importer.reload(m)
-
- def r_unload(self, m):
- """Unload the module.
-
- Removes it from the restricted environment's sys.modules dictionary.
-
- This method is implicitly called by code executing in the
- restricted environment. Overriding this method in a subclass is
- used to change the policies enforced by a restricted environment.
-
- """
- return self.importer.unload(m)
-
- # The s_* methods are similar but also swap std{in,out,err}
-
- def make_delegate_files(self):
- s = self.modules['sys']
- self.delegate_stdin = FileDelegate(s, 'stdin')
- self.delegate_stdout = FileDelegate(s, 'stdout')
- self.delegate_stderr = FileDelegate(s, 'stderr')
- self.restricted_stdin = FileWrapper(sys.stdin)
- self.restricted_stdout = FileWrapper(sys.stdout)
- self.restricted_stderr = FileWrapper(sys.stderr)
-
- def set_files(self):
- if not hasattr(self, 'save_stdin'):
- self.save_files()
- if not hasattr(self, 'delegate_stdin'):
- self.make_delegate_files()
- s = self.modules['sys']
- s.stdin = self.restricted_stdin
- s.stdout = self.restricted_stdout
- s.stderr = self.restricted_stderr
- sys.stdin = self.delegate_stdin
- sys.stdout = self.delegate_stdout
- sys.stderr = self.delegate_stderr
-
- def reset_files(self):
- self.restore_files()
- s = self.modules['sys']
- self.restricted_stdin = s.stdin
- self.restricted_stdout = s.stdout
- self.restricted_stderr = s.stderr
-
-
- def save_files(self):
- self.save_stdin = sys.stdin
- self.save_stdout = sys.stdout
- self.save_stderr = sys.stderr
-
- def restore_files(self):
- sys.stdin = self.save_stdin
- sys.stdout = self.save_stdout
- sys.stderr = self.save_stderr
-
- def s_apply(self, func, args=(), kw={}):
- self.save_files()
- try:
- self.set_files()
- r = func(*args, **kw)
- finally:
- self.restore_files()
- return r
-
- def s_exec(self, *args):
- """Execute code within a restricted environment.
-
- Similar to the r_exec() method, but the code will be granted access
- to restricted versions of the standard I/O streams sys.stdin,
- sys.stderr, and sys.stdout.
-
- The code parameter must either be a string containing one or more
- lines of Python code, or a compiled code object, which will be
- executed in the restricted environment's __main__ module.
-
- """
- return self.s_apply(self.r_exec, args)
-
- def s_eval(self, *args):
- """Evaluate code within a restricted environment.
-
- Similar to the r_eval() method, but the code will be granted access
- to restricted versions of the standard I/O streams sys.stdin,
- sys.stderr, and sys.stdout.
-
- The code parameter must either be a string containing a Python
- expression, or a compiled code object, which will be evaluated in
- the restricted environment's __main__ module. The value of the
- expression or code object will be returned.
-
- """
- return self.s_apply(self.r_eval, args)
-
- def s_execfile(self, *args):
- """Execute the Python code in the file in the restricted
- environment's __main__ module.
-
- Similar to the r_execfile() method, but the code will be granted
- access to restricted versions of the standard I/O streams sys.stdin,
- sys.stderr, and sys.stdout.
-
- """
- return self.s_apply(self.r_execfile, args)
-
- def s_import(self, *args):
- """Import a module, raising an ImportError exception if the module
- is considered unsafe.
-
- This method is implicitly called by code executing in the
- restricted environment. Overriding this method in a subclass is
- used to change the policies enforced by a restricted environment.
-
- Similar to the r_import() method, but has access to restricted
- versions of the standard I/O streams sys.stdin, sys.stderr, and
- sys.stdout.
-
- """
- return self.s_apply(self.r_import, args)
-
- def s_reload(self, *args):
- """Reload the module object, re-parsing and re-initializing it.
-
- This method is implicitly called by code executing in the
- restricted environment. Overriding this method in a subclass is
- used to change the policies enforced by a restricted environment.
-
- Similar to the r_reload() method, but has access to restricted
- versions of the standard I/O streams sys.stdin, sys.stderr, and
- sys.stdout.
-
- """
- return self.s_apply(self.r_reload, args)
-
- def s_unload(self, *args):
- """Unload the module.
-
- Removes it from the restricted environment's sys.modules dictionary.
-
- This method is implicitly called by code executing in the
- restricted environment. Overriding this method in a subclass is
- used to change the policies enforced by a restricted environment.
-
- Similar to the r_unload() method, but has access to restricted
- versions of the standard I/O streams sys.stdin, sys.stderr, and
- sys.stdout.
-
- """
- return self.s_apply(self.r_unload, args)
-
- # Restricted open(...)
-
- def r_open(self, file, mode='r', buf=-1):
- """Method called when open() is called in the restricted environment.
-
- The arguments are identical to those of the open() function, and a
- file object (or a class instance compatible with file objects)
- should be returned. RExec's default behaviour is to allow opening
- any file for reading, but forbidding any attempt to write a file.
-
- This method is implicitly called by code executing in the
- restricted environment. Overriding this method in a subclass is
- used to change the policies enforced by a restricted environment.
-
- """
- mode = str(mode)
- if mode not in ('r', 'rb'):
- raise IOError, "can't open files for writing in restricted mode"
- return open(file, mode, buf)
-
- # Restricted version of sys.exc_info()
-
- def r_exc_info(self):
- ty, va, tr = sys.exc_info()
- tr = None
- return ty, va, tr
-
-
-def test():
- import getopt, traceback
- opts, args = getopt.getopt(sys.argv[1:], 'vt:')
- verbose = 0
- trusted = []
- for o, a in opts:
- if o == '-v':
- verbose = verbose+1
- if o == '-t':
- trusted.append(a)
- r = RExec(verbose=verbose)
- if trusted:
- r.ok_builtin_modules = r.ok_builtin_modules + tuple(trusted)
- if args:
- r.modules['sys'].argv = args
- r.modules['sys'].path.insert(0, os.path.dirname(args[0]))
- else:
- r.modules['sys'].path.insert(0, "")
- fp = sys.stdin
- if args and args[0] != '-':
- try:
- fp = open(args[0])
- except IOError, msg:
- print "%s: can't open file %r" % (sys.argv[0], args[0])
- return 1
- if fp.isatty():
- try:
- import readline
- except ImportError:
- pass
- import code
- class RestrictedConsole(code.InteractiveConsole):
- def runcode(self, co):
- self.locals['__builtins__'] = r.modules['__builtin__']
- r.s_apply(code.InteractiveConsole.runcode, (self, co))
- try:
- RestrictedConsole(r.modules['__main__'].__dict__).interact()
- except SystemExit, n:
- return n
- else:
- text = fp.read()
- fp.close()
- c = compile(text, fp.name, 'exec')
- try:
- r.s_exec(c)
- except SystemExit, n:
- return n
- except:
- traceback.print_exc()
- return 1
-
-
-if __name__ == '__main__':
- sys.exit(test())
diff --git a/sys/lib/python/rfc822.py b/sys/lib/python/rfc822.py
deleted file mode 100644
index 14cc7297f..000000000
--- a/sys/lib/python/rfc822.py
+++ /dev/null
@@ -1,1007 +0,0 @@
-"""RFC 2822 message manipulation.
-
-Note: This is only a very rough sketch of a full RFC-822 parser; in particular
-the tokenizing of addresses does not adhere to all the quoting rules.
-
-Note: RFC 2822 is a long awaited update to RFC 822. This module should
-conform to RFC 2822, and is thus mis-named (it's not worth renaming it). Some
-effort at RFC 2822 updates have been made, but a thorough audit has not been
-performed. Consider any RFC 2822 non-conformance to be a bug.
-
- RFC 2822: http://www.faqs.org/rfcs/rfc2822.html
- RFC 822 : http://www.faqs.org/rfcs/rfc822.html (obsolete)
-
-Directions for use:
-
-To create a Message object: first open a file, e.g.:
-
- fp = open(file, 'r')
-
-You can use any other legal way of getting an open file object, e.g. use
-sys.stdin or call os.popen(). Then pass the open file object to the Message()
-constructor:
-
- m = Message(fp)
-
-This class can work with any input object that supports a readline method. If
-the input object has seek and tell capability, the rewindbody method will
-work; also illegal lines will be pushed back onto the input stream. If the
-input object lacks seek but has an `unread' method that can push back a line
-of input, Message will use that to push back illegal lines. Thus this class
-can be used to parse messages coming from a buffered stream.
-
-The optional `seekable' argument is provided as a workaround for certain stdio
-libraries in which tell() discards buffered data before discovering that the
-lseek() system call doesn't work. For maximum portability, you should set the
-seekable argument to zero to prevent that initial \code{tell} when passing in
-an unseekable object such as a file object created from a socket object. If
-it is 1 on entry -- which it is by default -- the tell() method of the open
-file object is called once; if this raises an exception, seekable is reset to
-0. For other nonzero values of seekable, this test is not made.
-
-To get the text of a particular header there are several methods:
-
- str = m.getheader(name)
- str = m.getrawheader(name)
-
-where name is the name of the header, e.g. 'Subject'. The difference is that
-getheader() strips the leading and trailing whitespace, while getrawheader()
-doesn't. Both functions retain embedded whitespace (including newlines)
-exactly as they are specified in the header, and leave the case of the text
-unchanged.
-
-For addresses and address lists there are functions
-
- realname, mailaddress = m.getaddr(name)
- list = m.getaddrlist(name)
-
-where the latter returns a list of (realname, mailaddr) tuples.
-
-There is also a method
-
- time = m.getdate(name)
-
-which parses a Date-like field and returns a time-compatible tuple,
-i.e. a tuple such as returned by time.localtime() or accepted by
-time.mktime().
-
-See the class definition for lower level access methods.
-
-There are also some utility functions here.
-"""
-# Cleanup and extensions by Eric S. Raymond <esr@thyrsus.com>
-
-import time
-
-__all__ = ["Message","AddressList","parsedate","parsedate_tz","mktime_tz"]
-
-_blanklines = ('\r\n', '\n') # Optimization for islast()
-
-
-class Message:
- """Represents a single RFC 2822-compliant message."""
-
- def __init__(self, fp, seekable = 1):
- """Initialize the class instance and read the headers."""
- if seekable == 1:
- # Exercise tell() to make sure it works
- # (and then assume seek() works, too)
- try:
- fp.tell()
- except (AttributeError, IOError):
- seekable = 0
- self.fp = fp
- self.seekable = seekable
- self.startofheaders = None
- self.startofbody = None
- #
- if self.seekable:
- try:
- self.startofheaders = self.fp.tell()
- except IOError:
- self.seekable = 0
- #
- self.readheaders()
- #
- if self.seekable:
- try:
- self.startofbody = self.fp.tell()
- except IOError:
- self.seekable = 0
-
- def rewindbody(self):
- """Rewind the file to the start of the body (if seekable)."""
- if not self.seekable:
- raise IOError, "unseekable file"
- self.fp.seek(self.startofbody)
-
- def readheaders(self):
- """Read header lines.
-
- Read header lines up to the entirely blank line that terminates them.
- The (normally blank) line that ends the headers is skipped, but not
- included in the returned list. If a non-header line ends the headers,
- (which is an error), an attempt is made to backspace over it; it is
- never included in the returned list.
-
- The variable self.status is set to the empty string if all went well,
- otherwise it is an error message. The variable self.headers is a
- completely uninterpreted list of lines contained in the header (so
- printing them will reproduce the header exactly as it appears in the
- file).
- """
- self.dict = {}
- self.unixfrom = ''
- self.headers = lst = []
- self.status = ''
- headerseen = ""
- firstline = 1
- startofline = unread = tell = None
- if hasattr(self.fp, 'unread'):
- unread = self.fp.unread
- elif self.seekable:
- tell = self.fp.tell
- while 1:
- if tell:
- try:
- startofline = tell()
- except IOError:
- startofline = tell = None
- self.seekable = 0
- line = self.fp.readline()
- if not line:
- self.status = 'EOF in headers'
- break
- # Skip unix From name time lines
- if firstline and line.startswith('From '):
- self.unixfrom = self.unixfrom + line
- continue
- firstline = 0
- if headerseen and line[0] in ' \t':
- # It's a continuation line.
- lst.append(line)
- x = (self.dict[headerseen] + "\n " + line.strip())
- self.dict[headerseen] = x.strip()
- continue
- elif self.iscomment(line):
- # It's a comment. Ignore it.
- continue
- elif self.islast(line):
- # Note! No pushback here! The delimiter line gets eaten.
- break
- headerseen = self.isheader(line)
- if headerseen:
- # It's a legal header line, save it.
- lst.append(line)
- self.dict[headerseen] = line[len(headerseen)+1:].strip()
- continue
- else:
- # It's not a header line; throw it back and stop here.
- if not self.dict:
- self.status = 'No headers'
- else:
- self.status = 'Non-header line where header expected'
- # Try to undo the read.
- if unread:
- unread(line)
- elif tell:
- self.fp.seek(startofline)
- else:
- self.status = self.status + '; bad seek'
- break
-
- def isheader(self, line):
- """Determine whether a given line is a legal header.
-
- This method should return the header name, suitably canonicalized.
- You may override this method in order to use Message parsing on tagged
- data in RFC 2822-like formats with special header formats.
- """
- i = line.find(':')
- if i > 0:
- return line[:i].lower()
- return None
-
- def islast(self, line):
- """Determine whether a line is a legal end of RFC 2822 headers.
-
- You may override this method if your application wants to bend the
- rules, e.g. to strip trailing whitespace, or to recognize MH template
- separators ('--------'). For convenience (e.g. for code reading from
- sockets) a line consisting of \r\n also matches.
- """
- return line in _blanklines
-
- def iscomment(self, line):
- """Determine whether a line should be skipped entirely.
-
- You may override this method in order to use Message parsing on tagged
- data in RFC 2822-like formats that support embedded comments or
- free-text data.
- """
- return False
-
- def getallmatchingheaders(self, name):
- """Find all header lines matching a given header name.
-
- Look through the list of headers and find all lines matching a given
- header name (and their continuation lines). A list of the lines is
- returned, without interpretation. If the header does not occur, an
- empty list is returned. If the header occurs multiple times, all
- occurrences are returned. Case is not important in the header name.
- """
- name = name.lower() + ':'
- n = len(name)
- lst = []
- hit = 0
- for line in self.headers:
- if line[:n].lower() == name:
- hit = 1
- elif not line[:1].isspace():
- hit = 0
- if hit:
- lst.append(line)
- return lst
-
- def getfirstmatchingheader(self, name):
- """Get the first header line matching name.
-
- This is similar to getallmatchingheaders, but it returns only the
- first matching header (and its continuation lines).
- """
- name = name.lower() + ':'
- n = len(name)
- lst = []
- hit = 0
- for line in self.headers:
- if hit:
- if not line[:1].isspace():
- break
- elif line[:n].lower() == name:
- hit = 1
- if hit:
- lst.append(line)
- return lst
-
- def getrawheader(self, name):
- """A higher-level interface to getfirstmatchingheader().
-
- Return a string containing the literal text of the header but with the
- keyword stripped. All leading, trailing and embedded whitespace is
- kept in the string, however. Return None if the header does not
- occur.
- """
-
- lst = self.getfirstmatchingheader(name)
- if not lst:
- return None
- lst[0] = lst[0][len(name) + 1:]
- return ''.join(lst)
-
- def getheader(self, name, default=None):
- """Get the header value for a name.
-
- This is the normal interface: it returns a stripped version of the
- header value for a given header name, or None if it doesn't exist.
- This uses the dictionary version which finds the *last* such header.
- """
- return self.dict.get(name.lower(), default)
- get = getheader
-
- def getheaders(self, name):
- """Get all values for a header.
-
- This returns a list of values for headers given more than once; each
- value in the result list is stripped in the same way as the result of
- getheader(). If the header is not given, return an empty list.
- """
- result = []
- current = ''
- have_header = 0
- for s in self.getallmatchingheaders(name):
- if s[0].isspace():
- if current:
- current = "%s\n %s" % (current, s.strip())
- else:
- current = s.strip()
- else:
- if have_header:
- result.append(current)
- current = s[s.find(":") + 1:].strip()
- have_header = 1
- if have_header:
- result.append(current)
- return result
-
- def getaddr(self, name):
- """Get a single address from a header, as a tuple.
-
- An example return value:
- ('Guido van Rossum', 'guido@cwi.nl')
- """
- # New, by Ben Escoto
- alist = self.getaddrlist(name)
- if alist:
- return alist[0]
- else:
- return (None, None)
-
- def getaddrlist(self, name):
- """Get a list of addresses from a header.
-
- Retrieves a list of addresses from a header, where each address is a
- tuple as returned by getaddr(). Scans all named headers, so it works
- properly with multiple To: or Cc: headers for example.
- """
- raw = []
- for h in self.getallmatchingheaders(name):
- if h[0] in ' \t':
- raw.append(h)
- else:
- if raw:
- raw.append(', ')
- i = h.find(':')
- if i > 0:
- addr = h[i+1:]
- raw.append(addr)
- alladdrs = ''.join(raw)
- a = AddressList(alladdrs)
- return a.addresslist
-
- def getdate(self, name):
- """Retrieve a date field from a header.
-
- Retrieves a date field from the named header, returning a tuple
- compatible with time.mktime().
- """
- try:
- data = self[name]
- except KeyError:
- return None
- return parsedate(data)
-
- def getdate_tz(self, name):
- """Retrieve a date field from a header as a 10-tuple.
-
- The first 9 elements make up a tuple compatible with time.mktime(),
- and the 10th is the offset of the poster's time zone from GMT/UTC.
- """
- try:
- data = self[name]
- except KeyError:
- return None
- return parsedate_tz(data)
-
-
- # Access as a dictionary (only finds *last* header of each type):
-
- def __len__(self):
- """Get the number of headers in a message."""
- return len(self.dict)
-
- def __getitem__(self, name):
- """Get a specific header, as from a dictionary."""
- return self.dict[name.lower()]
-
- def __setitem__(self, name, value):
- """Set the value of a header.
-
- Note: This is not a perfect inversion of __getitem__, because any
- changed headers get stuck at the end of the raw-headers list rather
- than where the altered header was.
- """
- del self[name] # Won't fail if it doesn't exist
- self.dict[name.lower()] = value
- text = name + ": " + value
- for line in text.split("\n"):
- self.headers.append(line + "\n")
-
- def __delitem__(self, name):
- """Delete all occurrences of a specific header, if it is present."""
- name = name.lower()
- if not name in self.dict:
- return
- del self.dict[name]
- name = name + ':'
- n = len(name)
- lst = []
- hit = 0
- for i in range(len(self.headers)):
- line = self.headers[i]
- if line[:n].lower() == name:
- hit = 1
- elif not line[:1].isspace():
- hit = 0
- if hit:
- lst.append(i)
- for i in reversed(lst):
- del self.headers[i]
-
- def setdefault(self, name, default=""):
- lowername = name.lower()
- if lowername in self.dict:
- return self.dict[lowername]
- else:
- text = name + ": " + default
- for line in text.split("\n"):
- self.headers.append(line + "\n")
- self.dict[lowername] = default
- return default
-
- def has_key(self, name):
- """Determine whether a message contains the named header."""
- return name.lower() in self.dict
-
- def __contains__(self, name):
- """Determine whether a message contains the named header."""
- return name.lower() in self.dict
-
- def __iter__(self):
- return iter(self.dict)
-
- def keys(self):
- """Get all of a message's header field names."""
- return self.dict.keys()
-
- def values(self):
- """Get all of a message's header field values."""
- return self.dict.values()
-
- def items(self):
- """Get all of a message's headers.
-
- Returns a list of name, value tuples.
- """
- return self.dict.items()
-
- def __str__(self):
- return ''.join(self.headers)
-
-
-# Utility functions
-# -----------------
-
-# XXX Should fix unquote() and quote() to be really conformant.
-# XXX The inverses of the parse functions may also be useful.
-
-
-def unquote(s):
- """Remove quotes from a string."""
- if len(s) > 1:
- if s.startswith('"') and s.endswith('"'):
- return s[1:-1].replace('\\\\', '\\').replace('\\"', '"')
- if s.startswith('<') and s.endswith('>'):
- return s[1:-1]
- return s
-
-
-def quote(s):
- """Add quotes around a string."""
- return s.replace('\\', '\\\\').replace('"', '\\"')
-
-
-def parseaddr(address):
- """Parse an address into a (realname, mailaddr) tuple."""
- a = AddressList(address)
- lst = a.addresslist
- if not lst:
- return (None, None)
- return lst[0]
-
-
-class AddrlistClass:
- """Address parser class by Ben Escoto.
-
- To understand what this class does, it helps to have a copy of
- RFC 2822 in front of you.
-
- http://www.faqs.org/rfcs/rfc2822.html
-
- Note: this class interface is deprecated and may be removed in the future.
- Use rfc822.AddressList instead.
- """
-
- def __init__(self, field):
- """Initialize a new instance.
-
- `field' is an unparsed address header field, containing one or more
- addresses.
- """
- self.specials = '()<>@,:;.\"[]'
- self.pos = 0
- self.LWS = ' \t'
- self.CR = '\r\n'
- self.atomends = self.specials + self.LWS + self.CR
- # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
- # is obsolete syntax. RFC 2822 requires that we recognize obsolete
- # syntax, so allow dots in phrases.
- self.phraseends = self.atomends.replace('.', '')
- self.field = field
- self.commentlist = []
-
- def gotonext(self):
- """Parse up to the start of the next address."""
- while self.pos < len(self.field):
- if self.field[self.pos] in self.LWS + '\n\r':
- self.pos = self.pos + 1
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- else: break
-
- def getaddrlist(self):
- """Parse all addresses.
-
- Returns a list containing all of the addresses.
- """
- result = []
- ad = self.getaddress()
- while ad:
- result += ad
- ad = self.getaddress()
- return result
-
- def getaddress(self):
- """Parse the next address."""
- self.commentlist = []
- self.gotonext()
-
- oldpos = self.pos
- oldcl = self.commentlist
- plist = self.getphraselist()
-
- self.gotonext()
- returnlist = []
-
- if self.pos >= len(self.field):
- # Bad email address technically, no domain.
- if plist:
- returnlist = [(' '.join(self.commentlist), plist[0])]
-
- elif self.field[self.pos] in '.@':
- # email address is just an addrspec
- # this isn't very efficient since we start over
- self.pos = oldpos
- self.commentlist = oldcl
- addrspec = self.getaddrspec()
- returnlist = [(' '.join(self.commentlist), addrspec)]
-
- elif self.field[self.pos] == ':':
- # address is a group
- returnlist = []
-
- fieldlen = len(self.field)
- self.pos += 1
- while self.pos < len(self.field):
- self.gotonext()
- if self.pos < fieldlen and self.field[self.pos] == ';':
- self.pos += 1
- break
- returnlist = returnlist + self.getaddress()
-
- elif self.field[self.pos] == '<':
- # Address is a phrase then a route addr
- routeaddr = self.getrouteaddr()
-
- if self.commentlist:
- returnlist = [(' '.join(plist) + ' (' + \
- ' '.join(self.commentlist) + ')', routeaddr)]
- else: returnlist = [(' '.join(plist), routeaddr)]
-
- else:
- if plist:
- returnlist = [(' '.join(self.commentlist), plist[0])]
- elif self.field[self.pos] in self.specials:
- self.pos += 1
-
- self.gotonext()
- if self.pos < len(self.field) and self.field[self.pos] == ',':
- self.pos += 1
- return returnlist
-
- def getrouteaddr(self):
- """Parse a route address (Return-path value).
-
- This method just skips all the route stuff and returns the addrspec.
- """
- if self.field[self.pos] != '<':
- return
-
- expectroute = 0
- self.pos += 1
- self.gotonext()
- adlist = ""
- while self.pos < len(self.field):
- if expectroute:
- self.getdomain()
- expectroute = 0
- elif self.field[self.pos] == '>':
- self.pos += 1
- break
- elif self.field[self.pos] == '@':
- self.pos += 1
- expectroute = 1
- elif self.field[self.pos] == ':':
- self.pos += 1
- else:
- adlist = self.getaddrspec()
- self.pos += 1
- break
- self.gotonext()
-
- return adlist
-
- def getaddrspec(self):
- """Parse an RFC 2822 addr-spec."""
- aslist = []
-
- self.gotonext()
- while self.pos < len(self.field):
- if self.field[self.pos] == '.':
- aslist.append('.')
- self.pos += 1
- elif self.field[self.pos] == '"':
- aslist.append('"%s"' % self.getquote())
- elif self.field[self.pos] in self.atomends:
- break
- else: aslist.append(self.getatom())
- self.gotonext()
-
- if self.pos >= len(self.field) or self.field[self.pos] != '@':
- return ''.join(aslist)
-
- aslist.append('@')
- self.pos += 1
- self.gotonext()
- return ''.join(aslist) + self.getdomain()
-
- def getdomain(self):
- """Get the complete domain name from an address."""
- sdlist = []
- while self.pos < len(self.field):
- if self.field[self.pos] in self.LWS:
- self.pos += 1
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- elif self.field[self.pos] == '[':
- sdlist.append(self.getdomainliteral())
- elif self.field[self.pos] == '.':
- self.pos += 1
- sdlist.append('.')
- elif self.field[self.pos] in self.atomends:
- break
- else: sdlist.append(self.getatom())
- return ''.join(sdlist)
-
- def getdelimited(self, beginchar, endchars, allowcomments = 1):
- """Parse a header fragment delimited by special characters.
-
- `beginchar' is the start character for the fragment. If self is not
- looking at an instance of `beginchar' then getdelimited returns the
- empty string.
-
- `endchars' is a sequence of allowable end-delimiting characters.
- Parsing stops when one of these is encountered.
-
- If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
- within the parsed fragment.
- """
- if self.field[self.pos] != beginchar:
- return ''
-
- slist = ['']
- quote = 0
- self.pos += 1
- while self.pos < len(self.field):
- if quote == 1:
- slist.append(self.field[self.pos])
- quote = 0
- elif self.field[self.pos] in endchars:
- self.pos += 1
- break
- elif allowcomments and self.field[self.pos] == '(':
- slist.append(self.getcomment())
- continue # have already advanced pos from getcomment
- elif self.field[self.pos] == '\\':
- quote = 1
- else:
- slist.append(self.field[self.pos])
- self.pos += 1
-
- return ''.join(slist)
-
- def getquote(self):
- """Get a quote-delimited fragment from self's field."""
- return self.getdelimited('"', '"\r', 0)
-
- def getcomment(self):
- """Get a parenthesis-delimited fragment from self's field."""
- return self.getdelimited('(', ')\r', 1)
-
- def getdomainliteral(self):
- """Parse an RFC 2822 domain-literal."""
- return '[%s]' % self.getdelimited('[', ']\r', 0)
-
- def getatom(self, atomends=None):
- """Parse an RFC 2822 atom.
-
- Optional atomends specifies a different set of end token delimiters
- (the default is to use self.atomends). This is used e.g. in
- getphraselist() since phrase endings must not include the `.' (which
- is legal in phrases)."""
- atomlist = ['']
- if atomends is None:
- atomends = self.atomends
-
- while self.pos < len(self.field):
- if self.field[self.pos] in atomends:
- break
- else: atomlist.append(self.field[self.pos])
- self.pos += 1
-
- return ''.join(atomlist)
-
- def getphraselist(self):
- """Parse a sequence of RFC 2822 phrases.
-
- A phrase is a sequence of words, which are in turn either RFC 2822
- atoms or quoted-strings. Phrases are canonicalized by squeezing all
- runs of continuous whitespace into one space.
- """
- plist = []
-
- while self.pos < len(self.field):
- if self.field[self.pos] in self.LWS:
- self.pos += 1
- elif self.field[self.pos] == '"':
- plist.append(self.getquote())
- elif self.field[self.pos] == '(':
- self.commentlist.append(self.getcomment())
- elif self.field[self.pos] in self.phraseends:
- break
- else:
- plist.append(self.getatom(self.phraseends))
-
- return plist
-
-class AddressList(AddrlistClass):
- """An AddressList encapsulates a list of parsed RFC 2822 addresses."""
- def __init__(self, field):
- AddrlistClass.__init__(self, field)
- if field:
- self.addresslist = self.getaddrlist()
- else:
- self.addresslist = []
-
- def __len__(self):
- return len(self.addresslist)
-
- def __str__(self):
- return ", ".join(map(dump_address_pair, self.addresslist))
-
- def __add__(self, other):
- # Set union
- newaddr = AddressList(None)
- newaddr.addresslist = self.addresslist[:]
- for x in other.addresslist:
- if not x in self.addresslist:
- newaddr.addresslist.append(x)
- return newaddr
-
- def __iadd__(self, other):
- # Set union, in-place
- for x in other.addresslist:
- if not x in self.addresslist:
- self.addresslist.append(x)
- return self
-
- def __sub__(self, other):
- # Set difference
- newaddr = AddressList(None)
- for x in self.addresslist:
- if not x in other.addresslist:
- newaddr.addresslist.append(x)
- return newaddr
-
- def __isub__(self, other):
- # Set difference, in-place
- for x in other.addresslist:
- if x in self.addresslist:
- self.addresslist.remove(x)
- return self
-
- def __getitem__(self, index):
- # Make indexing, slices, and 'in' work
- return self.addresslist[index]
-
-def dump_address_pair(pair):
- """Dump a (name, address) pair in a canonicalized form."""
- if pair[0]:
- return '"' + pair[0] + '" <' + pair[1] + '>'
- else:
- return pair[1]
-
-# Parse a date field
-
-_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
- 'aug', 'sep', 'oct', 'nov', 'dec',
- 'january', 'february', 'march', 'april', 'may', 'june', 'july',
- 'august', 'september', 'october', 'november', 'december']
-_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
-
-# The timezone table does not include the military time zones defined
-# in RFC822, other than Z. According to RFC1123, the description in
-# RFC822 gets the signs wrong, so we can't rely on any such time
-# zones. RFC1123 recommends that numeric timezone indicators be used
-# instead of timezone names.
-
-_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
- 'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
- 'EST': -500, 'EDT': -400, # Eastern
- 'CST': -600, 'CDT': -500, # Central
- 'MST': -700, 'MDT': -600, # Mountain
- 'PST': -800, 'PDT': -700 # Pacific
- }
-
-
-def parsedate_tz(data):
- """Convert a date string to a time tuple.
-
- Accounts for military timezones.
- """
- if not data:
- return None
- data = data.split()
- if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
- # There's a dayname here. Skip it
- del data[0]
- else:
- # no space after the "weekday,"?
- i = data[0].rfind(',')
- if i >= 0:
- data[0] = data[0][i+1:]
- if len(data) == 3: # RFC 850 date, deprecated
- stuff = data[0].split('-')
- if len(stuff) == 3:
- data = stuff + data[1:]
- if len(data) == 4:
- s = data[3]
- i = s.find('+')
- if i > 0:
- data[3:] = [s[:i], s[i+1:]]
- else:
- data.append('') # Dummy tz
- if len(data) < 5:
- return None
- data = data[:5]
- [dd, mm, yy, tm, tz] = data
- mm = mm.lower()
- if not mm in _monthnames:
- dd, mm = mm, dd.lower()
- if not mm in _monthnames:
- return None
- mm = _monthnames.index(mm)+1
- if mm > 12: mm = mm - 12
- if dd[-1] == ',':
- dd = dd[:-1]
- i = yy.find(':')
- if i > 0:
- yy, tm = tm, yy
- if yy[-1] == ',':
- yy = yy[:-1]
- if not yy[0].isdigit():
- yy, tz = tz, yy
- if tm[-1] == ',':
- tm = tm[:-1]
- tm = tm.split(':')
- if len(tm) == 2:
- [thh, tmm] = tm
- tss = '0'
- elif len(tm) == 3:
- [thh, tmm, tss] = tm
- else:
- return None
- try:
- yy = int(yy)
- dd = int(dd)
- thh = int(thh)
- tmm = int(tmm)
- tss = int(tss)
- except ValueError:
- return None
- tzoffset = None
- tz = tz.upper()
- if tz in _timezones:
- tzoffset = _timezones[tz]
- else:
- try:
- tzoffset = int(tz)
- except ValueError:
- pass
- # Convert a timezone offset into seconds ; -0500 -> -18000
- if tzoffset:
- if tzoffset < 0:
- tzsign = -1
- tzoffset = -tzoffset
- else:
- tzsign = 1
- tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
- return (yy, mm, dd, thh, tmm, tss, 0, 1, 0, tzoffset)
-
-
-def parsedate(data):
- """Convert a time string to a time tuple."""
- t = parsedate_tz(data)
- if t is None:
- return t
- return t[:9]
-
-
-def mktime_tz(data):
- """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
- if data[9] is None:
- # No zone info, so localtime is better assumption than GMT
- return time.mktime(data[:8] + (-1,))
- else:
- t = time.mktime(data[:8] + (0,))
- return t - data[9] - time.timezone
-
-def formatdate(timeval=None):
- """Returns time format preferred for Internet standards.
-
- Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
-
- According to RFC 1123, day and month names must always be in
- English. If not for that, this code could use strftime(). It
- can't because strftime() honors the locale and could generated
- non-English names.
- """
- if timeval is None:
- timeval = time.time()
- timeval = time.gmtime(timeval)
- return "%s, %02d %s %04d %02d:%02d:%02d GMT" % (
- ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[timeval[6]],
- timeval[2],
- ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
- "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")[timeval[1]-1],
- timeval[0], timeval[3], timeval[4], timeval[5])
-
-
-# When used as script, run a small test program.
-# The first command line argument must be a filename containing one
-# message in RFC-822 format.
-
-if __name__ == '__main__':
- import sys, os
- file = os.path.join(os.environ['HOME'], 'Mail/inbox/1')
- if sys.argv[1:]: file = sys.argv[1]
- f = open(file, 'r')
- m = Message(f)
- print 'From:', m.getaddr('from')
- print 'To:', m.getaddrlist('to')
- print 'Subject:', m.getheader('subject')
- print 'Date:', m.getheader('date')
- date = m.getdate_tz('date')
- tz = date[-1]
- date = time.localtime(mktime_tz(date))
- if date:
- print 'ParsedDate:', time.asctime(date),
- hhmmss = tz
- hhmm, ss = divmod(hhmmss, 60)
- hh, mm = divmod(hhmm, 60)
- print "%+03d%02d" % (hh, mm),
- if ss: print ".%02d" % ss,
- print
- else:
- print 'ParsedDate:', None
- m.rewindbody()
- n = 0
- while f.readline():
- n += 1
- print 'Lines:', n
- print '-'*70
- print 'len =', len(m)
- if 'Date' in m: print 'Date =', m['Date']
- if 'X-Nonsense' in m: pass
- print 'keys =', m.keys()
- print 'values =', m.values()
- print 'items =', m.items()
diff --git a/sys/lib/python/rlcompleter.py b/sys/lib/python/rlcompleter.py
deleted file mode 100644
index dab0cb9ea..000000000
--- a/sys/lib/python/rlcompleter.py
+++ /dev/null
@@ -1,154 +0,0 @@
-"""Word completion for GNU readline 2.0.
-
-This requires the latest extension to the readline module. The completer
-completes keywords, built-ins and globals in a selectable namespace (which
-defaults to __main__); when completing NAME.NAME..., it evaluates (!) the
-expression up to the last dot and completes its attributes.
-
-It's very cool to do "import sys" type "sys.", hit the
-completion key (twice), and see the list of names defined by the
-sys module!
-
-Tip: to use the tab key as the completion key, call
-
- readline.parse_and_bind("tab: complete")
-
-Notes:
-
-- Exceptions raised by the completer function are *ignored* (and
-generally cause the completion to fail). This is a feature -- since
-readline sets the tty device in raw (or cbreak) mode, printing a
-traceback wouldn't work well without some complicated hoopla to save,
-reset and restore the tty state.
-
-- The evaluation of the NAME.NAME... form may cause arbitrary
-application defined code to be executed if an object with a
-__getattr__ hook is found. Since it is the responsibility of the
-application (or the user) to enable this feature, I consider this an
-acceptable risk. More complicated expressions (e.g. function calls or
-indexing operations) are *not* evaluated.
-
-- GNU readline is also used by the built-in functions input() and
-raw_input(), and thus these also benefit/suffer from the completer
-features. Clearly an interactive application can benefit by
-specifying its own completer function and using raw_input() for all
-its input.
-
-- When the original stdin is not a tty device, GNU readline is never
-used, and this module (and the readline module) are silently inactive.
-
-"""
-
-import __builtin__
-import __main__
-
-__all__ = ["Completer"]
-
-class Completer:
- def __init__(self, namespace = None):
- """Create a new completer for the command line.
-
- Completer([namespace]) -> completer instance.
-
- If unspecified, the default namespace where completions are performed
- is __main__ (technically, __main__.__dict__). Namespaces should be
- given as dictionaries.
-
- Completer instances should be used as the completion mechanism of
- readline via the set_completer() call:
-
- readline.set_completer(Completer(my_namespace).complete)
- """
-
- if namespace and not isinstance(namespace, dict):
- raise TypeError,'namespace must be a dictionary'
-
- # Don't bind to namespace quite yet, but flag whether the user wants a
- # specific namespace or to use __main__.__dict__. This will allow us
- # to bind to __main__.__dict__ at completion time, not now.
- if namespace is None:
- self.use_main_ns = 1
- else:
- self.use_main_ns = 0
- self.namespace = namespace
-
- def complete(self, text, state):
- """Return the next possible completion for 'text'.
-
- This is called successively with state == 0, 1, 2, ... until it
- returns None. The completion should begin with 'text'.
-
- """
- if self.use_main_ns:
- self.namespace = __main__.__dict__
-
- if state == 0:
- if "." in text:
- self.matches = self.attr_matches(text)
- else:
- self.matches = self.global_matches(text)
- try:
- return self.matches[state]
- except IndexError:
- return None
-
- def global_matches(self, text):
- """Compute matches when text is a simple name.
-
- Return a list of all keywords, built-in functions and names currently
- defined in self.namespace that match.
-
- """
- import keyword
- matches = []
- n = len(text)
- for list in [keyword.kwlist,
- __builtin__.__dict__,
- self.namespace]:
- for word in list:
- if word[:n] == text and word != "__builtins__":
- matches.append(word)
- return matches
-
- def attr_matches(self, text):
- """Compute matches when text contains a dot.
-
- Assuming the text is of the form NAME.NAME....[NAME], and is
- evaluatable in self.namespace, it will be evaluated and its attributes
- (as revealed by dir()) are used as possible completions. (For class
- instances, class members are also considered.)
-
- WARNING: this can still invoke arbitrary C code, if an object
- with a __getattr__ hook is evaluated.
-
- """
- import re
- m = re.match(r"(\w+(\.\w+)*)\.(\w*)", text)
- if not m:
- return
- expr, attr = m.group(1, 3)
- object = eval(expr, self.namespace)
- words = dir(object)
- if hasattr(object,'__class__'):
- words.append('__class__')
- words = words + get_class_members(object.__class__)
- matches = []
- n = len(attr)
- for word in words:
- if word[:n] == attr and word != "__builtins__":
- matches.append("%s.%s" % (expr, word))
- return matches
-
-def get_class_members(klass):
- ret = dir(klass)
- if hasattr(klass,'__bases__'):
- for base in klass.__bases__:
- ret = ret + get_class_members(base)
- return ret
-
-try:
- import readline
-except ImportError:
- pass
-else:
- readline.set_completer(Completer().complete)
diff --git a/sys/lib/python/robotparser.py b/sys/lib/python/robotparser.py
deleted file mode 100644
index 48ea06668..000000000
--- a/sys/lib/python/robotparser.py
+++ /dev/null
@@ -1,292 +0,0 @@
-""" robotparser.py
-
- Copyright (C) 2000 Bastian Kleineidam
-
- You can choose between two licenses when using this package:
- 1) GNU GPLv2
- 2) PSF license for Python 2.2
-
- The robots.txt Exclusion Protocol is implemented as specified in
- http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
-"""
-import urlparse,urllib
-
-__all__ = ["RobotFileParser"]
-
-debug = 0
-
-def _debug(msg):
- if debug: print msg
-
-
-class RobotFileParser:
- """ This class provides a set of methods to read, parse and answer
- questions about a single robots.txt file.
-
- """
-
- def __init__(self, url=''):
- self.entries = []
- self.default_entry = None
- self.disallow_all = False
- self.allow_all = False
- self.set_url(url)
- self.last_checked = 0
-
- def mtime(self):
- """Returns the time the robots.txt file was last fetched.
-
- This is useful for long-running web spiders that need to
- check for new robots.txt files periodically.
-
- """
- return self.last_checked
-
- def modified(self):
- """Sets the time the robots.txt file was last fetched to the
- current time.
-
- """
- import time
- self.last_checked = time.time()
-
- def set_url(self, url):
- """Sets the URL referring to a robots.txt file."""
- self.url = url
- self.host, self.path = urlparse.urlparse(url)[1:3]
-
- def read(self):
- """Reads the robots.txt URL and feeds it to the parser."""
- opener = URLopener()
- f = opener.open(self.url)
- lines = []
- line = f.readline()
- while line:
- lines.append(line.strip())
- line = f.readline()
- self.errcode = opener.errcode
- if self.errcode == 401 or self.errcode == 403:
- self.disallow_all = True
- _debug("disallow all")
- elif self.errcode >= 400:
- self.allow_all = True
- _debug("allow all")
- elif self.errcode == 200 and lines:
- _debug("parse lines")
- self.parse(lines)
-
- def _add_entry(self, entry):
- if "*" in entry.useragents:
- # the default entry is considered last
- self.default_entry = entry
- else:
- self.entries.append(entry)
-
- def parse(self, lines):
- """parse the input lines from a robots.txt file.
- We allow that a user-agent: line is not preceded by
- one or more blank lines."""
- state = 0
- linenumber = 0
- entry = Entry()
-
- for line in lines:
- linenumber = linenumber + 1
- if not line:
- if state==1:
- _debug("line %d: warning: you should insert"
- " allow: or disallow: directives below any"
- " user-agent: line" % linenumber)
- entry = Entry()
- state = 0
- elif state==2:
- self._add_entry(entry)
- entry = Entry()
- state = 0
- # remove optional comment and strip line
- i = line.find('#')
- if i>=0:
- line = line[:i]
- line = line.strip()
- if not line:
- continue
- line = line.split(':', 1)
- if len(line) == 2:
- line[0] = line[0].strip().lower()
- line[1] = urllib.unquote(line[1].strip())
- if line[0] == "user-agent":
- if state==2:
- _debug("line %d: warning: you should insert a blank"
- " line before any user-agent"
- " directive" % linenumber)
- self._add_entry(entry)
- entry = Entry()
- entry.useragents.append(line[1])
- state = 1
- elif line[0] == "disallow":
- if state==0:
- _debug("line %d: error: you must insert a user-agent:"
- " directive before this line" % linenumber)
- else:
- entry.rulelines.append(RuleLine(line[1], False))
- state = 2
- elif line[0] == "allow":
- if state==0:
- _debug("line %d: error: you must insert a user-agent:"
- " directive before this line" % linenumber)
- else:
- entry.rulelines.append(RuleLine(line[1], True))
- else:
- _debug("line %d: warning: unknown key %s" % (linenumber,
- line[0]))
- else:
- _debug("line %d: error: malformed line %s"%(linenumber, line))
- if state==2:
- self.entries.append(entry)
- _debug("Parsed rules:\n%s" % str(self))
-
-
- def can_fetch(self, useragent, url):
- """using the parsed robots.txt decide if useragent can fetch url"""
- _debug("Checking robots.txt allowance for:\n user agent: %s\n url: %s" %
- (useragent, url))
- if self.disallow_all:
- return False
- if self.allow_all:
- return True
- # search for given user agent matches
- # the first match counts
- url = urllib.quote(urlparse.urlparse(urllib.unquote(url))[2]) or "/"
- for entry in self.entries:
- if entry.applies_to(useragent):
- return entry.allowance(url)
- # try the default entry last
- if self.default_entry:
- return self.default_entry.allowance(url)
- # agent not found ==> access granted
- return True
-
-
- def __str__(self):
- ret = ""
- for entry in self.entries:
- ret = ret + str(entry) + "\n"
- return ret
-
-
-class RuleLine:
- """A rule line is a single "Allow:" (allowance==True) or "Disallow:"
- (allowance==False) followed by a path."""
- def __init__(self, path, allowance):
- if path == '' and not allowance:
- # an empty value means allow all
- allowance = True
- self.path = urllib.quote(path)
- self.allowance = allowance
-
- def applies_to(self, filename):
- return self.path=="*" or filename.startswith(self.path)
-
- def __str__(self):
- return (self.allowance and "Allow" or "Disallow")+": "+self.path
-
-
-class Entry:
- """An entry has one or more user-agents and zero or more rulelines"""
- def __init__(self):
- self.useragents = []
- self.rulelines = []
-
- def __str__(self):
- ret = ""
- for agent in self.useragents:
- ret = ret + "User-agent: "+agent+"\n"
- for line in self.rulelines:
- ret = ret + str(line) + "\n"
- return ret
-
- def applies_to(self, useragent):
- """check if this entry applies to the specified agent"""
- # split the name token and make it lower case
- useragent = useragent.split("/")[0].lower()
- for agent in self.useragents:
- if agent=='*':
- # we have the catch-all agent
- return True
- agent = agent.lower()
- if agent in useragent:
- return True
- return False
-
- def allowance(self, filename):
- """Preconditions:
- - our agent applies to this entry
- - filename is URL decoded"""
- for line in self.rulelines:
- _debug((filename, str(line), line.allowance))
- if line.applies_to(filename):
- return line.allowance
- return True
-
-class URLopener(urllib.FancyURLopener):
- def __init__(self, *args):
- urllib.FancyURLopener.__init__(self, *args)
- self.errcode = 200
-
- def http_error_default(self, url, fp, errcode, errmsg, headers):
- self.errcode = errcode
- return urllib.FancyURLopener.http_error_default(self, url, fp, errcode,
- errmsg, headers)
-
-def _check(a,b):
- if not b:
- ac = "access denied"
- else:
- ac = "access allowed"
- if a!=b:
- print "failed"
- else:
- print "ok (%s)" % ac
- print
-
-def _test():
- global debug
- rp = RobotFileParser()
- debug = 1
-
- # robots.txt that exists, gotten to by redirection
- rp.set_url('http://www.musi-cal.com/robots.txt')
- rp.read()
-
- # test for re.escape
- _check(rp.can_fetch('*', 'http://www.musi-cal.com/'), 1)
- # this should match the first rule, which is a disallow
- _check(rp.can_fetch('', 'http://www.musi-cal.com/'), 0)
- # various cherry pickers
- _check(rp.can_fetch('CherryPickerSE',
- 'http://www.musi-cal.com/cgi-bin/event-search'
- '?city=San+Francisco'), 0)
- _check(rp.can_fetch('CherryPickerSE/1.0',
- 'http://www.musi-cal.com/cgi-bin/event-search'
- '?city=San+Francisco'), 0)
- _check(rp.can_fetch('CherryPickerSE/1.5',
- 'http://www.musi-cal.com/cgi-bin/event-search'
- '?city=San+Francisco'), 0)
- # case sensitivity
- _check(rp.can_fetch('ExtractorPro', 'http://www.musi-cal.com/blubba'), 0)
- _check(rp.can_fetch('extractorpro', 'http://www.musi-cal.com/blubba'), 0)
- # substring test
- _check(rp.can_fetch('toolpak/1.1', 'http://www.musi-cal.com/blubba'), 0)
- # tests for catch-all * agent
- _check(rp.can_fetch('spam', 'http://www.musi-cal.com/search'), 0)
- _check(rp.can_fetch('spam', 'http://www.musi-cal.com/Musician/me'), 1)
- _check(rp.can_fetch('spam', 'http://www.musi-cal.com/'), 1)
- _check(rp.can_fetch('spam', 'http://www.musi-cal.com/'), 1)
-
- # robots.txt that does not exist
- rp.set_url('http://www.lycos.com/robots.txt')
- rp.read()
- _check(rp.can_fetch('Mozilla', 'http://www.lycos.com/search'), 1)
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/runpy.py b/sys/lib/python/runpy.py
deleted file mode 100755
index 8290dfea7..000000000
--- a/sys/lib/python/runpy.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""runpy.py - locating and running Python code using the module namespace
-
-Provides support for locating and running Python scripts using the Python
-module namespace instead of the native filesystem.
-
-This allows Python code to play nicely with non-filesystem based PEP 302
-importers when locating support scripts as well as when importing modules.
-"""
-# Written by Nick Coghlan <ncoghlan at gmail.com>
-# to implement PEP 338 (Executing Modules as Scripts)
-
-import sys
-import imp
-try:
- from imp import get_loader
-except ImportError:
- from pkgutil import get_loader
-
-__all__ = [
- "run_module",
-]
-
-
-def _run_code(code, run_globals, init_globals,
- mod_name, mod_fname, mod_loader):
- """Helper for _run_module_code"""
- if init_globals is not None:
- run_globals.update(init_globals)
- run_globals.update(__name__ = mod_name,
- __file__ = mod_fname,
- __loader__ = mod_loader)
- exec code in run_globals
- return run_globals
-
-def _run_module_code(code, init_globals=None,
- mod_name=None, mod_fname=None,
- mod_loader=None, alter_sys=False):
- """Helper for run_module"""
- # Set up the top level namespace dictionary
- if alter_sys:
- # Modify sys.argv[0] and sys.module[mod_name]
- temp_module = imp.new_module(mod_name)
- mod_globals = temp_module.__dict__
- saved_argv0 = sys.argv[0]
- restore_module = mod_name in sys.modules
- if restore_module:
- saved_module = sys.modules[mod_name]
- sys.argv[0] = mod_fname
- sys.modules[mod_name] = temp_module
- try:
- _run_code(code, mod_globals, init_globals,
- mod_name, mod_fname, mod_loader)
- finally:
- sys.argv[0] = saved_argv0
- if restore_module:
- sys.modules[mod_name] = saved_module
- else:
- del sys.modules[mod_name]
- # Copy the globals of the temporary module, as they
- # may be cleared when the temporary module goes away
- return mod_globals.copy()
- else:
- # Leave the sys module alone
- return _run_code(code, {}, init_globals,
- mod_name, mod_fname, mod_loader)
-
-
-# This helper is needed due to a missing component in the PEP 302
-# loader protocol (specifically, "get_filename" is non-standard)
-def _get_filename(loader, mod_name):
- try:
- get_filename = loader.get_filename
- except AttributeError:
- return None
- else:
- return get_filename(mod_name)
-
-
-def run_module(mod_name, init_globals=None,
- run_name=None, alter_sys=False):
- """Execute a module's code without importing it
-
- Returns the resulting top level namespace dictionary
- """
- loader = get_loader(mod_name)
- if loader is None:
- raise ImportError("No module named " + mod_name)
- code = loader.get_code(mod_name)
- if code is None:
- raise ImportError("No code object available for " + mod_name)
- filename = _get_filename(loader, mod_name)
- if run_name is None:
- run_name = mod_name
- return _run_module_code(code, init_globals, run_name,
- filename, loader, alter_sys)
-
-
-if __name__ == "__main__":
- # Run the module specified as the next command line argument
- if len(sys.argv) < 2:
- print >> sys.stderr, "No module specified for execution"
- else:
- del sys.argv[0] # Make the requested module sys.argv[0]
- run_module(sys.argv[0], run_name="__main__", alter_sys=True)
diff --git a/sys/lib/python/sched.py b/sys/lib/python/sched.py
deleted file mode 100644
index 7c3235e9f..000000000
--- a/sys/lib/python/sched.py
+++ /dev/null
@@ -1,117 +0,0 @@
-"""A generally useful event scheduler class.
-
-Each instance of this class manages its own queue.
-No multi-threading is implied; you are supposed to hack that
-yourself, or use a single instance per application.
-
-Each instance is parametrized with two functions, one that is
-supposed to return the current time, one that is supposed to
-implement a delay. You can implement real-time scheduling by
-substituting time and sleep from built-in module time, or you can
-implement simulated time by writing your own functions. This can
-also be used to integrate scheduling with STDWIN events; the delay
-function is allowed to modify the queue. Time can be expressed as
-integers or floating point numbers, as long as it is consistent.
-
-Events are specified by tuples (time, priority, action, argument).
-As in UNIX, lower priority numbers mean higher priority; in this
-way the queue can be maintained as a priority queue. Execution of the
-event means calling the action function, passing it the argument.
-Remember that in Python, multiple function arguments can be packed
-in a tuple. The action function may be an instance method so it
-has another way to reference private data (besides global variables).
-Parameterless functions or methods cannot be used, however.
-"""
-
-# XXX The timefunc and delayfunc should have been defined as methods
-# XXX so you can define new kinds of schedulers using subclassing
-# XXX instead of having to define a module or class just to hold
-# XXX the global state of your particular time and delay functions.
-
-import heapq
-
-__all__ = ["scheduler"]
-
-class scheduler:
- def __init__(self, timefunc, delayfunc):
- """Initialize a new instance, passing the time and delay
- functions"""
- self.queue = []
- self.timefunc = timefunc
- self.delayfunc = delayfunc
-
- def enterabs(self, time, priority, action, argument):
- """Enter a new event in the queue at an absolute time.
-
- Returns an ID for the event which can be used to remove it,
- if necessary.
-
- """
- event = time, priority, action, argument
- heapq.heappush(self.queue, event)
- return event # The ID
-
- def enter(self, delay, priority, action, argument):
- """A variant that specifies the time as a relative time.
-
- This is actually the more commonly used interface.
-
- """
- time = self.timefunc() + delay
- return self.enterabs(time, priority, action, argument)
-
- def cancel(self, event):
- """Remove an event from the queue.
-
- This must be presented the ID as returned by enter().
- If the event is not in the queue, this raises RuntimeError.
-
- """
- self.queue.remove(event)
- heapq.heapify(self.queue)
-
- def empty(self):
- """Check whether the queue is empty."""
- return not self.queue
-
- def run(self):
- """Execute events until the queue is empty.
-
- When there is a positive delay until the first event, the
- delay function is called and the event is left in the queue;
- otherwise, the event is removed from the queue and executed
- (its action function is called, passing it the argument). If
- the delay function returns prematurely, it is simply
- restarted.
-
- It is legal for both the delay function and the action
- function to to modify the queue or to raise an exception;
- exceptions are not caught but the scheduler's state remains
- well-defined so run() may be called again.
-
- A questionably hack is added to allow other threads to run:
- just after an event is executed, a delay of 0 is executed, to
- avoid monopolizing the CPU when other threads are also
- runnable.
-
- """
- # localize variable access to minimize overhead
- # and to improve thread safety
- q = self.queue
- delayfunc = self.delayfunc
- timefunc = self.timefunc
- pop = heapq.heappop
- while q:
- time, priority, action, argument = checked_event = q[0]
- now = timefunc()
- if now < time:
- delayfunc(time - now)
- else:
- event = pop(q)
- # Verify that the event was not removed or altered
- # by another thread after we last looked at q[0].
- if event is checked_event:
- void = action(*argument)
- delayfunc(0) # Let other threads run
- else:
- heapq.heappush(event)
diff --git a/sys/lib/python/sets.py b/sys/lib/python/sets.py
deleted file mode 100644
index 32a0dd64f..000000000
--- a/sys/lib/python/sets.py
+++ /dev/null
@@ -1,577 +0,0 @@
-"""Classes to represent arbitrary sets (including sets of sets).
-
-This module implements sets using dictionaries whose values are
-ignored. The usual operations (union, intersection, deletion, etc.)
-are provided as both methods and operators.
-
-Important: sets are not sequences! While they support 'x in s',
-'len(s)', and 'for x in s', none of those operations are unique for
-sequences; for example, mappings support all three as well. The
-characteristic operation for sequences is subscripting with small
-integers: s[i], for i in range(len(s)). Sets don't support
-subscripting at all. Also, sequences allow multiple occurrences and
-their elements have a definite order; sets on the other hand don't
-record multiple occurrences and don't remember the order of element
-insertion (which is why they don't support s[i]).
-
-The following classes are provided:
-
-BaseSet -- All the operations common to both mutable and immutable
- sets. This is an abstract class, not meant to be directly
- instantiated.
-
-Set -- Mutable sets, subclass of BaseSet; not hashable.
-
-ImmutableSet -- Immutable sets, subclass of BaseSet; hashable.
- An iterable argument is mandatory to create an ImmutableSet.
-
-_TemporarilyImmutableSet -- A wrapper around a Set, hashable,
- giving the same hash value as the immutable set equivalent
- would have. Do not use this class directly.
-
-Only hashable objects can be added to a Set. In particular, you cannot
-really add a Set as an element to another Set; if you try, what is
-actually added is an ImmutableSet built from it (it compares equal to
-the one you tried adding).
-
-When you ask if `x in y' where x is a Set and y is a Set or
-ImmutableSet, x is wrapped into a _TemporarilyImmutableSet z, and
-what's tested is actually `z in y'.
-
-"""
-
-# Code history:
-#
-# - Greg V. Wilson wrote the first version, using a different approach
-# to the mutable/immutable problem, and inheriting from dict.
-#
-# - Alex Martelli modified Greg's version to implement the current
-# Set/ImmutableSet approach, and make the data an attribute.
-#
-# - Guido van Rossum rewrote much of the code, made some API changes,
-# and cleaned up the docstrings.
-#
-# - Raymond Hettinger added a number of speedups and other
-# improvements.
-
-from __future__ import generators
-try:
- from itertools import ifilter, ifilterfalse
-except ImportError:
- # Code to make the module run under Py2.2
- def ifilter(predicate, iterable):
- if predicate is None:
- def predicate(x):
- return x
- for x in iterable:
- if predicate(x):
- yield x
- def ifilterfalse(predicate, iterable):
- if predicate is None:
- def predicate(x):
- return x
- for x in iterable:
- if not predicate(x):
- yield x
- try:
- True, False
- except NameError:
- True, False = (0==0, 0!=0)
-
-__all__ = ['BaseSet', 'Set', 'ImmutableSet']
-
-class BaseSet(object):
- """Common base class for mutable and immutable sets."""
-
- __slots__ = ['_data']
-
- # Constructor
-
- def __init__(self):
- """This is an abstract class."""
- # Don't call this from a concrete subclass!
- if self.__class__ is BaseSet:
- raise TypeError, ("BaseSet is an abstract class. "
- "Use Set or ImmutableSet.")
-
- # Standard protocols: __len__, __repr__, __str__, __iter__
-
- def __len__(self):
- """Return the number of elements of a set."""
- return len(self._data)
-
- def __repr__(self):
- """Return string representation of a set.
-
- This looks like 'Set([<list of elements>])'.
- """
- return self._repr()
-
- # __str__ is the same as __repr__
- __str__ = __repr__
-
- def _repr(self, sorted=False):
- elements = self._data.keys()
- if sorted:
- elements.sort()
- return '%s(%r)' % (self.__class__.__name__, elements)
-
- def __iter__(self):
- """Return an iterator over the elements or a set.
-
- This is the keys iterator for the underlying dict.
- """
- return self._data.iterkeys()
-
- # Three-way comparison is not supported. However, because __eq__ is
- # tried before __cmp__, if Set x == Set y, x.__eq__(y) returns True and
- # then cmp(x, y) returns 0 (Python doesn't actually call __cmp__ in this
- # case).
-
- def __cmp__(self, other):
- raise TypeError, "can't compare sets using cmp()"
-
- # Equality comparisons using the underlying dicts. Mixed-type comparisons
- # are allowed here, where Set == z for non-Set z always returns False,
- # and Set != z always True. This allows expressions like "x in y" to
- # give the expected result when y is a sequence of mixed types, not
- # raising a pointless TypeError just because y contains a Set, or x is
- # a Set and y contain's a non-set ("in" invokes only __eq__).
- # Subtle: it would be nicer if __eq__ and __ne__ could return
- # NotImplemented instead of True or False. Then the other comparand
- # would get a chance to determine the result, and if the other comparand
- # also returned NotImplemented then it would fall back to object address
- # comparison (which would always return False for __eq__ and always
- # True for __ne__). However, that doesn't work, because this type
- # *also* implements __cmp__: if, e.g., __eq__ returns NotImplemented,
- # Python tries __cmp__ next, and the __cmp__ here then raises TypeError.
-
- def __eq__(self, other):
- if isinstance(other, BaseSet):
- return self._data == other._data
- else:
- return False
-
- def __ne__(self, other):
- if isinstance(other, BaseSet):
- return self._data != other._data
- else:
- return True
-
- # Copying operations
-
- def copy(self):
- """Return a shallow copy of a set."""
- result = self.__class__()
- result._data.update(self._data)
- return result
-
- __copy__ = copy # For the copy module
-
- def __deepcopy__(self, memo):
- """Return a deep copy of a set; used by copy module."""
- # This pre-creates the result and inserts it in the memo
- # early, in case the deep copy recurses into another reference
- # to this same set. A set can't be an element of itself, but
- # it can certainly contain an object that has a reference to
- # itself.
- from copy import deepcopy
- result = self.__class__()
- memo[id(self)] = result
- data = result._data
- value = True
- for elt in self:
- data[deepcopy(elt, memo)] = value
- return result
-
- # Standard set operations: union, intersection, both differences.
- # Each has an operator version (e.g. __or__, invoked with |) and a
- # method version (e.g. union).
- # Subtle: Each pair requires distinct code so that the outcome is
- # correct when the type of other isn't suitable. For example, if
- # we did "union = __or__" instead, then Set().union(3) would return
- # NotImplemented instead of raising TypeError (albeit that *why* it
- # raises TypeError as-is is also a bit subtle).
-
- def __or__(self, other):
- """Return the union of two sets as a new set.
-
- (I.e. all elements that are in either set.)
- """
- if not isinstance(other, BaseSet):
- return NotImplemented
- return self.union(other)
-
- def union(self, other):
- """Return the union of two sets as a new set.
-
- (I.e. all elements that are in either set.)
- """
- result = self.__class__(self)
- result._update(other)
- return result
-
- def __and__(self, other):
- """Return the intersection of two sets as a new set.
-
- (I.e. all elements that are in both sets.)
- """
- if not isinstance(other, BaseSet):
- return NotImplemented
- return self.intersection(other)
-
- def intersection(self, other):
- """Return the intersection of two sets as a new set.
-
- (I.e. all elements that are in both sets.)
- """
- if not isinstance(other, BaseSet):
- other = Set(other)
- if len(self) <= len(other):
- little, big = self, other
- else:
- little, big = other, self
- common = ifilter(big._data.has_key, little)
- return self.__class__(common)
-
- def __xor__(self, other):
- """Return the symmetric difference of two sets as a new set.
-
- (I.e. all elements that are in exactly one of the sets.)
- """
- if not isinstance(other, BaseSet):
- return NotImplemented
- return self.symmetric_difference(other)
-
- def symmetric_difference(self, other):
- """Return the symmetric difference of two sets as a new set.
-
- (I.e. all elements that are in exactly one of the sets.)
- """
- result = self.__class__()
- data = result._data
- value = True
- selfdata = self._data
- try:
- otherdata = other._data
- except AttributeError:
- otherdata = Set(other)._data
- for elt in ifilterfalse(otherdata.has_key, selfdata):
- data[elt] = value
- for elt in ifilterfalse(selfdata.has_key, otherdata):
- data[elt] = value
- return result
-
- def __sub__(self, other):
- """Return the difference of two sets as a new Set.
-
- (I.e. all elements that are in this set and not in the other.)
- """
- if not isinstance(other, BaseSet):
- return NotImplemented
- return self.difference(other)
-
- def difference(self, other):
- """Return the difference of two sets as a new Set.
-
- (I.e. all elements that are in this set and not in the other.)
- """
- result = self.__class__()
- data = result._data
- try:
- otherdata = other._data
- except AttributeError:
- otherdata = Set(other)._data
- value = True
- for elt in ifilterfalse(otherdata.has_key, self):
- data[elt] = value
- return result
-
- # Membership test
-
- def __contains__(self, element):
- """Report whether an element is a member of a set.
-
- (Called in response to the expression `element in self'.)
- """
- try:
- return element in self._data
- except TypeError:
- transform = getattr(element, "__as_temporarily_immutable__", None)
- if transform is None:
- raise # re-raise the TypeError exception we caught
- return transform() in self._data
-
- # Subset and superset test
-
- def issubset(self, other):
- """Report whether another set contains this set."""
- self._binary_sanity_check(other)
- if len(self) > len(other): # Fast check for obvious cases
- return False
- for elt in ifilterfalse(other._data.has_key, self):
- return False
- return True
-
- def issuperset(self, other):
- """Report whether this set contains another set."""
- self._binary_sanity_check(other)
- if len(self) < len(other): # Fast check for obvious cases
- return False
- for elt in ifilterfalse(self._data.has_key, other):
- return False
- return True
-
- # Inequality comparisons using the is-subset relation.
- __le__ = issubset
- __ge__ = issuperset
-
- def __lt__(self, other):
- self._binary_sanity_check(other)
- return len(self) < len(other) and self.issubset(other)
-
- def __gt__(self, other):
- self._binary_sanity_check(other)
- return len(self) > len(other) and self.issuperset(other)
-
- # Assorted helpers
-
- def _binary_sanity_check(self, other):
- # Check that the other argument to a binary operation is also
- # a set, raising a TypeError otherwise.
- if not isinstance(other, BaseSet):
- raise TypeError, "Binary operation only permitted between sets"
-
- def _compute_hash(self):
- # Calculate hash code for a set by xor'ing the hash codes of
- # the elements. This ensures that the hash code does not depend
- # on the order in which elements are added to the set. This is
- # not called __hash__ because a BaseSet should not be hashable;
- # only an ImmutableSet is hashable.
- result = 0
- for elt in self:
- result ^= hash(elt)
- return result
-
- def _update(self, iterable):
- # The main loop for update() and the subclass __init__() methods.
- data = self._data
-
- # Use the fast update() method when a dictionary is available.
- if isinstance(iterable, BaseSet):
- data.update(iterable._data)
- return
-
- value = True
-
- if type(iterable) in (list, tuple, xrange):
- # Optimized: we know that __iter__() and next() can't
- # raise TypeError, so we can move 'try:' out of the loop.
- it = iter(iterable)
- while True:
- try:
- for element in it:
- data[element] = value
- return
- except TypeError:
- transform = getattr(element, "__as_immutable__", None)
- if transform is None:
- raise # re-raise the TypeError exception we caught
- data[transform()] = value
- else:
- # Safe: only catch TypeError where intended
- for element in iterable:
- try:
- data[element] = value
- except TypeError:
- transform = getattr(element, "__as_immutable__", None)
- if transform is None:
- raise # re-raise the TypeError exception we caught
- data[transform()] = value
-
-
-class ImmutableSet(BaseSet):
- """Immutable set class."""
-
- __slots__ = ['_hashcode']
-
- # BaseSet + hashing
-
- def __init__(self, iterable=None):
- """Construct an immutable set from an optional iterable."""
- self._hashcode = None
- self._data = {}
- if iterable is not None:
- self._update(iterable)
-
- def __hash__(self):
- if self._hashcode is None:
- self._hashcode = self._compute_hash()
- return self._hashcode
-
- def __getstate__(self):
- return self._data, self._hashcode
-
- def __setstate__(self, state):
- self._data, self._hashcode = state
-
-class Set(BaseSet):
- """ Mutable set class."""
-
- __slots__ = []
-
- # BaseSet + operations requiring mutability; no hashing
-
- def __init__(self, iterable=None):
- """Construct a set from an optional iterable."""
- self._data = {}
- if iterable is not None:
- self._update(iterable)
-
- def __getstate__(self):
- # getstate's results are ignored if it is not
- return self._data,
-
- def __setstate__(self, data):
- self._data, = data
-
- def __hash__(self):
- """A Set cannot be hashed."""
- # We inherit object.__hash__, so we must deny this explicitly
- raise TypeError, "Can't hash a Set, only an ImmutableSet."
-
- # In-place union, intersection, differences.
- # Subtle: The xyz_update() functions deliberately return None,
- # as do all mutating operations on built-in container types.
- # The __xyz__ spellings have to return self, though.
-
- def __ior__(self, other):
- """Update a set with the union of itself and another."""
- self._binary_sanity_check(other)
- self._data.update(other._data)
- return self
-
- def union_update(self, other):
- """Update a set with the union of itself and another."""
- self._update(other)
-
- def __iand__(self, other):
- """Update a set with the intersection of itself and another."""
- self._binary_sanity_check(other)
- self._data = (self & other)._data
- return self
-
- def intersection_update(self, other):
- """Update a set with the intersection of itself and another."""
- if isinstance(other, BaseSet):
- self &= other
- else:
- self._data = (self.intersection(other))._data
-
- def __ixor__(self, other):
- """Update a set with the symmetric difference of itself and another."""
- self._binary_sanity_check(other)
- self.symmetric_difference_update(other)
- return self
-
- def symmetric_difference_update(self, other):
- """Update a set with the symmetric difference of itself and another."""
- data = self._data
- value = True
- if not isinstance(other, BaseSet):
- other = Set(other)
- if self is other:
- self.clear()
- for elt in other:
- if elt in data:
- del data[elt]
- else:
- data[elt] = value
-
- def __isub__(self, other):
- """Remove all elements of another set from this set."""
- self._binary_sanity_check(other)
- self.difference_update(other)
- return self
-
- def difference_update(self, other):
- """Remove all elements of another set from this set."""
- data = self._data
- if not isinstance(other, BaseSet):
- other = Set(other)
- if self is other:
- self.clear()
- for elt in ifilter(data.has_key, other):
- del data[elt]
-
- # Python dict-like mass mutations: update, clear
-
- def update(self, iterable):
- """Add all values from an iterable (such as a list or file)."""
- self._update(iterable)
-
- def clear(self):
- """Remove all elements from this set."""
- self._data.clear()
-
- # Single-element mutations: add, remove, discard
-
- def add(self, element):
- """Add an element to a set.
-
- This has no effect if the element is already present.
- """
- try:
- self._data[element] = True
- except TypeError:
- transform = getattr(element, "__as_immutable__", None)
- if transform is None:
- raise # re-raise the TypeError exception we caught
- self._data[transform()] = True
-
- def remove(self, element):
- """Remove an element from a set; it must be a member.
-
- If the element is not a member, raise a KeyError.
- """
- try:
- del self._data[element]
- except TypeError:
- transform = getattr(element, "__as_temporarily_immutable__", None)
- if transform is None:
- raise # re-raise the TypeError exception we caught
- del self._data[transform()]
-
- def discard(self, element):
- """Remove an element from a set if it is a member.
-
- If the element is not a member, do nothing.
- """
- try:
- self.remove(element)
- except KeyError:
- pass
-
- def pop(self):
- """Remove and return an arbitrary set element."""
- return self._data.popitem()[0]
-
- def __as_immutable__(self):
- # Return a copy of self as an immutable set
- return ImmutableSet(self)
-
- def __as_temporarily_immutable__(self):
- # Return self wrapped in a temporarily immutable set
- return _TemporarilyImmutableSet(self)
-
-
-class _TemporarilyImmutableSet(BaseSet):
- # Wrap a mutable set as if it was temporarily immutable.
- # This only supplies hashing and equality comparisons.
-
- def __init__(self, set):
- self._set = set
- self._data = set._data # Needed by ImmutableSet.__eq__()
-
- def __hash__(self):
- return self._set._compute_hash()
diff --git a/sys/lib/python/sgmllib.py b/sys/lib/python/sgmllib.py
deleted file mode 100644
index 3ab57c230..000000000
--- a/sys/lib/python/sgmllib.py
+++ /dev/null
@@ -1,548 +0,0 @@
-"""A parser for SGML, using the derived class as a static DTD."""
-
-# XXX This only supports those SGML features used by HTML.
-
-# XXX There should be a way to distinguish between PCDATA (parsed
-# character data -- the normal case), RCDATA (replaceable character
-# data -- only char and entity references and end tags are special)
-# and CDATA (character data -- only end tags are special). RCDATA is
-# not supported at all.
-
-
-import markupbase
-import re
-
-__all__ = ["SGMLParser", "SGMLParseError"]
-
-# Regular expressions used for parsing
-
-interesting = re.compile('[&<]')
-incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
- '<([a-zA-Z][^<>]*|'
- '/([a-zA-Z][^<>]*)?|'
- '![^<>]*)?')
-
-entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
-charref = re.compile('&#([0-9]+)[^0-9]')
-
-starttagopen = re.compile('<[>a-zA-Z]')
-shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
-shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
-piclose = re.compile('>')
-endbracket = re.compile('[<>]')
-tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
-attrfind = re.compile(
- r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
- r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
-
-
-class SGMLParseError(RuntimeError):
- """Exception raised for all parse errors."""
- pass
-
-
-# SGML parser base class -- find tags and call handler functions.
-# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
-# The dtd is defined by deriving a class which defines methods
-# with special names to handle tags: start_foo and end_foo to handle
-# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
-# (Tags are converted to lower case for this purpose.) The data
-# between tags is passed to the parser by calling self.handle_data()
-# with some data as argument (the data may be split up in arbitrary
-# chunks). Entity references are passed by calling
-# self.handle_entityref() with the entity reference as argument.
-
-class SGMLParser(markupbase.ParserBase):
- # Definition of entities -- derived classes may override
- entity_or_charref = re.compile('&(?:'
- '([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
- ')(;?)')
-
- def __init__(self, verbose=0):
- """Initialize and reset this instance."""
- self.verbose = verbose
- self.reset()
-
- def reset(self):
- """Reset this instance. Loses all unprocessed data."""
- self.__starttag_text = None
- self.rawdata = ''
- self.stack = []
- self.lasttag = '???'
- self.nomoretags = 0
- self.literal = 0
- markupbase.ParserBase.reset(self)
-
- def setnomoretags(self):
- """Enter literal mode (CDATA) till EOF.
-
- Intended for derived classes only.
- """
- self.nomoretags = self.literal = 1
-
- def setliteral(self, *args):
- """Enter literal mode (CDATA).
-
- Intended for derived classes only.
- """
- self.literal = 1
-
- def feed(self, data):
- """Feed some data to the parser.
-
- Call this as often as you want, with as little or as much text
- as you want (may include '\n'). (This just saves the text,
- all the processing is done by goahead().)
- """
-
- self.rawdata = self.rawdata + data
- self.goahead(0)
-
- def close(self):
- """Handle the remaining data."""
- self.goahead(1)
-
- def error(self, message):
- raise SGMLParseError(message)
-
- # Internal -- handle data as far as reasonable. May leave state
- # and data to be processed by a subsequent call. If 'end' is
- # true, force handling all data as if followed by EOF marker.
- def goahead(self, end):
- rawdata = self.rawdata
- i = 0
- n = len(rawdata)
- while i < n:
- if self.nomoretags:
- self.handle_data(rawdata[i:n])
- i = n
- break
- match = interesting.search(rawdata, i)
- if match: j = match.start()
- else: j = n
- if i < j:
- self.handle_data(rawdata[i:j])
- i = j
- if i == n: break
- if rawdata[i] == '<':
- if starttagopen.match(rawdata, i):
- if self.literal:
- self.handle_data(rawdata[i])
- i = i+1
- continue
- k = self.parse_starttag(i)
- if k < 0: break
- i = k
- continue
- if rawdata.startswith("</", i):
- k = self.parse_endtag(i)
- if k < 0: break
- i = k
- self.literal = 0
- continue
- if self.literal:
- if n > (i + 1):
- self.handle_data("<")
- i = i+1
- else:
- # incomplete
- break
- continue
- if rawdata.startswith("<!--", i):
- # Strictly speaking, a comment is --.*--
- # within a declaration tag <!...>.
- # This should be removed,
- # and comments handled only in parse_declaration.
- k = self.parse_comment(i)
- if k < 0: break
- i = k
- continue
- if rawdata.startswith("<?", i):
- k = self.parse_pi(i)
- if k < 0: break
- i = i+k
- continue
- if rawdata.startswith("<!", i):
- # This is some sort of declaration; in "HTML as
- # deployed," this should only be the document type
- # declaration ("<!DOCTYPE html...>").
- k = self.parse_declaration(i)
- if k < 0: break
- i = k
- continue
- elif rawdata[i] == '&':
- if self.literal:
- self.handle_data(rawdata[i])
- i = i+1
- continue
- match = charref.match(rawdata, i)
- if match:
- name = match.group(1)
- self.handle_charref(name)
- i = match.end(0)
- if rawdata[i-1] != ';': i = i-1
- continue
- match = entityref.match(rawdata, i)
- if match:
- name = match.group(1)
- self.handle_entityref(name)
- i = match.end(0)
- if rawdata[i-1] != ';': i = i-1
- continue
- else:
- self.error('neither < nor & ??')
- # We get here only if incomplete matches but
- # nothing else
- match = incomplete.match(rawdata, i)
- if not match:
- self.handle_data(rawdata[i])
- i = i+1
- continue
- j = match.end(0)
- if j == n:
- break # Really incomplete
- self.handle_data(rawdata[i:j])
- i = j
- # end while
- if end and i < n:
- self.handle_data(rawdata[i:n])
- i = n
- self.rawdata = rawdata[i:]
- # XXX if end: check for empty stack
-
- # Extensions for the DOCTYPE scanner:
- _decl_otherchars = '='
-
- # Internal -- parse processing instr, return length or -1 if not terminated
- def parse_pi(self, i):
- rawdata = self.rawdata
- if rawdata[i:i+2] != '<?':
- self.error('unexpected call to parse_pi()')
- match = piclose.search(rawdata, i+2)
- if not match:
- return -1
- j = match.start(0)
- self.handle_pi(rawdata[i+2: j])
- j = match.end(0)
- return j-i
-
- def get_starttag_text(self):
- return self.__starttag_text
-
- # Internal -- handle starttag, return length or -1 if not terminated
- def parse_starttag(self, i):
- self.__starttag_text = None
- start_pos = i
- rawdata = self.rawdata
- if shorttagopen.match(rawdata, i):
- # SGML shorthand: <tag/data/ == <tag>data</tag>
- # XXX Can data contain &... (entity or char refs)?
- # XXX Can data contain < or > (tag characters)?
- # XXX Can there be whitespace before the first /?
- match = shorttag.match(rawdata, i)
- if not match:
- return -1
- tag, data = match.group(1, 2)
- self.__starttag_text = '<%s/' % tag
- tag = tag.lower()
- k = match.end(0)
- self.finish_shorttag(tag, data)
- self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
- return k
- # XXX The following should skip matching quotes (' or ")
- # As a shortcut way to exit, this isn't so bad, but shouldn't
- # be used to locate the actual end of the start tag since the
- # < or > characters may be embedded in an attribute value.
- match = endbracket.search(rawdata, i+1)
- if not match:
- return -1
- j = match.start(0)
- # Now parse the data between i+1 and j into a tag and attrs
- attrs = []
- if rawdata[i:i+2] == '<>':
- # SGML shorthand: <> == <last open tag seen>
- k = j
- tag = self.lasttag
- else:
- match = tagfind.match(rawdata, i+1)
- if not match:
- self.error('unexpected call to parse_starttag')
- k = match.end(0)
- tag = rawdata[i+1:k].lower()
- self.lasttag = tag
- while k < j:
- match = attrfind.match(rawdata, k)
- if not match: break
- attrname, rest, attrvalue = match.group(1, 2, 3)
- if not rest:
- attrvalue = attrname
- else:
- if (attrvalue[:1] == "'" == attrvalue[-1:] or
- attrvalue[:1] == '"' == attrvalue[-1:]):
- # strip quotes
- attrvalue = attrvalue[1:-1]
- attrvalue = self.entity_or_charref.sub(
- self._convert_ref, attrvalue)
- attrs.append((attrname.lower(), attrvalue))
- k = match.end(0)
- if rawdata[j] == '>':
- j = j+1
- self.__starttag_text = rawdata[start_pos:j]
- self.finish_starttag(tag, attrs)
- return j
-
- # Internal -- convert entity or character reference
- def _convert_ref(self, match):
- if match.group(2):
- return self.convert_charref(match.group(2)) or \
- '&#%s%s' % match.groups()[1:]
- elif match.group(3):
- return self.convert_entityref(match.group(1)) or \
- '&%s;' % match.group(1)
- else:
- return '&%s' % match.group(1)
-
- # Internal -- parse endtag
- def parse_endtag(self, i):
- rawdata = self.rawdata
- match = endbracket.search(rawdata, i+1)
- if not match:
- return -1
- j = match.start(0)
- tag = rawdata[i+2:j].strip().lower()
- if rawdata[j] == '>':
- j = j+1
- self.finish_endtag(tag)
- return j
-
- # Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
- def finish_shorttag(self, tag, data):
- self.finish_starttag(tag, [])
- self.handle_data(data)
- self.finish_endtag(tag)
-
- # Internal -- finish processing of start tag
- # Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
- def finish_starttag(self, tag, attrs):
- try:
- method = getattr(self, 'start_' + tag)
- except AttributeError:
- try:
- method = getattr(self, 'do_' + tag)
- except AttributeError:
- self.unknown_starttag(tag, attrs)
- return -1
- else:
- self.handle_starttag(tag, method, attrs)
- return 0
- else:
- self.stack.append(tag)
- self.handle_starttag(tag, method, attrs)
- return 1
-
- # Internal -- finish processing of end tag
- def finish_endtag(self, tag):
- if not tag:
- found = len(self.stack) - 1
- if found < 0:
- self.unknown_endtag(tag)
- return
- else:
- if tag not in self.stack:
- try:
- method = getattr(self, 'end_' + tag)
- except AttributeError:
- self.unknown_endtag(tag)
- else:
- self.report_unbalanced(tag)
- return
- found = len(self.stack)
- for i in range(found):
- if self.stack[i] == tag: found = i
- while len(self.stack) > found:
- tag = self.stack[-1]
- try:
- method = getattr(self, 'end_' + tag)
- except AttributeError:
- method = None
- if method:
- self.handle_endtag(tag, method)
- else:
- self.unknown_endtag(tag)
- del self.stack[-1]
-
- # Overridable -- handle start tag
- def handle_starttag(self, tag, method, attrs):
- method(attrs)
-
- # Overridable -- handle end tag
- def handle_endtag(self, tag, method):
- method()
-
- # Example -- report an unbalanced </...> tag.
- def report_unbalanced(self, tag):
- if self.verbose:
- print '*** Unbalanced </' + tag + '>'
- print '*** Stack:', self.stack
-
- def convert_charref(self, name):
- """Convert character reference, may be overridden."""
- try:
- n = int(name)
- except ValueError:
- return
- if not 0 <= n <= 255:
- return
- return self.convert_codepoint(n)
-
- def convert_codepoint(self, codepoint):
- return chr(codepoint)
-
- def handle_charref(self, name):
- """Handle character reference, no need to override."""
- replacement = self.convert_charref(name)
- if replacement is None:
- self.unknown_charref(name)
- else:
- self.handle_data(replacement)
-
- # Definition of entities -- derived classes may override
- entitydefs = \
- {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
-
- def convert_entityref(self, name):
- """Convert entity references.
-
- As an alternative to overriding this method; one can tailor the
- results by setting up the self.entitydefs mapping appropriately.
- """
- table = self.entitydefs
- if name in table:
- return table[name]
- else:
- return
-
- def handle_entityref(self, name):
- """Handle entity references, no need to override."""
- replacement = self.convert_entityref(name)
- if replacement is None:
- self.unknown_entityref(name)
- else:
- self.handle_data(self.convert_entityref(name))
-
- # Example -- handle data, should be overridden
- def handle_data(self, data):
- pass
-
- # Example -- handle comment, could be overridden
- def handle_comment(self, data):
- pass
-
- # Example -- handle declaration, could be overridden
- def handle_decl(self, decl):
- pass
-
- # Example -- handle processing instruction, could be overridden
- def handle_pi(self, data):
- pass
-
- # To be overridden -- handlers for unknown objects
- def unknown_starttag(self, tag, attrs): pass
- def unknown_endtag(self, tag): pass
- def unknown_charref(self, ref): pass
- def unknown_entityref(self, ref): pass
-
-
-class TestSGMLParser(SGMLParser):
-
- def __init__(self, verbose=0):
- self.testdata = ""
- SGMLParser.__init__(self, verbose)
-
- def handle_data(self, data):
- self.testdata = self.testdata + data
- if len(repr(self.testdata)) >= 70:
- self.flush()
-
- def flush(self):
- data = self.testdata
- if data:
- self.testdata = ""
- print 'data:', repr(data)
-
- def handle_comment(self, data):
- self.flush()
- r = repr(data)
- if len(r) > 68:
- r = r[:32] + '...' + r[-32:]
- print 'comment:', r
-
- def unknown_starttag(self, tag, attrs):
- self.flush()
- if not attrs:
- print 'start tag: <' + tag + '>'
- else:
- print 'start tag: <' + tag,
- for name, value in attrs:
- print name + '=' + '"' + value + '"',
- print '>'
-
- def unknown_endtag(self, tag):
- self.flush()
- print 'end tag: </' + tag + '>'
-
- def unknown_entityref(self, ref):
- self.flush()
- print '*** unknown entity ref: &' + ref + ';'
-
- def unknown_charref(self, ref):
- self.flush()
- print '*** unknown char ref: &#' + ref + ';'
-
- def unknown_decl(self, data):
- self.flush()
- print '*** unknown decl: [' + data + ']'
-
- def close(self):
- SGMLParser.close(self)
- self.flush()
-
-
-def test(args = None):
- import sys
-
- if args is None:
- args = sys.argv[1:]
-
- if args and args[0] == '-s':
- args = args[1:]
- klass = SGMLParser
- else:
- klass = TestSGMLParser
-
- if args:
- file = args[0]
- else:
- file = 'test.html'
-
- if file == '-':
- f = sys.stdin
- else:
- try:
- f = open(file, 'r')
- except IOError, msg:
- print file, ":", msg
- sys.exit(1)
-
- data = f.read()
- if f is not sys.stdin:
- f.close()
-
- x = klass()
- for c in data:
- x.feed(c)
- x.close()
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/sha.py b/sys/lib/python/sha.py
deleted file mode 100644
index de3214cfd..000000000
--- a/sys/lib/python/sha.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# $Id: sha.py 39316 2005-08-21 18:45:59Z greg $
-#
-# Copyright (C) 2005 Gregory P. Smith (greg@electricrain.com)
-# Licensed to PSF under a Contributor Agreement.
-
-from hashlib import sha1 as sha
-new = sha
-
-blocksize = 1 # legacy value (wrong in any useful sense)
-digest_size = 20
-digestsize = 20
diff --git a/sys/lib/python/shelve.py b/sys/lib/python/shelve.py
deleted file mode 100644
index 7a75445b0..000000000
--- a/sys/lib/python/shelve.py
+++ /dev/null
@@ -1,225 +0,0 @@
-"""Manage shelves of pickled objects.
-
-A "shelf" is a persistent, dictionary-like object. The difference
-with dbm databases is that the values (not the keys!) in a shelf can
-be essentially arbitrary Python objects -- anything that the "pickle"
-module can handle. This includes most class instances, recursive data
-types, and objects containing lots of shared sub-objects. The keys
-are ordinary strings.
-
-To summarize the interface (key is a string, data is an arbitrary
-object):
-
- import shelve
- d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
-
- d[key] = data # store data at key (overwrites old data if
- # using an existing key)
- data = d[key] # retrieve a COPY of the data at key (raise
- # KeyError if no such key) -- NOTE that this
- # access returns a *copy* of the entry!
- del d[key] # delete data stored at key (raises KeyError
- # if no such key)
- flag = d.has_key(key) # true if the key exists; same as "key in d"
- list = d.keys() # a list of all existing keys (slow!)
-
- d.close() # close it
-
-Dependent on the implementation, closing a persistent dictionary may
-or may not be necessary to flush changes to disk.
-
-Normally, d[key] returns a COPY of the entry. This needs care when
-mutable entries are mutated: for example, if d[key] is a list,
- d[key].append(anitem)
-does NOT modify the entry d[key] itself, as stored in the persistent
-mapping -- it only modifies the copy, which is then immediately
-discarded, so that the append has NO effect whatsoever. To append an
-item to d[key] in a way that will affect the persistent mapping, use:
- data = d[key]
- data.append(anitem)
- d[key] = data
-
-To avoid the problem with mutable entries, you may pass the keyword
-argument writeback=True in the call to shelve.open. When you use:
- d = shelve.open(filename, writeback=True)
-then d keeps a cache of all entries you access, and writes them all back
-to the persistent mapping when you call d.close(). This ensures that
-such usage as d[key].append(anitem) works as intended.
-
-However, using keyword argument writeback=True may consume vast amount
-of memory for the cache, and it may make d.close() very slow, if you
-access many of d's entries after opening it in this way: d has no way to
-check which of the entries you access are mutable and/or which ones you
-actually mutate, so it must cache, and write back at close, all of the
-entries that you access. You can call d.sync() to write back all the
-entries in the cache, and empty the cache (d.sync() also synchronizes
-the persistent dictionary on disk, if feasible).
-"""
-
-# Try using cPickle and cStringIO if available.
-
-try:
- from cPickle import Pickler, Unpickler
-except ImportError:
- from pickle import Pickler, Unpickler
-
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-import UserDict
-import warnings
-
-__all__ = ["Shelf","BsdDbShelf","DbfilenameShelf","open"]
-
-class Shelf(UserDict.DictMixin):
- """Base class for shelf implementations.
-
- This is initialized with a dictionary-like object.
- See the module's __doc__ string for an overview of the interface.
- """
-
- def __init__(self, dict, protocol=None, writeback=False):
- self.dict = dict
- if protocol is None:
- protocol = 0
- self._protocol = protocol
- self.writeback = writeback
- self.cache = {}
-
- def keys(self):
- return self.dict.keys()
-
- def __len__(self):
- return len(self.dict)
-
- def has_key(self, key):
- return self.dict.has_key(key)
-
- def __contains__(self, key):
- return self.dict.has_key(key)
-
- def get(self, key, default=None):
- if self.dict.has_key(key):
- return self[key]
- return default
-
- def __getitem__(self, key):
- try:
- value = self.cache[key]
- except KeyError:
- f = StringIO(self.dict[key])
- value = Unpickler(f).load()
- if self.writeback:
- self.cache[key] = value
- return value
-
- def __setitem__(self, key, value):
- if self.writeback:
- self.cache[key] = value
- f = StringIO()
- p = Pickler(f, self._protocol)
- p.dump(value)
- self.dict[key] = f.getvalue()
-
- def __delitem__(self, key):
- del self.dict[key]
- try:
- del self.cache[key]
- except KeyError:
- pass
-
- def close(self):
- self.sync()
- try:
- self.dict.close()
- except AttributeError:
- pass
- self.dict = 0
-
- def __del__(self):
- if not hasattr(self, 'writeback'):
- # __init__ didn't succeed, so don't bother closing
- return
- self.close()
-
- def sync(self):
- if self.writeback and self.cache:
- self.writeback = False
- for key, entry in self.cache.iteritems():
- self[key] = entry
- self.writeback = True
- self.cache = {}
- if hasattr(self.dict, 'sync'):
- self.dict.sync()
-
-
-class BsdDbShelf(Shelf):
- """Shelf implementation using the "BSD" db interface.
-
- This adds methods first(), next(), previous(), last() and
- set_location() that have no counterpart in [g]dbm databases.
-
- The actual database must be opened using one of the "bsddb"
- modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
- bsddb.rnopen) and passed to the constructor.
-
- See the module's __doc__ string for an overview of the interface.
- """
-
- def __init__(self, dict, protocol=None, writeback=False):
- Shelf.__init__(self, dict, protocol, writeback)
-
- def set_location(self, key):
- (key, value) = self.dict.set_location(key)
- f = StringIO(value)
- return (key, Unpickler(f).load())
-
- def next(self):
- (key, value) = self.dict.next()
- f = StringIO(value)
- return (key, Unpickler(f).load())
-
- def previous(self):
- (key, value) = self.dict.previous()
- f = StringIO(value)
- return (key, Unpickler(f).load())
-
- def first(self):
- (key, value) = self.dict.first()
- f = StringIO(value)
- return (key, Unpickler(f).load())
-
- def last(self):
- (key, value) = self.dict.last()
- f = StringIO(value)
- return (key, Unpickler(f).load())
-
-
-class DbfilenameShelf(Shelf):
- """Shelf implementation using the "anydbm" generic dbm interface.
-
- This is initialized with the filename for the dbm database.
- See the module's __doc__ string for an overview of the interface.
- """
-
- def __init__(self, filename, flag='c', protocol=None, writeback=False):
- import anydbm
- Shelf.__init__(self, anydbm.open(filename, flag), protocol, writeback)
-
-
-def open(filename, flag='c', protocol=None, writeback=False):
- """Open a persistent dictionary for reading and writing.
-
- The filename parameter is the base filename for the underlying
- database. As a side-effect, an extension may be added to the
- filename and more than one file may be created. The optional flag
- parameter has the same interpretation as the flag parameter of
- anydbm.open(). The optional protocol parameter specifies the
- version of the pickle protocol (0, 1, or 2).
-
- See the module's __doc__ string for an overview of the interface.
- """
-
- return DbfilenameShelf(filename, flag, protocol, writeback)
diff --git a/sys/lib/python/shlex.py b/sys/lib/python/shlex.py
deleted file mode 100644
index 6632b8759..000000000
--- a/sys/lib/python/shlex.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""A lexical analyzer class for simple shell-like syntaxes."""
-
-# Module and documentation by Eric S. Raymond, 21 Dec 1998
-# Input stacking and error message cleanup added by ESR, March 2000
-# push_source() and pop_source() made explicit by ESR, January 2001.
-# Posix compliance, split(), string arguments, and
-# iterator interface by Gustavo Niemeyer, April 2003.
-
-import os.path
-import sys
-from collections import deque
-
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-__all__ = ["shlex", "split"]
-
-class shlex:
- "A lexical analyzer class for simple shell-like syntaxes."
- def __init__(self, instream=None, infile=None, posix=False):
- if isinstance(instream, basestring):
- instream = StringIO(instream)
- if instream is not None:
- self.instream = instream
- self.infile = infile
- else:
- self.instream = sys.stdin
- self.infile = None
- self.posix = posix
- if posix:
- self.eof = None
- else:
- self.eof = ''
- self.commenters = '#'
- self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
- if self.posix:
- self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
- 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
- self.whitespace = ' \t\r\n'
- self.whitespace_split = False
- self.quotes = '\'"'
- self.escape = '\\'
- self.escapedquotes = '"'
- self.state = ' '
- self.pushback = deque()
- self.lineno = 1
- self.debug = 0
- self.token = ''
- self.filestack = deque()
- self.source = None
- if self.debug:
- print 'shlex: reading from %s, line %d' \
- % (self.instream, self.lineno)
-
- def push_token(self, tok):
- "Push a token onto the stack popped by the get_token method"
- if self.debug >= 1:
- print "shlex: pushing token " + repr(tok)
- self.pushback.appendleft(tok)
-
- def push_source(self, newstream, newfile=None):
- "Push an input source onto the lexer's input source stack."
- if isinstance(newstream, basestring):
- newstream = StringIO(newstream)
- self.filestack.appendleft((self.infile, self.instream, self.lineno))
- self.infile = newfile
- self.instream = newstream
- self.lineno = 1
- if self.debug:
- if newfile is not None:
- print 'shlex: pushing to file %s' % (self.infile,)
- else:
- print 'shlex: pushing to stream %s' % (self.instream,)
-
- def pop_source(self):
- "Pop the input source stack."
- self.instream.close()
- (self.infile, self.instream, self.lineno) = self.filestack.popleft()
- if self.debug:
- print 'shlex: popping to %s, line %d' \
- % (self.instream, self.lineno)
- self.state = ' '
-
- def get_token(self):
- "Get a token from the input stream (or from stack if it's nonempty)"
- if self.pushback:
- tok = self.pushback.popleft()
- if self.debug >= 1:
- print "shlex: popping token " + repr(tok)
- return tok
- # No pushback. Get a token.
- raw = self.read_token()
- # Handle inclusions
- if self.source is not None:
- while raw == self.source:
- spec = self.sourcehook(self.read_token())
- if spec:
- (newfile, newstream) = spec
- self.push_source(newstream, newfile)
- raw = self.get_token()
- # Maybe we got EOF instead?
- while raw == self.eof:
- if not self.filestack:
- return self.eof
- else:
- self.pop_source()
- raw = self.get_token()
- # Neither inclusion nor EOF
- if self.debug >= 1:
- if raw != self.eof:
- print "shlex: token=" + repr(raw)
- else:
- print "shlex: token=EOF"
- return raw
-
- def read_token(self):
- quoted = False
- escapedstate = ' '
- while True:
- nextchar = self.instream.read(1)
- if nextchar == '\n':
- self.lineno = self.lineno + 1
- if self.debug >= 3:
- print "shlex: in state", repr(self.state), \
- "I see character:", repr(nextchar)
- if self.state is None:
- self.token = '' # past end of file
- break
- elif self.state == ' ':
- if not nextchar:
- self.state = None # end of file
- break
- elif nextchar in self.whitespace:
- if self.debug >= 2:
- print "shlex: I see whitespace in whitespace state"
- if self.token or (self.posix and quoted):
- break # emit current token
- else:
- continue
- elif nextchar in self.commenters:
- self.instream.readline()
- self.lineno = self.lineno + 1
- elif self.posix and nextchar in self.escape:
- escapedstate = 'a'
- self.state = nextchar
- elif nextchar in self.wordchars:
- self.token = nextchar
- self.state = 'a'
- elif nextchar in self.quotes:
- if not self.posix:
- self.token = nextchar
- self.state = nextchar
- elif self.whitespace_split:
- self.token = nextchar
- self.state = 'a'
- else:
- self.token = nextchar
- if self.token or (self.posix and quoted):
- break # emit current token
- else:
- continue
- elif self.state in self.quotes:
- quoted = True
- if not nextchar: # end of file
- if self.debug >= 2:
- print "shlex: I see EOF in quotes state"
- # XXX what error should be raised here?
- raise ValueError, "No closing quotation"
- if nextchar == self.state:
- if not self.posix:
- self.token = self.token + nextchar
- self.state = ' '
- break
- else:
- self.state = 'a'
- elif self.posix and nextchar in self.escape and \
- self.state in self.escapedquotes:
- escapedstate = self.state
- self.state = nextchar
- else:
- self.token = self.token + nextchar
- elif self.state in self.escape:
- if not nextchar: # end of file
- if self.debug >= 2:
- print "shlex: I see EOF in escape state"
- # XXX what error should be raised here?
- raise ValueError, "No escaped character"
- # In posix shells, only the quote itself or the escape
- # character may be escaped within quotes.
- if escapedstate in self.quotes and \
- nextchar != self.state and nextchar != escapedstate:
- self.token = self.token + self.state
- self.token = self.token + nextchar
- self.state = escapedstate
- elif self.state == 'a':
- if not nextchar:
- self.state = None # end of file
- break
- elif nextchar in self.whitespace:
- if self.debug >= 2:
- print "shlex: I see whitespace in word state"
- self.state = ' '
- if self.token or (self.posix and quoted):
- break # emit current token
- else:
- continue
- elif nextchar in self.commenters:
- self.instream.readline()
- self.lineno = self.lineno + 1
- if self.posix:
- self.state = ' '
- if self.token or (self.posix and quoted):
- break # emit current token
- else:
- continue
- elif self.posix and nextchar in self.quotes:
- self.state = nextchar
- elif self.posix and nextchar in self.escape:
- escapedstate = 'a'
- self.state = nextchar
- elif nextchar in self.wordchars or nextchar in self.quotes \
- or self.whitespace_split:
- self.token = self.token + nextchar
- else:
- self.pushback.appendleft(nextchar)
- if self.debug >= 2:
- print "shlex: I see punctuation in word state"
- self.state = ' '
- if self.token:
- break # emit current token
- else:
- continue
- result = self.token
- self.token = ''
- if self.posix and not quoted and result == '':
- result = None
- if self.debug > 1:
- if result:
- print "shlex: raw token=" + repr(result)
- else:
- print "shlex: raw token=EOF"
- return result
-
- def sourcehook(self, newfile):
- "Hook called on a filename to be sourced."
- if newfile[0] == '"':
- newfile = newfile[1:-1]
- # This implements cpp-like semantics for relative-path inclusion.
- if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
- newfile = os.path.join(os.path.dirname(self.infile), newfile)
- return (newfile, open(newfile, "r"))
-
- def error_leader(self, infile=None, lineno=None):
- "Emit a C-compiler-like, Emacs-friendly error-message leader."
- if infile is None:
- infile = self.infile
- if lineno is None:
- lineno = self.lineno
- return "\"%s\", line %d: " % (infile, lineno)
-
- def __iter__(self):
- return self
-
- def next(self):
- token = self.get_token()
- if token == self.eof:
- raise StopIteration
- return token
-
-def split(s, comments=False):
- lex = shlex(s, posix=True)
- lex.whitespace_split = True
- if not comments:
- lex.commenters = ''
- return list(lex)
-
-if __name__ == '__main__':
- if len(sys.argv) == 1:
- lexer = shlex()
- else:
- file = sys.argv[1]
- lexer = shlex(open(file), file)
- while 1:
- tt = lexer.get_token()
- if tt:
- print "Token: " + repr(tt)
- else:
- break
diff --git a/sys/lib/python/shutil.py b/sys/lib/python/shutil.py
deleted file mode 100644
index c3ff687bf..000000000
--- a/sys/lib/python/shutil.py
+++ /dev/null
@@ -1,203 +0,0 @@
-"""Utility functions for copying files and directory trees.
-
-XXX The functions here don't copy the resource fork or other metadata on Mac.
-
-"""
-
-import os
-import sys
-import stat
-from os.path import abspath
-
-__all__ = ["copyfileobj","copyfile","copymode","copystat","copy","copy2",
- "copytree","move","rmtree","Error"]
-
-class Error(EnvironmentError):
- pass
-
-def copyfileobj(fsrc, fdst, length=16*1024):
- """copy data from file-like object fsrc to file-like object fdst"""
- while 1:
- buf = fsrc.read(length)
- if not buf:
- break
- fdst.write(buf)
-
-def _samefile(src, dst):
- # Macintosh, Unix.
- if hasattr(os.path,'samefile'):
- try:
- return os.path.samefile(src, dst)
- except OSError:
- return False
-
- # All other platforms: check for same pathname.
- return (os.path.normcase(os.path.abspath(src)) ==
- os.path.normcase(os.path.abspath(dst)))
-
-def copyfile(src, dst):
- """Copy data from src to dst"""
- if _samefile(src, dst):
- raise Error, "`%s` and `%s` are the same file" % (src, dst)
-
- fsrc = None
- fdst = None
- try:
- fsrc = open(src, 'rb')
- fdst = open(dst, 'wb')
- copyfileobj(fsrc, fdst)
- finally:
- if fdst:
- fdst.close()
- if fsrc:
- fsrc.close()
-
-def copymode(src, dst):
- """Copy mode bits from src to dst"""
- if hasattr(os, 'chmod'):
- st = os.stat(src)
- mode = stat.S_IMODE(st.st_mode)
- os.chmod(dst, mode)
-
-def copystat(src, dst):
- """Copy all stat info (mode bits, atime and mtime) from src to dst"""
- st = os.stat(src)
- mode = stat.S_IMODE(st.st_mode)
- if hasattr(os, 'utime'):
- os.utime(dst, (st.st_atime, st.st_mtime))
- if hasattr(os, 'chmod'):
- os.chmod(dst, mode)
-
-
-def copy(src, dst):
- """Copy data and mode bits ("cp src dst").
-
- The destination may be a directory.
-
- """
- if os.path.isdir(dst):
- dst = os.path.join(dst, os.path.basename(src))
- copyfile(src, dst)
- copymode(src, dst)
-
-def copy2(src, dst):
- """Copy data and all stat info ("cp -p src dst").
-
- The destination may be a directory.
-
- """
- if os.path.isdir(dst):
- dst = os.path.join(dst, os.path.basename(src))
- copyfile(src, dst)
- copystat(src, dst)
-
-
-def copytree(src, dst, symlinks=False):
- """Recursively copy a directory tree using copy2().
-
- The destination directory must not already exist.
- If exception(s) occur, an Error is raised with a list of reasons.
-
- If the optional symlinks flag is true, symbolic links in the
- source tree result in symbolic links in the destination tree; if
- it is false, the contents of the files pointed to by symbolic
- links are copied.
-
- XXX Consider this example code rather than the ultimate tool.
-
- """
- names = os.listdir(src)
- os.makedirs(dst)
- errors = []
- for name in names:
- srcname = os.path.join(src, name)
- dstname = os.path.join(dst, name)
- try:
- if symlinks and os.path.islink(srcname):
- linkto = os.readlink(srcname)
- os.symlink(linkto, dstname)
- elif os.path.isdir(srcname):
- copytree(srcname, dstname, symlinks)
- else:
- copy2(srcname, dstname)
- # XXX What about devices, sockets etc.?
- except (IOError, os.error), why:
- errors.append((srcname, dstname, str(why)))
- # catch the Error from the recursive copytree so that we can
- # continue with other files
- except Error, err:
- errors.extend(err.args[0])
- try:
- copystat(src, dst)
- except WindowsError:
- # can't copy file access times on Windows
- pass
- except OSError, why:
- errors.extend((src, dst, str(why)))
- if errors:
- raise Error, errors
-
-def rmtree(path, ignore_errors=False, onerror=None):
- """Recursively delete a directory tree.
-
- If ignore_errors is set, errors are ignored; otherwise, if onerror
- is set, it is called to handle the error with arguments (func,
- path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
- path is the argument to that function that caused it to fail; and
- exc_info is a tuple returned by sys.exc_info(). If ignore_errors
- is false and onerror is None, an exception is raised.
-
- """
- if ignore_errors:
- def onerror(*args):
- pass
- elif onerror is None:
- def onerror(*args):
- raise
- names = []
- try:
- names = os.listdir(path)
- except os.error, err:
- onerror(os.listdir, path, sys.exc_info())
- for name in names:
- fullname = os.path.join(path, name)
- try:
- mode = os.lstat(fullname).st_mode
- except os.error:
- mode = 0
- if stat.S_ISDIR(mode):
- rmtree(fullname, ignore_errors, onerror)
- else:
- try:
- os.remove(fullname)
- except os.error, err:
- onerror(os.remove, fullname, sys.exc_info())
- try:
- os.rmdir(path)
- except os.error:
- onerror(os.rmdir, path, sys.exc_info())
-
-def move(src, dst):
- """Recursively move a file or directory to another location.
-
- If the destination is on our current filesystem, then simply use
- rename. Otherwise, copy src to the dst and then remove src.
- A lot more could be done here... A look at a mv.c shows a lot of
- the issues this implementation glosses over.
-
- """
-
- try:
- os.rename(src, dst)
- except OSError:
- if os.path.isdir(src):
- if destinsrc(src, dst):
- raise Error, "Cannot move a directory '%s' into itself '%s'." % (src, dst)
- copytree(src, dst, symlinks=True)
- rmtree(src)
- else:
- copy2(src,dst)
- os.unlink(src)
-
-def destinsrc(src, dst):
- return abspath(dst).startswith(abspath(src))
diff --git a/sys/lib/python/site-packages/README b/sys/lib/python/site-packages/README
deleted file mode 100644
index 273f6251a..000000000
--- a/sys/lib/python/site-packages/README
+++ /dev/null
@@ -1,2 +0,0 @@
-This directory exists so that 3rd party packages can be installed
-here. Read the source for site.py for more details.
diff --git a/sys/lib/python/site.py b/sys/lib/python/site.py
deleted file mode 100644
index 113f2215e..000000000
--- a/sys/lib/python/site.py
+++ /dev/null
@@ -1,424 +0,0 @@
-"""Append module search paths for third-party packages to sys.path.
-
-****************************************************************
-* This module is automatically imported during initialization. *
-****************************************************************
-
-In earlier versions of Python (up to 1.5a3), scripts or modules that
-needed to use site-specific modules would place ``import site''
-somewhere near the top of their code. Because of the automatic
-import, this is no longer necessary (but code that does it still
-works).
-
-This will append site-specific paths to the module search path. On
-Unix (including Mac OSX), it starts with sys.prefix and
-sys.exec_prefix (if different) and appends
-lib/python<version>/site-packages as well as lib/site-python.
-On other platforms (such as Windows), it tries each of the
-prefixes directly, as well as with lib/site-packages appended. The
-resulting directories, if they exist, are appended to sys.path, and
-also inspected for path configuration files.
-
-A path configuration file is a file whose name has the form
-<package>.pth; its contents are additional directories (one per line)
-to be added to sys.path. Non-existing directories (or
-non-directories) are never added to sys.path; no directory is added to
-sys.path more than once. Blank lines and lines beginning with
-'#' are skipped. Lines starting with 'import' are executed.
-
-For example, suppose sys.prefix and sys.exec_prefix are set to
-/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
-with three subdirectories, foo, bar and spam, and two path
-configuration files, foo.pth and bar.pth. Assume foo.pth contains the
-following:
-
- # foo package configuration
- foo
- bar
- bletch
-
-and bar.pth contains:
-
- # bar package configuration
- bar
-
-Then the following directories are added to sys.path, in this order:
-
- /usr/local/lib/python2.5/site-packages/bar
- /usr/local/lib/python2.5/site-packages/foo
-
-Note that bletch is omitted because it doesn't exist; bar precedes foo
-because bar.pth comes alphabetically before foo.pth; and spam is
-omitted because it is not mentioned in either path configuration file.
-
-After these path manipulations, an attempt is made to import a module
-named sitecustomize, which can perform arbitrary additional
-site-specific customizations. If this import fails with an
-ImportError exception, it is silently ignored.
-
-"""
-
-import sys
-import os
-import __builtin__
-
-
-def makepath(*paths):
- dir = os.path.abspath(os.path.join(*paths))
- return dir, os.path.normcase(dir)
-
-def abs__file__():
- """Set all module' __file__ attribute to an absolute path"""
- for m in sys.modules.values():
- if hasattr(m, '__loader__'):
- continue # don't mess with a PEP 302-supplied __file__
- try:
- m.__file__ = os.path.abspath(m.__file__)
- except AttributeError:
- continue
-
-def removeduppaths():
- """ Remove duplicate entries from sys.path along with making them
- absolute"""
- # This ensures that the initial path provided by the interpreter contains
- # only absolute pathnames, even if we're running from the build directory.
- L = []
- known_paths = set()
- for dir in sys.path:
- # Filter out duplicate paths (on case-insensitive file systems also
- # if they only differ in case); turn relative paths into absolute
- # paths.
- dir, dircase = makepath(dir)
- if not dircase in known_paths:
- L.append(dir)
- known_paths.add(dircase)
- sys.path[:] = L
- return known_paths
-
-# XXX This should not be part of site.py, since it is needed even when
-# using the -S option for Python. See http://www.python.org/sf/586680
-def addbuilddir():
- """Append ./build/lib.<platform> in case we're running in the build dir
- (especially for Guido :-)"""
- from distutils.util import get_platform
- s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
- s = os.path.join(os.path.dirname(sys.path[-1]), s)
- sys.path.append(s)
-
-def _init_pathinfo():
- """Return a set containing all existing directory entries from sys.path"""
- d = set()
- for dir in sys.path:
- try:
- if os.path.isdir(dir):
- dir, dircase = makepath(dir)
- d.add(dircase)
- except TypeError:
- continue
- return d
-
-def addpackage(sitedir, name, known_paths):
- """Add a new path to known_paths by combining sitedir and 'name' or execute
- sitedir if it starts with 'import'"""
- if known_paths is None:
- _init_pathinfo()
- reset = 1
- else:
- reset = 0
- fullname = os.path.join(sitedir, name)
- try:
- f = open(fullname, "rU")
- except IOError:
- return
- try:
- for line in f:
- if line.startswith("#"):
- continue
- if line.startswith("import"):
- exec line
- continue
- line = line.rstrip()
- dir, dircase = makepath(sitedir, line)
- if not dircase in known_paths and os.path.exists(dir):
- sys.path.append(dir)
- known_paths.add(dircase)
- finally:
- f.close()
- if reset:
- known_paths = None
- return known_paths
-
-def addsitedir(sitedir, known_paths=None):
- """Add 'sitedir' argument to sys.path if missing and handle .pth files in
- 'sitedir'"""
- if known_paths is None:
- known_paths = _init_pathinfo()
- reset = 1
- else:
- reset = 0
- sitedir, sitedircase = makepath(sitedir)
- if not sitedircase in known_paths:
- sys.path.append(sitedir) # Add path component
- try:
- names = os.listdir(sitedir)
- except os.error:
- return
- names.sort()
- for name in names:
- if name.endswith(os.extsep + "pth"):
- addpackage(sitedir, name, known_paths)
- if reset:
- known_paths = None
- return known_paths
-
-def addsitepackages(known_paths):
- """Add site-packages (and possibly site-python) to sys.path"""
- prefixes = [sys.prefix]
- if sys.exec_prefix != sys.prefix:
- prefixes.append(sys.exec_prefix)
- for prefix in prefixes:
- if prefix:
- if sys.platform in ('os2emx', 'riscos'):
- sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
- elif os.sep == '/':
- sitedirs = [os.path.join(prefix,
- "lib",
- "python" + sys.version[:3],
- "site-packages"),
- os.path.join(prefix, "lib", "site-python")]
- else:
- sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
- if sys.platform == 'darwin':
- # for framework builds *only* we add the standard Apple
- # locations. Currently only per-user, but /Library and
- # /Network/Library could be added too
- if 'Python.framework' in prefix:
- home = os.environ.get('HOME')
- if home:
- sitedirs.append(
- os.path.join(home,
- 'Library',
- 'Python',
- sys.version[:3],
- 'site-packages'))
- for sitedir in sitedirs:
- if os.path.isdir(sitedir):
- addsitedir(sitedir, known_paths)
- return None
-
-
-def setBEGINLIBPATH():
- """The OS/2 EMX port has optional extension modules that do double duty
- as DLLs (and must use the .DLL file extension) for other extensions.
- The library search path needs to be amended so these will be found
- during module import. Use BEGINLIBPATH so that these are at the start
- of the library search path.
-
- """
- dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
- libpath = os.environ['BEGINLIBPATH'].split(';')
- if libpath[-1]:
- libpath.append(dllpath)
- else:
- libpath[-1] = dllpath
- os.environ['BEGINLIBPATH'] = ';'.join(libpath)
-
-
-def setquit():
- """Define new built-ins 'quit' and 'exit'.
- These are simply strings that display a hint on how to exit.
-
- """
- if os.sep == ':':
- eof = 'Cmd-Q'
- elif os.sep == '\\':
- eof = 'Ctrl-Z plus Return'
- else:
- eof = 'Ctrl-D (i.e. EOF)'
-
- class Quitter(object):
- def __init__(self, name):
- self.name = name
- def __repr__(self):
- return 'Use %s() or %s to exit' % (self.name, eof)
- def __call__(self, code=None):
- # Shells like IDLE catch the SystemExit, but listen when their
- # stdin wrapper is closed.
- try:
- sys.stdin.close()
- except:
- pass
- raise SystemExit(code)
- __builtin__.quit = Quitter('quit')
- __builtin__.exit = Quitter('exit')
-
-
-class _Printer(object):
- """interactive prompt objects for printing the license text, a list of
- contributors and the copyright notice."""
-
- MAXLINES = 23
-
- def __init__(self, name, data, files=(), dirs=()):
- self.__name = name
- self.__data = data
- self.__files = files
- self.__dirs = dirs
- self.__lines = None
-
- def __setup(self):
- if self.__lines:
- return
- data = None
- for dir in self.__dirs:
- for filename in self.__files:
- filename = os.path.join(dir, filename)
- try:
- fp = file(filename, "rU")
- data = fp.read()
- fp.close()
- break
- except IOError:
- pass
- if data:
- break
- if not data:
- data = self.__data
- self.__lines = data.split('\n')
- self.__linecnt = len(self.__lines)
-
- def __repr__(self):
- self.__setup()
- if len(self.__lines) <= self.MAXLINES:
- return "\n".join(self.__lines)
- else:
- return "Type %s() to see the full %s text" % ((self.__name,)*2)
-
- def __call__(self):
- self.__setup()
- prompt = 'Hit Return for more, or q (and Return) to quit: '
- lineno = 0
- while 1:
- try:
- for i in range(lineno, lineno + self.MAXLINES):
- print self.__lines[i]
- except IndexError:
- break
- else:
- lineno += self.MAXLINES
- key = None
- while key is None:
- key = raw_input(prompt)
- if key not in ('', 'q'):
- key = None
- if key == 'q':
- break
-
-def setcopyright():
- """Set 'copyright' and 'credits' in __builtin__"""
- __builtin__.copyright = _Printer("copyright", sys.copyright)
- if sys.platform[:4] == 'java':
- __builtin__.credits = _Printer(
- "credits",
- "Jython is maintained by the Jython developers (www.jython.org).")
- else:
- __builtin__.credits = _Printer("credits", """\
- Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
- for supporting Python development. See www.python.org for more information.""")
- here = os.path.dirname(os.__file__)
- __builtin__.license = _Printer(
- "license", "See http://www.python.org/%.3s/license.html" % sys.version,
- ["LICENSE.txt", "LICENSE"],
- [os.path.join(here, os.pardir), here, os.curdir])
-
-
-class _Helper(object):
- """Define the built-in 'help'.
- This is a wrapper around pydoc.help (with a twist).
-
- """
-
- def __repr__(self):
- return "Type help() for interactive help, " \
- "or help(object) for help about object."
- def __call__(self, *args, **kwds):
- import pydoc
- return pydoc.help(*args, **kwds)
-
-def sethelper():
- __builtin__.help = _Helper()
-
-def aliasmbcs():
- """On Windows, some default encodings are not provided by Python,
- while they are always available as "mbcs" in each locale. Make
- them usable by aliasing to "mbcs" in such a case."""
- if sys.platform == 'win32':
- import locale, codecs
- enc = locale.getdefaultlocale()[1]
- if enc.startswith('cp'): # "cp***" ?
- try:
- codecs.lookup(enc)
- except LookupError:
- import encodings
- encodings._cache[enc] = encodings._unknown
- encodings.aliases.aliases[enc] = 'mbcs'
-
-def setencoding():
- """Set the string encoding used by the Unicode implementation. The
- default is 'ascii', but if you're willing to experiment, you can
- change this."""
- encoding = "ascii" # Default value set by _PyUnicode_Init()
- if 0:
- # Enable to support locale aware default string encodings.
- import locale
- loc = locale.getdefaultlocale()
- if loc[1]:
- encoding = loc[1]
- if 0:
- # Enable to switch off string to Unicode coercion and implicit
- # Unicode to string conversion.
- encoding = "undefined"
- if encoding != "ascii":
- # On Non-Unicode builds this will raise an AttributeError...
- sys.setdefaultencoding(encoding) # Needs Python Unicode build !
-
-
-def execsitecustomize():
- """Run custom site specific code, if available."""
- try:
- import sitecustomize
- except ImportError:
- pass
-
-
-def main():
- abs__file__()
- paths_in_sys = removeduppaths()
- if (os.name == "posix" and sys.path and
- os.path.basename(sys.path[-1]) == "Modules"):
- addbuilddir()
- paths_in_sys = addsitepackages(paths_in_sys)
- if sys.platform == 'os2emx':
- setBEGINLIBPATH()
- setquit()
- setcopyright()
- sethelper()
- aliasmbcs()
- setencoding()
- execsitecustomize()
- # Remove sys.setdefaultencoding() so that users cannot change the
- # encoding after initialization. The test for presence is needed when
- # this module is run as a script, because this code is executed twice.
- if hasattr(sys, "setdefaultencoding"):
- del sys.setdefaultencoding
-
-main()
-
-def _test():
- print "sys.path = ["
- for dir in sys.path:
- print " %r," % (dir,)
- print "]"
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/smtpd.py b/sys/lib/python/smtpd.py
deleted file mode 100755
index c656ec763..000000000
--- a/sys/lib/python/smtpd.py
+++ /dev/null
@@ -1,549 +0,0 @@
-#! /usr/bin/env python
-"""An RFC 2821 smtp proxy.
-
-Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
-
-Options:
-
- --nosetuid
- -n
- This program generally tries to setuid `nobody', unless this flag is
- set. The setuid call will fail if this program is not run as root (in
- which case, use this flag).
-
- --version
- -V
- Print the version number and exit.
-
- --class classname
- -c classname
- Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
- default.
-
- --debug
- -d
- Turn on debugging prints.
-
- --help
- -h
- Print this message and exit.
-
-Version: %(__version__)s
-
-If localhost is not given then `localhost' is used, and if localport is not
-given then 8025 is used. If remotehost is not given then `localhost' is used,
-and if remoteport is not given, then 25 is used.
-"""
-
-
-# Overview:
-#
-# This file implements the minimal SMTP protocol as defined in RFC 821. It
-# has a hierarchy of classes which implement the backend functionality for the
-# smtpd. A number of classes are provided:
-#
-# SMTPServer - the base class for the backend. Raises NotImplementedError
-# if you try to use it.
-#
-# DebuggingServer - simply prints each message it receives on stdout.
-#
-# PureProxy - Proxies all messages to a real smtpd which does final
-# delivery. One known problem with this class is that it doesn't handle
-# SMTP errors from the backend server at all. This should be fixed
-# (contributions are welcome!).
-#
-# MailmanProxy - An experimental hack to work with GNU Mailman
-# <www.list.org>. Using this server as your real incoming smtpd, your
-# mailhost will automatically recognize and accept mail destined to Mailman
-# lists when those lists are created. Every message not destined for a list
-# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
-# are not handled correctly yet.
-#
-# Please note that this script requires Python 2.0
-#
-# Author: Barry Warsaw <barry@python.org>
-#
-# TODO:
-#
-# - support mailbox delivery
-# - alias files
-# - ESMTP
-# - handle error codes from the backend smtpd
-
-import sys
-import os
-import errno
-import getopt
-import time
-import socket
-import asyncore
-import asynchat
-
-__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
-
-program = sys.argv[0]
-__version__ = 'Python SMTP proxy version 0.2'
-
-
-class Devnull:
- def write(self, msg): pass
- def flush(self): pass
-
-
-DEBUGSTREAM = Devnull()
-NEWLINE = '\n'
-EMPTYSTRING = ''
-COMMASPACE = ', '
-
-
-
-def usage(code, msg=''):
- print >> sys.stderr, __doc__ % globals()
- if msg:
- print >> sys.stderr, msg
- sys.exit(code)
-
-
-
-class SMTPChannel(asynchat.async_chat):
- COMMAND = 0
- DATA = 1
-
- def __init__(self, server, conn, addr):
- asynchat.async_chat.__init__(self, conn)
- self.__server = server
- self.__conn = conn
- self.__addr = addr
- self.__line = []
- self.__state = self.COMMAND
- self.__greeting = 0
- self.__mailfrom = None
- self.__rcpttos = []
- self.__data = ''
- self.__fqdn = socket.getfqdn()
- self.__peer = conn.getpeername()
- print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
- self.push('220 %s %s' % (self.__fqdn, __version__))
- self.set_terminator('\r\n')
-
- # Overrides base class for convenience
- def push(self, msg):
- asynchat.async_chat.push(self, msg + '\r\n')
-
- # Implementation of base class abstract method
- def collect_incoming_data(self, data):
- self.__line.append(data)
-
- # Implementation of base class abstract method
- def found_terminator(self):
- line = EMPTYSTRING.join(self.__line)
- print >> DEBUGSTREAM, 'Data:', repr(line)
- self.__line = []
- if self.__state == self.COMMAND:
- if not line:
- self.push('500 Error: bad syntax')
- return
- method = None
- i = line.find(' ')
- if i < 0:
- command = line.upper()
- arg = None
- else:
- command = line[:i].upper()
- arg = line[i+1:].strip()
- method = getattr(self, 'smtp_' + command, None)
- if not method:
- self.push('502 Error: command "%s" not implemented' % command)
- return
- method(arg)
- return
- else:
- if self.__state != self.DATA:
- self.push('451 Internal confusion')
- return
- # Remove extraneous carriage returns and de-transparency according
- # to RFC 821, Section 4.5.2.
- data = []
- for text in line.split('\r\n'):
- if text and text[0] == '.':
- data.append(text[1:])
- else:
- data.append(text)
- self.__data = NEWLINE.join(data)
- status = self.__server.process_message(self.__peer,
- self.__mailfrom,
- self.__rcpttos,
- self.__data)
- self.__rcpttos = []
- self.__mailfrom = None
- self.__state = self.COMMAND
- self.set_terminator('\r\n')
- if not status:
- self.push('250 Ok')
- else:
- self.push(status)
-
- # SMTP and ESMTP commands
- def smtp_HELO(self, arg):
- if not arg:
- self.push('501 Syntax: HELO hostname')
- return
- if self.__greeting:
- self.push('503 Duplicate HELO/EHLO')
- else:
- self.__greeting = arg
- self.push('250 %s' % self.__fqdn)
-
- def smtp_NOOP(self, arg):
- if arg:
- self.push('501 Syntax: NOOP')
- else:
- self.push('250 Ok')
-
- def smtp_QUIT(self, arg):
- # args is ignored
- self.push('221 Bye')
- self.close_when_done()
-
- # factored
- def __getaddr(self, keyword, arg):
- address = None
- keylen = len(keyword)
- if arg[:keylen].upper() == keyword:
- address = arg[keylen:].strip()
- if not address:
- pass
- elif address[0] == '<' and address[-1] == '>' and address != '<>':
- # Addresses can be in the form <person@dom.com> but watch out
- # for null address, e.g. <>
- address = address[1:-1]
- return address
-
- def smtp_MAIL(self, arg):
- print >> DEBUGSTREAM, '===> MAIL', arg
- address = self.__getaddr('FROM:', arg)
- if not address:
- self.push('501 Syntax: MAIL FROM:<address>')
- return
- if self.__mailfrom:
- self.push('503 Error: nested MAIL command')
- return
- self.__mailfrom = address
- print >> DEBUGSTREAM, 'sender:', self.__mailfrom
- self.push('250 Ok')
-
- def smtp_RCPT(self, arg):
- print >> DEBUGSTREAM, '===> RCPT', arg
- if not self.__mailfrom:
- self.push('503 Error: need MAIL command')
- return
- address = self.__getaddr('TO:', arg)
- if not address:
- self.push('501 Syntax: RCPT TO: <address>')
- return
- self.__rcpttos.append(address)
- print >> DEBUGSTREAM, 'recips:', self.__rcpttos
- self.push('250 Ok')
-
- def smtp_RSET(self, arg):
- if arg:
- self.push('501 Syntax: RSET')
- return
- # Resets the sender, recipients, and data, but not the greeting
- self.__mailfrom = None
- self.__rcpttos = []
- self.__data = ''
- self.__state = self.COMMAND
- self.push('250 Ok')
-
- def smtp_DATA(self, arg):
- if not self.__rcpttos:
- self.push('503 Error: need RCPT command')
- return
- if arg:
- self.push('501 Syntax: DATA')
- return
- self.__state = self.DATA
- self.set_terminator('\r\n.\r\n')
- self.push('354 End data with <CR><LF>.<CR><LF>')
-
-
-
-class SMTPServer(asyncore.dispatcher):
- def __init__(self, localaddr, remoteaddr):
- self._localaddr = localaddr
- self._remoteaddr = remoteaddr
- asyncore.dispatcher.__init__(self)
- self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
- # try to re-use a server port if possible
- self.set_reuse_addr()
- self.bind(localaddr)
- self.listen(5)
- print >> DEBUGSTREAM, \
- '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
- self.__class__.__name__, time.ctime(time.time()),
- localaddr, remoteaddr)
-
- def handle_accept(self):
- conn, addr = self.accept()
- print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
- channel = SMTPChannel(self, conn, addr)
-
- # API for "doing something useful with the message"
- def process_message(self, peer, mailfrom, rcpttos, data):
- """Override this abstract method to handle messages from the client.
-
- peer is a tuple containing (ipaddr, port) of the client that made the
- socket connection to our smtp port.
-
- mailfrom is the raw address the client claims the message is coming
- from.
-
- rcpttos is a list of raw addresses the client wishes to deliver the
- message to.
-
- data is a string containing the entire full text of the message,
- headers (if supplied) and all. It has been `de-transparencied'
- according to RFC 821, Section 4.5.2. In other words, a line
- containing a `.' followed by other text has had the leading dot
- removed.
-
- This function should return None, for a normal `250 Ok' response;
- otherwise it returns the desired response string in RFC 821 format.
-
- """
- raise NotImplementedError
-
-
-
-class DebuggingServer(SMTPServer):
- # Do something with the gathered message
- def process_message(self, peer, mailfrom, rcpttos, data):
- inheaders = 1
- lines = data.split('\n')
- print '---------- MESSAGE FOLLOWS ----------'
- for line in lines:
- # headers first
- if inheaders and not line:
- print 'X-Peer:', peer[0]
- inheaders = 0
- print line
- print '------------ END MESSAGE ------------'
-
-
-
-class PureProxy(SMTPServer):
- def process_message(self, peer, mailfrom, rcpttos, data):
- lines = data.split('\n')
- # Look for the last header
- i = 0
- for line in lines:
- if not line:
- break
- i += 1
- lines.insert(i, 'X-Peer: %s' % peer[0])
- data = NEWLINE.join(lines)
- refused = self._deliver(mailfrom, rcpttos, data)
- # TBD: what to do with refused addresses?
- print >> DEBUGSTREAM, 'we got some refusals:', refused
-
- def _deliver(self, mailfrom, rcpttos, data):
- import smtplib
- refused = {}
- try:
- s = smtplib.SMTP()
- s.connect(self._remoteaddr[0], self._remoteaddr[1])
- try:
- refused = s.sendmail(mailfrom, rcpttos, data)
- finally:
- s.quit()
- except smtplib.SMTPRecipientsRefused, e:
- print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
- refused = e.recipients
- except (socket.error, smtplib.SMTPException), e:
- print >> DEBUGSTREAM, 'got', e.__class__
- # All recipients were refused. If the exception had an associated
- # error code, use it. Otherwise,fake it with a non-triggering
- # exception code.
- errcode = getattr(e, 'smtp_code', -1)
- errmsg = getattr(e, 'smtp_error', 'ignore')
- for r in rcpttos:
- refused[r] = (errcode, errmsg)
- return refused
-
-
-
-class MailmanProxy(PureProxy):
- def process_message(self, peer, mailfrom, rcpttos, data):
- from cStringIO import StringIO
- from Mailman import Utils
- from Mailman import Message
- from Mailman import MailList
- # If the message is to a Mailman mailing list, then we'll invoke the
- # Mailman script directly, without going through the real smtpd.
- # Otherwise we'll forward it to the local proxy for disposition.
- listnames = []
- for rcpt in rcpttos:
- local = rcpt.lower().split('@')[0]
- # We allow the following variations on the theme
- # listname
- # listname-admin
- # listname-owner
- # listname-request
- # listname-join
- # listname-leave
- parts = local.split('-')
- if len(parts) > 2:
- continue
- listname = parts[0]
- if len(parts) == 2:
- command = parts[1]
- else:
- command = ''
- if not Utils.list_exists(listname) or command not in (
- '', 'admin', 'owner', 'request', 'join', 'leave'):
- continue
- listnames.append((rcpt, listname, command))
- # Remove all list recipients from rcpttos and forward what we're not
- # going to take care of ourselves. Linear removal should be fine
- # since we don't expect a large number of recipients.
- for rcpt, listname, command in listnames:
- rcpttos.remove(rcpt)
- # If there's any non-list destined recipients left,
- print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
- if rcpttos:
- refused = self._deliver(mailfrom, rcpttos, data)
- # TBD: what to do with refused addresses?
- print >> DEBUGSTREAM, 'we got refusals:', refused
- # Now deliver directly to the list commands
- mlists = {}
- s = StringIO(data)
- msg = Message.Message(s)
- # These headers are required for the proper execution of Mailman. All
- # MTAs in existance seem to add these if the original message doesn't
- # have them.
- if not msg.getheader('from'):
- msg['From'] = mailfrom
- if not msg.getheader('date'):
- msg['Date'] = time.ctime(time.time())
- for rcpt, listname, command in listnames:
- print >> DEBUGSTREAM, 'sending message to', rcpt
- mlist = mlists.get(listname)
- if not mlist:
- mlist = MailList.MailList(listname, lock=0)
- mlists[listname] = mlist
- # dispatch on the type of command
- if command == '':
- # post
- msg.Enqueue(mlist, tolist=1)
- elif command == 'admin':
- msg.Enqueue(mlist, toadmin=1)
- elif command == 'owner':
- msg.Enqueue(mlist, toowner=1)
- elif command == 'request':
- msg.Enqueue(mlist, torequest=1)
- elif command in ('join', 'leave'):
- # TBD: this is a hack!
- if command == 'join':
- msg['Subject'] = 'subscribe'
- else:
- msg['Subject'] = 'unsubscribe'
- msg.Enqueue(mlist, torequest=1)
-
-
-
-class Options:
- setuid = 1
- classname = 'PureProxy'
-
-
-
-def parseargs():
- global DEBUGSTREAM
- try:
- opts, args = getopt.getopt(
- sys.argv[1:], 'nVhc:d',
- ['class=', 'nosetuid', 'version', 'help', 'debug'])
- except getopt.error, e:
- usage(1, e)
-
- options = Options()
- for opt, arg in opts:
- if opt in ('-h', '--help'):
- usage(0)
- elif opt in ('-V', '--version'):
- print >> sys.stderr, __version__
- sys.exit(0)
- elif opt in ('-n', '--nosetuid'):
- options.setuid = 0
- elif opt in ('-c', '--class'):
- options.classname = arg
- elif opt in ('-d', '--debug'):
- DEBUGSTREAM = sys.stderr
-
- # parse the rest of the arguments
- if len(args) < 1:
- localspec = 'localhost:8025'
- remotespec = 'localhost:25'
- elif len(args) < 2:
- localspec = args[0]
- remotespec = 'localhost:25'
- elif len(args) < 3:
- localspec = args[0]
- remotespec = args[1]
- else:
- usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
-
- # split into host/port pairs
- i = localspec.find(':')
- if i < 0:
- usage(1, 'Bad local spec: %s' % localspec)
- options.localhost = localspec[:i]
- try:
- options.localport = int(localspec[i+1:])
- except ValueError:
- usage(1, 'Bad local port: %s' % localspec)
- i = remotespec.find(':')
- if i < 0:
- usage(1, 'Bad remote spec: %s' % remotespec)
- options.remotehost = remotespec[:i]
- try:
- options.remoteport = int(remotespec[i+1:])
- except ValueError:
- usage(1, 'Bad remote port: %s' % remotespec)
- return options
-
-
-
-if __name__ == '__main__':
- options = parseargs()
- # Become nobody
- if options.setuid:
- try:
- import pwd
- except ImportError:
- print >> sys.stderr, \
- 'Cannot import module "pwd"; try running with -n option.'
- sys.exit(1)
- nobody = pwd.getpwnam('nobody')[2]
- try:
- os.setuid(nobody)
- except OSError, e:
- if e.errno != errno.EPERM: raise
- print >> sys.stderr, \
- 'Cannot setuid "nobody"; try running with -n option.'
- sys.exit(1)
- classname = options.classname
- if "." in classname:
- lastdot = classname.rfind(".")
- mod = __import__(classname[:lastdot], globals(), locals(), [""])
- classname = classname[lastdot+1:]
- else:
- import __main__ as mod
- class_ = getattr(mod, classname)
- proxy = class_((options.localhost, options.localport),
- (options.remotehost, options.remoteport))
- try:
- asyncore.loop()
- except KeyboardInterrupt:
- pass
diff --git a/sys/lib/python/smtplib.py b/sys/lib/python/smtplib.py
deleted file mode 100755
index 9c8c4fa48..000000000
--- a/sys/lib/python/smtplib.py
+++ /dev/null
@@ -1,743 +0,0 @@
-#! /usr/bin/env python
-
-'''SMTP/ESMTP client class.
-
-This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
-Authentication) and RFC 2487 (Secure SMTP over TLS).
-
-Notes:
-
-Please remember, when doing ESMTP, that the names of the SMTP service
-extensions are NOT the same thing as the option keywords for the RCPT
-and MAIL commands!
-
-Example:
-
- >>> import smtplib
- >>> s=smtplib.SMTP("localhost")
- >>> print s.help()
- This is Sendmail version 8.8.4
- Topics:
- HELO EHLO MAIL RCPT DATA
- RSET NOOP QUIT HELP VRFY
- EXPN VERB ETRN DSN
- For more info use "HELP <topic>".
- To report bugs in the implementation send email to
- sendmail-bugs@sendmail.org.
- For local information send email to Postmaster at your site.
- End of HELP info
- >>> s.putcmd("vrfy","someone@here")
- >>> s.getreply()
- (250, "Somebody OverHere <somebody@here.my.org>")
- >>> s.quit()
-'''
-
-# Author: The Dragon De Monsyne <dragondm@integral.org>
-# ESMTP support, test code and doc fixes added by
-# Eric S. Raymond <esr@thyrsus.com>
-# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
-# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
-# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
-#
-# This was modified from the Python 1.5 library HTTP lib.
-
-import socket
-import re
-import email.Utils
-import base64
-import hmac
-from email.base64MIME import encode as encode_base64
-from sys import stderr
-
-__all__ = ["SMTPException","SMTPServerDisconnected","SMTPResponseException",
- "SMTPSenderRefused","SMTPRecipientsRefused","SMTPDataError",
- "SMTPConnectError","SMTPHeloError","SMTPAuthenticationError",
- "quoteaddr","quotedata","SMTP"]
-
-SMTP_PORT = 25
-CRLF="\r\n"
-
-OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
-
-# Exception classes used by this module.
-class SMTPException(Exception):
- """Base class for all exceptions raised by this module."""
-
-class SMTPServerDisconnected(SMTPException):
- """Not connected to any SMTP server.
-
- This exception is raised when the server unexpectedly disconnects,
- or when an attempt is made to use the SMTP instance before
- connecting it to a server.
- """
-
-class SMTPResponseException(SMTPException):
- """Base class for all exceptions that include an SMTP error code.
-
- These exceptions are generated in some instances when the SMTP
- server returns an error code. The error code is stored in the
- `smtp_code' attribute of the error, and the `smtp_error' attribute
- is set to the error message.
- """
-
- def __init__(self, code, msg):
- self.smtp_code = code
- self.smtp_error = msg
- self.args = (code, msg)
-
-class SMTPSenderRefused(SMTPResponseException):
- """Sender address refused.
-
- In addition to the attributes set by on all SMTPResponseException
- exceptions, this sets `sender' to the string that the SMTP refused.
- """
-
- def __init__(self, code, msg, sender):
- self.smtp_code = code
- self.smtp_error = msg
- self.sender = sender
- self.args = (code, msg, sender)
-
-class SMTPRecipientsRefused(SMTPException):
- """All recipient addresses refused.
-
- The errors for each recipient are accessible through the attribute
- 'recipients', which is a dictionary of exactly the same sort as
- SMTP.sendmail() returns.
- """
-
- def __init__(self, recipients):
- self.recipients = recipients
- self.args = ( recipients,)
-
-
-class SMTPDataError(SMTPResponseException):
- """The SMTP server didn't accept the data."""
-
-class SMTPConnectError(SMTPResponseException):
- """Error during connection establishment."""
-
-class SMTPHeloError(SMTPResponseException):
- """The server refused our HELO reply."""
-
-class SMTPAuthenticationError(SMTPResponseException):
- """Authentication error.
-
- Most probably the server didn't accept the username/password
- combination provided.
- """
-
-class SSLFakeSocket:
- """A fake socket object that really wraps a SSLObject.
-
- It only supports what is needed in smtplib.
- """
- def __init__(self, realsock, sslobj):
- self.realsock = realsock
- self.sslobj = sslobj
-
- def send(self, str):
- self.sslobj.write(str)
- return len(str)
-
- sendall = send
-
- def close(self):
- self.realsock.close()
-
-class SSLFakeFile:
- """A fake file like object that really wraps a SSLObject.
-
- It only supports what is needed in smtplib.
- """
- def __init__(self, sslobj):
- self.sslobj = sslobj
-
- def readline(self):
- str = ""
- chr = None
- while chr != "\n":
- chr = self.sslobj.read(1)
- str += chr
- return str
-
- def close(self):
- pass
-
-def quoteaddr(addr):
- """Quote a subset of the email addresses defined by RFC 821.
-
- Should be able to handle anything rfc822.parseaddr can handle.
- """
- m = (None, None)
- try:
- m = email.Utils.parseaddr(addr)[1]
- except AttributeError:
- pass
- if m == (None, None): # Indicates parse failure or AttributeError
- # something weird here.. punt -ddm
- return "<%s>" % addr
- elif m is None:
- # the sender wants an empty return address
- return "<>"
- else:
- return "<%s>" % m
-
-def quotedata(data):
- """Quote data for email.
-
- Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
- Internet CRLF end-of-line.
- """
- return re.sub(r'(?m)^\.', '..',
- re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
-
-
-class SMTP:
- """This class manages a connection to an SMTP or ESMTP server.
- SMTP Objects:
- SMTP objects have the following attributes:
- helo_resp
- This is the message given by the server in response to the
- most recent HELO command.
-
- ehlo_resp
- This is the message given by the server in response to the
- most recent EHLO command. This is usually multiline.
-
- does_esmtp
- This is a True value _after you do an EHLO command_, if the
- server supports ESMTP.
-
- esmtp_features
- This is a dictionary, which, if the server supports ESMTP,
- will _after you do an EHLO command_, contain the names of the
- SMTP service extensions this server supports, and their
- parameters (if any).
-
- Note, all extension names are mapped to lower case in the
- dictionary.
-
- See each method's docstrings for details. In general, there is a
- method of the same name to perform each SMTP command. There is also a
- method called 'sendmail' that will do an entire mail transaction.
- """
- debuglevel = 0
- file = None
- helo_resp = None
- ehlo_resp = None
- does_esmtp = 0
-
- def __init__(self, host = '', port = 0, local_hostname = None):
- """Initialize a new instance.
-
- If specified, `host' is the name of the remote host to which to
- connect. If specified, `port' specifies the port to which to connect.
- By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
- if the specified `host' doesn't respond correctly. If specified,
- `local_hostname` is used as the FQDN of the local host. By default,
- the local hostname is found using socket.getfqdn().
-
- """
- self.esmtp_features = {}
- if host:
- (code, msg) = self.connect(host, port)
- if code != 220:
- raise SMTPConnectError(code, msg)
- if local_hostname is not None:
- self.local_hostname = local_hostname
- else:
- # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
- # if that can't be calculated, that we should use a domain literal
- # instead (essentially an encoded IP address like [A.B.C.D]).
- fqdn = socket.getfqdn()
- if '.' in fqdn:
- self.local_hostname = fqdn
- else:
- # We can't find an fqdn hostname, so use a domain literal
- addr = '127.0.0.1'
- try:
- addr = socket.gethostbyname(socket.gethostname())
- except socket.gaierror:
- pass
- self.local_hostname = '[%s]' % addr
-
- def set_debuglevel(self, debuglevel):
- """Set the debug output level.
-
- A non-false value results in debug messages for connection and for all
- messages sent to and received from the server.
-
- """
- self.debuglevel = debuglevel
-
- def connect(self, host='localhost', port = 0):
- """Connect to a host on a given port.
-
- If the hostname ends with a colon (`:') followed by a number, and
- there is no port specified, that suffix will be stripped off and the
- number interpreted as the port number to use.
-
- Note: This method is automatically invoked by __init__, if a host is
- specified during instantiation.
-
- """
- if not port and (host.find(':') == host.rfind(':')):
- i = host.rfind(':')
- if i >= 0:
- host, port = host[:i], host[i+1:]
- try: port = int(port)
- except ValueError:
- raise socket.error, "nonnumeric port"
- if not port: port = SMTP_PORT
- if self.debuglevel > 0: print>>stderr, 'connect:', (host, port)
- msg = "getaddrinfo returns an empty list"
- self.sock = None
- for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- try:
- self.sock = socket.socket(af, socktype, proto)
- if self.debuglevel > 0: print>>stderr, 'connect:', sa
- self.sock.connect(sa)
- except socket.error, msg:
- if self.debuglevel > 0: print>>stderr, 'connect fail:', msg
- if self.sock:
- self.sock.close()
- self.sock = None
- continue
- break
- if not self.sock:
- raise socket.error, msg
- (code, msg) = self.getreply()
- if self.debuglevel > 0: print>>stderr, "connect:", msg
- return (code, msg)
-
- def send(self, str):
- """Send `str' to the server."""
- if self.debuglevel > 0: print>>stderr, 'send:', repr(str)
- if self.sock:
- try:
- self.sock.sendall(str)
- except socket.error:
- self.close()
- raise SMTPServerDisconnected('Server not connected')
- else:
- raise SMTPServerDisconnected('please run connect() first')
-
- def putcmd(self, cmd, args=""):
- """Send a command to the server."""
- if args == "":
- str = '%s%s' % (cmd, CRLF)
- else:
- str = '%s %s%s' % (cmd, args, CRLF)
- self.send(str)
-
- def getreply(self):
- """Get a reply from the server.
-
- Returns a tuple consisting of:
-
- - server response code (e.g. '250', or such, if all goes well)
- Note: returns -1 if it can't read response code.
-
- - server response string corresponding to response code (multiline
- responses are converted to a single, multiline string).
-
- Raises SMTPServerDisconnected if end-of-file is reached.
- """
- resp=[]
- if self.file is None:
- self.file = self.sock.makefile('rb')
- while 1:
- line = self.file.readline()
- if line == '':
- self.close()
- raise SMTPServerDisconnected("Connection unexpectedly closed")
- if self.debuglevel > 0: print>>stderr, 'reply:', repr(line)
- resp.append(line[4:].strip())
- code=line[:3]
- # Check that the error code is syntactically correct.
- # Don't attempt to read a continuation line if it is broken.
- try:
- errcode = int(code)
- except ValueError:
- errcode = -1
- break
- # Check if multiline response.
- if line[3:4]!="-":
- break
-
- errmsg = "\n".join(resp)
- if self.debuglevel > 0:
- print>>stderr, 'reply: retcode (%s); Msg: %s' % (errcode,errmsg)
- return errcode, errmsg
-
- def docmd(self, cmd, args=""):
- """Send a command, and return its response code."""
- self.putcmd(cmd,args)
- return self.getreply()
-
- # std smtp commands
- def helo(self, name=''):
- """SMTP 'helo' command.
- Hostname to send for this command defaults to the FQDN of the local
- host.
- """
- self.putcmd("helo", name or self.local_hostname)
- (code,msg)=self.getreply()
- self.helo_resp=msg
- return (code,msg)
-
- def ehlo(self, name=''):
- """ SMTP 'ehlo' command.
- Hostname to send for this command defaults to the FQDN of the local
- host.
- """
- self.esmtp_features = {}
- self.putcmd("ehlo", name or self.local_hostname)
- (code,msg)=self.getreply()
- # According to RFC1869 some (badly written)
- # MTA's will disconnect on an ehlo. Toss an exception if
- # that happens -ddm
- if code == -1 and len(msg) == 0:
- self.close()
- raise SMTPServerDisconnected("Server not connected")
- self.ehlo_resp=msg
- if code != 250:
- return (code,msg)
- self.does_esmtp=1
- #parse the ehlo response -ddm
- resp=self.ehlo_resp.split('\n')
- del resp[0]
- for each in resp:
- # To be able to communicate with as many SMTP servers as possible,
- # we have to take the old-style auth advertisement into account,
- # because:
- # 1) Else our SMTP feature parser gets confused.
- # 2) There are some servers that only advertise the auth methods we
- # support using the old style.
- auth_match = OLDSTYLE_AUTH.match(each)
- if auth_match:
- # This doesn't remove duplicates, but that's no problem
- self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
- + " " + auth_match.groups(0)[0]
- continue
-
- # RFC 1869 requires a space between ehlo keyword and parameters.
- # It's actually stricter, in that only spaces are allowed between
- # parameters, but were not going to check for that here. Note
- # that the space isn't present if there are no parameters.
- m=re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?',each)
- if m:
- feature=m.group("feature").lower()
- params=m.string[m.end("feature"):].strip()
- if feature == "auth":
- self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
- + " " + params
- else:
- self.esmtp_features[feature]=params
- return (code,msg)
-
- def has_extn(self, opt):
- """Does the server support a given SMTP service extension?"""
- return opt.lower() in self.esmtp_features
-
- def help(self, args=''):
- """SMTP 'help' command.
- Returns help text from server."""
- self.putcmd("help", args)
- return self.getreply()[1]
-
- def rset(self):
- """SMTP 'rset' command -- resets session."""
- return self.docmd("rset")
-
- def noop(self):
- """SMTP 'noop' command -- doesn't do anything :>"""
- return self.docmd("noop")
-
- def mail(self,sender,options=[]):
- """SMTP 'mail' command -- begins mail xfer session."""
- optionlist = ''
- if options and self.does_esmtp:
- optionlist = ' ' + ' '.join(options)
- self.putcmd("mail", "FROM:%s%s" % (quoteaddr(sender) ,optionlist))
- return self.getreply()
-
- def rcpt(self,recip,options=[]):
- """SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
- optionlist = ''
- if options and self.does_esmtp:
- optionlist = ' ' + ' '.join(options)
- self.putcmd("rcpt","TO:%s%s" % (quoteaddr(recip),optionlist))
- return self.getreply()
-
- def data(self,msg):
- """SMTP 'DATA' command -- sends message data to server.
-
- Automatically quotes lines beginning with a period per rfc821.
- Raises SMTPDataError if there is an unexpected reply to the
- DATA command; the return value from this method is the final
- response code received when the all data is sent.
- """
- self.putcmd("data")
- (code,repl)=self.getreply()
- if self.debuglevel >0 : print>>stderr, "data:", (code,repl)
- if code != 354:
- raise SMTPDataError(code,repl)
- else:
- q = quotedata(msg)
- if q[-2:] != CRLF:
- q = q + CRLF
- q = q + "." + CRLF
- self.send(q)
- (code,msg)=self.getreply()
- if self.debuglevel >0 : print>>stderr, "data:", (code,msg)
- return (code,msg)
-
- def verify(self, address):
- """SMTP 'verify' command -- checks for address validity."""
- self.putcmd("vrfy", quoteaddr(address))
- return self.getreply()
- # a.k.a.
- vrfy=verify
-
- def expn(self, address):
- """SMTP 'verify' command -- checks for address validity."""
- self.putcmd("expn", quoteaddr(address))
- return self.getreply()
-
- # some useful methods
-
- def login(self, user, password):
- """Log in on an SMTP server that requires authentication.
-
- The arguments are:
- - user: The user name to authenticate with.
- - password: The password for the authentication.
-
- If there has been no previous EHLO or HELO command this session, this
- method tries ESMTP EHLO first.
-
- This method will return normally if the authentication was successful.
-
- This method may raise the following exceptions:
-
- SMTPHeloError The server didn't reply properly to
- the helo greeting.
- SMTPAuthenticationError The server didn't accept the username/
- password combination.
- SMTPException No suitable authentication method was
- found.
- """
-
- def encode_cram_md5(challenge, user, password):
- challenge = base64.decodestring(challenge)
- response = user + " " + hmac.HMAC(password, challenge).hexdigest()
- return encode_base64(response, eol="")
-
- def encode_plain(user, password):
- return encode_base64("\0%s\0%s" % (user, password), eol="")
-
-
- AUTH_PLAIN = "PLAIN"
- AUTH_CRAM_MD5 = "CRAM-MD5"
- AUTH_LOGIN = "LOGIN"
-
- if self.helo_resp is None and self.ehlo_resp is None:
- if not (200 <= self.ehlo()[0] <= 299):
- (code, resp) = self.helo()
- if not (200 <= code <= 299):
- raise SMTPHeloError(code, resp)
-
- if not self.has_extn("auth"):
- raise SMTPException("SMTP AUTH extension not supported by server.")
-
- # Authentication methods the server supports:
- authlist = self.esmtp_features["auth"].split()
-
- # List of authentication methods we support: from preferred to
- # less preferred methods. Except for the purpose of testing the weaker
- # ones, we prefer stronger methods like CRAM-MD5:
- preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
-
- # Determine the authentication method we'll use
- authmethod = None
- for method in preferred_auths:
- if method in authlist:
- authmethod = method
- break
-
- if authmethod == AUTH_CRAM_MD5:
- (code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
- if code == 503:
- # 503 == 'Error: already authenticated'
- return (code, resp)
- (code, resp) = self.docmd(encode_cram_md5(resp, user, password))
- elif authmethod == AUTH_PLAIN:
- (code, resp) = self.docmd("AUTH",
- AUTH_PLAIN + " " + encode_plain(user, password))
- elif authmethod == AUTH_LOGIN:
- (code, resp) = self.docmd("AUTH",
- "%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
- if code != 334:
- raise SMTPAuthenticationError(code, resp)
- (code, resp) = self.docmd(encode_base64(password, eol=""))
- elif authmethod is None:
- raise SMTPException("No suitable authentication method found.")
- if code not in (235, 503):
- # 235 == 'Authentication successful'
- # 503 == 'Error: already authenticated'
- raise SMTPAuthenticationError(code, resp)
- return (code, resp)
-
- def starttls(self, keyfile = None, certfile = None):
- """Puts the connection to the SMTP server into TLS mode.
-
- If the server supports TLS, this will encrypt the rest of the SMTP
- session. If you provide the keyfile and certfile parameters,
- the identity of the SMTP server and client can be checked. This,
- however, depends on whether the socket module really checks the
- certificates.
- """
- (resp, reply) = self.docmd("STARTTLS")
- if resp == 220:
- sslobj = socket.ssl(self.sock, keyfile, certfile)
- self.sock = SSLFakeSocket(self.sock, sslobj)
- self.file = SSLFakeFile(sslobj)
- return (resp, reply)
-
- def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
- rcpt_options=[]):
- """This command performs an entire mail transaction.
-
- The arguments are:
- - from_addr : The address sending this mail.
- - to_addrs : A list of addresses to send this mail to. A bare
- string will be treated as a list with 1 address.
- - msg : The message to send.
- - mail_options : List of ESMTP options (such as 8bitmime) for the
- mail command.
- - rcpt_options : List of ESMTP options (such as DSN commands) for
- all the rcpt commands.
-
- If there has been no previous EHLO or HELO command this session, this
- method tries ESMTP EHLO first. If the server does ESMTP, message size
- and each of the specified options will be passed to it. If EHLO
- fails, HELO will be tried and ESMTP options suppressed.
-
- This method will return normally if the mail is accepted for at least
- one recipient. It returns a dictionary, with one entry for each
- recipient that was refused. Each entry contains a tuple of the SMTP
- error code and the accompanying error message sent by the server.
-
- This method may raise the following exceptions:
-
- SMTPHeloError The server didn't reply properly to
- the helo greeting.
- SMTPRecipientsRefused The server rejected ALL recipients
- (no mail was sent).
- SMTPSenderRefused The server didn't accept the from_addr.
- SMTPDataError The server replied with an unexpected
- error code (other than a refusal of
- a recipient).
-
- Note: the connection will be open even after an exception is raised.
-
- Example:
-
- >>> import smtplib
- >>> s=smtplib.SMTP("localhost")
- >>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
- >>> msg = '''\\
- ... From: Me@my.org
- ... Subject: testin'...
- ...
- ... This is a test '''
- >>> s.sendmail("me@my.org",tolist,msg)
- { "three@three.org" : ( 550 ,"User unknown" ) }
- >>> s.quit()
-
- In the above example, the message was accepted for delivery to three
- of the four addresses, and one was rejected, with the error code
- 550. If all addresses are accepted, then the method will return an
- empty dictionary.
-
- """
- if self.helo_resp is None and self.ehlo_resp is None:
- if not (200 <= self.ehlo()[0] <= 299):
- (code,resp) = self.helo()
- if not (200 <= code <= 299):
- raise SMTPHeloError(code, resp)
- esmtp_opts = []
- if self.does_esmtp:
- # Hmmm? what's this? -ddm
- # self.esmtp_features['7bit']=""
- if self.has_extn('size'):
- esmtp_opts.append("size=%d" % len(msg))
- for option in mail_options:
- esmtp_opts.append(option)
-
- (code,resp) = self.mail(from_addr, esmtp_opts)
- if code != 250:
- self.rset()
- raise SMTPSenderRefused(code, resp, from_addr)
- senderrs={}
- if isinstance(to_addrs, basestring):
- to_addrs = [to_addrs]
- for each in to_addrs:
- (code,resp)=self.rcpt(each, rcpt_options)
- if (code != 250) and (code != 251):
- senderrs[each]=(code,resp)
- if len(senderrs)==len(to_addrs):
- # the server refused all our recipients
- self.rset()
- raise SMTPRecipientsRefused(senderrs)
- (code,resp) = self.data(msg)
- if code != 250:
- self.rset()
- raise SMTPDataError(code, resp)
- #if we got here then somebody got our mail
- return senderrs
-
-
- def close(self):
- """Close the connection to the SMTP server."""
- if self.file:
- self.file.close()
- self.file = None
- if self.sock:
- self.sock.close()
- self.sock = None
-
-
- def quit(self):
- """Terminate the SMTP session."""
- self.docmd("quit")
- self.close()
-
-
-# Test the sendmail method, which tests most of the others.
-# Note: This always sends to localhost.
-if __name__ == '__main__':
- import sys
-
- def prompt(prompt):
- sys.stdout.write(prompt + ": ")
- return sys.stdin.readline().strip()
-
- fromaddr = prompt("From")
- toaddrs = prompt("To").split(',')
- print "Enter message, end with ^D:"
- msg = ''
- while 1:
- line = sys.stdin.readline()
- if not line:
- break
- msg = msg + line
- print "Message length is %d" % len(msg)
-
- server = SMTP('localhost')
- server.set_debuglevel(1)
- server.sendmail(fromaddr, toaddrs, msg)
- server.quit()
diff --git a/sys/lib/python/sndhdr.py b/sys/lib/python/sndhdr.py
deleted file mode 100644
index df2ccf17b..000000000
--- a/sys/lib/python/sndhdr.py
+++ /dev/null
@@ -1,228 +0,0 @@
-"""Routines to help recognizing sound files.
-
-Function whathdr() recognizes various types of sound file headers.
-It understands almost all headers that SOX can decode.
-
-The return tuple contains the following items, in this order:
-- file type (as SOX understands it)
-- sampling rate (0 if unknown or hard to decode)
-- number of channels (0 if unknown or hard to decode)
-- number of frames in the file (-1 if unknown or hard to decode)
-- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
-
-If the file doesn't have a recognizable type, it returns None.
-If the file can't be opened, IOError is raised.
-
-To compute the total time, divide the number of frames by the
-sampling rate (a frame contains a sample for each channel).
-
-Function what() calls whathdr(). (It used to also use some
-heuristics for raw data, but this doesn't work very well.)
-
-Finally, the function test() is a simple main program that calls
-what() for all files mentioned on the argument list. For directory
-arguments it calls what() for all files in that directory. Default
-argument is "." (testing all files in the current directory). The
-option -r tells it to recurse down directories found inside
-explicitly given directories.
-"""
-
-# The file structure is top-down except that the test program and its
-# subroutine come last.
-
-__all__ = ["what","whathdr"]
-
-def what(filename):
- """Guess the type of a sound file"""
- res = whathdr(filename)
- return res
-
-
-def whathdr(filename):
- """Recognize sound headers"""
- f = open(filename, 'rb')
- h = f.read(512)
- for tf in tests:
- res = tf(h, f)
- if res:
- return res
- return None
-
-
-#-----------------------------------#
-# Subroutines per sound header type #
-#-----------------------------------#
-
-tests = []
-
-def test_aifc(h, f):
- import aifc
- if h[:4] != 'FORM':
- return None
- if h[8:12] == 'AIFC':
- fmt = 'aifc'
- elif h[8:12] == 'AIFF':
- fmt = 'aiff'
- else:
- return None
- f.seek(0)
- try:
- a = aifc.openfp(f, 'r')
- except (EOFError, aifc.Error):
- return None
- return (fmt, a.getframerate(), a.getnchannels(), \
- a.getnframes(), 8*a.getsampwidth())
-
-tests.append(test_aifc)
-
-
-def test_au(h, f):
- if h[:4] == '.snd':
- f = get_long_be
- elif h[:4] in ('\0ds.', 'dns.'):
- f = get_long_le
- else:
- return None
- type = 'au'
- hdr_size = f(h[4:8])
- data_size = f(h[8:12])
- encoding = f(h[12:16])
- rate = f(h[16:20])
- nchannels = f(h[20:24])
- sample_size = 1 # default
- if encoding == 1:
- sample_bits = 'U'
- elif encoding == 2:
- sample_bits = 8
- elif encoding == 3:
- sample_bits = 16
- sample_size = 2
- else:
- sample_bits = '?'
- frame_size = sample_size * nchannels
- return type, rate, nchannels, data_size/frame_size, sample_bits
-
-tests.append(test_au)
-
-
-def test_hcom(h, f):
- if h[65:69] != 'FSSD' or h[128:132] != 'HCOM':
- return None
- divisor = get_long_be(h[128+16:128+20])
- return 'hcom', 22050/divisor, 1, -1, 8
-
-tests.append(test_hcom)
-
-
-def test_voc(h, f):
- if h[:20] != 'Creative Voice File\032':
- return None
- sbseek = get_short_le(h[20:22])
- rate = 0
- if 0 <= sbseek < 500 and h[sbseek] == '\1':
- ratecode = ord(h[sbseek+4])
- rate = int(1000000.0 / (256 - ratecode))
- return 'voc', rate, 1, -1, 8
-
-tests.append(test_voc)
-
-
-def test_wav(h, f):
- # 'RIFF' <len> 'WAVE' 'fmt ' <len>
- if h[:4] != 'RIFF' or h[8:12] != 'WAVE' or h[12:16] != 'fmt ':
- return None
- style = get_short_le(h[20:22])
- nchannels = get_short_le(h[22:24])
- rate = get_long_le(h[24:28])
- sample_bits = get_short_le(h[34:36])
- return 'wav', rate, nchannels, -1, sample_bits
-
-tests.append(test_wav)
-
-
-def test_8svx(h, f):
- if h[:4] != 'FORM' or h[8:12] != '8SVX':
- return None
- # Should decode it to get #channels -- assume always 1
- return '8svx', 0, 1, 0, 8
-
-tests.append(test_8svx)
-
-
-def test_sndt(h, f):
- if h[:5] == 'SOUND':
- nsamples = get_long_le(h[8:12])
- rate = get_short_le(h[20:22])
- return 'sndt', rate, 1, nsamples, 8
-
-tests.append(test_sndt)
-
-
-def test_sndr(h, f):
- if h[:2] == '\0\0':
- rate = get_short_le(h[2:4])
- if 4000 <= rate <= 25000:
- return 'sndr', rate, 1, -1, 8
-
-tests.append(test_sndr)
-
-
-#---------------------------------------------#
-# Subroutines to extract numbers from strings #
-#---------------------------------------------#
-
-def get_long_be(s):
- return (ord(s[0])<<24) | (ord(s[1])<<16) | (ord(s[2])<<8) | ord(s[3])
-
-def get_long_le(s):
- return (ord(s[3])<<24) | (ord(s[2])<<16) | (ord(s[1])<<8) | ord(s[0])
-
-def get_short_be(s):
- return (ord(s[0])<<8) | ord(s[1])
-
-def get_short_le(s):
- return (ord(s[1])<<8) | ord(s[0])
-
-
-#--------------------#
-# Small test program #
-#--------------------#
-
-def test():
- import sys
- recursive = 0
- if sys.argv[1:] and sys.argv[1] == '-r':
- del sys.argv[1:2]
- recursive = 1
- try:
- if sys.argv[1:]:
- testall(sys.argv[1:], recursive, 1)
- else:
- testall(['.'], recursive, 1)
- except KeyboardInterrupt:
- sys.stderr.write('\n[Interrupted]\n')
- sys.exit(1)
-
-def testall(list, recursive, toplevel):
- import sys
- import os
- for filename in list:
- if os.path.isdir(filename):
- print filename + '/:',
- if recursive or toplevel:
- print 'recursing down:'
- import glob
- names = glob.glob(os.path.join(filename, '*'))
- testall(names, recursive, 0)
- else:
- print '*** directory (use -r) ***'
- else:
- print filename + ':',
- sys.stdout.flush()
- try:
- print what(filename)
- except IOError:
- print '*** not found ***'
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/socket.py b/sys/lib/python/socket.py
deleted file mode 100644
index 0082e7656..000000000
--- a/sys/lib/python/socket.py
+++ /dev/null
@@ -1,414 +0,0 @@
-# Wrapper module for _socket, providing some additional facilities
-# implemented in Python.
-
-"""\
-This module provides socket operations and some related functions.
-On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
-On other systems, it only supports IP. Functions specific for a
-socket are available as methods of the socket object.
-
-Functions:
-
-socket() -- create a new socket object
-socketpair() -- create a pair of new socket objects [*]
-fromfd() -- create a socket object from an open file descriptor [*]
-gethostname() -- return the current hostname
-gethostbyname() -- map a hostname to its IP number
-gethostbyaddr() -- map an IP number or hostname to DNS info
-getservbyname() -- map a service name and a protocol name to a port number
-getprotobyname() -- mape a protocol name (e.g. 'tcp') to a number
-ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
-htons(), htonl() -- convert 16, 32 bit int from host to network byte order
-inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
-inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
-ssl() -- secure socket layer support (only available if configured)
-socket.getdefaulttimeout() -- get the default timeout value
-socket.setdefaulttimeout() -- set the default timeout value
-
- [*] not available on all platforms!
-
-Special objects:
-
-SocketType -- type object for socket objects
-error -- exception raised for I/O errors
-has_ipv6 -- boolean value indicating if IPv6 is supported
-
-Integer constants:
-
-AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
-SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
-
-Many other constants may be defined; these may be used in calls to
-the setsockopt() and getsockopt() methods.
-"""
-
-import _socket
-from _socket import *
-
-_have_ssl = False
-try:
- import _ssl
- from _ssl import *
- _have_ssl = True
-except ImportError:
- pass
-
-import os, sys
-
-try:
- from errno import EBADF
-except ImportError:
- EBADF = 9
-
-__all__ = ["getfqdn"]
-__all__.extend(os._get_exports_list(_socket))
-if _have_ssl:
- __all__.extend(os._get_exports_list(_ssl))
-
-_realsocket = socket
-if _have_ssl:
- _realssl = ssl
- def ssl(sock, keyfile=None, certfile=None):
- if hasattr(sock, "_sock"):
- sock = sock._sock
- return _realssl(sock, keyfile, certfile)
-
-# WSA error codes
-if sys.platform.lower().startswith("win"):
- errorTab = {}
- errorTab[10004] = "The operation was interrupted."
- errorTab[10009] = "A bad file handle was passed."
- errorTab[10013] = "Permission denied."
- errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
- errorTab[10022] = "An invalid operation was attempted."
- errorTab[10035] = "The socket operation would block"
- errorTab[10036] = "A blocking operation is already in progress."
- errorTab[10048] = "The network address is in use."
- errorTab[10054] = "The connection has been reset."
- errorTab[10058] = "The network has been shut down."
- errorTab[10060] = "The operation timed out."
- errorTab[10061] = "Connection refused."
- errorTab[10063] = "The name is too long."
- errorTab[10064] = "The host is down."
- errorTab[10065] = "The host is unreachable."
- __all__.append("errorTab")
-
-
-
-def getfqdn(name=''):
- """Get fully qualified domain name from name.
-
- An empty argument is interpreted as meaning the local host.
-
- First the hostname returned by gethostbyaddr() is checked, then
- possibly existing aliases. In case no FQDN is available, hostname
- from gethostname() is returned.
- """
- name = name.strip()
- if not name or name == '0.0.0.0':
- name = gethostname()
- try:
- hostname, aliases, ipaddrs = gethostbyaddr(name)
- except error:
- pass
- else:
- aliases.insert(0, hostname)
- for name in aliases:
- if '.' in name:
- break
- else:
- name = hostname
- return name
-
-
-_socketmethods = (
- 'bind', 'connect', 'connect_ex', 'fileno', 'listen',
- 'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
- 'sendall', 'setblocking',
- 'settimeout', 'gettimeout', 'shutdown')
-
-if sys.platform == "riscos":
- _socketmethods = _socketmethods + ('sleeptaskw',)
-
-# All the method names that must be delegated to either the real socket
-# object or the _closedsocket object.
-_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
- "send", "sendto")
-
-class _closedsocket(object):
- __slots__ = []
- def _dummy(*args):
- raise error(EBADF, 'Bad file descriptor')
- # All _delegate_methods must also be initialized here.
- send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
- __getattr__ = _dummy
-
-class _socketobject(object):
-
- __doc__ = _realsocket.__doc__
-
- __slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)
-
- def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
- if _sock is None:
- _sock = _realsocket(family, type, proto)
- self._sock = _sock
- for method in _delegate_methods:
- setattr(self, method, getattr(_sock, method))
-
- def close(self):
- self._sock = _closedsocket()
- dummy = self._sock._dummy
- for method in _delegate_methods:
- setattr(self, method, dummy)
- close.__doc__ = _realsocket.close.__doc__
-
- def accept(self):
- sock, addr = self._sock.accept()
- return _socketobject(_sock=sock), addr
- accept.__doc__ = _realsocket.accept.__doc__
-
- def dup(self):
- """dup() -> socket object
-
- Return a new socket object connected to the same system resource."""
- return _socketobject(_sock=self._sock)
-
- def makefile(self, mode='r', bufsize=-1):
- """makefile([mode[, bufsize]]) -> file object
-
- Return a regular file object corresponding to the socket. The mode
- and bufsize arguments are as for the built-in open() function."""
- return _fileobject(self._sock, mode, bufsize)
-
- family = property(lambda self: self._sock.family, doc="the socket family")
- type = property(lambda self: self._sock.type, doc="the socket type")
- proto = property(lambda self: self._sock.proto, doc="the socket protocol")
-
- _s = ("def %s(self, *args): return self._sock.%s(*args)\n\n"
- "%s.__doc__ = _realsocket.%s.__doc__\n")
- for _m in _socketmethods:
- exec _s % (_m, _m, _m, _m)
- del _m, _s
-
-socket = SocketType = _socketobject
-
-class _fileobject(object):
- """Faux file object attached to a socket object."""
-
- default_bufsize = 8192
- name = "<socket>"
-
- __slots__ = ["mode", "bufsize", "softspace",
- # "closed" is a property, see below
- "_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf",
- "_close"]
-
- def __init__(self, sock, mode='rb', bufsize=-1, close=False):
- self._sock = sock
- self.mode = mode # Not actually used in this version
- if bufsize < 0:
- bufsize = self.default_bufsize
- self.bufsize = bufsize
- self.softspace = False
- if bufsize == 0:
- self._rbufsize = 1
- elif bufsize == 1:
- self._rbufsize = self.default_bufsize
- else:
- self._rbufsize = bufsize
- self._wbufsize = bufsize
- self._rbuf = "" # A string
- self._wbuf = [] # A list of strings
- self._close = close
-
- def _getclosed(self):
- return self._sock is None
- closed = property(_getclosed, doc="True if the file is closed")
-
- def close(self):
- try:
- if self._sock:
- self.flush()
- finally:
- if self._close:
- self._sock.close()
- self._sock = None
-
- def __del__(self):
- try:
- self.close()
- except:
- # close() may fail if __init__ didn't complete
- pass
-
- def flush(self):
- if self._wbuf:
- buffer = "".join(self._wbuf)
- self._wbuf = []
- self._sock.sendall(buffer)
-
- def fileno(self):
- return self._sock.fileno()
-
- def write(self, data):
- data = str(data) # XXX Should really reject non-string non-buffers
- if not data:
- return
- self._wbuf.append(data)
- if (self._wbufsize == 0 or
- self._wbufsize == 1 and '\n' in data or
- self._get_wbuf_len() >= self._wbufsize):
- self.flush()
-
- def writelines(self, list):
- # XXX We could do better here for very long lists
- # XXX Should really reject non-string non-buffers
- self._wbuf.extend(filter(None, map(str, list)))
- if (self._wbufsize <= 1 or
- self._get_wbuf_len() >= self._wbufsize):
- self.flush()
-
- def _get_wbuf_len(self):
- buf_len = 0
- for x in self._wbuf:
- buf_len += len(x)
- return buf_len
-
- def read(self, size=-1):
- data = self._rbuf
- if size < 0:
- # Read until EOF
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- if self._rbufsize <= 1:
- recv_size = self.default_bufsize
- else:
- recv_size = self._rbufsize
- while True:
- data = self._sock.recv(recv_size)
- if not data:
- break
- buffers.append(data)
- return "".join(buffers)
- else:
- # Read until size bytes or EOF seen, whichever comes first
- buf_len = len(data)
- if buf_len >= size:
- self._rbuf = data[size:]
- return data[:size]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- left = size - buf_len
- recv_size = max(self._rbufsize, left)
- data = self._sock.recv(recv_size)
- if not data:
- break
- buffers.append(data)
- n = len(data)
- if n >= left:
- self._rbuf = data[left:]
- buffers[-1] = data[:left]
- break
- buf_len += n
- return "".join(buffers)
-
- def readline(self, size=-1):
- data = self._rbuf
- if size < 0:
- # Read until \n or EOF, whichever comes first
- if self._rbufsize <= 1:
- # Speed up unbuffered case
- assert data == ""
- buffers = []
- recv = self._sock.recv
- while data != "\n":
- data = recv(1)
- if not data:
- break
- buffers.append(data)
- return "".join(buffers)
- nl = data.find('\n')
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- return data[:nl]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- data = self._sock.recv(self._rbufsize)
- if not data:
- break
- buffers.append(data)
- nl = data.find('\n')
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- buffers[-1] = data[:nl]
- break
- return "".join(buffers)
- else:
- # Read until size bytes or \n or EOF seen, whichever comes first
- nl = data.find('\n', 0, size)
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- return data[:nl]
- buf_len = len(data)
- if buf_len >= size:
- self._rbuf = data[size:]
- return data[:size]
- buffers = []
- if data:
- buffers.append(data)
- self._rbuf = ""
- while True:
- data = self._sock.recv(self._rbufsize)
- if not data:
- break
- buffers.append(data)
- left = size - buf_len
- nl = data.find('\n', 0, left)
- if nl >= 0:
- nl += 1
- self._rbuf = data[nl:]
- buffers[-1] = data[:nl]
- break
- n = len(data)
- if n >= left:
- self._rbuf = data[left:]
- buffers[-1] = data[:left]
- break
- buf_len += n
- return "".join(buffers)
-
- def readlines(self, sizehint=0):
- total = 0
- list = []
- while True:
- line = self.readline()
- if not line:
- break
- list.append(line)
- total += len(line)
- if sizehint and total >= sizehint:
- break
- return list
-
- # Iterator protocols
-
- def __iter__(self):
- return self
-
- def next(self):
- line = self.readline()
- if not line:
- raise StopIteration
- return line
diff --git a/sys/lib/python/sqlite3/__init__.py b/sys/lib/python/sqlite3/__init__.py
deleted file mode 100644
index 41ef2b76d..000000000
--- a/sys/lib/python/sqlite3/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#-*- coding: ISO-8859-1 -*-
-# pysqlite2/__init__.py: the pysqlite2 package.
-#
-# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
-#
-# This file is part of pysqlite.
-#
-# This software is provided 'as-is', without any express or implied
-# warranty. In no event will the authors be held liable for any damages
-# arising from the use of this software.
-#
-# Permission is granted to anyone to use this software for any purpose,
-# including commercial applications, and to alter it and redistribute it
-# freely, subject to the following restrictions:
-#
-# 1. The origin of this software must not be misrepresented; you must not
-# claim that you wrote the original software. If you use this software
-# in a product, an acknowledgment in the product documentation would be
-# appreciated but is not required.
-# 2. Altered source versions must be plainly marked as such, and must not be
-# misrepresented as being the original software.
-# 3. This notice may not be removed or altered from any source distribution.
-
-from dbapi2 import *
diff --git a/sys/lib/python/sqlite3/dbapi2.py b/sys/lib/python/sqlite3/dbapi2.py
deleted file mode 100644
index 665dbb276..000000000
--- a/sys/lib/python/sqlite3/dbapi2.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#-*- coding: ISO-8859-1 -*-
-# pysqlite2/dbapi2.py: the DB-API 2.0 interface
-#
-# Copyright (C) 2004-2005 Gerhard Häring <gh@ghaering.de>
-#
-# This file is part of pysqlite.
-#
-# This software is provided 'as-is', without any express or implied
-# warranty. In no event will the authors be held liable for any damages
-# arising from the use of this software.
-#
-# Permission is granted to anyone to use this software for any purpose,
-# including commercial applications, and to alter it and redistribute it
-# freely, subject to the following restrictions:
-#
-# 1. The origin of this software must not be misrepresented; you must not
-# claim that you wrote the original software. If you use this software
-# in a product, an acknowledgment in the product documentation would be
-# appreciated but is not required.
-# 2. Altered source versions must be plainly marked as such, and must not be
-# misrepresented as being the original software.
-# 3. This notice may not be removed or altered from any source distribution.
-
-import datetime
-import time
-
-from _sqlite3 import *
-
-paramstyle = "qmark"
-
-threadsafety = 1
-
-apilevel = "2.0"
-
-Date = datetime.date
-
-Time = datetime.time
-
-Timestamp = datetime.datetime
-
-def DateFromTicks(ticks):
- return apply(Date, time.localtime(ticks)[:3])
-
-def TimeFromTicks(ticks):
- return apply(Time, time.localtime(ticks)[3:6])
-
-def TimestampFromTicks(ticks):
- return apply(Timestamp, time.localtime(ticks)[:6])
-
-version_info = tuple([int(x) for x in version.split(".")])
-sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
-
-Binary = buffer
-
-def register_adapters_and_converters():
- def adapt_date(val):
- return val.isoformat()
-
- def adapt_datetime(val):
- return val.isoformat(" ")
-
- def convert_date(val):
- return datetime.date(*map(int, val.split("-")))
-
- def convert_timestamp(val):
- datepart, timepart = val.split(" ")
- year, month, day = map(int, datepart.split("-"))
- timepart_full = timepart.split(".")
- hours, minutes, seconds = map(int, timepart_full[0].split(":"))
- if len(timepart_full) == 2:
- microseconds = int(float("0." + timepart_full[1]) * 1000000)
- else:
- microseconds = 0
-
- val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
- return val
-
-
- register_adapter(datetime.date, adapt_date)
- register_adapter(datetime.datetime, adapt_datetime)
- register_converter("date", convert_date)
- register_converter("timestamp", convert_timestamp)
-
-register_adapters_and_converters()
-
-# Clean up namespace
-
-del(register_adapters_and_converters)
diff --git a/sys/lib/python/sre.py b/sys/lib/python/sre.py
deleted file mode 100644
index c04576baf..000000000
--- a/sys/lib/python/sre.py
+++ /dev/null
@@ -1,13 +0,0 @@
-"""This file is only retained for backwards compatibility.
-It will be removed in the future. sre was moved to re in version 2.5.
-"""
-
-import warnings
-warnings.warn("The sre module is deprecated, please import re.",
- DeprecationWarning, 2)
-
-from re import *
-from re import __all__
-
-# old pickles expect the _compile() reconstructor in this module
-from re import _compile
diff --git a/sys/lib/python/sre_compile.py b/sys/lib/python/sre_compile.py
deleted file mode 100644
index 1d448dae2..000000000
--- a/sys/lib/python/sre_compile.py
+++ /dev/null
@@ -1,531 +0,0 @@
-#
-# Secret Labs' Regular Expression Engine
-#
-# convert template to internal format
-#
-# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
-#
-# See the sre.py file for information on usage and redistribution.
-#
-
-"""Internal support module for sre"""
-
-import _sre, sys
-
-from sre_constants import *
-
-assert _sre.MAGIC == MAGIC, "SRE module mismatch"
-
-if _sre.CODESIZE == 2:
- MAXCODE = 65535
-else:
- MAXCODE = 0xFFFFFFFFL
-
-def _identityfunction(x):
- return x
-
-def set(seq):
- s = {}
- for elem in seq:
- s[elem] = 1
- return s
-
-_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
-_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
-_SUCCESS_CODES = set([SUCCESS, FAILURE])
-_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
-
-def _compile(code, pattern, flags):
- # internal: compile a (sub)pattern
- emit = code.append
- _len = len
- LITERAL_CODES = _LITERAL_CODES
- REPEATING_CODES = _REPEATING_CODES
- SUCCESS_CODES = _SUCCESS_CODES
- ASSERT_CODES = _ASSERT_CODES
- for op, av in pattern:
- if op in LITERAL_CODES:
- if flags & SRE_FLAG_IGNORECASE:
- emit(OPCODES[OP_IGNORE[op]])
- emit(_sre.getlower(av, flags))
- else:
- emit(OPCODES[op])
- emit(av)
- elif op is IN:
- if flags & SRE_FLAG_IGNORECASE:
- emit(OPCODES[OP_IGNORE[op]])
- def fixup(literal, flags=flags):
- return _sre.getlower(literal, flags)
- else:
- emit(OPCODES[op])
- fixup = _identityfunction
- skip = _len(code); emit(0)
- _compile_charset(av, flags, code, fixup)
- code[skip] = _len(code) - skip
- elif op is ANY:
- if flags & SRE_FLAG_DOTALL:
- emit(OPCODES[ANY_ALL])
- else:
- emit(OPCODES[ANY])
- elif op in REPEATING_CODES:
- if flags & SRE_FLAG_TEMPLATE:
- raise error, "internal: unsupported template operator"
- emit(OPCODES[REPEAT])
- skip = _len(code); emit(0)
- emit(av[0])
- emit(av[1])
- _compile(code, av[2], flags)
- emit(OPCODES[SUCCESS])
- code[skip] = _len(code) - skip
- elif _simple(av) and op is not REPEAT:
- if op is MAX_REPEAT:
- emit(OPCODES[REPEAT_ONE])
- else:
- emit(OPCODES[MIN_REPEAT_ONE])
- skip = _len(code); emit(0)
- emit(av[0])
- emit(av[1])
- _compile(code, av[2], flags)
- emit(OPCODES[SUCCESS])
- code[skip] = _len(code) - skip
- else:
- emit(OPCODES[REPEAT])
- skip = _len(code); emit(0)
- emit(av[0])
- emit(av[1])
- _compile(code, av[2], flags)
- code[skip] = _len(code) - skip
- if op is MAX_REPEAT:
- emit(OPCODES[MAX_UNTIL])
- else:
- emit(OPCODES[MIN_UNTIL])
- elif op is SUBPATTERN:
- if av[0]:
- emit(OPCODES[MARK])
- emit((av[0]-1)*2)
- # _compile_info(code, av[1], flags)
- _compile(code, av[1], flags)
- if av[0]:
- emit(OPCODES[MARK])
- emit((av[0]-1)*2+1)
- elif op in SUCCESS_CODES:
- emit(OPCODES[op])
- elif op in ASSERT_CODES:
- emit(OPCODES[op])
- skip = _len(code); emit(0)
- if av[0] >= 0:
- emit(0) # look ahead
- else:
- lo, hi = av[1].getwidth()
- if lo != hi:
- raise error, "look-behind requires fixed-width pattern"
- emit(lo) # look behind
- _compile(code, av[1], flags)
- emit(OPCODES[SUCCESS])
- code[skip] = _len(code) - skip
- elif op is CALL:
- emit(OPCODES[op])
- skip = _len(code); emit(0)
- _compile(code, av, flags)
- emit(OPCODES[SUCCESS])
- code[skip] = _len(code) - skip
- elif op is AT:
- emit(OPCODES[op])
- if flags & SRE_FLAG_MULTILINE:
- av = AT_MULTILINE.get(av, av)
- if flags & SRE_FLAG_LOCALE:
- av = AT_LOCALE.get(av, av)
- elif flags & SRE_FLAG_UNICODE:
- av = AT_UNICODE.get(av, av)
- emit(ATCODES[av])
- elif op is BRANCH:
- emit(OPCODES[op])
- tail = []
- tailappend = tail.append
- for av in av[1]:
- skip = _len(code); emit(0)
- # _compile_info(code, av, flags)
- _compile(code, av, flags)
- emit(OPCODES[JUMP])
- tailappend(_len(code)); emit(0)
- code[skip] = _len(code) - skip
- emit(0) # end of branch
- for tail in tail:
- code[tail] = _len(code) - tail
- elif op is CATEGORY:
- emit(OPCODES[op])
- if flags & SRE_FLAG_LOCALE:
- av = CH_LOCALE[av]
- elif flags & SRE_FLAG_UNICODE:
- av = CH_UNICODE[av]
- emit(CHCODES[av])
- elif op is GROUPREF:
- if flags & SRE_FLAG_IGNORECASE:
- emit(OPCODES[OP_IGNORE[op]])
- else:
- emit(OPCODES[op])
- emit(av-1)
- elif op is GROUPREF_EXISTS:
- emit(OPCODES[op])
- emit(av[0]-1)
- skipyes = _len(code); emit(0)
- _compile(code, av[1], flags)
- if av[2]:
- emit(OPCODES[JUMP])
- skipno = _len(code); emit(0)
- code[skipyes] = _len(code) - skipyes + 1
- _compile(code, av[2], flags)
- code[skipno] = _len(code) - skipno
- else:
- code[skipyes] = _len(code) - skipyes + 1
- else:
- raise ValueError, ("unsupported operand type", op)
-
-def _compile_charset(charset, flags, code, fixup=None):
- # compile charset subprogram
- emit = code.append
- if fixup is None:
- fixup = _identityfunction
- for op, av in _optimize_charset(charset, fixup):
- emit(OPCODES[op])
- if op is NEGATE:
- pass
- elif op is LITERAL:
- emit(fixup(av))
- elif op is RANGE:
- emit(fixup(av[0]))
- emit(fixup(av[1]))
- elif op is CHARSET:
- code.extend(av)
- elif op is BIGCHARSET:
- code.extend(av)
- elif op is CATEGORY:
- if flags & SRE_FLAG_LOCALE:
- emit(CHCODES[CH_LOCALE[av]])
- elif flags & SRE_FLAG_UNICODE:
- emit(CHCODES[CH_UNICODE[av]])
- else:
- emit(CHCODES[av])
- else:
- raise error, "internal: unsupported set operator"
- emit(OPCODES[FAILURE])
-
-def _optimize_charset(charset, fixup):
- # internal: optimize character set
- out = []
- outappend = out.append
- charmap = [0]*256
- try:
- for op, av in charset:
- if op is NEGATE:
- outappend((op, av))
- elif op is LITERAL:
- charmap[fixup(av)] = 1
- elif op is RANGE:
- for i in range(fixup(av[0]), fixup(av[1])+1):
- charmap[i] = 1
- elif op is CATEGORY:
- # XXX: could append to charmap tail
- return charset # cannot compress
- except IndexError:
- # character set contains unicode characters
- return _optimize_unicode(charset, fixup)
- # compress character map
- i = p = n = 0
- runs = []
- runsappend = runs.append
- for c in charmap:
- if c:
- if n == 0:
- p = i
- n = n + 1
- elif n:
- runsappend((p, n))
- n = 0
- i = i + 1
- if n:
- runsappend((p, n))
- if len(runs) <= 2:
- # use literal/range
- for p, n in runs:
- if n == 1:
- outappend((LITERAL, p))
- else:
- outappend((RANGE, (p, p+n-1)))
- if len(out) < len(charset):
- return out
- else:
- # use bitmap
- data = _mk_bitmap(charmap)
- outappend((CHARSET, data))
- return out
- return charset
-
-def _mk_bitmap(bits):
- data = []
- dataappend = data.append
- if _sre.CODESIZE == 2:
- start = (1, 0)
- else:
- start = (1L, 0L)
- m, v = start
- for c in bits:
- if c:
- v = v + m
- m = m + m
- if m > MAXCODE:
- dataappend(v)
- m, v = start
- return data
-
-# To represent a big charset, first a bitmap of all characters in the
-# set is constructed. Then, this bitmap is sliced into chunks of 256
-# characters, duplicate chunks are eliminitated, and each chunk is
-# given a number. In the compiled expression, the charset is
-# represented by a 16-bit word sequence, consisting of one word for
-# the number of different chunks, a sequence of 256 bytes (128 words)
-# of chunk numbers indexed by their original chunk position, and a
-# sequence of chunks (16 words each).
-
-# Compression is normally good: in a typical charset, large ranges of
-# Unicode will be either completely excluded (e.g. if only cyrillic
-# letters are to be matched), or completely included (e.g. if large
-# subranges of Kanji match). These ranges will be represented by
-# chunks of all one-bits or all zero-bits.
-
-# Matching can be also done efficiently: the more significant byte of
-# the Unicode character is an index into the chunk number, and the
-# less significant byte is a bit index in the chunk (just like the
-# CHARSET matching).
-
-# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
-# of the basic multilingual plane; an efficient representation
-# for all of UTF-16 has not yet been developed. This means,
-# in particular, that negated charsets cannot be represented as
-# bigcharsets.
-
-def _optimize_unicode(charset, fixup):
- try:
- import array
- except ImportError:
- return charset
- charmap = [0]*65536
- negate = 0
- try:
- for op, av in charset:
- if op is NEGATE:
- negate = 1
- elif op is LITERAL:
- charmap[fixup(av)] = 1
- elif op is RANGE:
- for i in xrange(fixup(av[0]), fixup(av[1])+1):
- charmap[i] = 1
- elif op is CATEGORY:
- # XXX: could expand category
- return charset # cannot compress
- except IndexError:
- # non-BMP characters
- return charset
- if negate:
- if sys.maxunicode != 65535:
- # XXX: negation does not work with big charsets
- return charset
- for i in xrange(65536):
- charmap[i] = not charmap[i]
- comps = {}
- mapping = [0]*256
- block = 0
- data = []
- for i in xrange(256):
- chunk = tuple(charmap[i*256:(i+1)*256])
- new = comps.setdefault(chunk, block)
- mapping[i] = new
- if new == block:
- block = block + 1
- data = data + _mk_bitmap(chunk)
- header = [block]
- if _sre.CODESIZE == 2:
- code = 'H'
- else:
- code = 'I'
- # Convert block indices to byte array of 256 bytes
- mapping = array.array('b', mapping).tostring()
- # Convert byte array to word array
- mapping = array.array(code, mapping)
- assert mapping.itemsize == _sre.CODESIZE
- header = header + mapping.tolist()
- data[0:0] = header
- return [(BIGCHARSET, data)]
-
-def _simple(av):
- # check if av is a "simple" operator
- lo, hi = av[2].getwidth()
- if lo == 0 and hi == MAXREPEAT:
- raise error, "nothing to repeat"
- return lo == hi == 1 and av[2][0][0] != SUBPATTERN
-
-def _compile_info(code, pattern, flags):
- # internal: compile an info block. in the current version,
- # this contains min/max pattern width, and an optional literal
- # prefix or a character map
- lo, hi = pattern.getwidth()
- if lo == 0:
- return # not worth it
- # look for a literal prefix
- prefix = []
- prefixappend = prefix.append
- prefix_skip = 0
- charset = [] # not used
- charsetappend = charset.append
- if not (flags & SRE_FLAG_IGNORECASE):
- # look for literal prefix
- for op, av in pattern.data:
- if op is LITERAL:
- if len(prefix) == prefix_skip:
- prefix_skip = prefix_skip + 1
- prefixappend(av)
- elif op is SUBPATTERN and len(av[1]) == 1:
- op, av = av[1][0]
- if op is LITERAL:
- prefixappend(av)
- else:
- break
- else:
- break
- # if no prefix, look for charset prefix
- if not prefix and pattern.data:
- op, av = pattern.data[0]
- if op is SUBPATTERN and av[1]:
- op, av = av[1][0]
- if op is LITERAL:
- charsetappend((op, av))
- elif op is BRANCH:
- c = []
- cappend = c.append
- for p in av[1]:
- if not p:
- break
- op, av = p[0]
- if op is LITERAL:
- cappend((op, av))
- else:
- break
- else:
- charset = c
- elif op is BRANCH:
- c = []
- cappend = c.append
- for p in av[1]:
- if not p:
- break
- op, av = p[0]
- if op is LITERAL:
- cappend((op, av))
- else:
- break
- else:
- charset = c
- elif op is IN:
- charset = av
-## if prefix:
-## print "*** PREFIX", prefix, prefix_skip
-## if charset:
-## print "*** CHARSET", charset
- # add an info block
- emit = code.append
- emit(OPCODES[INFO])
- skip = len(code); emit(0)
- # literal flag
- mask = 0
- if prefix:
- mask = SRE_INFO_PREFIX
- if len(prefix) == prefix_skip == len(pattern.data):
- mask = mask + SRE_INFO_LITERAL
- elif charset:
- mask = mask + SRE_INFO_CHARSET
- emit(mask)
- # pattern length
- if lo < MAXCODE:
- emit(lo)
- else:
- emit(MAXCODE)
- prefix = prefix[:MAXCODE]
- if hi < MAXCODE:
- emit(hi)
- else:
- emit(0)
- # add literal prefix
- if prefix:
- emit(len(prefix)) # length
- emit(prefix_skip) # skip
- code.extend(prefix)
- # generate overlap table
- table = [-1] + ([0]*len(prefix))
- for i in xrange(len(prefix)):
- table[i+1] = table[i]+1
- while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
- table[i+1] = table[table[i+1]-1]+1
- code.extend(table[1:]) # don't store first entry
- elif charset:
- _compile_charset(charset, flags, code)
- code[skip] = len(code) - skip
-
-try:
- unicode
-except NameError:
- STRING_TYPES = (type(""),)
-else:
- STRING_TYPES = (type(""), type(unicode("")))
-
-def isstring(obj):
- for tp in STRING_TYPES:
- if isinstance(obj, tp):
- return 1
- return 0
-
-def _code(p, flags):
-
- flags = p.pattern.flags | flags
- code = []
-
- # compile info block
- _compile_info(code, p, flags)
-
- # compile the pattern
- _compile(code, p.data, flags)
-
- code.append(OPCODES[SUCCESS])
-
- return code
-
-def compile(p, flags=0):
- # internal: convert pattern list to internal format
-
- if isstring(p):
- import sre_parse
- pattern = p
- p = sre_parse.parse(p, flags)
- else:
- pattern = None
-
- code = _code(p, flags)
-
- # print code
-
- # XXX: <fl> get rid of this limitation!
- if p.pattern.groups > 100:
- raise AssertionError(
- "sorry, but this version only supports 100 named groups"
- )
-
- # map in either direction
- groupindex = p.pattern.groupdict
- indexgroup = [None] * p.pattern.groups
- for k, i in groupindex.items():
- indexgroup[i] = k
-
- return _sre.compile(
- pattern, flags, code,
- p.pattern.groups-1,
- groupindex, indexgroup
- )
diff --git a/sys/lib/python/sre_constants.py b/sys/lib/python/sre_constants.py
deleted file mode 100644
index 1863f48bb..000000000
--- a/sys/lib/python/sre_constants.py
+++ /dev/null
@@ -1,261 +0,0 @@
-#
-# Secret Labs' Regular Expression Engine
-#
-# various symbols used by the regular expression engine.
-# run this script to update the _sre include files!
-#
-# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
-#
-# See the sre.py file for information on usage and redistribution.
-#
-
-"""Internal support module for sre"""
-
-# update when constants are added or removed
-
-MAGIC = 20031017
-
-# max code word in this release
-
-MAXREPEAT = 65535
-
-# SRE standard exception (access as sre.error)
-# should this really be here?
-
-class error(Exception):
- pass
-
-# operators
-
-FAILURE = "failure"
-SUCCESS = "success"
-
-ANY = "any"
-ANY_ALL = "any_all"
-ASSERT = "assert"
-ASSERT_NOT = "assert_not"
-AT = "at"
-BIGCHARSET = "bigcharset"
-BRANCH = "branch"
-CALL = "call"
-CATEGORY = "category"
-CHARSET = "charset"
-GROUPREF = "groupref"
-GROUPREF_IGNORE = "groupref_ignore"
-GROUPREF_EXISTS = "groupref_exists"
-IN = "in"
-IN_IGNORE = "in_ignore"
-INFO = "info"
-JUMP = "jump"
-LITERAL = "literal"
-LITERAL_IGNORE = "literal_ignore"
-MARK = "mark"
-MAX_REPEAT = "max_repeat"
-MAX_UNTIL = "max_until"
-MIN_REPEAT = "min_repeat"
-MIN_UNTIL = "min_until"
-NEGATE = "negate"
-NOT_LITERAL = "not_literal"
-NOT_LITERAL_IGNORE = "not_literal_ignore"
-RANGE = "range"
-REPEAT = "repeat"
-REPEAT_ONE = "repeat_one"
-SUBPATTERN = "subpattern"
-MIN_REPEAT_ONE = "min_repeat_one"
-
-# positions
-AT_BEGINNING = "at_beginning"
-AT_BEGINNING_LINE = "at_beginning_line"
-AT_BEGINNING_STRING = "at_beginning_string"
-AT_BOUNDARY = "at_boundary"
-AT_NON_BOUNDARY = "at_non_boundary"
-AT_END = "at_end"
-AT_END_LINE = "at_end_line"
-AT_END_STRING = "at_end_string"
-AT_LOC_BOUNDARY = "at_loc_boundary"
-AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
-AT_UNI_BOUNDARY = "at_uni_boundary"
-AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
-
-# categories
-CATEGORY_DIGIT = "category_digit"
-CATEGORY_NOT_DIGIT = "category_not_digit"
-CATEGORY_SPACE = "category_space"
-CATEGORY_NOT_SPACE = "category_not_space"
-CATEGORY_WORD = "category_word"
-CATEGORY_NOT_WORD = "category_not_word"
-CATEGORY_LINEBREAK = "category_linebreak"
-CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
-CATEGORY_LOC_WORD = "category_loc_word"
-CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
-CATEGORY_UNI_DIGIT = "category_uni_digit"
-CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
-CATEGORY_UNI_SPACE = "category_uni_space"
-CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
-CATEGORY_UNI_WORD = "category_uni_word"
-CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
-CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
-CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
-
-OPCODES = [
-
- # failure=0 success=1 (just because it looks better that way :-)
- FAILURE, SUCCESS,
-
- ANY, ANY_ALL,
- ASSERT, ASSERT_NOT,
- AT,
- BRANCH,
- CALL,
- CATEGORY,
- CHARSET, BIGCHARSET,
- GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
- IN, IN_IGNORE,
- INFO,
- JUMP,
- LITERAL, LITERAL_IGNORE,
- MARK,
- MAX_UNTIL,
- MIN_UNTIL,
- NOT_LITERAL, NOT_LITERAL_IGNORE,
- NEGATE,
- RANGE,
- REPEAT,
- REPEAT_ONE,
- SUBPATTERN,
- MIN_REPEAT_ONE
-
-]
-
-ATCODES = [
- AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
- AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
- AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
- AT_UNI_NON_BOUNDARY
-]
-
-CHCODES = [
- CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
- CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
- CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
- CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
- CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
- CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
- CATEGORY_UNI_NOT_LINEBREAK
-]
-
-def makedict(list):
- d = {}
- i = 0
- for item in list:
- d[item] = i
- i = i + 1
- return d
-
-OPCODES = makedict(OPCODES)
-ATCODES = makedict(ATCODES)
-CHCODES = makedict(CHCODES)
-
-# replacement operations for "ignore case" mode
-OP_IGNORE = {
- GROUPREF: GROUPREF_IGNORE,
- IN: IN_IGNORE,
- LITERAL: LITERAL_IGNORE,
- NOT_LITERAL: NOT_LITERAL_IGNORE
-}
-
-AT_MULTILINE = {
- AT_BEGINNING: AT_BEGINNING_LINE,
- AT_END: AT_END_LINE
-}
-
-AT_LOCALE = {
- AT_BOUNDARY: AT_LOC_BOUNDARY,
- AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
-}
-
-AT_UNICODE = {
- AT_BOUNDARY: AT_UNI_BOUNDARY,
- AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
-}
-
-CH_LOCALE = {
- CATEGORY_DIGIT: CATEGORY_DIGIT,
- CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
- CATEGORY_SPACE: CATEGORY_SPACE,
- CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
- CATEGORY_WORD: CATEGORY_LOC_WORD,
- CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
- CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
- CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
-}
-
-CH_UNICODE = {
- CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
- CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
- CATEGORY_SPACE: CATEGORY_UNI_SPACE,
- CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
- CATEGORY_WORD: CATEGORY_UNI_WORD,
- CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
- CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
- CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
-}
-
-# flags
-SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
-SRE_FLAG_IGNORECASE = 2 # case insensitive
-SRE_FLAG_LOCALE = 4 # honour system locale
-SRE_FLAG_MULTILINE = 8 # treat target as multiline string
-SRE_FLAG_DOTALL = 16 # treat target as a single string
-SRE_FLAG_UNICODE = 32 # use unicode locale
-SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
-SRE_FLAG_DEBUG = 128 # debugging
-
-# flags for INFO primitive
-SRE_INFO_PREFIX = 1 # has prefix
-SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
-SRE_INFO_CHARSET = 4 # pattern starts with character from given set
-
-if __name__ == "__main__":
- def dump(f, d, prefix):
- items = d.items()
- items.sort(key=lambda a: a[1])
- for k, v in items:
- f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
- f = open("sre_constants.h", "w")
- f.write("""\
-/*
- * Secret Labs' Regular Expression Engine
- *
- * regular expression matching engine
- *
- * NOTE: This file is generated by sre_constants.py. If you need
- * to change anything in here, edit sre_constants.py and run it.
- *
- * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
- *
- * See the _sre.c file for information on usage and redistribution.
- */
-
-""")
-
- f.write("#define SRE_MAGIC %d\n" % MAGIC)
-
- dump(f, OPCODES, "SRE_OP")
- dump(f, ATCODES, "SRE")
- dump(f, CHCODES, "SRE")
-
- f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
- f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
- f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
- f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
- f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
- f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
- f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
-
- f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
- f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
- f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
-
- f.close()
- print "done"
diff --git a/sys/lib/python/sre_parse.py b/sys/lib/python/sre_parse.py
deleted file mode 100644
index 319bf43b3..000000000
--- a/sys/lib/python/sre_parse.py
+++ /dev/null
@@ -1,796 +0,0 @@
-#
-# Secret Labs' Regular Expression Engine
-#
-# convert re-style regular expression to sre pattern
-#
-# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
-#
-# See the sre.py file for information on usage and redistribution.
-#
-
-"""Internal support module for sre"""
-
-# XXX: show string offset and offending character for all errors
-
-import sys
-
-from sre_constants import *
-
-def set(seq):
- s = {}
- for elem in seq:
- s[elem] = 1
- return s
-
-SPECIAL_CHARS = ".\\[{()*+?^$|"
-REPEAT_CHARS = "*+?{"
-
-DIGITS = set("0123456789")
-
-OCTDIGITS = set("01234567")
-HEXDIGITS = set("0123456789abcdefABCDEF")
-
-WHITESPACE = set(" \t\n\r\v\f")
-
-ESCAPES = {
- r"\a": (LITERAL, ord("\a")),
- r"\b": (LITERAL, ord("\b")),
- r"\f": (LITERAL, ord("\f")),
- r"\n": (LITERAL, ord("\n")),
- r"\r": (LITERAL, ord("\r")),
- r"\t": (LITERAL, ord("\t")),
- r"\v": (LITERAL, ord("\v")),
- r"\\": (LITERAL, ord("\\"))
-}
-
-CATEGORIES = {
- r"\A": (AT, AT_BEGINNING_STRING), # start of string
- r"\b": (AT, AT_BOUNDARY),
- r"\B": (AT, AT_NON_BOUNDARY),
- r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
- r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
- r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
- r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
- r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
- r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
- r"\Z": (AT, AT_END_STRING), # end of string
-}
-
-FLAGS = {
- # standard flags
- "i": SRE_FLAG_IGNORECASE,
- "L": SRE_FLAG_LOCALE,
- "m": SRE_FLAG_MULTILINE,
- "s": SRE_FLAG_DOTALL,
- "x": SRE_FLAG_VERBOSE,
- # extensions
- "t": SRE_FLAG_TEMPLATE,
- "u": SRE_FLAG_UNICODE,
-}
-
-class Pattern:
- # master pattern object. keeps track of global attributes
- def __init__(self):
- self.flags = 0
- self.open = []
- self.groups = 1
- self.groupdict = {}
- def opengroup(self, name=None):
- gid = self.groups
- self.groups = gid + 1
- if name is not None:
- ogid = self.groupdict.get(name, None)
- if ogid is not None:
- raise error, ("redefinition of group name %s as group %d; "
- "was group %d" % (repr(name), gid, ogid))
- self.groupdict[name] = gid
- self.open.append(gid)
- return gid
- def closegroup(self, gid):
- self.open.remove(gid)
- def checkgroup(self, gid):
- return gid < self.groups and gid not in self.open
-
-class SubPattern:
- # a subpattern, in intermediate form
- def __init__(self, pattern, data=None):
- self.pattern = pattern
- if data is None:
- data = []
- self.data = data
- self.width = None
- def dump(self, level=0):
- nl = 1
- seqtypes = type(()), type([])
- for op, av in self.data:
- print level*" " + op,; nl = 0
- if op == "in":
- # member sublanguage
- print; nl = 1
- for op, a in av:
- print (level+1)*" " + op, a
- elif op == "branch":
- print; nl = 1
- i = 0
- for a in av[1]:
- if i > 0:
- print level*" " + "or"
- a.dump(level+1); nl = 1
- i = i + 1
- elif type(av) in seqtypes:
- for a in av:
- if isinstance(a, SubPattern):
- if not nl: print
- a.dump(level+1); nl = 1
- else:
- print a, ; nl = 0
- else:
- print av, ; nl = 0
- if not nl: print
- def __repr__(self):
- return repr(self.data)
- def __len__(self):
- return len(self.data)
- def __delitem__(self, index):
- del self.data[index]
- def __getitem__(self, index):
- return self.data[index]
- def __setitem__(self, index, code):
- self.data[index] = code
- def __getslice__(self, start, stop):
- return SubPattern(self.pattern, self.data[start:stop])
- def insert(self, index, code):
- self.data.insert(index, code)
- def append(self, code):
- self.data.append(code)
- def getwidth(self):
- # determine the width (min, max) for this subpattern
- if self.width:
- return self.width
- lo = hi = 0L
- UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
- REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
- for op, av in self.data:
- if op is BRANCH:
- i = sys.maxint
- j = 0
- for av in av[1]:
- l, h = av.getwidth()
- i = min(i, l)
- j = max(j, h)
- lo = lo + i
- hi = hi + j
- elif op is CALL:
- i, j = av.getwidth()
- lo = lo + i
- hi = hi + j
- elif op is SUBPATTERN:
- i, j = av[1].getwidth()
- lo = lo + i
- hi = hi + j
- elif op in REPEATCODES:
- i, j = av[2].getwidth()
- lo = lo + long(i) * av[0]
- hi = hi + long(j) * av[1]
- elif op in UNITCODES:
- lo = lo + 1
- hi = hi + 1
- elif op == SUCCESS:
- break
- self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
- return self.width
-
-class Tokenizer:
- def __init__(self, string):
- self.string = string
- self.index = 0
- self.__next()
- def __next(self):
- if self.index >= len(self.string):
- self.next = None
- return
- char = self.string[self.index]
- if char[0] == "\\":
- try:
- c = self.string[self.index + 1]
- except IndexError:
- raise error, "bogus escape (end of line)"
- char = char + c
- self.index = self.index + len(char)
- self.next = char
- def match(self, char, skip=1):
- if char == self.next:
- if skip:
- self.__next()
- return 1
- return 0
- def get(self):
- this = self.next
- self.__next()
- return this
- def tell(self):
- return self.index, self.next
- def seek(self, index):
- self.index, self.next = index
-
-def isident(char):
- return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
-
-def isdigit(char):
- return "0" <= char <= "9"
-
-def isname(name):
- # check that group name is a valid string
- if not isident(name[0]):
- return False
- for char in name[1:]:
- if not isident(char) and not isdigit(char):
- return False
- return True
-
-def _class_escape(source, escape):
- # handle escape code inside character class
- code = ESCAPES.get(escape)
- if code:
- return code
- code = CATEGORIES.get(escape)
- if code:
- return code
- try:
- c = escape[1:2]
- if c == "x":
- # hexadecimal escape (exactly two digits)
- while source.next in HEXDIGITS and len(escape) < 4:
- escape = escape + source.get()
- escape = escape[2:]
- if len(escape) != 2:
- raise error, "bogus escape: %s" % repr("\\" + escape)
- return LITERAL, int(escape, 16) & 0xff
- elif c in OCTDIGITS:
- # octal escape (up to three digits)
- while source.next in OCTDIGITS and len(escape) < 4:
- escape = escape + source.get()
- escape = escape[1:]
- return LITERAL, int(escape, 8) & 0xff
- elif c in DIGITS:
- raise error, "bogus escape: %s" % repr(escape)
- if len(escape) == 2:
- return LITERAL, ord(escape[1])
- except ValueError:
- pass
- raise error, "bogus escape: %s" % repr(escape)
-
-def _escape(source, escape, state):
- # handle escape code in expression
- code = CATEGORIES.get(escape)
- if code:
- return code
- code = ESCAPES.get(escape)
- if code:
- return code
- try:
- c = escape[1:2]
- if c == "x":
- # hexadecimal escape
- while source.next in HEXDIGITS and len(escape) < 4:
- escape = escape + source.get()
- if len(escape) != 4:
- raise ValueError
- return LITERAL, int(escape[2:], 16) & 0xff
- elif c == "0":
- # octal escape
- while source.next in OCTDIGITS and len(escape) < 4:
- escape = escape + source.get()
- return LITERAL, int(escape[1:], 8) & 0xff
- elif c in DIGITS:
- # octal escape *or* decimal group reference (sigh)
- if source.next in DIGITS:
- escape = escape + source.get()
- if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
- source.next in OCTDIGITS):
- # got three octal digits; this is an octal escape
- escape = escape + source.get()
- return LITERAL, int(escape[1:], 8) & 0xff
- # not an octal escape, so this is a group reference
- group = int(escape[1:])
- if group < state.groups:
- if not state.checkgroup(group):
- raise error, "cannot refer to open group"
- return GROUPREF, group
- raise ValueError
- if len(escape) == 2:
- return LITERAL, ord(escape[1])
- except ValueError:
- pass
- raise error, "bogus escape: %s" % repr(escape)
-
-def _parse_sub(source, state, nested=1):
- # parse an alternation: a|b|c
-
- items = []
- itemsappend = items.append
- sourcematch = source.match
- while 1:
- itemsappend(_parse(source, state))
- if sourcematch("|"):
- continue
- if not nested:
- break
- if not source.next or sourcematch(")", 0):
- break
- else:
- raise error, "pattern not properly closed"
-
- if len(items) == 1:
- return items[0]
-
- subpattern = SubPattern(state)
- subpatternappend = subpattern.append
-
- # check if all items share a common prefix
- while 1:
- prefix = None
- for item in items:
- if not item:
- break
- if prefix is None:
- prefix = item[0]
- elif item[0] != prefix:
- break
- else:
- # all subitems start with a common "prefix".
- # move it out of the branch
- for item in items:
- del item[0]
- subpatternappend(prefix)
- continue # check next one
- break
-
- # check if the branch can be replaced by a character set
- for item in items:
- if len(item) != 1 or item[0][0] != LITERAL:
- break
- else:
- # we can store this as a character set instead of a
- # branch (the compiler may optimize this even more)
- set = []
- setappend = set.append
- for item in items:
- setappend(item[0])
- subpatternappend((IN, set))
- return subpattern
-
- subpattern.append((BRANCH, (None, items)))
- return subpattern
-
-def _parse_sub_cond(source, state, condgroup):
- item_yes = _parse(source, state)
- if source.match("|"):
- item_no = _parse(source, state)
- if source.match("|"):
- raise error, "conditional backref with more than two branches"
- else:
- item_no = None
- if source.next and not source.match(")", 0):
- raise error, "pattern not properly closed"
- subpattern = SubPattern(state)
- subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
- return subpattern
-
-_PATTERNENDERS = set("|)")
-_ASSERTCHARS = set("=!<")
-_LOOKBEHINDASSERTCHARS = set("=!")
-_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
-
-def _parse(source, state):
- # parse a simple pattern
- subpattern = SubPattern(state)
-
- # precompute constants into local variables
- subpatternappend = subpattern.append
- sourceget = source.get
- sourcematch = source.match
- _len = len
- PATTERNENDERS = _PATTERNENDERS
- ASSERTCHARS = _ASSERTCHARS
- LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
- REPEATCODES = _REPEATCODES
-
- while 1:
-
- if source.next in PATTERNENDERS:
- break # end of subpattern
- this = sourceget()
- if this is None:
- break # end of pattern
-
- if state.flags & SRE_FLAG_VERBOSE:
- # skip whitespace and comments
- if this in WHITESPACE:
- continue
- if this == "#":
- while 1:
- this = sourceget()
- if this in (None, "\n"):
- break
- continue
-
- if this and this[0] not in SPECIAL_CHARS:
- subpatternappend((LITERAL, ord(this)))
-
- elif this == "[":
- # character set
- set = []
- setappend = set.append
-## if sourcematch(":"):
-## pass # handle character classes
- if sourcematch("^"):
- setappend((NEGATE, None))
- # check remaining characters
- start = set[:]
- while 1:
- this = sourceget()
- if this == "]" and set != start:
- break
- elif this and this[0] == "\\":
- code1 = _class_escape(source, this)
- elif this:
- code1 = LITERAL, ord(this)
- else:
- raise error, "unexpected end of regular expression"
- if sourcematch("-"):
- # potential range
- this = sourceget()
- if this == "]":
- if code1[0] is IN:
- code1 = code1[1][0]
- setappend(code1)
- setappend((LITERAL, ord("-")))
- break
- elif this:
- if this[0] == "\\":
- code2 = _class_escape(source, this)
- else:
- code2 = LITERAL, ord(this)
- if code1[0] != LITERAL or code2[0] != LITERAL:
- raise error, "bad character range"
- lo = code1[1]
- hi = code2[1]
- if hi < lo:
- raise error, "bad character range"
- setappend((RANGE, (lo, hi)))
- else:
- raise error, "unexpected end of regular expression"
- else:
- if code1[0] is IN:
- code1 = code1[1][0]
- setappend(code1)
-
- # XXX: <fl> should move set optimization to compiler!
- if _len(set)==1 and set[0][0] is LITERAL:
- subpatternappend(set[0]) # optimization
- elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
- subpatternappend((NOT_LITERAL, set[1][1])) # optimization
- else:
- # XXX: <fl> should add charmap optimization here
- subpatternappend((IN, set))
-
- elif this and this[0] in REPEAT_CHARS:
- # repeat previous item
- if this == "?":
- min, max = 0, 1
- elif this == "*":
- min, max = 0, MAXREPEAT
-
- elif this == "+":
- min, max = 1, MAXREPEAT
- elif this == "{":
- if source.next == "}":
- subpatternappend((LITERAL, ord(this)))
- continue
- here = source.tell()
- min, max = 0, MAXREPEAT
- lo = hi = ""
- while source.next in DIGITS:
- lo = lo + source.get()
- if sourcematch(","):
- while source.next in DIGITS:
- hi = hi + sourceget()
- else:
- hi = lo
- if not sourcematch("}"):
- subpatternappend((LITERAL, ord(this)))
- source.seek(here)
- continue
- if lo:
- min = int(lo)
- if hi:
- max = int(hi)
- if max < min:
- raise error, "bad repeat interval"
- else:
- raise error, "not supported"
- # figure out which item to repeat
- if subpattern:
- item = subpattern[-1:]
- else:
- item = None
- if not item or (_len(item) == 1 and item[0][0] == AT):
- raise error, "nothing to repeat"
- if item[0][0] in REPEATCODES:
- raise error, "multiple repeat"
- if sourcematch("?"):
- subpattern[-1] = (MIN_REPEAT, (min, max, item))
- else:
- subpattern[-1] = (MAX_REPEAT, (min, max, item))
-
- elif this == ".":
- subpatternappend((ANY, None))
-
- elif this == "(":
- group = 1
- name = None
- condgroup = None
- if sourcematch("?"):
- group = 0
- # options
- if sourcematch("P"):
- # python extensions
- if sourcematch("<"):
- # named group: skip forward to end of name
- name = ""
- while 1:
- char = sourceget()
- if char is None:
- raise error, "unterminated name"
- if char == ">":
- break
- name = name + char
- group = 1
- if not isname(name):
- raise error, "bad character in group name"
- elif sourcematch("="):
- # named backreference
- name = ""
- while 1:
- char = sourceget()
- if char is None:
- raise error, "unterminated name"
- if char == ")":
- break
- name = name + char
- if not isname(name):
- raise error, "bad character in group name"
- gid = state.groupdict.get(name)
- if gid is None:
- raise error, "unknown group name"
- subpatternappend((GROUPREF, gid))
- continue
- else:
- char = sourceget()
- if char is None:
- raise error, "unexpected end of pattern"
- raise error, "unknown specifier: ?P%s" % char
- elif sourcematch(":"):
- # non-capturing group
- group = 2
- elif sourcematch("#"):
- # comment
- while 1:
- if source.next is None or source.next == ")":
- break
- sourceget()
- if not sourcematch(")"):
- raise error, "unbalanced parenthesis"
- continue
- elif source.next in ASSERTCHARS:
- # lookahead assertions
- char = sourceget()
- dir = 1
- if char == "<":
- if source.next not in LOOKBEHINDASSERTCHARS:
- raise error, "syntax error"
- dir = -1 # lookbehind
- char = sourceget()
- p = _parse_sub(source, state)
- if not sourcematch(")"):
- raise error, "unbalanced parenthesis"
- if char == "=":
- subpatternappend((ASSERT, (dir, p)))
- else:
- subpatternappend((ASSERT_NOT, (dir, p)))
- continue
- elif sourcematch("("):
- # conditional backreference group
- condname = ""
- while 1:
- char = sourceget()
- if char is None:
- raise error, "unterminated name"
- if char == ")":
- break
- condname = condname + char
- group = 2
- if isname(condname):
- condgroup = state.groupdict.get(condname)
- if condgroup is None:
- raise error, "unknown group name"
- else:
- try:
- condgroup = int(condname)
- except ValueError:
- raise error, "bad character in group name"
- else:
- # flags
- if not source.next in FLAGS:
- raise error, "unexpected end of pattern"
- while source.next in FLAGS:
- state.flags = state.flags | FLAGS[sourceget()]
- if group:
- # parse group contents
- if group == 2:
- # anonymous group
- group = None
- else:
- group = state.opengroup(name)
- if condgroup:
- p = _parse_sub_cond(source, state, condgroup)
- else:
- p = _parse_sub(source, state)
- if not sourcematch(")"):
- raise error, "unbalanced parenthesis"
- if group is not None:
- state.closegroup(group)
- subpatternappend((SUBPATTERN, (group, p)))
- else:
- while 1:
- char = sourceget()
- if char is None:
- raise error, "unexpected end of pattern"
- if char == ")":
- break
- raise error, "unknown extension"
-
- elif this == "^":
- subpatternappend((AT, AT_BEGINNING))
-
- elif this == "$":
- subpattern.append((AT, AT_END))
-
- elif this and this[0] == "\\":
- code = _escape(source, this, state)
- subpatternappend(code)
-
- else:
- raise error, "parser error"
-
- return subpattern
-
-def parse(str, flags=0, pattern=None):
- # parse 're' pattern into list of (opcode, argument) tuples
-
- source = Tokenizer(str)
-
- if pattern is None:
- pattern = Pattern()
- pattern.flags = flags
- pattern.str = str
-
- p = _parse_sub(source, pattern, 0)
-
- tail = source.get()
- if tail == ")":
- raise error, "unbalanced parenthesis"
- elif tail:
- raise error, "bogus characters at end of regular expression"
-
- if flags & SRE_FLAG_DEBUG:
- p.dump()
-
- if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
- # the VERBOSE flag was switched on inside the pattern. to be
- # on the safe side, we'll parse the whole thing again...
- return parse(str, p.pattern.flags)
-
- return p
-
-def parse_template(source, pattern):
- # parse 're' replacement string into list of literals and
- # group references
- s = Tokenizer(source)
- sget = s.get
- p = []
- a = p.append
- def literal(literal, p=p, pappend=a):
- if p and p[-1][0] is LITERAL:
- p[-1] = LITERAL, p[-1][1] + literal
- else:
- pappend((LITERAL, literal))
- sep = source[:0]
- if type(sep) is type(""):
- makechar = chr
- else:
- makechar = unichr
- while 1:
- this = sget()
- if this is None:
- break # end of replacement string
- if this and this[0] == "\\":
- # group
- c = this[1:2]
- if c == "g":
- name = ""
- if s.match("<"):
- while 1:
- char = sget()
- if char is None:
- raise error, "unterminated group name"
- if char == ">":
- break
- name = name + char
- if not name:
- raise error, "bad group name"
- try:
- index = int(name)
- if index < 0:
- raise error, "negative group number"
- except ValueError:
- if not isname(name):
- raise error, "bad character in group name"
- try:
- index = pattern.groupindex[name]
- except KeyError:
- raise IndexError, "unknown group name"
- a((MARK, index))
- elif c == "0":
- if s.next in OCTDIGITS:
- this = this + sget()
- if s.next in OCTDIGITS:
- this = this + sget()
- literal(makechar(int(this[1:], 8) & 0xff))
- elif c in DIGITS:
- isoctal = False
- if s.next in DIGITS:
- this = this + sget()
- if (c in OCTDIGITS and this[2] in OCTDIGITS and
- s.next in OCTDIGITS):
- this = this + sget()
- isoctal = True
- literal(makechar(int(this[1:], 8) & 0xff))
- if not isoctal:
- a((MARK, int(this[1:])))
- else:
- try:
- this = makechar(ESCAPES[this][1])
- except KeyError:
- pass
- literal(this)
- else:
- literal(this)
- # convert template to groups and literals lists
- i = 0
- groups = []
- groupsappend = groups.append
- literals = [None] * len(p)
- for c, s in p:
- if c is MARK:
- groupsappend((i, s))
- # literal[i] is already None
- else:
- literals[i] = s
- i = i + 1
- return groups, literals
-
-def expand_template(template, match):
- g = match.group
- sep = match.string[:0]
- groups, literals = template
- literals = literals[:]
- try:
- for index, group in groups:
- literals[index] = s = g(group)
- if s is None:
- raise error, "unmatched group"
- except IndexError:
- raise error, "invalid group reference"
- return sep.join(literals)
diff --git a/sys/lib/python/stat.py b/sys/lib/python/stat.py
deleted file mode 100644
index 70750d8b1..000000000
--- a/sys/lib/python/stat.py
+++ /dev/null
@@ -1,86 +0,0 @@
-"""Constants/functions for interpreting results of os.stat() and os.lstat().
-
-Suggested usage: from stat import *
-"""
-
-# XXX Strictly spoken, this module may have to be adapted for each POSIX
-# implementation; in practice, however, the numeric constants used by
-# stat() are almost universal (even for stat() emulations on non-UNIX
-# systems like MS-DOS).
-
-# Indices for stat struct members in tuple returned by os.stat()
-
-ST_MODE = 0
-ST_INO = 1
-ST_DEV = 2
-ST_NLINK = 3
-ST_UID = 4
-ST_GID = 5
-ST_SIZE = 6
-ST_ATIME = 7
-ST_MTIME = 8
-ST_CTIME = 9
-
-# Extract bits from the mode
-
-def S_IMODE(mode):
- return mode & 07777
-
-def S_IFMT(mode):
- return mode & 0170000
-
-# Constants used as S_IFMT() for various file types
-# (not all are implemented on all systems)
-
-S_IFDIR = 0040000
-S_IFCHR = 0020000
-S_IFBLK = 0060000
-S_IFREG = 0100000
-S_IFIFO = 0010000
-S_IFLNK = 0120000
-S_IFSOCK = 0140000
-
-# Functions to test for each file type
-
-def S_ISDIR(mode):
- return S_IFMT(mode) == S_IFDIR
-
-def S_ISCHR(mode):
- return S_IFMT(mode) == S_IFCHR
-
-def S_ISBLK(mode):
- return S_IFMT(mode) == S_IFBLK
-
-def S_ISREG(mode):
- return S_IFMT(mode) == S_IFREG
-
-def S_ISFIFO(mode):
- return S_IFMT(mode) == S_IFIFO
-
-def S_ISLNK(mode):
- return S_IFMT(mode) == S_IFLNK
-
-def S_ISSOCK(mode):
- return S_IFMT(mode) == S_IFSOCK
-
-# Names for permission bits
-
-S_ISUID = 04000
-S_ISGID = 02000
-S_ENFMT = S_ISGID
-S_ISVTX = 01000
-S_IREAD = 00400
-S_IWRITE = 00200
-S_IEXEC = 00100
-S_IRWXU = 00700
-S_IRUSR = 00400
-S_IWUSR = 00200
-S_IXUSR = 00100
-S_IRWXG = 00070
-S_IRGRP = 00040
-S_IWGRP = 00020
-S_IXGRP = 00010
-S_IRWXO = 00007
-S_IROTH = 00004
-S_IWOTH = 00002
-S_IXOTH = 00001
diff --git a/sys/lib/python/statvfs.py b/sys/lib/python/statvfs.py
deleted file mode 100644
index 06a323fa6..000000000
--- a/sys/lib/python/statvfs.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Constants for interpreting the results of os.statvfs() and os.fstatvfs()."""
-
-# Indices for statvfs struct members in the tuple returned by
-# os.statvfs() and os.fstatvfs().
-
-F_BSIZE = 0 # Preferred file system block size
-F_FRSIZE = 1 # Fundamental file system block size
-F_BLOCKS = 2 # Total number of file system blocks (FRSIZE)
-F_BFREE = 3 # Total number of free blocks
-F_BAVAIL = 4 # Free blocks available to non-superuser
-F_FILES = 5 # Total number of file nodes
-F_FFREE = 6 # Total number of free file nodes
-F_FAVAIL = 7 # Free nodes available to non-superuser
-F_FLAG = 8 # Flags (see your local statvfs man page)
-F_NAMEMAX = 9 # Maximum file name length
diff --git a/sys/lib/python/string.py b/sys/lib/python/string.py
deleted file mode 100644
index 921bd8b1d..000000000
--- a/sys/lib/python/string.py
+++ /dev/null
@@ -1,529 +0,0 @@
-"""A collection of string operations (most are no longer used).
-
-Warning: most of the code you see here isn't normally used nowadays.
-Beginning with Python 1.6, many of these functions are implemented as
-methods on the standard string object. They used to be implemented by
-a built-in module called strop, but strop is now obsolete itself.
-
-Public module variables:
-
-whitespace -- a string containing all characters considered whitespace
-lowercase -- a string containing all characters considered lowercase letters
-uppercase -- a string containing all characters considered uppercase letters
-letters -- a string containing all characters considered letters
-digits -- a string containing all characters considered decimal digits
-hexdigits -- a string containing all characters considered hexadecimal digits
-octdigits -- a string containing all characters considered octal digits
-punctuation -- a string containing all characters considered punctuation
-printable -- a string containing all characters considered printable
-
-"""
-
-# Some strings for ctype-style character classification
-whitespace = ' \t\n\r\v\f'
-lowercase = 'abcdefghijklmnopqrstuvwxyz'
-uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-letters = lowercase + uppercase
-ascii_lowercase = lowercase
-ascii_uppercase = uppercase
-ascii_letters = ascii_lowercase + ascii_uppercase
-digits = '0123456789'
-hexdigits = digits + 'abcdef' + 'ABCDEF'
-octdigits = '01234567'
-punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
-printable = digits + letters + punctuation + whitespace
-
-# Case conversion helpers
-# Use str to convert Unicode literal in case of -U
-l = map(chr, xrange(256))
-_idmap = str('').join(l)
-del l
-
-# Functions which aren't available as string methods.
-
-# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
-def capwords(s, sep=None):
- """capwords(s, [sep]) -> string
-
- Split the argument into words using split, capitalize each
- word using capitalize, and join the capitalized words using
- join. Note that this replaces runs of whitespace characters by
- a single space.
-
- """
- return (sep or ' ').join([x.capitalize() for x in s.split(sep)])
-
-
-# Construct a translation string
-_idmapL = None
-def maketrans(fromstr, tostr):
- """maketrans(frm, to) -> string
-
- Return a translation table (a string of 256 bytes long)
- suitable for use in string.translate. The strings frm and to
- must be of the same length.
-
- """
- if len(fromstr) != len(tostr):
- raise ValueError, "maketrans arguments must have same length"
- global _idmapL
- if not _idmapL:
- _idmapL = map(None, _idmap)
- L = _idmapL[:]
- fromstr = map(ord, fromstr)
- for i in range(len(fromstr)):
- L[fromstr[i]] = tostr[i]
- return ''.join(L)
-
-
-
-####################################################################
-import re as _re
-
-class _multimap:
- """Helper class for combining multiple mappings.
-
- Used by .{safe_,}substitute() to combine the mapping and keyword
- arguments.
- """
- def __init__(self, primary, secondary):
- self._primary = primary
- self._secondary = secondary
-
- def __getitem__(self, key):
- try:
- return self._primary[key]
- except KeyError:
- return self._secondary[key]
-
-
-class _TemplateMetaclass(type):
- pattern = r"""
- %(delim)s(?:
- (?P<escaped>%(delim)s) | # Escape sequence of two delimiters
- (?P<named>%(id)s) | # delimiter and a Python identifier
- {(?P<braced>%(id)s)} | # delimiter and a braced identifier
- (?P<invalid>) # Other ill-formed delimiter exprs
- )
- """
-
- def __init__(cls, name, bases, dct):
- super(_TemplateMetaclass, cls).__init__(name, bases, dct)
- if 'pattern' in dct:
- pattern = cls.pattern
- else:
- pattern = _TemplateMetaclass.pattern % {
- 'delim' : _re.escape(cls.delimiter),
- 'id' : cls.idpattern,
- }
- cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
-
-
-class Template:
- """A string class for supporting $-substitutions."""
- __metaclass__ = _TemplateMetaclass
-
- delimiter = '$'
- idpattern = r'[_a-z][_a-z0-9]*'
-
- def __init__(self, template):
- self.template = template
-
- # Search for $$, $identifier, ${identifier}, and any bare $'s
-
- def _invalid(self, mo):
- i = mo.start('invalid')
- lines = self.template[:i].splitlines(True)
- if not lines:
- colno = 1
- lineno = 1
- else:
- colno = i - len(''.join(lines[:-1]))
- lineno = len(lines)
- raise ValueError('Invalid placeholder in string: line %d, col %d' %
- (lineno, colno))
-
- def substitute(self, *args, **kws):
- if len(args) > 1:
- raise TypeError('Too many positional arguments')
- if not args:
- mapping = kws
- elif kws:
- mapping = _multimap(kws, args[0])
- else:
- mapping = args[0]
- # Helper function for .sub()
- def convert(mo):
- # Check the most common path first.
- named = mo.group('named') or mo.group('braced')
- if named is not None:
- val = mapping[named]
- # We use this idiom instead of str() because the latter will
- # fail if val is a Unicode containing non-ASCII characters.
- return '%s' % (val,)
- if mo.group('escaped') is not None:
- return self.delimiter
- if mo.group('invalid') is not None:
- self._invalid(mo)
- raise ValueError('Unrecognized named group in pattern',
- self.pattern)
- return self.pattern.sub(convert, self.template)
-
- def safe_substitute(self, *args, **kws):
- if len(args) > 1:
- raise TypeError('Too many positional arguments')
- if not args:
- mapping = kws
- elif kws:
- mapping = _multimap(kws, args[0])
- else:
- mapping = args[0]
- # Helper function for .sub()
- def convert(mo):
- named = mo.group('named')
- if named is not None:
- try:
- # We use this idiom instead of str() because the latter
- # will fail if val is a Unicode containing non-ASCII
- return '%s' % (mapping[named],)
- except KeyError:
- return self.delimiter + named
- braced = mo.group('braced')
- if braced is not None:
- try:
- return '%s' % (mapping[braced],)
- except KeyError:
- return self.delimiter + '{' + braced + '}'
- if mo.group('escaped') is not None:
- return self.delimiter
- if mo.group('invalid') is not None:
- return self.delimiter
- raise ValueError('Unrecognized named group in pattern',
- self.pattern)
- return self.pattern.sub(convert, self.template)
-
-
-
-####################################################################
-# NOTE: Everything below here is deprecated. Use string methods instead.
-# This stuff will go away in Python 3.0.
-
-# Backward compatible names for exceptions
-index_error = ValueError
-atoi_error = ValueError
-atof_error = ValueError
-atol_error = ValueError
-
-# convert UPPER CASE letters to lower case
-def lower(s):
- """lower(s) -> string
-
- Return a copy of the string s converted to lowercase.
-
- """
- return s.lower()
-
-# Convert lower case letters to UPPER CASE
-def upper(s):
- """upper(s) -> string
-
- Return a copy of the string s converted to uppercase.
-
- """
- return s.upper()
-
-# Swap lower case letters and UPPER CASE
-def swapcase(s):
- """swapcase(s) -> string
-
- Return a copy of the string s with upper case characters
- converted to lowercase and vice versa.
-
- """
- return s.swapcase()
-
-# Strip leading and trailing tabs and spaces
-def strip(s, chars=None):
- """strip(s [,chars]) -> string
-
- Return a copy of the string s with leading and trailing
- whitespace removed.
- If chars is given and not None, remove characters in chars instead.
- If chars is unicode, S will be converted to unicode before stripping.
-
- """
- return s.strip(chars)
-
-# Strip leading tabs and spaces
-def lstrip(s, chars=None):
- """lstrip(s [,chars]) -> string
-
- Return a copy of the string s with leading whitespace removed.
- If chars is given and not None, remove characters in chars instead.
-
- """
- return s.lstrip(chars)
-
-# Strip trailing tabs and spaces
-def rstrip(s, chars=None):
- """rstrip(s [,chars]) -> string
-
- Return a copy of the string s with trailing whitespace removed.
- If chars is given and not None, remove characters in chars instead.
-
- """
- return s.rstrip(chars)
-
-
-# Split a string into a list of space/tab-separated words
-def split(s, sep=None, maxsplit=-1):
- """split(s [,sep [,maxsplit]]) -> list of strings
-
- Return a list of the words in the string s, using sep as the
- delimiter string. If maxsplit is given, splits at no more than
- maxsplit places (resulting in at most maxsplit+1 words). If sep
- is not specified or is None, any whitespace string is a separator.
-
- (split and splitfields are synonymous)
-
- """
- return s.split(sep, maxsplit)
-splitfields = split
-
-# Split a string into a list of space/tab-separated words
-def rsplit(s, sep=None, maxsplit=-1):
- """rsplit(s [,sep [,maxsplit]]) -> list of strings
-
- Return a list of the words in the string s, using sep as the
- delimiter string, starting at the end of the string and working
- to the front. If maxsplit is given, at most maxsplit splits are
- done. If sep is not specified or is None, any whitespace string
- is a separator.
- """
- return s.rsplit(sep, maxsplit)
-
-# Join fields with optional separator
-def join(words, sep = ' '):
- """join(list [,sep]) -> string
-
- Return a string composed of the words in list, with
- intervening occurrences of sep. The default separator is a
- single space.
-
- (joinfields and join are synonymous)
-
- """
- return sep.join(words)
-joinfields = join
-
-# Find substring, raise exception if not found
-def index(s, *args):
- """index(s, sub [,start [,end]]) -> int
-
- Like find but raises ValueError when the substring is not found.
-
- """
- return s.index(*args)
-
-# Find last substring, raise exception if not found
-def rindex(s, *args):
- """rindex(s, sub [,start [,end]]) -> int
-
- Like rfind but raises ValueError when the substring is not found.
-
- """
- return s.rindex(*args)
-
-# Count non-overlapping occurrences of substring
-def count(s, *args):
- """count(s, sub[, start[,end]]) -> int
-
- Return the number of occurrences of substring sub in string
- s[start:end]. Optional arguments start and end are
- interpreted as in slice notation.
-
- """
- return s.count(*args)
-
-# Find substring, return -1 if not found
-def find(s, *args):
- """find(s, sub [,start [,end]]) -> in
-
- Return the lowest index in s where substring sub is found,
- such that sub is contained within s[start,end]. Optional
- arguments start and end are interpreted as in slice notation.
-
- Return -1 on failure.
-
- """
- return s.find(*args)
-
-# Find last substring, return -1 if not found
-def rfind(s, *args):
- """rfind(s, sub [,start [,end]]) -> int
-
- Return the highest index in s where substring sub is found,
- such that sub is contained within s[start,end]. Optional
- arguments start and end are interpreted as in slice notation.
-
- Return -1 on failure.
-
- """
- return s.rfind(*args)
-
-# for a bit of speed
-_float = float
-_int = int
-_long = long
-
-# Convert string to float
-def atof(s):
- """atof(s) -> float
-
- Return the floating point number represented by the string s.
-
- """
- return _float(s)
-
-
-# Convert string to integer
-def atoi(s , base=10):
- """atoi(s [,base]) -> int
-
- Return the integer represented by the string s in the given
- base, which defaults to 10. The string s must consist of one
- or more digits, possibly preceded by a sign. If base is 0, it
- is chosen from the leading characters of s, 0 for octal, 0x or
- 0X for hexadecimal. If base is 16, a preceding 0x or 0X is
- accepted.
-
- """
- return _int(s, base)
-
-
-# Convert string to long integer
-def atol(s, base=10):
- """atol(s [,base]) -> long
-
- Return the long integer represented by the string s in the
- given base, which defaults to 10. The string s must consist
- of one or more digits, possibly preceded by a sign. If base
- is 0, it is chosen from the leading characters of s, 0 for
- octal, 0x or 0X for hexadecimal. If base is 16, a preceding
- 0x or 0X is accepted. A trailing L or l is not accepted,
- unless base is 0.
-
- """
- return _long(s, base)
-
-
-# Left-justify a string
-def ljust(s, width, *args):
- """ljust(s, width[, fillchar]) -> string
-
- Return a left-justified version of s, in a field of the
- specified width, padded with spaces as needed. The string is
- never truncated. If specified the fillchar is used instead of spaces.
-
- """
- return s.ljust(width, *args)
-
-# Right-justify a string
-def rjust(s, width, *args):
- """rjust(s, width[, fillchar]) -> string
-
- Return a right-justified version of s, in a field of the
- specified width, padded with spaces as needed. The string is
- never truncated. If specified the fillchar is used instead of spaces.
-
- """
- return s.rjust(width, *args)
-
-# Center a string
-def center(s, width, *args):
- """center(s, width[, fillchar]) -> string
-
- Return a center version of s, in a field of the specified
- width. padded with spaces as needed. The string is never
- truncated. If specified the fillchar is used instead of spaces.
-
- """
- return s.center(width, *args)
-
-# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
-# Decadent feature: the argument may be a string or a number
-# (Use of this is deprecated; it should be a string as with ljust c.s.)
-def zfill(x, width):
- """zfill(x, width) -> string
-
- Pad a numeric string x with zeros on the left, to fill a field
- of the specified width. The string x is never truncated.
-
- """
- if not isinstance(x, basestring):
- x = repr(x)
- return x.zfill(width)
-
-# Expand tabs in a string.
-# Doesn't take non-printing chars into account, but does understand \n.
-def expandtabs(s, tabsize=8):
- """expandtabs(s [,tabsize]) -> string
-
- Return a copy of the string s with all tab characters replaced
- by the appropriate number of spaces, depending on the current
- column, and the tabsize (default 8).
-
- """
- return s.expandtabs(tabsize)
-
-# Character translation through look-up table.
-def translate(s, table, deletions=""):
- """translate(s,table [,deletions]) -> string
-
- Return a copy of the string s, where all characters occurring
- in the optional argument deletions are removed, and the
- remaining characters have been mapped through the given
- translation table, which must be a string of length 256. The
- deletions argument is not allowed for Unicode strings.
-
- """
- if deletions:
- return s.translate(table, deletions)
- else:
- # Add s[:0] so that if s is Unicode and table is an 8-bit string,
- # table is converted to Unicode. This means that table *cannot*
- # be a dictionary -- for that feature, use u.translate() directly.
- return s.translate(table + s[:0])
-
-# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
-def capitalize(s):
- """capitalize(s) -> string
-
- Return a copy of the string s with only its first character
- capitalized.
-
- """
- return s.capitalize()
-
-# Substring replacement (global)
-def replace(s, old, new, maxsplit=-1):
- """replace (str, old, new[, maxsplit]) -> string
-
- Return a copy of string str with all occurrences of substring
- old replaced by new. If the optional argument maxsplit is
- given, only the first maxsplit occurrences are replaced.
-
- """
- return s.replace(old, new, maxsplit)
-
-
-# Try importing optional built-in module "strop" -- if it exists,
-# it redefines some string operations that are 100-1000 times faster.
-# It also defines values for whitespace, lowercase and uppercase
-# that match <ctype.h>'s definitions.
-
-try:
- from strop import maketrans, lowercase, uppercase, whitespace
- letters = lowercase + uppercase
-except ImportError:
- pass # Use the original versions
diff --git a/sys/lib/python/stringold.py b/sys/lib/python/stringold.py
deleted file mode 100644
index 213a04cce..000000000
--- a/sys/lib/python/stringold.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# module 'string' -- A collection of string operations
-
-# Warning: most of the code you see here isn't normally used nowadays. With
-# Python 1.6, many of these functions are implemented as methods on the
-# standard string object. They used to be implemented by a built-in module
-# called strop, but strop is now obsolete itself.
-
-"""Common string manipulations.
-
-Public module variables:
-
-whitespace -- a string containing all characters considered whitespace
-lowercase -- a string containing all characters considered lowercase letters
-uppercase -- a string containing all characters considered uppercase letters
-letters -- a string containing all characters considered letters
-digits -- a string containing all characters considered decimal digits
-hexdigits -- a string containing all characters considered hexadecimal digits
-octdigits -- a string containing all characters considered octal digits
-
-"""
-
-# Some strings for ctype-style character classification
-whitespace = ' \t\n\r\v\f'
-lowercase = 'abcdefghijklmnopqrstuvwxyz'
-uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-letters = lowercase + uppercase
-digits = '0123456789'
-hexdigits = digits + 'abcdef' + 'ABCDEF'
-octdigits = '01234567'
-
-# Case conversion helpers
-_idmap = ''
-for i in range(256): _idmap = _idmap + chr(i)
-del i
-
-# Backward compatible names for exceptions
-index_error = ValueError
-atoi_error = ValueError
-atof_error = ValueError
-atol_error = ValueError
-
-# convert UPPER CASE letters to lower case
-def lower(s):
- """lower(s) -> string
-
- Return a copy of the string s converted to lowercase.
-
- """
- return s.lower()
-
-# Convert lower case letters to UPPER CASE
-def upper(s):
- """upper(s) -> string
-
- Return a copy of the string s converted to uppercase.
-
- """
- return s.upper()
-
-# Swap lower case letters and UPPER CASE
-def swapcase(s):
- """swapcase(s) -> string
-
- Return a copy of the string s with upper case characters
- converted to lowercase and vice versa.
-
- """
- return s.swapcase()
-
-# Strip leading and trailing tabs and spaces
-def strip(s):
- """strip(s) -> string
-
- Return a copy of the string s with leading and trailing
- whitespace removed.
-
- """
- return s.strip()
-
-# Strip leading tabs and spaces
-def lstrip(s):
- """lstrip(s) -> string
-
- Return a copy of the string s with leading whitespace removed.
-
- """
- return s.lstrip()
-
-# Strip trailing tabs and spaces
-def rstrip(s):
- """rstrip(s) -> string
-
- Return a copy of the string s with trailing whitespace
- removed.
-
- """
- return s.rstrip()
-
-
-# Split a string into a list of space/tab-separated words
-def split(s, sep=None, maxsplit=0):
- """split(str [,sep [,maxsplit]]) -> list of strings
-
- Return a list of the words in the string s, using sep as the
- delimiter string. If maxsplit is nonzero, splits into at most
- maxsplit words If sep is not specified, any whitespace string
- is a separator. Maxsplit defaults to 0.
-
- (split and splitfields are synonymous)
-
- """
- return s.split(sep, maxsplit)
-splitfields = split
-
-# Join fields with optional separator
-def join(words, sep = ' '):
- """join(list [,sep]) -> string
-
- Return a string composed of the words in list, with
- intervening occurrences of sep. The default separator is a
- single space.
-
- (joinfields and join are synonymous)
-
- """
- return sep.join(words)
-joinfields = join
-
-# for a little bit of speed
-_apply = apply
-
-# Find substring, raise exception if not found
-def index(s, *args):
- """index(s, sub [,start [,end]]) -> int
-
- Like find but raises ValueError when the substring is not found.
-
- """
- return _apply(s.index, args)
-
-# Find last substring, raise exception if not found
-def rindex(s, *args):
- """rindex(s, sub [,start [,end]]) -> int
-
- Like rfind but raises ValueError when the substring is not found.
-
- """
- return _apply(s.rindex, args)
-
-# Count non-overlapping occurrences of substring
-def count(s, *args):
- """count(s, sub[, start[,end]]) -> int
-
- Return the number of occurrences of substring sub in string
- s[start:end]. Optional arguments start and end are
- interpreted as in slice notation.
-
- """
- return _apply(s.count, args)
-
-# Find substring, return -1 if not found
-def find(s, *args):
- """find(s, sub [,start [,end]]) -> in
-
- Return the lowest index in s where substring sub is found,
- such that sub is contained within s[start,end]. Optional
- arguments start and end are interpreted as in slice notation.
-
- Return -1 on failure.
-
- """
- return _apply(s.find, args)
-
-# Find last substring, return -1 if not found
-def rfind(s, *args):
- """rfind(s, sub [,start [,end]]) -> int
-
- Return the highest index in s where substring sub is found,
- such that sub is contained within s[start,end]. Optional
- arguments start and end are interpreted as in slice notation.
-
- Return -1 on failure.
-
- """
- return _apply(s.rfind, args)
-
-# for a bit of speed
-_float = float
-_int = int
-_long = long
-_StringType = type('')
-
-# Convert string to float
-def atof(s):
- """atof(s) -> float
-
- Return the floating point number represented by the string s.
-
- """
- if type(s) == _StringType:
- return _float(s)
- else:
- raise TypeError('argument 1: expected string, %s found' %
- type(s).__name__)
-
-# Convert string to integer
-def atoi(*args):
- """atoi(s [,base]) -> int
-
- Return the integer represented by the string s in the given
- base, which defaults to 10. The string s must consist of one
- or more digits, possibly preceded by a sign. If base is 0, it
- is chosen from the leading characters of s, 0 for octal, 0x or
- 0X for hexadecimal. If base is 16, a preceding 0x or 0X is
- accepted.
-
- """
- try:
- s = args[0]
- except IndexError:
- raise TypeError('function requires at least 1 argument: %d given' %
- len(args))
- # Don't catch type error resulting from too many arguments to int(). The
- # error message isn't compatible but the error type is, and this function
- # is complicated enough already.
- if type(s) == _StringType:
- return _apply(_int, args)
- else:
- raise TypeError('argument 1: expected string, %s found' %
- type(s).__name__)
-
-
-# Convert string to long integer
-def atol(*args):
- """atol(s [,base]) -> long
-
- Return the long integer represented by the string s in the
- given base, which defaults to 10. The string s must consist
- of one or more digits, possibly preceded by a sign. If base
- is 0, it is chosen from the leading characters of s, 0 for
- octal, 0x or 0X for hexadecimal. If base is 16, a preceding
- 0x or 0X is accepted. A trailing L or l is not accepted,
- unless base is 0.
-
- """
- try:
- s = args[0]
- except IndexError:
- raise TypeError('function requires at least 1 argument: %d given' %
- len(args))
- # Don't catch type error resulting from too many arguments to long(). The
- # error message isn't compatible but the error type is, and this function
- # is complicated enough already.
- if type(s) == _StringType:
- return _apply(_long, args)
- else:
- raise TypeError('argument 1: expected string, %s found' %
- type(s).__name__)
-
-
-# Left-justify a string
-def ljust(s, width):
- """ljust(s, width) -> string
-
- Return a left-justified version of s, in a field of the
- specified width, padded with spaces as needed. The string is
- never truncated.
-
- """
- n = width - len(s)
- if n <= 0: return s
- return s + ' '*n
-
-# Right-justify a string
-def rjust(s, width):
- """rjust(s, width) -> string
-
- Return a right-justified version of s, in a field of the
- specified width, padded with spaces as needed. The string is
- never truncated.
-
- """
- n = width - len(s)
- if n <= 0: return s
- return ' '*n + s
-
-# Center a string
-def center(s, width):
- """center(s, width) -> string
-
- Return a center version of s, in a field of the specified
- width. padded with spaces as needed. The string is never
- truncated.
-
- """
- n = width - len(s)
- if n <= 0: return s
- half = n/2
- if n%2 and width%2:
- # This ensures that center(center(s, i), j) = center(s, j)
- half = half+1
- return ' '*half + s + ' '*(n-half)
-
-# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
-# Decadent feature: the argument may be a string or a number
-# (Use of this is deprecated; it should be a string as with ljust c.s.)
-def zfill(x, width):
- """zfill(x, width) -> string
-
- Pad a numeric string x with zeros on the left, to fill a field
- of the specified width. The string x is never truncated.
-
- """
- if type(x) == type(''): s = x
- else: s = repr(x)
- n = len(s)
- if n >= width: return s
- sign = ''
- if s[0] in ('-', '+'):
- sign, s = s[0], s[1:]
- return sign + '0'*(width-n) + s
-
-# Expand tabs in a string.
-# Doesn't take non-printing chars into account, but does understand \n.
-def expandtabs(s, tabsize=8):
- """expandtabs(s [,tabsize]) -> string
-
- Return a copy of the string s with all tab characters replaced
- by the appropriate number of spaces, depending on the current
- column, and the tabsize (default 8).
-
- """
- res = line = ''
- for c in s:
- if c == '\t':
- c = ' '*(tabsize - len(line) % tabsize)
- line = line + c
- if c == '\n':
- res = res + line
- line = ''
- return res + line
-
-# Character translation through look-up table.
-def translate(s, table, deletions=""):
- """translate(s,table [,deletechars]) -> string
-
- Return a copy of the string s, where all characters occurring
- in the optional argument deletechars are removed, and the
- remaining characters have been mapped through the given
- translation table, which must be a string of length 256.
-
- """
- return s.translate(table, deletions)
-
-# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
-def capitalize(s):
- """capitalize(s) -> string
-
- Return a copy of the string s with only its first character
- capitalized.
-
- """
- return s.capitalize()
-
-# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
-def capwords(s, sep=None):
- """capwords(s, [sep]) -> string
-
- Split the argument into words using split, capitalize each
- word using capitalize, and join the capitalized words using
- join. Note that this replaces runs of whitespace characters by
- a single space.
-
- """
- return join(map(capitalize, s.split(sep)), sep or ' ')
-
-# Construct a translation string
-_idmapL = None
-def maketrans(fromstr, tostr):
- """maketrans(frm, to) -> string
-
- Return a translation table (a string of 256 bytes long)
- suitable for use in string.translate. The strings frm and to
- must be of the same length.
-
- """
- if len(fromstr) != len(tostr):
- raise ValueError, "maketrans arguments must have same length"
- global _idmapL
- if not _idmapL:
- _idmapL = map(None, _idmap)
- L = _idmapL[:]
- fromstr = map(ord, fromstr)
- for i in range(len(fromstr)):
- L[fromstr[i]] = tostr[i]
- return join(L, "")
-
-# Substring replacement (global)
-def replace(s, old, new, maxsplit=0):
- """replace (str, old, new[, maxsplit]) -> string
-
- Return a copy of string str with all occurrences of substring
- old replaced by new. If the optional argument maxsplit is
- given, only the first maxsplit occurrences are replaced.
-
- """
- return s.replace(old, new, maxsplit)
-
-
-# XXX: transitional
-#
-# If string objects do not have methods, then we need to use the old string.py
-# library, which uses strop for many more things than just the few outlined
-# below.
-try:
- ''.upper
-except AttributeError:
- from stringold import *
-
-# Try importing optional built-in module "strop" -- if it exists,
-# it redefines some string operations that are 100-1000 times faster.
-# It also defines values for whitespace, lowercase and uppercase
-# that match <ctype.h>'s definitions.
-
-try:
- from strop import maketrans, lowercase, uppercase, whitespace
- letters = lowercase + uppercase
-except ImportError:
- pass # Use the original versions
diff --git a/sys/lib/python/stringprep.py b/sys/lib/python/stringprep.py
deleted file mode 100644
index 1d49dd1dd..000000000
--- a/sys/lib/python/stringprep.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# This file is generated by mkstringprep.py. DO NOT EDIT.
-"""Library that exposes various tables found in the StringPrep RFC 3454.
-
-There are two kinds of tables: sets, for which a member test is provided,
-and mappings, for which a mapping function is provided.
-"""
-
-from unicodedata import ucd_3_2_0 as unicodedata
-
-assert unicodedata.unidata_version == '3.2.0'
-
-def in_table_a1(code):
- if unicodedata.category(code) != 'Cn': return False
- c = ord(code)
- if 0xFDD0 <= c < 0xFDF0: return False
- return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
-
-
-b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + range(65024,65040))
-def in_table_b1(code):
- return ord(code) in b1_set
-
-
-b3_exceptions = {
-0xb5:u'\u03bc', 0xdf:u'ss', 0x130:u'i\u0307', 0x149:u'\u02bcn',
-0x17f:u's', 0x1f0:u'j\u030c', 0x345:u'\u03b9', 0x37a:u' \u03b9',
-0x390:u'\u03b9\u0308\u0301', 0x3b0:u'\u03c5\u0308\u0301', 0x3c2:u'\u03c3', 0x3d0:u'\u03b2',
-0x3d1:u'\u03b8', 0x3d2:u'\u03c5', 0x3d3:u'\u03cd', 0x3d4:u'\u03cb',
-0x3d5:u'\u03c6', 0x3d6:u'\u03c0', 0x3f0:u'\u03ba', 0x3f1:u'\u03c1',
-0x3f2:u'\u03c3', 0x3f5:u'\u03b5', 0x587:u'\u0565\u0582', 0x1e96:u'h\u0331',
-0x1e97:u't\u0308', 0x1e98:u'w\u030a', 0x1e99:u'y\u030a', 0x1e9a:u'a\u02be',
-0x1e9b:u'\u1e61', 0x1f50:u'\u03c5\u0313', 0x1f52:u'\u03c5\u0313\u0300', 0x1f54:u'\u03c5\u0313\u0301',
-0x1f56:u'\u03c5\u0313\u0342', 0x1f80:u'\u1f00\u03b9', 0x1f81:u'\u1f01\u03b9', 0x1f82:u'\u1f02\u03b9',
-0x1f83:u'\u1f03\u03b9', 0x1f84:u'\u1f04\u03b9', 0x1f85:u'\u1f05\u03b9', 0x1f86:u'\u1f06\u03b9',
-0x1f87:u'\u1f07\u03b9', 0x1f88:u'\u1f00\u03b9', 0x1f89:u'\u1f01\u03b9', 0x1f8a:u'\u1f02\u03b9',
-0x1f8b:u'\u1f03\u03b9', 0x1f8c:u'\u1f04\u03b9', 0x1f8d:u'\u1f05\u03b9', 0x1f8e:u'\u1f06\u03b9',
-0x1f8f:u'\u1f07\u03b9', 0x1f90:u'\u1f20\u03b9', 0x1f91:u'\u1f21\u03b9', 0x1f92:u'\u1f22\u03b9',
-0x1f93:u'\u1f23\u03b9', 0x1f94:u'\u1f24\u03b9', 0x1f95:u'\u1f25\u03b9', 0x1f96:u'\u1f26\u03b9',
-0x1f97:u'\u1f27\u03b9', 0x1f98:u'\u1f20\u03b9', 0x1f99:u'\u1f21\u03b9', 0x1f9a:u'\u1f22\u03b9',
-0x1f9b:u'\u1f23\u03b9', 0x1f9c:u'\u1f24\u03b9', 0x1f9d:u'\u1f25\u03b9', 0x1f9e:u'\u1f26\u03b9',
-0x1f9f:u'\u1f27\u03b9', 0x1fa0:u'\u1f60\u03b9', 0x1fa1:u'\u1f61\u03b9', 0x1fa2:u'\u1f62\u03b9',
-0x1fa3:u'\u1f63\u03b9', 0x1fa4:u'\u1f64\u03b9', 0x1fa5:u'\u1f65\u03b9', 0x1fa6:u'\u1f66\u03b9',
-0x1fa7:u'\u1f67\u03b9', 0x1fa8:u'\u1f60\u03b9', 0x1fa9:u'\u1f61\u03b9', 0x1faa:u'\u1f62\u03b9',
-0x1fab:u'\u1f63\u03b9', 0x1fac:u'\u1f64\u03b9', 0x1fad:u'\u1f65\u03b9', 0x1fae:u'\u1f66\u03b9',
-0x1faf:u'\u1f67\u03b9', 0x1fb2:u'\u1f70\u03b9', 0x1fb3:u'\u03b1\u03b9', 0x1fb4:u'\u03ac\u03b9',
-0x1fb6:u'\u03b1\u0342', 0x1fb7:u'\u03b1\u0342\u03b9', 0x1fbc:u'\u03b1\u03b9', 0x1fbe:u'\u03b9',
-0x1fc2:u'\u1f74\u03b9', 0x1fc3:u'\u03b7\u03b9', 0x1fc4:u'\u03ae\u03b9', 0x1fc6:u'\u03b7\u0342',
-0x1fc7:u'\u03b7\u0342\u03b9', 0x1fcc:u'\u03b7\u03b9', 0x1fd2:u'\u03b9\u0308\u0300', 0x1fd3:u'\u03b9\u0308\u0301',
-0x1fd6:u'\u03b9\u0342', 0x1fd7:u'\u03b9\u0308\u0342', 0x1fe2:u'\u03c5\u0308\u0300', 0x1fe3:u'\u03c5\u0308\u0301',
-0x1fe4:u'\u03c1\u0313', 0x1fe6:u'\u03c5\u0342', 0x1fe7:u'\u03c5\u0308\u0342', 0x1ff2:u'\u1f7c\u03b9',
-0x1ff3:u'\u03c9\u03b9', 0x1ff4:u'\u03ce\u03b9', 0x1ff6:u'\u03c9\u0342', 0x1ff7:u'\u03c9\u0342\u03b9',
-0x1ffc:u'\u03c9\u03b9', 0x20a8:u'rs', 0x2102:u'c', 0x2103:u'\xb0c',
-0x2107:u'\u025b', 0x2109:u'\xb0f', 0x210b:u'h', 0x210c:u'h',
-0x210d:u'h', 0x2110:u'i', 0x2111:u'i', 0x2112:u'l',
-0x2115:u'n', 0x2116:u'no', 0x2119:u'p', 0x211a:u'q',
-0x211b:u'r', 0x211c:u'r', 0x211d:u'r', 0x2120:u'sm',
-0x2121:u'tel', 0x2122:u'tm', 0x2124:u'z', 0x2128:u'z',
-0x212c:u'b', 0x212d:u'c', 0x2130:u'e', 0x2131:u'f',
-0x2133:u'm', 0x213e:u'\u03b3', 0x213f:u'\u03c0', 0x2145:u'd',
-0x3371:u'hpa', 0x3373:u'au', 0x3375:u'ov', 0x3380:u'pa',
-0x3381:u'na', 0x3382:u'\u03bca', 0x3383:u'ma', 0x3384:u'ka',
-0x3385:u'kb', 0x3386:u'mb', 0x3387:u'gb', 0x338a:u'pf',
-0x338b:u'nf', 0x338c:u'\u03bcf', 0x3390:u'hz', 0x3391:u'khz',
-0x3392:u'mhz', 0x3393:u'ghz', 0x3394:u'thz', 0x33a9:u'pa',
-0x33aa:u'kpa', 0x33ab:u'mpa', 0x33ac:u'gpa', 0x33b4:u'pv',
-0x33b5:u'nv', 0x33b6:u'\u03bcv', 0x33b7:u'mv', 0x33b8:u'kv',
-0x33b9:u'mv', 0x33ba:u'pw', 0x33bb:u'nw', 0x33bc:u'\u03bcw',
-0x33bd:u'mw', 0x33be:u'kw', 0x33bf:u'mw', 0x33c0:u'k\u03c9',
-0x33c1:u'm\u03c9', 0x33c3:u'bq', 0x33c6:u'c\u2215kg', 0x33c7:u'co.',
-0x33c8:u'db', 0x33c9:u'gy', 0x33cb:u'hp', 0x33cd:u'kk',
-0x33ce:u'km', 0x33d7:u'ph', 0x33d9:u'ppm', 0x33da:u'pr',
-0x33dc:u'sv', 0x33dd:u'wb', 0xfb00:u'ff', 0xfb01:u'fi',
-0xfb02:u'fl', 0xfb03:u'ffi', 0xfb04:u'ffl', 0xfb05:u'st',
-0xfb06:u'st', 0xfb13:u'\u0574\u0576', 0xfb14:u'\u0574\u0565', 0xfb15:u'\u0574\u056b',
-0xfb16:u'\u057e\u0576', 0xfb17:u'\u0574\u056d', 0x1d400:u'a', 0x1d401:u'b',
-0x1d402:u'c', 0x1d403:u'd', 0x1d404:u'e', 0x1d405:u'f',
-0x1d406:u'g', 0x1d407:u'h', 0x1d408:u'i', 0x1d409:u'j',
-0x1d40a:u'k', 0x1d40b:u'l', 0x1d40c:u'm', 0x1d40d:u'n',
-0x1d40e:u'o', 0x1d40f:u'p', 0x1d410:u'q', 0x1d411:u'r',
-0x1d412:u's', 0x1d413:u't', 0x1d414:u'u', 0x1d415:u'v',
-0x1d416:u'w', 0x1d417:u'x', 0x1d418:u'y', 0x1d419:u'z',
-0x1d434:u'a', 0x1d435:u'b', 0x1d436:u'c', 0x1d437:u'd',
-0x1d438:u'e', 0x1d439:u'f', 0x1d43a:u'g', 0x1d43b:u'h',
-0x1d43c:u'i', 0x1d43d:u'j', 0x1d43e:u'k', 0x1d43f:u'l',
-0x1d440:u'm', 0x1d441:u'n', 0x1d442:u'o', 0x1d443:u'p',
-0x1d444:u'q', 0x1d445:u'r', 0x1d446:u's', 0x1d447:u't',
-0x1d448:u'u', 0x1d449:u'v', 0x1d44a:u'w', 0x1d44b:u'x',
-0x1d44c:u'y', 0x1d44d:u'z', 0x1d468:u'a', 0x1d469:u'b',
-0x1d46a:u'c', 0x1d46b:u'd', 0x1d46c:u'e', 0x1d46d:u'f',
-0x1d46e:u'g', 0x1d46f:u'h', 0x1d470:u'i', 0x1d471:u'j',
-0x1d472:u'k', 0x1d473:u'l', 0x1d474:u'm', 0x1d475:u'n',
-0x1d476:u'o', 0x1d477:u'p', 0x1d478:u'q', 0x1d479:u'r',
-0x1d47a:u's', 0x1d47b:u't', 0x1d47c:u'u', 0x1d47d:u'v',
-0x1d47e:u'w', 0x1d47f:u'x', 0x1d480:u'y', 0x1d481:u'z',
-0x1d49c:u'a', 0x1d49e:u'c', 0x1d49f:u'd', 0x1d4a2:u'g',
-0x1d4a5:u'j', 0x1d4a6:u'k', 0x1d4a9:u'n', 0x1d4aa:u'o',
-0x1d4ab:u'p', 0x1d4ac:u'q', 0x1d4ae:u's', 0x1d4af:u't',
-0x1d4b0:u'u', 0x1d4b1:u'v', 0x1d4b2:u'w', 0x1d4b3:u'x',
-0x1d4b4:u'y', 0x1d4b5:u'z', 0x1d4d0:u'a', 0x1d4d1:u'b',
-0x1d4d2:u'c', 0x1d4d3:u'd', 0x1d4d4:u'e', 0x1d4d5:u'f',
-0x1d4d6:u'g', 0x1d4d7:u'h', 0x1d4d8:u'i', 0x1d4d9:u'j',
-0x1d4da:u'k', 0x1d4db:u'l', 0x1d4dc:u'm', 0x1d4dd:u'n',
-0x1d4de:u'o', 0x1d4df:u'p', 0x1d4e0:u'q', 0x1d4e1:u'r',
-0x1d4e2:u's', 0x1d4e3:u't', 0x1d4e4:u'u', 0x1d4e5:u'v',
-0x1d4e6:u'w', 0x1d4e7:u'x', 0x1d4e8:u'y', 0x1d4e9:u'z',
-0x1d504:u'a', 0x1d505:u'b', 0x1d507:u'd', 0x1d508:u'e',
-0x1d509:u'f', 0x1d50a:u'g', 0x1d50d:u'j', 0x1d50e:u'k',
-0x1d50f:u'l', 0x1d510:u'm', 0x1d511:u'n', 0x1d512:u'o',
-0x1d513:u'p', 0x1d514:u'q', 0x1d516:u's', 0x1d517:u't',
-0x1d518:u'u', 0x1d519:u'v', 0x1d51a:u'w', 0x1d51b:u'x',
-0x1d51c:u'y', 0x1d538:u'a', 0x1d539:u'b', 0x1d53b:u'd',
-0x1d53c:u'e', 0x1d53d:u'f', 0x1d53e:u'g', 0x1d540:u'i',
-0x1d541:u'j', 0x1d542:u'k', 0x1d543:u'l', 0x1d544:u'm',
-0x1d546:u'o', 0x1d54a:u's', 0x1d54b:u't', 0x1d54c:u'u',
-0x1d54d:u'v', 0x1d54e:u'w', 0x1d54f:u'x', 0x1d550:u'y',
-0x1d56c:u'a', 0x1d56d:u'b', 0x1d56e:u'c', 0x1d56f:u'd',
-0x1d570:u'e', 0x1d571:u'f', 0x1d572:u'g', 0x1d573:u'h',
-0x1d574:u'i', 0x1d575:u'j', 0x1d576:u'k', 0x1d577:u'l',
-0x1d578:u'm', 0x1d579:u'n', 0x1d57a:u'o', 0x1d57b:u'p',
-0x1d57c:u'q', 0x1d57d:u'r', 0x1d57e:u's', 0x1d57f:u't',
-0x1d580:u'u', 0x1d581:u'v', 0x1d582:u'w', 0x1d583:u'x',
-0x1d584:u'y', 0x1d585:u'z', 0x1d5a0:u'a', 0x1d5a1:u'b',
-0x1d5a2:u'c', 0x1d5a3:u'd', 0x1d5a4:u'e', 0x1d5a5:u'f',
-0x1d5a6:u'g', 0x1d5a7:u'h', 0x1d5a8:u'i', 0x1d5a9:u'j',
-0x1d5aa:u'k', 0x1d5ab:u'l', 0x1d5ac:u'm', 0x1d5ad:u'n',
-0x1d5ae:u'o', 0x1d5af:u'p', 0x1d5b0:u'q', 0x1d5b1:u'r',
-0x1d5b2:u's', 0x1d5b3:u't', 0x1d5b4:u'u', 0x1d5b5:u'v',
-0x1d5b6:u'w', 0x1d5b7:u'x', 0x1d5b8:u'y', 0x1d5b9:u'z',
-0x1d5d4:u'a', 0x1d5d5:u'b', 0x1d5d6:u'c', 0x1d5d7:u'd',
-0x1d5d8:u'e', 0x1d5d9:u'f', 0x1d5da:u'g', 0x1d5db:u'h',
-0x1d5dc:u'i', 0x1d5dd:u'j', 0x1d5de:u'k', 0x1d5df:u'l',
-0x1d5e0:u'm', 0x1d5e1:u'n', 0x1d5e2:u'o', 0x1d5e3:u'p',
-0x1d5e4:u'q', 0x1d5e5:u'r', 0x1d5e6:u's', 0x1d5e7:u't',
-0x1d5e8:u'u', 0x1d5e9:u'v', 0x1d5ea:u'w', 0x1d5eb:u'x',
-0x1d5ec:u'y', 0x1d5ed:u'z', 0x1d608:u'a', 0x1d609:u'b',
-0x1d60a:u'c', 0x1d60b:u'd', 0x1d60c:u'e', 0x1d60d:u'f',
-0x1d60e:u'g', 0x1d60f:u'h', 0x1d610:u'i', 0x1d611:u'j',
-0x1d612:u'k', 0x1d613:u'l', 0x1d614:u'm', 0x1d615:u'n',
-0x1d616:u'o', 0x1d617:u'p', 0x1d618:u'q', 0x1d619:u'r',
-0x1d61a:u's', 0x1d61b:u't', 0x1d61c:u'u', 0x1d61d:u'v',
-0x1d61e:u'w', 0x1d61f:u'x', 0x1d620:u'y', 0x1d621:u'z',
-0x1d63c:u'a', 0x1d63d:u'b', 0x1d63e:u'c', 0x1d63f:u'd',
-0x1d640:u'e', 0x1d641:u'f', 0x1d642:u'g', 0x1d643:u'h',
-0x1d644:u'i', 0x1d645:u'j', 0x1d646:u'k', 0x1d647:u'l',
-0x1d648:u'm', 0x1d649:u'n', 0x1d64a:u'o', 0x1d64b:u'p',
-0x1d64c:u'q', 0x1d64d:u'r', 0x1d64e:u's', 0x1d64f:u't',
-0x1d650:u'u', 0x1d651:u'v', 0x1d652:u'w', 0x1d653:u'x',
-0x1d654:u'y', 0x1d655:u'z', 0x1d670:u'a', 0x1d671:u'b',
-0x1d672:u'c', 0x1d673:u'd', 0x1d674:u'e', 0x1d675:u'f',
-0x1d676:u'g', 0x1d677:u'h', 0x1d678:u'i', 0x1d679:u'j',
-0x1d67a:u'k', 0x1d67b:u'l', 0x1d67c:u'm', 0x1d67d:u'n',
-0x1d67e:u'o', 0x1d67f:u'p', 0x1d680:u'q', 0x1d681:u'r',
-0x1d682:u's', 0x1d683:u't', 0x1d684:u'u', 0x1d685:u'v',
-0x1d686:u'w', 0x1d687:u'x', 0x1d688:u'y', 0x1d689:u'z',
-0x1d6a8:u'\u03b1', 0x1d6a9:u'\u03b2', 0x1d6aa:u'\u03b3', 0x1d6ab:u'\u03b4',
-0x1d6ac:u'\u03b5', 0x1d6ad:u'\u03b6', 0x1d6ae:u'\u03b7', 0x1d6af:u'\u03b8',
-0x1d6b0:u'\u03b9', 0x1d6b1:u'\u03ba', 0x1d6b2:u'\u03bb', 0x1d6b3:u'\u03bc',
-0x1d6b4:u'\u03bd', 0x1d6b5:u'\u03be', 0x1d6b6:u'\u03bf', 0x1d6b7:u'\u03c0',
-0x1d6b8:u'\u03c1', 0x1d6b9:u'\u03b8', 0x1d6ba:u'\u03c3', 0x1d6bb:u'\u03c4',
-0x1d6bc:u'\u03c5', 0x1d6bd:u'\u03c6', 0x1d6be:u'\u03c7', 0x1d6bf:u'\u03c8',
-0x1d6c0:u'\u03c9', 0x1d6d3:u'\u03c3', 0x1d6e2:u'\u03b1', 0x1d6e3:u'\u03b2',
-0x1d6e4:u'\u03b3', 0x1d6e5:u'\u03b4', 0x1d6e6:u'\u03b5', 0x1d6e7:u'\u03b6',
-0x1d6e8:u'\u03b7', 0x1d6e9:u'\u03b8', 0x1d6ea:u'\u03b9', 0x1d6eb:u'\u03ba',
-0x1d6ec:u'\u03bb', 0x1d6ed:u'\u03bc', 0x1d6ee:u'\u03bd', 0x1d6ef:u'\u03be',
-0x1d6f0:u'\u03bf', 0x1d6f1:u'\u03c0', 0x1d6f2:u'\u03c1', 0x1d6f3:u'\u03b8',
-0x1d6f4:u'\u03c3', 0x1d6f5:u'\u03c4', 0x1d6f6:u'\u03c5', 0x1d6f7:u'\u03c6',
-0x1d6f8:u'\u03c7', 0x1d6f9:u'\u03c8', 0x1d6fa:u'\u03c9', 0x1d70d:u'\u03c3',
-0x1d71c:u'\u03b1', 0x1d71d:u'\u03b2', 0x1d71e:u'\u03b3', 0x1d71f:u'\u03b4',
-0x1d720:u'\u03b5', 0x1d721:u'\u03b6', 0x1d722:u'\u03b7', 0x1d723:u'\u03b8',
-0x1d724:u'\u03b9', 0x1d725:u'\u03ba', 0x1d726:u'\u03bb', 0x1d727:u'\u03bc',
-0x1d728:u'\u03bd', 0x1d729:u'\u03be', 0x1d72a:u'\u03bf', 0x1d72b:u'\u03c0',
-0x1d72c:u'\u03c1', 0x1d72d:u'\u03b8', 0x1d72e:u'\u03c3', 0x1d72f:u'\u03c4',
-0x1d730:u'\u03c5', 0x1d731:u'\u03c6', 0x1d732:u'\u03c7', 0x1d733:u'\u03c8',
-0x1d734:u'\u03c9', 0x1d747:u'\u03c3', 0x1d756:u'\u03b1', 0x1d757:u'\u03b2',
-0x1d758:u'\u03b3', 0x1d759:u'\u03b4', 0x1d75a:u'\u03b5', 0x1d75b:u'\u03b6',
-0x1d75c:u'\u03b7', 0x1d75d:u'\u03b8', 0x1d75e:u'\u03b9', 0x1d75f:u'\u03ba',
-0x1d760:u'\u03bb', 0x1d761:u'\u03bc', 0x1d762:u'\u03bd', 0x1d763:u'\u03be',
-0x1d764:u'\u03bf', 0x1d765:u'\u03c0', 0x1d766:u'\u03c1', 0x1d767:u'\u03b8',
-0x1d768:u'\u03c3', 0x1d769:u'\u03c4', 0x1d76a:u'\u03c5', 0x1d76b:u'\u03c6',
-0x1d76c:u'\u03c7', 0x1d76d:u'\u03c8', 0x1d76e:u'\u03c9', 0x1d781:u'\u03c3',
-0x1d790:u'\u03b1', 0x1d791:u'\u03b2', 0x1d792:u'\u03b3', 0x1d793:u'\u03b4',
-0x1d794:u'\u03b5', 0x1d795:u'\u03b6', 0x1d796:u'\u03b7', 0x1d797:u'\u03b8',
-0x1d798:u'\u03b9', 0x1d799:u'\u03ba', 0x1d79a:u'\u03bb', 0x1d79b:u'\u03bc',
-0x1d79c:u'\u03bd', 0x1d79d:u'\u03be', 0x1d79e:u'\u03bf', 0x1d79f:u'\u03c0',
-0x1d7a0:u'\u03c1', 0x1d7a1:u'\u03b8', 0x1d7a2:u'\u03c3', 0x1d7a3:u'\u03c4',
-0x1d7a4:u'\u03c5', 0x1d7a5:u'\u03c6', 0x1d7a6:u'\u03c7', 0x1d7a7:u'\u03c8',
-0x1d7a8:u'\u03c9', 0x1d7bb:u'\u03c3', }
-
-def map_table_b3(code):
- r = b3_exceptions.get(ord(code))
- if r is not None: return r
- return code.lower()
-
-
-def map_table_b2(a):
- al = map_table_b3(a)
- b = unicodedata.normalize("NFKC", al)
- bl = u"".join([map_table_b3(ch) for ch in b])
- c = unicodedata.normalize("NFKC", bl)
- if b != c:
- return c
- else:
- return al
-
-
-def in_table_c11(code):
- return code == u" "
-
-
-def in_table_c12(code):
- return unicodedata.category(code) == "Zs" and code != u" "
-
-def in_table_c11_c12(code):
- return unicodedata.category(code) == "Zs"
-
-
-def in_table_c21(code):
- return ord(code) < 128 and unicodedata.category(code) == "Cc"
-
-c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + range(8288,8292) + range(8298,8304) + range(65529,65533) + range(119155,119163))
-def in_table_c22(code):
- c = ord(code)
- if c < 128: return False
- if unicodedata.category(code) == "Cc": return True
- return c in c22_specials
-
-def in_table_c21_c22(code):
- return unicodedata.category(code) == "Cc" or \
- ord(code) in c22_specials
-
-
-def in_table_c3(code):
- return unicodedata.category(code) == "Co"
-
-
-def in_table_c4(code):
- c = ord(code)
- if c < 0xFDD0: return False
- if c < 0xFDF0: return True
- return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF)
-
-
-def in_table_c5(code):
- return unicodedata.category(code) == "Cs"
-
-
-c6_set = set(range(65529,65534))
-def in_table_c6(code):
- return ord(code) in c6_set
-
-
-c7_set = set(range(12272,12284))
-def in_table_c7(code):
- return ord(code) in c7_set
-
-
-c8_set = set([832, 833, 8206, 8207] + range(8234,8239) + range(8298,8304))
-def in_table_c8(code):
- return ord(code) in c8_set
-
-
-c9_set = set([917505] + range(917536,917632))
-def in_table_c9(code):
- return ord(code) in c9_set
-
-
-def in_table_d1(code):
- return unicodedata.bidirectional(code) in ("R","AL")
-
-
-def in_table_d2(code):
- return unicodedata.bidirectional(code) == "L"
diff --git a/sys/lib/python/struct.py b/sys/lib/python/struct.py
deleted file mode 100644
index 07c21bf9b..000000000
--- a/sys/lib/python/struct.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""
-Functions to convert between Python values and C structs.
-Python strings are used to hold the data representing the C struct
-and also as format strings to describe the layout of data in the C struct.
-
-The optional first format char indicates byte order, size and alignment:
- @: native order, size & alignment (default)
- =: native order, std. size & alignment
- <: little-endian, std. size & alignment
- >: big-endian, std. size & alignment
- !: same as >
-
-The remaining chars indicate types of args and must match exactly;
-these can be preceded by a decimal repeat count:
- x: pad byte (no data); c:char; b:signed byte; B:unsigned byte;
- h:short; H:unsigned short; i:int; I:unsigned int;
- l:long; L:unsigned long; f:float; d:double.
-Special cases (preceding decimal count indicates length):
- s:string (array of char); p: pascal string (with count byte).
-Special case (only available in native format):
- P:an integer type that is wide enough to hold a pointer.
-Special case (not in native mode unless 'long long' in platform C):
- q:long long; Q:unsigned long long
-Whitespace between formats is ignored.
-
-The variable struct.error is an exception raised on errors.
-"""
-__version__ = '0.1'
-
-from _struct import Struct, error
-
-_MAXCACHE = 100
-_cache = {}
-
-def _compile(fmt):
- # Internal: compile struct pattern
- if len(_cache) >= _MAXCACHE:
- _cache.clear()
- s = Struct(fmt)
- _cache[fmt] = s
- return s
-
-def calcsize(fmt):
- """
- Return size of C struct described by format string fmt.
- See struct.__doc__ for more on format strings.
- """
- try:
- o = _cache[fmt]
- except KeyError:
- o = _compile(fmt)
- return o.size
-
-def pack(fmt, *args):
- """
- Return string containing values v1, v2, ... packed according to fmt.
- See struct.__doc__ for more on format strings.
- """
- try:
- o = _cache[fmt]
- except KeyError:
- o = _compile(fmt)
- return o.pack(*args)
-
-def pack_into(fmt, buf, offset, *args):
- """
- Pack the values v1, v2, ... according to fmt, write
- the packed bytes into the writable buffer buf starting at offset.
- See struct.__doc__ for more on format strings.
- """
- try:
- o = _cache[fmt]
- except KeyError:
- o = _compile(fmt)
- return o.pack_into(buf, offset, *args)
-
-def unpack(fmt, s):
- """
- Unpack the string, containing packed C structure data, according
- to fmt. Requires len(string)==calcsize(fmt).
- See struct.__doc__ for more on format strings.
- """
- try:
- o = _cache[fmt]
- except KeyError:
- o = _compile(fmt)
- return o.unpack(s)
-
-def unpack_from(fmt, buf, offset=0):
- """
- Unpack the buffer, containing packed C structure data, according to
- fmt starting at offset. Requires len(buffer[offset:]) >= calcsize(fmt).
- See struct.__doc__ for more on format strings.
- """
- try:
- o = _cache[fmt]
- except KeyError:
- o = _compile(fmt)
- return o.unpack_from(buf, offset)
diff --git a/sys/lib/python/subprocess.py b/sys/lib/python/subprocess.py
deleted file mode 100644
index 33a3bc226..000000000
--- a/sys/lib/python/subprocess.py
+++ /dev/null
@@ -1,1249 +0,0 @@
-# subprocess - Subprocesses with accessible I/O streams
-#
-# For more information about this module, see PEP 324.
-#
-# This module should remain compatible with Python 2.2, see PEP 291.
-#
-# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
-#
-# Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/2.4/license for licensing details.
-
-r"""subprocess - Subprocesses with accessible I/O streams
-
-This module allows you to spawn processes, connect to their
-input/output/error pipes, and obtain their return codes. This module
-intends to replace several other, older modules and functions, like:
-
-os.system
-os.spawn*
-os.popen*
-popen2.*
-commands.*
-
-Information about how the subprocess module can be used to replace these
-modules and functions can be found below.
-
-
-
-Using the subprocess module
-===========================
-This module defines one class called Popen:
-
-class Popen(args, bufsize=0, executable=None,
- stdin=None, stdout=None, stderr=None,
- preexec_fn=None, close_fds=False, shell=False,
- cwd=None, env=None, universal_newlines=False,
- startupinfo=None, creationflags=0):
-
-
-Arguments are:
-
-args should be a string, or a sequence of program arguments. The
-program to execute is normally the first item in the args sequence or
-string, but can be explicitly set by using the executable argument.
-
-On UNIX, with shell=False (default): In this case, the Popen class
-uses os.execvp() to execute the child program. args should normally
-be a sequence. A string will be treated as a sequence with the string
-as the only item (the program to execute).
-
-On UNIX, with shell=True: If args is a string, it specifies the
-command string to execute through the shell. If args is a sequence,
-the first item specifies the command string, and any additional items
-will be treated as additional shell arguments.
-
-On Windows: the Popen class uses CreateProcess() to execute the child
-program, which operates on strings. If args is a sequence, it will be
-converted to a string using the list2cmdline method. Please note that
-not all MS Windows applications interpret the command line the same
-way: The list2cmdline is designed for applications using the same
-rules as the MS C runtime.
-
-bufsize, if given, has the same meaning as the corresponding argument
-to the built-in open() function: 0 means unbuffered, 1 means line
-buffered, any other positive value means use a buffer of
-(approximately) that size. A negative bufsize means to use the system
-default, which usually means fully buffered. The default value for
-bufsize is 0 (unbuffered).
-
-stdin, stdout and stderr specify the executed programs' standard
-input, standard output and standard error file handles, respectively.
-Valid values are PIPE, an existing file descriptor (a positive
-integer), an existing file object, and None. PIPE indicates that a
-new pipe to the child should be created. With None, no redirection
-will occur; the child's file handles will be inherited from the
-parent. Additionally, stderr can be STDOUT, which indicates that the
-stderr data from the applications should be captured into the same
-file handle as for stdout.
-
-If preexec_fn is set to a callable object, this object will be called
-in the child process just before the child is executed.
-
-If close_fds is true, all file descriptors except 0, 1 and 2 will be
-closed before the child process is executed.
-
-if shell is true, the specified command will be executed through the
-shell.
-
-If cwd is not None, the current directory will be changed to cwd
-before the child is executed.
-
-If env is not None, it defines the environment variables for the new
-process.
-
-If universal_newlines is true, the file objects stdout and stderr are
-opened as a text files, but lines may be terminated by any of '\n',
-the Unix end-of-line convention, '\r', the Macintosh convention or
-'\r\n', the Windows convention. All of these external representations
-are seen as '\n' by the Python program. Note: This feature is only
-available if Python is built with universal newline support (the
-default). Also, the newlines attribute of the file objects stdout,
-stdin and stderr are not updated by the communicate() method.
-
-The startupinfo and creationflags, if given, will be passed to the
-underlying CreateProcess() function. They can specify things such as
-appearance of the main window and priority for the new process.
-(Windows only)
-
-
-This module also defines two shortcut functions:
-
-call(*popenargs, **kwargs):
- Run command with arguments. Wait for command to complete, then
- return the returncode attribute.
-
- The arguments are the same as for the Popen constructor. Example:
-
- retcode = call(["ls", "-l"])
-
-check_call(*popenargs, **kwargs):
- Run command with arguments. Wait for command to complete. If the
- exit code was zero then return, otherwise raise
- CalledProcessError. The CalledProcessError object will have the
- return code in the returncode attribute.
-
- The arguments are the same as for the Popen constructor. Example:
-
- check_call(["ls", "-l"])
-
-Exceptions
-----------
-Exceptions raised in the child process, before the new program has
-started to execute, will be re-raised in the parent. Additionally,
-the exception object will have one extra attribute called
-'child_traceback', which is a string containing traceback information
-from the childs point of view.
-
-The most common exception raised is OSError. This occurs, for
-example, when trying to execute a non-existent file. Applications
-should prepare for OSErrors.
-
-A ValueError will be raised if Popen is called with invalid arguments.
-
-check_call() will raise CalledProcessError, if the called process
-returns a non-zero return code.
-
-
-Security
---------
-Unlike some other popen functions, this implementation will never call
-/bin/sh implicitly. This means that all characters, including shell
-metacharacters, can safely be passed to child processes.
-
-
-Popen objects
-=============
-Instances of the Popen class have the following methods:
-
-poll()
- Check if child process has terminated. Returns returncode
- attribute.
-
-wait()
- Wait for child process to terminate. Returns returncode attribute.
-
-communicate(input=None)
- Interact with process: Send data to stdin. Read data from stdout
- and stderr, until end-of-file is reached. Wait for process to
- terminate. The optional input argument should be a string to be
- sent to the child process, or None, if no data should be sent to
- the child.
-
- communicate() returns a tuple (stdout, stderr).
-
- Note: The data read is buffered in memory, so do not use this
- method if the data size is large or unlimited.
-
-The following attributes are also available:
-
-stdin
- If the stdin argument is PIPE, this attribute is a file object
- that provides input to the child process. Otherwise, it is None.
-
-stdout
- If the stdout argument is PIPE, this attribute is a file object
- that provides output from the child process. Otherwise, it is
- None.
-
-stderr
- If the stderr argument is PIPE, this attribute is file object that
- provides error output from the child process. Otherwise, it is
- None.
-
-pid
- The process ID of the child process.
-
-returncode
- The child return code. A None value indicates that the process
- hasn't terminated yet. A negative value -N indicates that the
- child was terminated by signal N (UNIX only).
-
-
-Replacing older functions with the subprocess module
-====================================================
-In this section, "a ==> b" means that b can be used as a replacement
-for a.
-
-Note: All functions in this section fail (more or less) silently if
-the executed program cannot be found; this module raises an OSError
-exception.
-
-In the following examples, we assume that the subprocess module is
-imported with "from subprocess import *".
-
-
-Replacing /bin/sh shell backquote
----------------------------------
-output=`mycmd myarg`
-==>
-output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
-
-
-Replacing shell pipe line
--------------------------
-output=`dmesg | grep hda`
-==>
-p1 = Popen(["dmesg"], stdout=PIPE)
-p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
-output = p2.communicate()[0]
-
-
-Replacing os.system()
----------------------
-sts = os.system("mycmd" + " myarg")
-==>
-p = Popen("mycmd" + " myarg", shell=True)
-pid, sts = os.waitpid(p.pid, 0)
-
-Note:
-
-* Calling the program through the shell is usually not required.
-
-* It's easier to look at the returncode attribute than the
- exitstatus.
-
-A more real-world example would look like this:
-
-try:
- retcode = call("mycmd" + " myarg", shell=True)
- if retcode < 0:
- print >>sys.stderr, "Child was terminated by signal", -retcode
- else:
- print >>sys.stderr, "Child returned", retcode
-except OSError, e:
- print >>sys.stderr, "Execution failed:", e
-
-
-Replacing os.spawn*
--------------------
-P_NOWAIT example:
-
-pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
-==>
-pid = Popen(["/bin/mycmd", "myarg"]).pid
-
-
-P_WAIT example:
-
-retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
-==>
-retcode = call(["/bin/mycmd", "myarg"])
-
-
-Vector example:
-
-os.spawnvp(os.P_NOWAIT, path, args)
-==>
-Popen([path] + args[1:])
-
-
-Environment example:
-
-os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
-==>
-Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
-
-
-Replacing os.popen*
--------------------
-pipe = os.popen(cmd, mode='r', bufsize)
-==>
-pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
-
-pipe = os.popen(cmd, mode='w', bufsize)
-==>
-pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
-
-
-(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
-==>
-p = Popen(cmd, shell=True, bufsize=bufsize,
- stdin=PIPE, stdout=PIPE, close_fds=True)
-(child_stdin, child_stdout) = (p.stdin, p.stdout)
-
-
-(child_stdin,
- child_stdout,
- child_stderr) = os.popen3(cmd, mode, bufsize)
-==>
-p = Popen(cmd, shell=True, bufsize=bufsize,
- stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
-(child_stdin,
- child_stdout,
- child_stderr) = (p.stdin, p.stdout, p.stderr)
-
-
-(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
-==>
-p = Popen(cmd, shell=True, bufsize=bufsize,
- stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
-(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
-
-
-Replacing popen2.*
-------------------
-Note: If the cmd argument to popen2 functions is a string, the command
-is executed through /bin/sh. If it is a list, the command is directly
-executed.
-
-(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
-==>
-p = Popen(["somestring"], shell=True, bufsize=bufsize
- stdin=PIPE, stdout=PIPE, close_fds=True)
-(child_stdout, child_stdin) = (p.stdout, p.stdin)
-
-
-(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
-==>
-p = Popen(["mycmd", "myarg"], bufsize=bufsize,
- stdin=PIPE, stdout=PIPE, close_fds=True)
-(child_stdout, child_stdin) = (p.stdout, p.stdin)
-
-The popen2.Popen3 and popen3.Popen4 basically works as subprocess.Popen,
-except that:
-
-* subprocess.Popen raises an exception if the execution fails
-* the capturestderr argument is replaced with the stderr argument.
-* stdin=PIPE and stdout=PIPE must be specified.
-* popen2 closes all filedescriptors by default, but you have to specify
- close_fds=True with subprocess.Popen.
-
-
-"""
-
-import sys
-mswindows = (sys.platform == "win32")
-
-import os
-import types
-import traceback
-
-# Exception classes used by this module.
-class CalledProcessError(Exception):
- """This exception is raised when a process run by check_call() returns
- a non-zero exit status. The exit status will be stored in the
- returncode attribute."""
- def __init__(self, returncode, cmd):
- self.returncode = returncode
- self.cmd = cmd
- def __str__(self):
- return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
-
-
-if mswindows:
- import threading
- import msvcrt
- if 0: # <-- change this to use pywin32 instead of the _subprocess driver
- import pywintypes
- from win32api import GetStdHandle, STD_INPUT_HANDLE, \
- STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
- from win32api import GetCurrentProcess, DuplicateHandle, \
- GetModuleFileName, GetVersion
- from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
- from win32pipe import CreatePipe
- from win32process import CreateProcess, STARTUPINFO, \
- GetExitCodeProcess, STARTF_USESTDHANDLES, \
- STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
- from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
- else:
- from _subprocess import *
- class STARTUPINFO:
- dwFlags = 0
- hStdInput = None
- hStdOutput = None
- hStdError = None
- wShowWindow = 0
- class pywintypes:
- error = IOError
-else:
- import select
- import errno
- import fcntl
- import pickle
-
-__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
-
-try:
- MAXFD = os.sysconf("SC_OPEN_MAX")
-except:
- MAXFD = 256
-
-# True/False does not exist on 2.2.0
-try:
- False
-except NameError:
- False = 0
- True = 1
-
-_active = []
-
-def _cleanup():
- for inst in _active[:]:
- if inst.poll(_deadstate=sys.maxint) >= 0:
- try:
- _active.remove(inst)
- except ValueError:
- # This can happen if two threads create a new Popen instance.
- # It's harmless that it was already removed, so ignore.
- pass
-
-PIPE = -1
-STDOUT = -2
-
-
-def call(*popenargs, **kwargs):
- """Run command with arguments. Wait for command to complete, then
- return the returncode attribute.
-
- The arguments are the same as for the Popen constructor. Example:
-
- retcode = call(["ls", "-l"])
- """
- return Popen(*popenargs, **kwargs).wait()
-
-
-def check_call(*popenargs, **kwargs):
- """Run command with arguments. Wait for command to complete. If
- the exit code was zero then return, otherwise raise
- CalledProcessError. The CalledProcessError object will have the
- return code in the returncode attribute.
-
- The arguments are the same as for the Popen constructor. Example:
-
- check_call(["ls", "-l"])
- """
- retcode = call(*popenargs, **kwargs)
- cmd = kwargs.get("args")
- if cmd is None:
- cmd = popenargs[0]
- if retcode:
- raise CalledProcessError(retcode, cmd)
- return retcode
-
-
-def list2cmdline(seq):
- """
- Translate a sequence of arguments into a command line
- string, using the same rules as the MS C runtime:
-
- 1) Arguments are delimited by white space, which is either a
- space or a tab.
-
- 2) A string surrounded by double quotation marks is
- interpreted as a single argument, regardless of white space
- contained within. A quoted string can be embedded in an
- argument.
-
- 3) A double quotation mark preceded by a backslash is
- interpreted as a literal double quotation mark.
-
- 4) Backslashes are interpreted literally, unless they
- immediately precede a double quotation mark.
-
- 5) If backslashes immediately precede a double quotation mark,
- every pair of backslashes is interpreted as a literal
- backslash. If the number of backslashes is odd, the last
- backslash escapes the next double quotation mark as
- described in rule 3.
- """
-
- # See
- # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
- result = []
- needquote = False
- for arg in seq:
- bs_buf = []
-
- # Add a space to separate this argument from the others
- if result:
- result.append(' ')
-
- needquote = (" " in arg) or ("\t" in arg) or arg == ""
- if needquote:
- result.append('"')
-
- for c in arg:
- if c == '\\':
- # Don't know if we need to double yet.
- bs_buf.append(c)
- elif c == '"':
- # Double backspaces.
- result.append('\\' * len(bs_buf)*2)
- bs_buf = []
- result.append('\\"')
- else:
- # Normal char
- if bs_buf:
- result.extend(bs_buf)
- bs_buf = []
- result.append(c)
-
- # Add remaining backspaces, if any.
- if bs_buf:
- result.extend(bs_buf)
-
- if needquote:
- result.extend(bs_buf)
- result.append('"')
-
- return ''.join(result)
-
-
-class Popen(object):
- def __init__(self, args, bufsize=0, executable=None,
- stdin=None, stdout=None, stderr=None,
- preexec_fn=None, close_fds=False, shell=False,
- cwd=None, env=None, universal_newlines=False,
- startupinfo=None, creationflags=0):
- """Create new Popen instance."""
- _cleanup()
-
- self._child_created = False
- if not isinstance(bufsize, (int, long)):
- raise TypeError("bufsize must be an integer")
-
- if mswindows:
- if preexec_fn is not None:
- raise ValueError("preexec_fn is not supported on Windows "
- "platforms")
- if close_fds:
- raise ValueError("close_fds is not supported on Windows "
- "platforms")
- else:
- # POSIX
- if startupinfo is not None:
- raise ValueError("startupinfo is only supported on Windows "
- "platforms")
- if creationflags != 0:
- raise ValueError("creationflags is only supported on Windows "
- "platforms")
-
- self.stdin = None
- self.stdout = None
- self.stderr = None
- self.pid = None
- self.returncode = None
- self.universal_newlines = universal_newlines
-
- # Input and output objects. The general principle is like
- # this:
- #
- # Parent Child
- # ------ -----
- # p2cwrite ---stdin---> p2cread
- # c2pread <--stdout--- c2pwrite
- # errread <--stderr--- errwrite
- #
- # On POSIX, the child objects are file descriptors. On
- # Windows, these are Windows file handles. The parent objects
- # are file descriptors on both platforms. The parent objects
- # are None when not using PIPEs. The child objects are None
- # when not redirecting.
-
- (p2cread, p2cwrite,
- c2pread, c2pwrite,
- errread, errwrite) = self._get_handles(stdin, stdout, stderr)
-
- self._execute_child(args, executable, preexec_fn, close_fds,
- cwd, env, universal_newlines,
- startupinfo, creationflags, shell,
- p2cread, p2cwrite,
- c2pread, c2pwrite,
- errread, errwrite)
-
- # On Windows, you cannot just redirect one or two handles: You
- # either have to redirect all three or none. If the subprocess
- # user has only redirected one or two handles, we are
- # automatically creating PIPEs for the rest. We should close
- # these after the process is started. See bug #1124861.
- if mswindows:
- if stdin is None and p2cwrite is not None:
- os.close(p2cwrite)
- p2cwrite = None
- if stdout is None and c2pread is not None:
- os.close(c2pread)
- c2pread = None
- if stderr is None and errread is not None:
- os.close(errread)
- errread = None
-
- if p2cwrite:
- self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
- if c2pread:
- if universal_newlines:
- self.stdout = os.fdopen(c2pread, 'rU', bufsize)
- else:
- self.stdout = os.fdopen(c2pread, 'rb', bufsize)
- if errread:
- if universal_newlines:
- self.stderr = os.fdopen(errread, 'rU', bufsize)
- else:
- self.stderr = os.fdopen(errread, 'rb', bufsize)
-
-
- def _translate_newlines(self, data):
- data = data.replace("\r\n", "\n")
- data = data.replace("\r", "\n")
- return data
-
-
- def __del__(self):
- if not self._child_created:
- # We didn't get to successfully create a child process.
- return
- # In case the child hasn't been waited on, check if it's done.
- self.poll(_deadstate=sys.maxint)
- if self.returncode is None and _active is not None:
- # Child is still running, keep us alive until we can wait on it.
- _active.append(self)
-
-
- def communicate(self, input=None):
- """Interact with process: Send data to stdin. Read data from
- stdout and stderr, until end-of-file is reached. Wait for
- process to terminate. The optional input argument should be a
- string to be sent to the child process, or None, if no data
- should be sent to the child.
-
- communicate() returns a tuple (stdout, stderr)."""
-
- # Optimization: If we are only using one pipe, or no pipe at
- # all, using select() or threads is unnecessary.
- if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
- stdout = None
- stderr = None
- if self.stdin:
- if input:
- self.stdin.write(input)
- self.stdin.close()
- elif self.stdout:
- stdout = self.stdout.read()
- elif self.stderr:
- stderr = self.stderr.read()
- self.wait()
- return (stdout, stderr)
-
- return self._communicate(input)
-
-
- if mswindows:
- #
- # Windows methods
- #
- def _get_handles(self, stdin, stdout, stderr):
- """Construct and return tupel with IO objects:
- p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
- """
- if stdin is None and stdout is None and stderr is None:
- return (None, None, None, None, None, None)
-
- p2cread, p2cwrite = None, None
- c2pread, c2pwrite = None, None
- errread, errwrite = None, None
-
- if stdin is None:
- p2cread = GetStdHandle(STD_INPUT_HANDLE)
- if p2cread is not None:
- pass
- elif stdin is None or stdin == PIPE:
- p2cread, p2cwrite = CreatePipe(None, 0)
- # Detach and turn into fd
- p2cwrite = p2cwrite.Detach()
- p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
- elif isinstance(stdin, int):
- p2cread = msvcrt.get_osfhandle(stdin)
- else:
- # Assuming file-like object
- p2cread = msvcrt.get_osfhandle(stdin.fileno())
- p2cread = self._make_inheritable(p2cread)
-
- if stdout is None:
- c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
- if c2pwrite is not None:
- pass
- elif stdout is None or stdout == PIPE:
- c2pread, c2pwrite = CreatePipe(None, 0)
- # Detach and turn into fd
- c2pread = c2pread.Detach()
- c2pread = msvcrt.open_osfhandle(c2pread, 0)
- elif isinstance(stdout, int):
- c2pwrite = msvcrt.get_osfhandle(stdout)
- else:
- # Assuming file-like object
- c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
- c2pwrite = self._make_inheritable(c2pwrite)
-
- if stderr is None:
- errwrite = GetStdHandle(STD_ERROR_HANDLE)
- if errwrite is not None:
- pass
- elif stderr is None or stderr == PIPE:
- errread, errwrite = CreatePipe(None, 0)
- # Detach and turn into fd
- errread = errread.Detach()
- errread = msvcrt.open_osfhandle(errread, 0)
- elif stderr == STDOUT:
- errwrite = c2pwrite
- elif isinstance(stderr, int):
- errwrite = msvcrt.get_osfhandle(stderr)
- else:
- # Assuming file-like object
- errwrite = msvcrt.get_osfhandle(stderr.fileno())
- errwrite = self._make_inheritable(errwrite)
-
- return (p2cread, p2cwrite,
- c2pread, c2pwrite,
- errread, errwrite)
-
-
- def _make_inheritable(self, handle):
- """Return a duplicate of handle, which is inheritable"""
- return DuplicateHandle(GetCurrentProcess(), handle,
- GetCurrentProcess(), 0, 1,
- DUPLICATE_SAME_ACCESS)
-
-
- def _find_w9xpopen(self):
- """Find and return absolut path to w9xpopen.exe"""
- w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
- "w9xpopen.exe")
- if not os.path.exists(w9xpopen):
- # Eeek - file-not-found - possibly an embedding
- # situation - see if we can locate it in sys.exec_prefix
- w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
- "w9xpopen.exe")
- if not os.path.exists(w9xpopen):
- raise RuntimeError("Cannot locate w9xpopen.exe, which is "
- "needed for Popen to work with your "
- "shell or platform.")
- return w9xpopen
-
-
- def _execute_child(self, args, executable, preexec_fn, close_fds,
- cwd, env, universal_newlines,
- startupinfo, creationflags, shell,
- p2cread, p2cwrite,
- c2pread, c2pwrite,
- errread, errwrite):
- """Execute program (MS Windows version)"""
-
- if not isinstance(args, types.StringTypes):
- args = list2cmdline(args)
-
- # Process startup details
- if startupinfo is None:
- startupinfo = STARTUPINFO()
- if None not in (p2cread, c2pwrite, errwrite):
- startupinfo.dwFlags |= STARTF_USESTDHANDLES
- startupinfo.hStdInput = p2cread
- startupinfo.hStdOutput = c2pwrite
- startupinfo.hStdError = errwrite
-
- if shell:
- startupinfo.dwFlags |= STARTF_USESHOWWINDOW
- startupinfo.wShowWindow = SW_HIDE
- comspec = os.environ.get("COMSPEC", "cmd.exe")
- args = comspec + " /c " + args
- if (GetVersion() >= 0x80000000L or
- os.path.basename(comspec).lower() == "command.com"):
- # Win9x, or using command.com on NT. We need to
- # use the w9xpopen intermediate program. For more
- # information, see KB Q150956
- # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
- w9xpopen = self._find_w9xpopen()
- args = '"%s" %s' % (w9xpopen, args)
- # Not passing CREATE_NEW_CONSOLE has been known to
- # cause random failures on win9x. Specifically a
- # dialog: "Your program accessed mem currently in
- # use at xxx" and a hopeful warning about the
- # stability of your system. Cost is Ctrl+C wont
- # kill children.
- creationflags |= CREATE_NEW_CONSOLE
-
- # Start the process
- try:
- hp, ht, pid, tid = CreateProcess(executable, args,
- # no special security
- None, None,
- # must inherit handles to pass std
- # handles
- 1,
- creationflags,
- env,
- cwd,
- startupinfo)
- except pywintypes.error, e:
- # Translate pywintypes.error to WindowsError, which is
- # a subclass of OSError. FIXME: We should really
- # translate errno using _sys_errlist (or simliar), but
- # how can this be done from Python?
- raise WindowsError(*e.args)
-
- # Retain the process handle, but close the thread handle
- self._child_created = True
- self._handle = hp
- self.pid = pid
- ht.Close()
-
- # Child is launched. Close the parent's copy of those pipe
- # handles that only the child should have open. You need
- # to make sure that no handles to the write end of the
- # output pipe are maintained in this process or else the
- # pipe will not close when the child process exits and the
- # ReadFile will hang.
- if p2cread is not None:
- p2cread.Close()
- if c2pwrite is not None:
- c2pwrite.Close()
- if errwrite is not None:
- errwrite.Close()
-
-
- def poll(self, _deadstate=None):
- """Check if child process has terminated. Returns returncode
- attribute."""
- if self.returncode is None:
- if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
- self.returncode = GetExitCodeProcess(self._handle)
- return self.returncode
-
-
- def wait(self):
- """Wait for child process to terminate. Returns returncode
- attribute."""
- if self.returncode is None:
- obj = WaitForSingleObject(self._handle, INFINITE)
- self.returncode = GetExitCodeProcess(self._handle)
- return self.returncode
-
-
- def _readerthread(self, fh, buffer):
- buffer.append(fh.read())
-
-
- def _communicate(self, input):
- stdout = None # Return
- stderr = None # Return
-
- if self.stdout:
- stdout = []
- stdout_thread = threading.Thread(target=self._readerthread,
- args=(self.stdout, stdout))
- stdout_thread.setDaemon(True)
- stdout_thread.start()
- if self.stderr:
- stderr = []
- stderr_thread = threading.Thread(target=self._readerthread,
- args=(self.stderr, stderr))
- stderr_thread.setDaemon(True)
- stderr_thread.start()
-
- if self.stdin:
- if input is not None:
- self.stdin.write(input)
- self.stdin.close()
-
- if self.stdout:
- stdout_thread.join()
- if self.stderr:
- stderr_thread.join()
-
- # All data exchanged. Translate lists into strings.
- if stdout is not None:
- stdout = stdout[0]
- if stderr is not None:
- stderr = stderr[0]
-
- # Translate newlines, if requested. We cannot let the file
- # object do the translation: It is based on stdio, which is
- # impossible to combine with select (unless forcing no
- # buffering).
- if self.universal_newlines and hasattr(file, 'newlines'):
- if stdout:
- stdout = self._translate_newlines(stdout)
- if stderr:
- stderr = self._translate_newlines(stderr)
-
- self.wait()
- return (stdout, stderr)
-
- else:
- #
- # POSIX methods
- #
- def _get_handles(self, stdin, stdout, stderr):
- """Construct and return tupel with IO objects:
- p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
- """
- p2cread, p2cwrite = None, None
- c2pread, c2pwrite = None, None
- errread, errwrite = None, None
-
- if stdin is None:
- pass
- elif stdin == PIPE:
- p2cread, p2cwrite = os.pipe()
- elif isinstance(stdin, int):
- p2cread = stdin
- else:
- # Assuming file-like object
- p2cread = stdin.fileno()
-
- if stdout is None:
- pass
- elif stdout == PIPE:
- c2pread, c2pwrite = os.pipe()
- elif isinstance(stdout, int):
- c2pwrite = stdout
- else:
- # Assuming file-like object
- c2pwrite = stdout.fileno()
-
- if stderr is None:
- pass
- elif stderr == PIPE:
- errread, errwrite = os.pipe()
- elif stderr == STDOUT:
- errwrite = c2pwrite
- elif isinstance(stderr, int):
- errwrite = stderr
- else:
- # Assuming file-like object
- errwrite = stderr.fileno()
-
- return (p2cread, p2cwrite,
- c2pread, c2pwrite,
- errread, errwrite)
-
-
- def _set_cloexec_flag(self, fd):
- try:
- cloexec_flag = fcntl.FD_CLOEXEC
- except AttributeError:
- cloexec_flag = 1
-
- old = fcntl.fcntl(fd, fcntl.F_GETFD)
- fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
-
-
- def _close_fds(self, but):
- for i in xrange(3, MAXFD):
- if i == but:
- continue
- try:
- os.close(i)
- except:
- pass
-
-
- def _execute_child(self, args, executable, preexec_fn, close_fds,
- cwd, env, universal_newlines,
- startupinfo, creationflags, shell,
- p2cread, p2cwrite,
- c2pread, c2pwrite,
- errread, errwrite):
- """Execute program (POSIX version)"""
-
- if isinstance(args, types.StringTypes):
- args = [args]
- else:
- args = list(args)
-
- if shell:
- if sys.platform == 'plan9':
- args = ["/bin/ape/sh", "-c"] + args
- else:
- args = ["/bin/sh", "-c"] + args
-
- if executable is None:
- executable = args[0]
-
- # For transferring possible exec failure from child to parent
- # The first char specifies the exception type: 0 means
- # OSError, 1 means some other error.
- errpipe_read, errpipe_write = os.pipe()
- self._set_cloexec_flag(errpipe_write)
-
- self.pid = os.fork()
- self._child_created = True
- if self.pid == 0:
- # Child
- try:
- # Close parent's pipe ends
- if p2cwrite:
- os.close(p2cwrite)
- if c2pread:
- os.close(c2pread)
- if errread:
- os.close(errread)
- os.close(errpipe_read)
-
- # Dup fds for child
- if p2cread:
- os.dup2(p2cread, 0)
- if c2pwrite:
- os.dup2(c2pwrite, 1)
- if errwrite:
- os.dup2(errwrite, 2)
-
- # Close pipe fds. Make sure we don't close the same
- # fd more than once, or standard fds.
- if p2cread and p2cread not in (0,):
- os.close(p2cread)
- if c2pwrite and c2pwrite not in (p2cread, 1):
- os.close(c2pwrite)
- if errwrite and errwrite not in (p2cread, c2pwrite, 2):
- os.close(errwrite)
-
- # Close all other fds, if asked for
- if close_fds:
- self._close_fds(but=errpipe_write)
-
- if cwd is not None:
- os.chdir(cwd)
-
- if preexec_fn:
- apply(preexec_fn)
-
- if env is None:
- os.execvp(executable, args)
- else:
- os.execvpe(executable, args, env)
-
- except:
- exc_type, exc_value, tb = sys.exc_info()
- # Save the traceback and attach it to the exception object
- exc_lines = traceback.format_exception(exc_type,
- exc_value,
- tb)
- exc_value.child_traceback = ''.join(exc_lines)
- os.write(errpipe_write, pickle.dumps(exc_value))
-
- # This exitcode won't be reported to applications, so it
- # really doesn't matter what we return.
- os._exit(255)
-
- # Parent
- os.close(errpipe_write)
- if p2cread and p2cwrite:
- os.close(p2cread)
- if c2pwrite and c2pread:
- os.close(c2pwrite)
- if errwrite and errread:
- os.close(errwrite)
-
- # Wait for exec to fail or succeed; possibly raising exception
- data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
- os.close(errpipe_read)
- if data != "":
- os.waitpid(self.pid, 0)
- child_exception = pickle.loads(data)
- raise child_exception
-
-
- def _handle_exitstatus(self, sts):
- if os.WIFSIGNALED(sts):
- self.returncode = -os.WTERMSIG(sts)
- elif os.WIFEXITED(sts):
- self.returncode = os.WEXITSTATUS(sts)
- else:
- # Should never happen
- raise RuntimeError("Unknown child exit status!")
-
-
- def poll(self, _deadstate=None):
- """Check if child process has terminated. Returns returncode
- attribute."""
- if self.returncode is None:
- try:
- pid, sts = os.waitpid(self.pid, os.WNOHANG)
- if pid == self.pid:
- self._handle_exitstatus(sts)
- except os.error:
- if _deadstate is not None:
- self.returncode = _deadstate
- return self.returncode
-
-
- def wait(self):
- """Wait for child process to terminate. Returns returncode
- attribute."""
- if self.returncode is None:
- pid, sts = os.waitpid(self.pid, 0)
- self._handle_exitstatus(sts)
- return self.returncode
-
-
- def _communicate(self, input):
- read_set = []
- write_set = []
- stdout = None # Return
- stderr = None # Return
-
- if self.stdin:
- # Flush stdio buffer. This might block, if the user has
- # been writing to .stdin in an uncontrolled fashion.
- self.stdin.flush()
- if input:
- write_set.append(self.stdin)
- else:
- self.stdin.close()
- if self.stdout:
- read_set.append(self.stdout)
- stdout = []
- if self.stderr:
- read_set.append(self.stderr)
- stderr = []
-
- input_offset = 0
- while read_set or write_set:
- rlist, wlist, xlist = select.select(read_set, write_set, [])
-
- if self.stdin in wlist:
- # When select has indicated that the file is writable,
- # we can write up to PIPE_BUF bytes without risk
- # blocking. POSIX defines PIPE_BUF >= 512
- bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
- input_offset += bytes_written
- if input_offset >= len(input):
- self.stdin.close()
- write_set.remove(self.stdin)
-
- if self.stdout in rlist:
- data = os.read(self.stdout.fileno(), 1024)
- if data == "":
- self.stdout.close()
- read_set.remove(self.stdout)
- stdout.append(data)
-
- if self.stderr in rlist:
- data = os.read(self.stderr.fileno(), 1024)
- if data == "":
- self.stderr.close()
- read_set.remove(self.stderr)
- stderr.append(data)
-
- # All data exchanged. Translate lists into strings.
- if stdout is not None:
- stdout = ''.join(stdout)
- if stderr is not None:
- stderr = ''.join(stderr)
-
- # Translate newlines, if requested. We cannot let the file
- # object do the translation: It is based on stdio, which is
- # impossible to combine with select (unless forcing no
- # buffering).
- if self.universal_newlines and hasattr(file, 'newlines'):
- if stdout:
- stdout = self._translate_newlines(stdout)
- if stderr:
- stderr = self._translate_newlines(stderr)
-
- self.wait()
- return (stdout, stderr)
-
-
-def _demo_posix():
- #
- # Example 1: Simple redirection: Get process list
- #
- plist = Popen(["ps"], stdout=PIPE).communicate()[0]
- print "Process list:"
- print plist
-
- #
- # Example 2: Change uid before executing child
- #
- if os.getuid() == 0:
- p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
- p.wait()
-
- #
- # Example 3: Connecting several subprocesses
- #
- print "Looking for 'hda'..."
- p1 = Popen(["dmesg"], stdout=PIPE)
- p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
- print repr(p2.communicate()[0])
-
- #
- # Example 4: Catch execution error
- #
- print
- print "Trying a weird file..."
- try:
- print Popen(["/this/path/does/not/exist"]).communicate()
- except OSError, e:
- if e.errno == errno.ENOENT:
- print "The file didn't exist. I thought so..."
- print "Child traceback:"
- print e.child_traceback
- else:
- print "Error", e.errno
- else:
- print >>sys.stderr, "Gosh. No error."
-
-
-def _demo_windows():
- #
- # Example 1: Connecting several subprocesses
- #
- print "Looking for 'PROMPT' in set output..."
- p1 = Popen("set", stdout=PIPE, shell=True)
- p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
- print repr(p2.communicate()[0])
-
- #
- # Example 2: Simple execution of program
- #
- print "Executing calc..."
- p = Popen("calc")
- p.wait()
-
-
-if __name__ == "__main__":
- if mswindows:
- _demo_windows()
- else:
- _demo_posix()
diff --git a/sys/lib/python/sunau.py b/sys/lib/python/sunau.py
deleted file mode 100644
index 0a402955b..000000000
--- a/sys/lib/python/sunau.py
+++ /dev/null
@@ -1,474 +0,0 @@
-"""Stuff to parse Sun and NeXT audio files.
-
-An audio file consists of a header followed by the data. The structure
-of the header is as follows.
-
- +---------------+
- | magic word |
- +---------------+
- | header size |
- +---------------+
- | data size |
- +---------------+
- | encoding |
- +---------------+
- | sample rate |
- +---------------+
- | # of channels |
- +---------------+
- | info |
- | |
- +---------------+
-
-The magic word consists of the 4 characters '.snd'. Apart from the
-info field, all header fields are 4 bytes in size. They are all
-32-bit unsigned integers encoded in big-endian byte order.
-
-The header size really gives the start of the data.
-The data size is the physical size of the data. From the other
-parameters the number of frames can be calculated.
-The encoding gives the way in which audio samples are encoded.
-Possible values are listed below.
-The info field currently consists of an ASCII string giving a
-human-readable description of the audio file. The info field is
-padded with NUL bytes to the header size.
-
-Usage.
-
-Reading audio files:
- f = sunau.open(file, 'r')
-where file is either the name of a file or an open file pointer.
-The open file pointer must have methods read(), seek(), and close().
-When the setpos() and rewind() methods are not used, the seek()
-method is not necessary.
-
-This returns an instance of a class with the following public methods:
- getnchannels() -- returns number of audio channels (1 for
- mono, 2 for stereo)
- getsampwidth() -- returns sample width in bytes
- getframerate() -- returns sampling frequency
- getnframes() -- returns number of audio frames
- getcomptype() -- returns compression type ('NONE' or 'ULAW')
- getcompname() -- returns human-readable version of
- compression type ('not compressed' matches 'NONE')
- getparams() -- returns a tuple consisting of all of the
- above in the above order
- getmarkers() -- returns None (for compatibility with the
- aifc module)
- getmark(id) -- raises an error since the mark does not
- exist (for compatibility with the aifc module)
- readframes(n) -- returns at most n frames of audio
- rewind() -- rewind to the beginning of the audio stream
- setpos(pos) -- seek to the specified position
- tell() -- return the current position
- close() -- close the instance (make it unusable)
-The position returned by tell() and the position given to setpos()
-are compatible and have nothing to do with the actual position in the
-file.
-The close() method is called automatically when the class instance
-is destroyed.
-
-Writing audio files:
- f = sunau.open(file, 'w')
-where file is either the name of a file or an open file pointer.
-The open file pointer must have methods write(), tell(), seek(), and
-close().
-
-This returns an instance of a class with the following public methods:
- setnchannels(n) -- set the number of channels
- setsampwidth(n) -- set the sample width
- setframerate(n) -- set the frame rate
- setnframes(n) -- set the number of frames
- setcomptype(type, name)
- -- set the compression type and the
- human-readable compression type
- setparams(tuple)-- set all parameters at once
- tell() -- return current position in output file
- writeframesraw(data)
- -- write audio frames without pathing up the
- file header
- writeframes(data)
- -- write audio frames and patch up the file header
- close() -- patch up the file header and close the
- output file
-You should set the parameters before the first writeframesraw or
-writeframes. The total number of frames does not need to be set,
-but when it is set to the correct value, the header does not have to
-be patched up.
-It is best to first set all parameters, perhaps possibly the
-compression type, and then write audio frames using writeframesraw.
-When all frames have been written, either call writeframes('') or
-close() to patch up the sizes in the header.
-The close() method is called automatically when the class instance
-is destroyed.
-"""
-
-# from <multimedia/audio_filehdr.h>
-AUDIO_FILE_MAGIC = 0x2e736e64
-AUDIO_FILE_ENCODING_MULAW_8 = 1
-AUDIO_FILE_ENCODING_LINEAR_8 = 2
-AUDIO_FILE_ENCODING_LINEAR_16 = 3
-AUDIO_FILE_ENCODING_LINEAR_24 = 4
-AUDIO_FILE_ENCODING_LINEAR_32 = 5
-AUDIO_FILE_ENCODING_FLOAT = 6
-AUDIO_FILE_ENCODING_DOUBLE = 7
-AUDIO_FILE_ENCODING_ADPCM_G721 = 23
-AUDIO_FILE_ENCODING_ADPCM_G722 = 24
-AUDIO_FILE_ENCODING_ADPCM_G723_3 = 25
-AUDIO_FILE_ENCODING_ADPCM_G723_5 = 26
-AUDIO_FILE_ENCODING_ALAW_8 = 27
-
-# from <multimedia/audio_hdr.h>
-AUDIO_UNKNOWN_SIZE = 0xFFFFFFFFL # ((unsigned)(~0))
-
-_simple_encodings = [AUDIO_FILE_ENCODING_MULAW_8,
- AUDIO_FILE_ENCODING_LINEAR_8,
- AUDIO_FILE_ENCODING_LINEAR_16,
- AUDIO_FILE_ENCODING_LINEAR_24,
- AUDIO_FILE_ENCODING_LINEAR_32,
- AUDIO_FILE_ENCODING_ALAW_8]
-
-class Error(Exception):
- pass
-
-def _read_u32(file):
- x = 0L
- for i in range(4):
- byte = file.read(1)
- if byte == '':
- raise EOFError
- x = x*256 + ord(byte)
- return x
-
-def _write_u32(file, x):
- data = []
- for i in range(4):
- d, m = divmod(x, 256)
- data.insert(0, m)
- x = d
- for i in range(4):
- file.write(chr(int(data[i])))
-
-class Au_read:
-
- def __init__(self, f):
- if type(f) == type(''):
- import __builtin__
- f = __builtin__.open(f, 'rb')
- self.initfp(f)
-
- def __del__(self):
- if self._file:
- self.close()
-
- def initfp(self, file):
- self._file = file
- self._soundpos = 0
- magic = int(_read_u32(file))
- if magic != AUDIO_FILE_MAGIC:
- raise Error, 'bad magic number'
- self._hdr_size = int(_read_u32(file))
- if self._hdr_size < 24:
- raise Error, 'header size too small'
- if self._hdr_size > 100:
- raise Error, 'header size ridiculously large'
- self._data_size = _read_u32(file)
- if self._data_size != AUDIO_UNKNOWN_SIZE:
- self._data_size = int(self._data_size)
- self._encoding = int(_read_u32(file))
- if self._encoding not in _simple_encodings:
- raise Error, 'encoding not (yet) supported'
- if self._encoding in (AUDIO_FILE_ENCODING_MULAW_8,
- AUDIO_FILE_ENCODING_ALAW_8):
- self._sampwidth = 2
- self._framesize = 1
- elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_8:
- self._framesize = self._sampwidth = 1
- elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_16:
- self._framesize = self._sampwidth = 2
- elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_24:
- self._framesize = self._sampwidth = 3
- elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_32:
- self._framesize = self._sampwidth = 4
- else:
- raise Error, 'unknown encoding'
- self._framerate = int(_read_u32(file))
- self._nchannels = int(_read_u32(file))
- self._framesize = self._framesize * self._nchannels
- if self._hdr_size > 24:
- self._info = file.read(self._hdr_size - 24)
- for i in range(len(self._info)):
- if self._info[i] == '\0':
- self._info = self._info[:i]
- break
- else:
- self._info = ''
-
- def getfp(self):
- return self._file
-
- def getnchannels(self):
- return self._nchannels
-
- def getsampwidth(self):
- return self._sampwidth
-
- def getframerate(self):
- return self._framerate
-
- def getnframes(self):
- if self._data_size == AUDIO_UNKNOWN_SIZE:
- return AUDIO_UNKNOWN_SIZE
- if self._encoding in _simple_encodings:
- return self._data_size / self._framesize
- return 0 # XXX--must do some arithmetic here
-
- def getcomptype(self):
- if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
- return 'ULAW'
- elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
- return 'ALAW'
- else:
- return 'NONE'
-
- def getcompname(self):
- if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
- return 'CCITT G.711 u-law'
- elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
- return 'CCITT G.711 A-law'
- else:
- return 'not compressed'
-
- def getparams(self):
- return self.getnchannels(), self.getsampwidth(), \
- self.getframerate(), self.getnframes(), \
- self.getcomptype(), self.getcompname()
-
- def getmarkers(self):
- return None
-
- def getmark(self, id):
- raise Error, 'no marks'
-
- def readframes(self, nframes):
- if self._encoding in _simple_encodings:
- if nframes == AUDIO_UNKNOWN_SIZE:
- data = self._file.read()
- else:
- data = self._file.read(nframes * self._framesize * self._nchannels)
- if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
- import audioop
- data = audioop.ulaw2lin(data, self._sampwidth)
- return data
- return None # XXX--not implemented yet
-
- def rewind(self):
- self._soundpos = 0
- self._file.seek(self._hdr_size)
-
- def tell(self):
- return self._soundpos
-
- def setpos(self, pos):
- if pos < 0 or pos > self.getnframes():
- raise Error, 'position not in range'
- self._file.seek(pos * self._framesize + self._hdr_size)
- self._soundpos = pos
-
- def close(self):
- self._file = None
-
-class Au_write:
-
- def __init__(self, f):
- if type(f) == type(''):
- import __builtin__
- f = __builtin__.open(f, 'wb')
- self.initfp(f)
-
- def __del__(self):
- if self._file:
- self.close()
-
- def initfp(self, file):
- self._file = file
- self._framerate = 0
- self._nchannels = 0
- self._sampwidth = 0
- self._framesize = 0
- self._nframes = AUDIO_UNKNOWN_SIZE
- self._nframeswritten = 0
- self._datawritten = 0
- self._datalength = 0
- self._info = ''
- self._comptype = 'ULAW' # default is U-law
-
- def setnchannels(self, nchannels):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- if nchannels not in (1, 2, 4):
- raise Error, 'only 1, 2, or 4 channels supported'
- self._nchannels = nchannels
-
- def getnchannels(self):
- if not self._nchannels:
- raise Error, 'number of channels not set'
- return self._nchannels
-
- def setsampwidth(self, sampwidth):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- if sampwidth not in (1, 2, 4):
- raise Error, 'bad sample width'
- self._sampwidth = sampwidth
-
- def getsampwidth(self):
- if not self._framerate:
- raise Error, 'sample width not specified'
- return self._sampwidth
-
- def setframerate(self, framerate):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- self._framerate = framerate
-
- def getframerate(self):
- if not self._framerate:
- raise Error, 'frame rate not set'
- return self._framerate
-
- def setnframes(self, nframes):
- if self._nframeswritten:
- raise Error, 'cannot change parameters after starting to write'
- if nframes < 0:
- raise Error, '# of frames cannot be negative'
- self._nframes = nframes
-
- def getnframes(self):
- return self._nframeswritten
-
- def setcomptype(self, type, name):
- if type in ('NONE', 'ULAW'):
- self._comptype = type
- else:
- raise Error, 'unknown compression type'
-
- def getcomptype(self):
- return self._comptype
-
- def getcompname(self):
- if self._comptype == 'ULAW':
- return 'CCITT G.711 u-law'
- elif self._comptype == 'ALAW':
- return 'CCITT G.711 A-law'
- else:
- return 'not compressed'
-
- def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
- self.setnchannels(nchannels)
- self.setsampwidth(sampwidth)
- self.setframerate(framerate)
- self.setnframes(nframes)
- self.setcomptype(comptype, compname)
-
- def getparams(self):
- return self.getnchannels(), self.getsampwidth(), \
- self.getframerate(), self.getnframes(), \
- self.getcomptype(), self.getcompname()
-
- def tell(self):
- return self._nframeswritten
-
- def writeframesraw(self, data):
- self._ensure_header_written()
- nframes = len(data) / self._framesize
- if self._comptype == 'ULAW':
- import audioop
- data = audioop.lin2ulaw(data, self._sampwidth)
- self._file.write(data)
- self._nframeswritten = self._nframeswritten + nframes
- self._datawritten = self._datawritten + len(data)
-
- def writeframes(self, data):
- self.writeframesraw(data)
- if self._nframeswritten != self._nframes or \
- self._datalength != self._datawritten:
- self._patchheader()
-
- def close(self):
- self._ensure_header_written()
- if self._nframeswritten != self._nframes or \
- self._datalength != self._datawritten:
- self._patchheader()
- self._file.flush()
- self._file = None
-
- #
- # private methods
- #
-
- def _ensure_header_written(self):
- if not self._nframeswritten:
- if not self._nchannels:
- raise Error, '# of channels not specified'
- if not self._sampwidth:
- raise Error, 'sample width not specified'
- if not self._framerate:
- raise Error, 'frame rate not specified'
- self._write_header()
-
- def _write_header(self):
- if self._comptype == 'NONE':
- if self._sampwidth == 1:
- encoding = AUDIO_FILE_ENCODING_LINEAR_8
- self._framesize = 1
- elif self._sampwidth == 2:
- encoding = AUDIO_FILE_ENCODING_LINEAR_16
- self._framesize = 2
- elif self._sampwidth == 4:
- encoding = AUDIO_FILE_ENCODING_LINEAR_32
- self._framesize = 4
- else:
- raise Error, 'internal error'
- elif self._comptype == 'ULAW':
- encoding = AUDIO_FILE_ENCODING_MULAW_8
- self._framesize = 1
- else:
- raise Error, 'internal error'
- self._framesize = self._framesize * self._nchannels
- _write_u32(self._file, AUDIO_FILE_MAGIC)
- header_size = 25 + len(self._info)
- header_size = (header_size + 7) & ~7
- _write_u32(self._file, header_size)
- if self._nframes == AUDIO_UNKNOWN_SIZE:
- length = AUDIO_UNKNOWN_SIZE
- else:
- length = self._nframes * self._framesize
- _write_u32(self._file, length)
- self._datalength = length
- _write_u32(self._file, encoding)
- _write_u32(self._file, self._framerate)
- _write_u32(self._file, self._nchannels)
- self._file.write(self._info)
- self._file.write('\0'*(header_size - len(self._info) - 24))
-
- def _patchheader(self):
- self._file.seek(8)
- _write_u32(self._file, self._datawritten)
- self._datalength = self._datawritten
- self._file.seek(0, 2)
-
-def open(f, mode=None):
- if mode is None:
- if hasattr(f, 'mode'):
- mode = f.mode
- else:
- mode = 'rb'
- if mode in ('r', 'rb'):
- return Au_read(f)
- elif mode in ('w', 'wb'):
- return Au_write(f)
- else:
- raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
-
-openfp = open
diff --git a/sys/lib/python/sunaudio.py b/sys/lib/python/sunaudio.py
deleted file mode 100644
index 3b0ee2793..000000000
--- a/sys/lib/python/sunaudio.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Interpret sun audio headers."""
-
-MAGIC = '.snd'
-
-class error(Exception):
- pass
-
-
-def get_long_be(s):
- """Convert a 4-char value to integer."""
- return (ord(s[0])<<24) | (ord(s[1])<<16) | (ord(s[2])<<8) | ord(s[3])
-
-
-def gethdr(fp):
- """Read a sound header from an open file."""
- if fp.read(4) != MAGIC:
- raise error, 'gethdr: bad magic word'
- hdr_size = get_long_be(fp.read(4))
- data_size = get_long_be(fp.read(4))
- encoding = get_long_be(fp.read(4))
- sample_rate = get_long_be(fp.read(4))
- channels = get_long_be(fp.read(4))
- excess = hdr_size - 24
- if excess < 0:
- raise error, 'gethdr: bad hdr_size'
- if excess > 0:
- info = fp.read(excess)
- else:
- info = ''
- return (data_size, encoding, sample_rate, channels, info)
-
-
-def printhdr(file):
- """Read and print the sound header of a named file."""
- hdr = gethdr(open(file, 'r'))
- data_size, encoding, sample_rate, channels, info = hdr
- while info[-1:] == '\0':
- info = info[:-1]
- print 'File name: ', file
- print 'Data size: ', data_size
- print 'Encoding: ', encoding
- print 'Sample rate:', sample_rate
- print 'Channels: ', channels
- print 'Info: ', repr(info)
diff --git a/sys/lib/python/symbol.py b/sys/lib/python/symbol.py
deleted file mode 100755
index c65013813..000000000
--- a/sys/lib/python/symbol.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#! /usr/bin/env python
-
-"""Non-terminal symbols of Python grammar (from "graminit.h")."""
-
-# This file is automatically generated; please don't muck it up!
-#
-# To update the symbols in this file, 'cd' to the top directory of
-# the python source tree after building the interpreter and run:
-#
-# python Lib/symbol.py
-
-#--start constants--
-single_input = 256
-file_input = 257
-eval_input = 258
-decorator = 259
-decorators = 260
-funcdef = 261
-parameters = 262
-varargslist = 263
-fpdef = 264
-fplist = 265
-stmt = 266
-simple_stmt = 267
-small_stmt = 268
-expr_stmt = 269
-augassign = 270
-print_stmt = 271
-del_stmt = 272
-pass_stmt = 273
-flow_stmt = 274
-break_stmt = 275
-continue_stmt = 276
-return_stmt = 277
-yield_stmt = 278
-raise_stmt = 279
-import_stmt = 280
-import_name = 281
-import_from = 282
-import_as_name = 283
-dotted_as_name = 284
-import_as_names = 285
-dotted_as_names = 286
-dotted_name = 287
-global_stmt = 288
-exec_stmt = 289
-assert_stmt = 290
-compound_stmt = 291
-if_stmt = 292
-while_stmt = 293
-for_stmt = 294
-try_stmt = 295
-with_stmt = 296
-with_var = 297
-except_clause = 298
-suite = 299
-testlist_safe = 300
-old_test = 301
-old_lambdef = 302
-test = 303
-or_test = 304
-and_test = 305
-not_test = 306
-comparison = 307
-comp_op = 308
-expr = 309
-xor_expr = 310
-and_expr = 311
-shift_expr = 312
-arith_expr = 313
-term = 314
-factor = 315
-power = 316
-atom = 317
-listmaker = 318
-testlist_gexp = 319
-lambdef = 320
-trailer = 321
-subscriptlist = 322
-subscript = 323
-sliceop = 324
-exprlist = 325
-testlist = 326
-dictmaker = 327
-classdef = 328
-arglist = 329
-argument = 330
-list_iter = 331
-list_for = 332
-list_if = 333
-gen_iter = 334
-gen_for = 335
-gen_if = 336
-testlist1 = 337
-encoding_decl = 338
-yield_expr = 339
-#--end constants--
-
-sym_name = {}
-for _name, _value in globals().items():
- if type(_value) is type(0):
- sym_name[_value] = _name
-
-
-def main():
- import sys
- import token
- if len(sys.argv) == 1:
- sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
- token.main()
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/symtable.py b/sys/lib/python/symtable.py
deleted file mode 100644
index 3f1332e8e..000000000
--- a/sys/lib/python/symtable.py
+++ /dev/null
@@ -1,252 +0,0 @@
-"""Interface to the compiler's internal symbol tables"""
-
-import _symtable
-from _symtable import USE, DEF_GLOBAL, DEF_LOCAL, DEF_PARAM, \
- DEF_STAR, DEF_DOUBLESTAR, DEF_INTUPLE, DEF_FREE, \
- DEF_FREE_GLOBAL, DEF_FREE_CLASS, DEF_IMPORT, DEF_BOUND, \
- OPT_IMPORT_STAR, OPT_EXEC, OPT_BARE_EXEC
-
-import weakref
-
-__all__ = ["symtable", "SymbolTable", "newSymbolTable", "Class",
- "Function", "Symbol"]
-
-def symtable(code, filename, compile_type):
- raw = _symtable.symtable(code, filename, compile_type)
- for top in raw.itervalues():
- if top.name == 'top':
- break
- return newSymbolTable(top, filename)
-
-class SymbolTableFactory:
- def __init__(self):
- self.__memo = weakref.WeakValueDictionary()
-
- def new(self, table, filename):
- if table.type == _symtable.TYPE_FUNCTION:
- return Function(table, filename)
- if table.type == _symtable.TYPE_CLASS:
- return Class(table, filename)
- return SymbolTable(table, filename)
-
- def __call__(self, table, filename):
- key = table, filename
- obj = self.__memo.get(key, None)
- if obj is None:
- obj = self.__memo[key] = self.new(table, filename)
- return obj
-
-newSymbolTable = SymbolTableFactory()
-
-def is_free(flags):
- if (flags & (USE | DEF_FREE)) \
- and (flags & (DEF_LOCAL | DEF_PARAM | DEF_GLOBAL)):
- return True
- if flags & DEF_FREE_CLASS:
- return True
- return False
-
-class SymbolTable:
- def __init__(self, raw_table, filename):
- self._table = raw_table
- self._filename = filename
- self._symbols = {}
-
- def __repr__(self):
- if self.__class__ == SymbolTable:
- kind = ""
- else:
- kind = "%s " % self.__class__.__name__
-
- if self._table.name == "global":
- return "<%sSymbolTable for module %s>" % (kind, self._filename)
- else:
- return "<%sSymbolTable for %s in %s>" % (kind, self._table.name,
- self._filename)
-
- def get_type(self):
- if self._table.type == _symtable.TYPE_MODULE:
- return "module"
- if self._table.type == _symtable.TYPE_FUNCTION:
- return "function"
- if self._table.type == _symtable.TYPE_CLASS:
- return "class"
- assert self._table.type in (1, 2, 3), \
- "unexpected type: %s" % self._table.type
-
- def get_id(self):
- return self._table.id
-
- def get_name(self):
- return self._table.name
-
- def get_lineno(self):
- return self._table.lineno
-
- def is_optimized(self):
- return bool(self._table.type == _symtable.TYPE_FUNCTION
- and not self._table.optimized)
-
- def is_nested(self):
- return bool(self._table.nested)
-
- def has_children(self):
- return bool(self._table.children)
-
- def has_exec(self):
- """Return true if the scope uses exec"""
- return bool(self._table.optimized & (OPT_EXEC | OPT_BARE_EXEC))
-
- def has_import_star(self):
- """Return true if the scope uses import *"""
- return bool(self._table.optimized & OPT_IMPORT_STAR)
-
- def get_identifiers(self):
- return self._table.symbols.keys()
-
- def lookup(self, name):
- sym = self._symbols.get(name)
- if sym is None:
- flags = self._table.symbols[name]
- namespaces = self.__check_children(name)
- sym = self._symbols[name] = Symbol(name, flags, namespaces)
- return sym
-
- def get_symbols(self):
- return [self.lookup(ident) for ident in self.get_identifiers()]
-
- def __check_children(self, name):
- return [newSymbolTable(st, self._filename)
- for st in self._table.children
- if st.name == name]
-
- def get_children(self):
- return [newSymbolTable(st, self._filename)
- for st in self._table.children]
-
-class Function(SymbolTable):
-
- # Default values for instance variables
- __params = None
- __locals = None
- __frees = None
- __globals = None
-
- def __idents_matching(self, test_func):
- return tuple([ident for ident in self.get_identifiers()
- if test_func(self._table.symbols[ident])])
-
- def get_parameters(self):
- if self.__params is None:
- self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
- return self.__params
-
- def get_locals(self):
- if self.__locals is None:
- self.__locals = self.__idents_matching(lambda x:x & DEF_BOUND)
- return self.__locals
-
- def get_globals(self):
- if self.__globals is None:
- glob = DEF_GLOBAL | DEF_FREE_GLOBAL
- self.__globals = self.__idents_matching(lambda x:x & glob)
- return self.__globals
-
- def get_frees(self):
- if self.__frees is None:
- self.__frees = self.__idents_matching(is_free)
- return self.__frees
-
-class Class(SymbolTable):
-
- __methods = None
-
- def get_methods(self):
- if self.__methods is None:
- d = {}
- for st in self._table.children:
- d[st.name] = 1
- self.__methods = tuple(d)
- return self.__methods
-
-class Symbol:
- def __init__(self, name, flags, namespaces=None):
- self.__name = name
- self.__flags = flags
- self.__namespaces = namespaces or ()
-
- def __repr__(self):
- return "<symbol '%s'>" % self.__name
-
- def get_name(self):
- return self.__name
-
- def is_referenced(self):
- return bool(self.__flags & _symtable.USE)
-
- def is_parameter(self):
- return bool(self.__flags & DEF_PARAM)
-
- def is_global(self):
- return bool((self.__flags & DEF_GLOBAL)
- or (self.__flags & DEF_FREE_GLOBAL))
-
- def is_vararg(self):
- return bool(self.__flags & DEF_STAR)
-
- def is_keywordarg(self):
- return bool(self.__flags & DEF_DOUBLESTAR)
-
- def is_local(self):
- return bool(self.__flags & DEF_BOUND)
-
- def is_free(self):
- if (self.__flags & (USE | DEF_FREE)) \
- and (self.__flags & (DEF_LOCAL | DEF_PARAM | DEF_GLOBAL)):
- return True
- if self.__flags & DEF_FREE_CLASS:
- return True
- return False
-
- def is_imported(self):
- return bool(self.__flags & DEF_IMPORT)
-
- def is_assigned(self):
- return bool(self.__flags & DEF_LOCAL)
-
- def is_in_tuple(self):
- return bool(self.__flags & DEF_INTUPLE)
-
- def is_namespace(self):
- """Returns true if name binding introduces new namespace.
-
- If the name is used as the target of a function or class
- statement, this will be true.
-
- Note that a single name can be bound to multiple objects. If
- is_namespace() is true, the name may also be bound to other
- objects, like an int or list, that does not introduce a new
- namespace.
- """
- return bool(self.__namespaces)
-
- def get_namespaces(self):
- """Return a list of namespaces bound to this name"""
- return self.__namespaces
-
- def get_namespace(self):
- """Returns the single namespace bound to this name.
-
- Raises ValueError if the name is bound to multiple namespaces.
- """
- if len(self.__namespaces) != 1:
- raise ValueError, "name is bound to multiple namespaces"
- return self.__namespaces[0]
-
-if __name__ == "__main__":
- import os, sys
- src = open(sys.argv[0]).read()
- mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
- for ident in mod.get_identifiers():
- info = mod.lookup(ident)
- print info, info.is_local(), info.is_namespace()
diff --git a/sys/lib/python/tabnanny.py b/sys/lib/python/tabnanny.py
deleted file mode 100755
index 76665ac91..000000000
--- a/sys/lib/python/tabnanny.py
+++ /dev/null
@@ -1,329 +0,0 @@
-#! /usr/bin/env python
-
-"""The Tab Nanny despises ambiguous indentation. She knows no mercy.
-
-tabnanny -- Detection of ambiguous indentation
-
-For the time being this module is intended to be called as a script.
-However it is possible to import it into an IDE and use the function
-check() described below.
-
-Warning: The API provided by this module is likely to change in future
-releases; such changes may not be backward compatible.
-"""
-
-# Released to the public domain, by Tim Peters, 15 April 1998.
-
-# XXX Note: this is now a standard library module.
-# XXX The API needs to undergo changes however; the current code is too
-# XXX script-like. This will be addressed later.
-
-__version__ = "6"
-
-import os
-import sys
-import getopt
-import tokenize
-if not hasattr(tokenize, 'NL'):
- raise ValueError("tokenize.NL doesn't exist -- tokenize module too old")
-
-__all__ = ["check", "NannyNag", "process_tokens"]
-
-verbose = 0
-filename_only = 0
-
-def errprint(*args):
- sep = ""
- for arg in args:
- sys.stderr.write(sep + str(arg))
- sep = " "
- sys.stderr.write("\n")
-
-def main():
- global verbose, filename_only
- try:
- opts, args = getopt.getopt(sys.argv[1:], "qv")
- except getopt.error, msg:
- errprint(msg)
- return
- for o, a in opts:
- if o == '-q':
- filename_only = filename_only + 1
- if o == '-v':
- verbose = verbose + 1
- if not args:
- errprint("Usage:", sys.argv[0], "[-v] file_or_directory ...")
- return
- for arg in args:
- check(arg)
-
-class NannyNag(Exception):
- """
- Raised by tokeneater() if detecting an ambiguous indent.
- Captured and handled in check().
- """
- def __init__(self, lineno, msg, line):
- self.lineno, self.msg, self.line = lineno, msg, line
- def get_lineno(self):
- return self.lineno
- def get_msg(self):
- return self.msg
- def get_line(self):
- return self.line
-
-def check(file):
- """check(file_or_dir)
-
- If file_or_dir is a directory and not a symbolic link, then recursively
- descend the directory tree named by file_or_dir, checking all .py files
- along the way. If file_or_dir is an ordinary Python source file, it is
- checked for whitespace related problems. The diagnostic messages are
- written to standard output using the print statement.
- """
-
- if os.path.isdir(file) and not os.path.islink(file):
- if verbose:
- print "%r: listing directory" % (file,)
- names = os.listdir(file)
- for name in names:
- fullname = os.path.join(file, name)
- if (os.path.isdir(fullname) and
- not os.path.islink(fullname) or
- os.path.normcase(name[-3:]) == ".py"):
- check(fullname)
- return
-
- try:
- f = open(file)
- except IOError, msg:
- errprint("%r: I/O Error: %s" % (file, msg))
- return
-
- if verbose > 1:
- print "checking %r ..." % file
-
- try:
- process_tokens(tokenize.generate_tokens(f.readline))
-
- except tokenize.TokenError, msg:
- errprint("%r: Token Error: %s" % (file, msg))
- return
-
- except IndentationError, msg:
- errprint("%r: Indentation Error: %s" % (file, msg))
- return
-
- except NannyNag, nag:
- badline = nag.get_lineno()
- line = nag.get_line()
- if verbose:
- print "%r: *** Line %d: trouble in tab city! ***" % (file, badline)
- print "offending line: %r" % (line,)
- print nag.get_msg()
- else:
- if ' ' in file: file = '"' + file + '"'
- if filename_only: print file
- else: print file, badline, repr(line)
- return
-
- if verbose:
- print "%r: Clean bill of health." % (file,)
-
-class Whitespace:
- # the characters used for space and tab
- S, T = ' \t'
-
- # members:
- # raw
- # the original string
- # n
- # the number of leading whitespace characters in raw
- # nt
- # the number of tabs in raw[:n]
- # norm
- # the normal form as a pair (count, trailing), where:
- # count
- # a tuple such that raw[:n] contains count[i]
- # instances of S * i + T
- # trailing
- # the number of trailing spaces in raw[:n]
- # It's A Theorem that m.indent_level(t) ==
- # n.indent_level(t) for all t >= 1 iff m.norm == n.norm.
- # is_simple
- # true iff raw[:n] is of the form (T*)(S*)
-
- def __init__(self, ws):
- self.raw = ws
- S, T = Whitespace.S, Whitespace.T
- count = []
- b = n = nt = 0
- for ch in self.raw:
- if ch == S:
- n = n + 1
- b = b + 1
- elif ch == T:
- n = n + 1
- nt = nt + 1
- if b >= len(count):
- count = count + [0] * (b - len(count) + 1)
- count[b] = count[b] + 1
- b = 0
- else:
- break
- self.n = n
- self.nt = nt
- self.norm = tuple(count), b
- self.is_simple = len(count) <= 1
-
- # return length of longest contiguous run of spaces (whether or not
- # preceding a tab)
- def longest_run_of_spaces(self):
- count, trailing = self.norm
- return max(len(count)-1, trailing)
-
- def indent_level(self, tabsize):
- # count, il = self.norm
- # for i in range(len(count)):
- # if count[i]:
- # il = il + (i/tabsize + 1)*tabsize * count[i]
- # return il
-
- # quicker:
- # il = trailing + sum (i/ts + 1)*ts*count[i] =
- # trailing + ts * sum (i/ts + 1)*count[i] =
- # trailing + ts * sum i/ts*count[i] + count[i] =
- # trailing + ts * [(sum i/ts*count[i]) + (sum count[i])] =
- # trailing + ts * [(sum i/ts*count[i]) + num_tabs]
- # and note that i/ts*count[i] is 0 when i < ts
-
- count, trailing = self.norm
- il = 0
- for i in range(tabsize, len(count)):
- il = il + i/tabsize * count[i]
- return trailing + tabsize * (il + self.nt)
-
- # return true iff self.indent_level(t) == other.indent_level(t)
- # for all t >= 1
- def equal(self, other):
- return self.norm == other.norm
-
- # return a list of tuples (ts, i1, i2) such that
- # i1 == self.indent_level(ts) != other.indent_level(ts) == i2.
- # Intended to be used after not self.equal(other) is known, in which
- # case it will return at least one witnessing tab size.
- def not_equal_witness(self, other):
- n = max(self.longest_run_of_spaces(),
- other.longest_run_of_spaces()) + 1
- a = []
- for ts in range(1, n+1):
- if self.indent_level(ts) != other.indent_level(ts):
- a.append( (ts,
- self.indent_level(ts),
- other.indent_level(ts)) )
- return a
-
- # Return True iff self.indent_level(t) < other.indent_level(t)
- # for all t >= 1.
- # The algorithm is due to Vincent Broman.
- # Easy to prove it's correct.
- # XXXpost that.
- # Trivial to prove n is sharp (consider T vs ST).
- # Unknown whether there's a faster general way. I suspected so at
- # first, but no longer.
- # For the special (but common!) case where M and N are both of the
- # form (T*)(S*), M.less(N) iff M.len() < N.len() and
- # M.num_tabs() <= N.num_tabs(). Proof is easy but kinda long-winded.
- # XXXwrite that up.
- # Note that M is of the form (T*)(S*) iff len(M.norm[0]) <= 1.
- def less(self, other):
- if self.n >= other.n:
- return False
- if self.is_simple and other.is_simple:
- return self.nt <= other.nt
- n = max(self.longest_run_of_spaces(),
- other.longest_run_of_spaces()) + 1
- # the self.n >= other.n test already did it for ts=1
- for ts in range(2, n+1):
- if self.indent_level(ts) >= other.indent_level(ts):
- return False
- return True
-
- # return a list of tuples (ts, i1, i2) such that
- # i1 == self.indent_level(ts) >= other.indent_level(ts) == i2.
- # Intended to be used after not self.less(other) is known, in which
- # case it will return at least one witnessing tab size.
- def not_less_witness(self, other):
- n = max(self.longest_run_of_spaces(),
- other.longest_run_of_spaces()) + 1
- a = []
- for ts in range(1, n+1):
- if self.indent_level(ts) >= other.indent_level(ts):
- a.append( (ts,
- self.indent_level(ts),
- other.indent_level(ts)) )
- return a
-
-def format_witnesses(w):
- firsts = map(lambda tup: str(tup[0]), w)
- prefix = "at tab size"
- if len(w) > 1:
- prefix = prefix + "s"
- return prefix + " " + ', '.join(firsts)
-
-def process_tokens(tokens):
- INDENT = tokenize.INDENT
- DEDENT = tokenize.DEDENT
- NEWLINE = tokenize.NEWLINE
- JUNK = tokenize.COMMENT, tokenize.NL
- indents = [Whitespace("")]
- check_equal = 0
-
- for (type, token, start, end, line) in tokens:
- if type == NEWLINE:
- # a program statement, or ENDMARKER, will eventually follow,
- # after some (possibly empty) run of tokens of the form
- # (NL | COMMENT)* (INDENT | DEDENT+)?
- # If an INDENT appears, setting check_equal is wrong, and will
- # be undone when we see the INDENT.
- check_equal = 1
-
- elif type == INDENT:
- check_equal = 0
- thisguy = Whitespace(token)
- if not indents[-1].less(thisguy):
- witness = indents[-1].not_less_witness(thisguy)
- msg = "indent not greater e.g. " + format_witnesses(witness)
- raise NannyNag(start[0], msg, line)
- indents.append(thisguy)
-
- elif type == DEDENT:
- # there's nothing we need to check here! what's important is
- # that when the run of DEDENTs ends, the indentation of the
- # program statement (or ENDMARKER) that triggered the run is
- # equal to what's left at the top of the indents stack
-
- # Ouch! This assert triggers if the last line of the source
- # is indented *and* lacks a newline -- then DEDENTs pop out
- # of thin air.
- # assert check_equal # else no earlier NEWLINE, or an earlier INDENT
- check_equal = 1
-
- del indents[-1]
-
- elif check_equal and type not in JUNK:
- # this is the first "real token" following a NEWLINE, so it
- # must be the first token of the next program statement, or an
- # ENDMARKER; the "line" argument exposes the leading whitespace
- # for this statement; in the case of ENDMARKER, line is an empty
- # string, so will properly match the empty string with which the
- # "indents" stack was seeded
- check_equal = 0
- thisguy = Whitespace(line)
- if not indents[-1].equal(thisguy):
- witness = indents[-1].not_equal_witness(thisguy)
- msg = "indent not equal e.g. " + format_witnesses(witness)
- raise NannyNag(start[0], msg, line)
-
-
-if __name__ == '__main__':
- main()
diff --git a/sys/lib/python/tarfile.py b/sys/lib/python/tarfile.py
deleted file mode 100644
index 88505c2b0..000000000
--- a/sys/lib/python/tarfile.py
+++ /dev/null
@@ -1,2176 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-#-------------------------------------------------------------------
-# tarfile.py
-#-------------------------------------------------------------------
-# Copyright (C) 2002 Lars Gustäbel <lars@gustaebel.de>
-# All rights reserved.
-#
-# Permission is hereby granted, free of charge, to any person
-# obtaining a copy of this software and associated documentation
-# files (the "Software"), to deal in the Software without
-# restriction, including without limitation the rights to use,
-# copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following
-# conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-"""Read from and write to tar format archives.
-"""
-
-__version__ = "$Revision: 53162 $"
-# $Source$
-
-version = "0.8.0"
-__author__ = "Lars Gustäbel (lars@gustaebel.de)"
-__date__ = "$Date: 2006-12-27 21:36:58 +1100 (Wed, 27 Dec 2006) $"
-__cvsid__ = "$Id: tarfile.py 53162 2006-12-27 10:36:58Z lars.gustaebel $"
-__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
-
-#---------
-# Imports
-#---------
-import sys
-import os
-import shutil
-import stat
-import errno
-import time
-import struct
-import copy
-
-if sys.platform == 'mac':
- # This module needs work for MacOS9, especially in the area of pathname
- # handling. In many places it is assumed a simple substitution of / by the
- # local os.path.sep is good enough to convert pathnames, but this does not
- # work with the mac rooted:path:name versus :nonrooted:path:name syntax
- raise ImportError, "tarfile does not work for platform==mac"
-
-try:
- import grp, pwd
-except ImportError:
- grp = pwd = None
-
-# from tarfile import *
-__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
-
-#---------------------------------------------------------
-# tar constants
-#---------------------------------------------------------
-NUL = "\0" # the null character
-BLOCKSIZE = 512 # length of processing blocks
-RECORDSIZE = BLOCKSIZE * 20 # length of records
-MAGIC = "ustar" # magic tar string
-VERSION = "00" # version number
-
-LENGTH_NAME = 100 # maximum length of a filename
-LENGTH_LINK = 100 # maximum length of a linkname
-LENGTH_PREFIX = 155 # maximum length of the prefix field
-MAXSIZE_MEMBER = 077777777777L # maximum size of a file (11 octal digits)
-
-REGTYPE = "0" # regular file
-AREGTYPE = "\0" # regular file
-LNKTYPE = "1" # link (inside tarfile)
-SYMTYPE = "2" # symbolic link
-CHRTYPE = "3" # character special device
-BLKTYPE = "4" # block special device
-DIRTYPE = "5" # directory
-FIFOTYPE = "6" # fifo special device
-CONTTYPE = "7" # contiguous file
-
-GNUTYPE_LONGNAME = "L" # GNU tar extension for longnames
-GNUTYPE_LONGLINK = "K" # GNU tar extension for longlink
-GNUTYPE_SPARSE = "S" # GNU tar extension for sparse file
-
-#---------------------------------------------------------
-# tarfile constants
-#---------------------------------------------------------
-SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, # file types that tarfile
- SYMTYPE, DIRTYPE, FIFOTYPE, # can cope with.
- CONTTYPE, CHRTYPE, BLKTYPE,
- GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
- GNUTYPE_SPARSE)
-
-REGULAR_TYPES = (REGTYPE, AREGTYPE, # file types that somehow
- CONTTYPE, GNUTYPE_SPARSE) # represent regular files
-
-#---------------------------------------------------------
-# Bits used in the mode field, values in octal.
-#---------------------------------------------------------
-S_IFLNK = 0120000 # symbolic link
-S_IFREG = 0100000 # regular file
-S_IFBLK = 0060000 # block device
-S_IFDIR = 0040000 # directory
-S_IFCHR = 0020000 # character device
-S_IFIFO = 0010000 # fifo
-
-TSUID = 04000 # set UID on execution
-TSGID = 02000 # set GID on execution
-TSVTX = 01000 # reserved
-
-TUREAD = 0400 # read by owner
-TUWRITE = 0200 # write by owner
-TUEXEC = 0100 # execute/search by owner
-TGREAD = 0040 # read by group
-TGWRITE = 0020 # write by group
-TGEXEC = 0010 # execute/search by group
-TOREAD = 0004 # read by other
-TOWRITE = 0002 # write by other
-TOEXEC = 0001 # execute/search by other
-
-#---------------------------------------------------------
-# Some useful functions
-#---------------------------------------------------------
-
-def stn(s, length):
- """Convert a python string to a null-terminated string buffer.
- """
- return s[:length] + (length - len(s)) * NUL
-
-def nti(s):
- """Convert a number field to a python number.
- """
- # There are two possible encodings for a number field, see
- # itn() below.
- if s[0] != chr(0200):
- n = int(s.rstrip(NUL + " ") or "0", 8)
- else:
- n = 0L
- for i in xrange(len(s) - 1):
- n <<= 8
- n += ord(s[i + 1])
- return n
-
-def itn(n, digits=8, posix=False):
- """Convert a python number to a number field.
- """
- # POSIX 1003.1-1988 requires numbers to be encoded as a string of
- # octal digits followed by a null-byte, this allows values up to
- # (8**(digits-1))-1. GNU tar allows storing numbers greater than
- # that if necessary. A leading 0200 byte indicates this particular
- # encoding, the following digits-1 bytes are a big-endian
- # representation. This allows values up to (256**(digits-1))-1.
- if 0 <= n < 8 ** (digits - 1):
- s = "%0*o" % (digits - 1, n) + NUL
- else:
- if posix:
- raise ValueError("overflow in number field")
-
- if n < 0:
- # XXX We mimic GNU tar's behaviour with negative numbers,
- # this could raise OverflowError.
- n = struct.unpack("L", struct.pack("l", n))[0]
-
- s = ""
- for i in xrange(digits - 1):
- s = chr(n & 0377) + s
- n >>= 8
- s = chr(0200) + s
- return s
-
-def calc_chksums(buf):
- """Calculate the checksum for a member's header by summing up all
- characters except for the chksum field which is treated as if
- it was filled with spaces. According to the GNU tar sources,
- some tars (Sun and NeXT) calculate chksum with signed char,
- which will be different if there are chars in the buffer with
- the high bit set. So we calculate two checksums, unsigned and
- signed.
- """
- unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
- signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
- return unsigned_chksum, signed_chksum
-
-def copyfileobj(src, dst, length=None):
- """Copy length bytes from fileobj src to fileobj dst.
- If length is None, copy the entire content.
- """
- if length == 0:
- return
- if length is None:
- shutil.copyfileobj(src, dst)
- return
-
- BUFSIZE = 16 * 1024
- blocks, remainder = divmod(length, BUFSIZE)
- for b in xrange(blocks):
- buf = src.read(BUFSIZE)
- if len(buf) < BUFSIZE:
- raise IOError("end of file reached")
- dst.write(buf)
-
- if remainder != 0:
- buf = src.read(remainder)
- if len(buf) < remainder:
- raise IOError("end of file reached")
- dst.write(buf)
- return
-
-filemode_table = (
- ((S_IFLNK, "l"),
- (S_IFREG, "-"),
- (S_IFBLK, "b"),
- (S_IFDIR, "d"),
- (S_IFCHR, "c"),
- (S_IFIFO, "p")),
-
- ((TUREAD, "r"),),
- ((TUWRITE, "w"),),
- ((TUEXEC|TSUID, "s"),
- (TSUID, "S"),
- (TUEXEC, "x")),
-
- ((TGREAD, "r"),),
- ((TGWRITE, "w"),),
- ((TGEXEC|TSGID, "s"),
- (TSGID, "S"),
- (TGEXEC, "x")),
-
- ((TOREAD, "r"),),
- ((TOWRITE, "w"),),
- ((TOEXEC|TSVTX, "t"),
- (TSVTX, "T"),
- (TOEXEC, "x"))
-)
-
-def filemode(mode):
- """Convert a file's mode to a string of the form
- -rwxrwxrwx.
- Used by TarFile.list()
- """
- perm = []
- for table in filemode_table:
- for bit, char in table:
- if mode & bit == bit:
- perm.append(char)
- break
- else:
- perm.append("-")
- return "".join(perm)
-
-if os.sep != "/":
- normpath = lambda path: os.path.normpath(path).replace(os.sep, "/")
-else:
- normpath = os.path.normpath
-
-class TarError(Exception):
- """Base exception."""
- pass
-class ExtractError(TarError):
- """General exception for extract errors."""
- pass
-class ReadError(TarError):
- """Exception for unreadble tar archives."""
- pass
-class CompressionError(TarError):
- """Exception for unavailable compression methods."""
- pass
-class StreamError(TarError):
- """Exception for unsupported operations on stream-like TarFiles."""
- pass
-
-#---------------------------
-# internal stream interface
-#---------------------------
-class _LowLevelFile:
- """Low-level file object. Supports reading and writing.
- It is used instead of a regular file object for streaming
- access.
- """
-
- def __init__(self, name, mode):
- mode = {
- "r": os.O_RDONLY,
- "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
- }[mode]
- if hasattr(os, "O_BINARY"):
- mode |= os.O_BINARY
- self.fd = os.open(name, mode)
-
- def close(self):
- os.close(self.fd)
-
- def read(self, size):
- return os.read(self.fd, size)
-
- def write(self, s):
- os.write(self.fd, s)
-
-class _Stream:
- """Class that serves as an adapter between TarFile and
- a stream-like object. The stream-like object only
- needs to have a read() or write() method and is accessed
- blockwise. Use of gzip or bzip2 compression is possible.
- A stream-like object could be for example: sys.stdin,
- sys.stdout, a socket, a tape device etc.
-
- _Stream is intended to be used only internally.
- """
-
- def __init__(self, name, mode, comptype, fileobj, bufsize):
- """Construct a _Stream object.
- """
- self._extfileobj = True
- if fileobj is None:
- fileobj = _LowLevelFile(name, mode)
- self._extfileobj = False
-
- if comptype == '*':
- # Enable transparent compression detection for the
- # stream interface
- fileobj = _StreamProxy(fileobj)
- comptype = fileobj.getcomptype()
-
- self.name = name or ""
- self.mode = mode
- self.comptype = comptype
- self.fileobj = fileobj
- self.bufsize = bufsize
- self.buf = ""
- self.pos = 0L
- self.closed = False
-
- if comptype == "gz":
- try:
- import zlib
- except ImportError:
- raise CompressionError("zlib module is not available")
- self.zlib = zlib
- self.crc = zlib.crc32("")
- if mode == "r":
- self._init_read_gz()
- else:
- self._init_write_gz()
-
- if comptype == "bz2":
- try:
- import bz2
- except ImportError:
- raise CompressionError("bz2 module is not available")
- if mode == "r":
- self.dbuf = ""
- self.cmp = bz2.BZ2Decompressor()
- else:
- self.cmp = bz2.BZ2Compressor()
-
- def __del__(self):
- if hasattr(self, "closed") and not self.closed:
- self.close()
-
- def _init_write_gz(self):
- """Initialize for writing with gzip compression.
- """
- self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
- -self.zlib.MAX_WBITS,
- self.zlib.DEF_MEM_LEVEL,
- 0)
- timestamp = struct.pack("<L", long(time.time()))
- self.__write("\037\213\010\010%s\002\377" % timestamp)
- if self.name.endswith(".gz"):
- self.name = self.name[:-3]
- self.__write(self.name + NUL)
-
- def write(self, s):
- """Write string s to the stream.
- """
- if self.comptype == "gz":
- self.crc = self.zlib.crc32(s, self.crc)
- self.pos += len(s)
- if self.comptype != "tar":
- s = self.cmp.compress(s)
- self.__write(s)
-
- def __write(self, s):
- """Write string s to the stream if a whole new block
- is ready to be written.
- """
- self.buf += s
- while len(self.buf) > self.bufsize:
- self.fileobj.write(self.buf[:self.bufsize])
- self.buf = self.buf[self.bufsize:]
-
- def close(self):
- """Close the _Stream object. No operation should be
- done on it afterwards.
- """
- if self.closed:
- return
-
- if self.mode == "w" and self.comptype != "tar":
- self.buf += self.cmp.flush()
-
- if self.mode == "w" and self.buf:
- self.fileobj.write(self.buf)
- self.buf = ""
- if self.comptype == "gz":
- # The native zlib crc is an unsigned 32-bit integer, but
- # the Python wrapper implicitly casts that to a signed C
- # long. So, on a 32-bit box self.crc may "look negative",
- # while the same crc on a 64-bit box may "look positive".
- # To avoid irksome warnings from the `struct` module, force
- # it to look positive on all boxes.
- self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
- self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
-
- if not self._extfileobj:
- self.fileobj.close()
-
- self.closed = True
-
- def _init_read_gz(self):
- """Initialize for reading a gzip compressed fileobj.
- """
- self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
- self.dbuf = ""
-
- # taken from gzip.GzipFile with some alterations
- if self.__read(2) != "\037\213":
- raise ReadError("not a gzip file")
- if self.__read(1) != "\010":
- raise CompressionError("unsupported compression method")
-
- flag = ord(self.__read(1))
- self.__read(6)
-
- if flag & 4:
- xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
- self.read(xlen)
- if flag & 8:
- while True:
- s = self.__read(1)
- if not s or s == NUL:
- break
- if flag & 16:
- while True:
- s = self.__read(1)
- if not s or s == NUL:
- break
- if flag & 2:
- self.__read(2)
-
- def tell(self):
- """Return the stream's file pointer position.
- """
- return self.pos
-
- def seek(self, pos=0):
- """Set the stream's file pointer to pos. Negative seeking
- is forbidden.
- """
- if pos - self.pos >= 0:
- blocks, remainder = divmod(pos - self.pos, self.bufsize)
- for i in xrange(blocks):
- self.read(self.bufsize)
- self.read(remainder)
- else:
- raise StreamError("seeking backwards is not allowed")
- return self.pos
-
- def read(self, size=None):
- """Return the next size number of bytes from the stream.
- If size is not defined, return all bytes of the stream
- up to EOF.
- """
- if size is None:
- t = []
- while True:
- buf = self._read(self.bufsize)
- if not buf:
- break
- t.append(buf)
- buf = "".join(t)
- else:
- buf = self._read(size)
- self.pos += len(buf)
- return buf
-
- def _read(self, size):
- """Return size bytes from the stream.
- """
- if self.comptype == "tar":
- return self.__read(size)
-
- c = len(self.dbuf)
- t = [self.dbuf]
- while c < size:
- buf = self.__read(self.bufsize)
- if not buf:
- break
- buf = self.cmp.decompress(buf)
- t.append(buf)
- c += len(buf)
- t = "".join(t)
- self.dbuf = t[size:]
- return t[:size]
-
- def __read(self, size):
- """Return size bytes from stream. If internal buffer is empty,
- read another block from the stream.
- """
- c = len(self.buf)
- t = [self.buf]
- while c < size:
- buf = self.fileobj.read(self.bufsize)
- if not buf:
- break
- t.append(buf)
- c += len(buf)
- t = "".join(t)
- self.buf = t[size:]
- return t[:size]
-# class _Stream
-
-class _StreamProxy(object):
- """Small proxy class that enables transparent compression
- detection for the Stream interface (mode 'r|*').
- """
-
- def __init__(self, fileobj):
- self.fileobj = fileobj
- self.buf = self.fileobj.read(BLOCKSIZE)
-
- def read(self, size):
- self.read = self.fileobj.read
- return self.buf
-
- def getcomptype(self):
- if self.buf.startswith("\037\213\010"):
- return "gz"
- if self.buf.startswith("BZh91"):
- return "bz2"
- return "tar"
-
- def close(self):
- self.fileobj.close()
-# class StreamProxy
-
-class _BZ2Proxy(object):
- """Small proxy class that enables external file object
- support for "r:bz2" and "w:bz2" modes. This is actually
- a workaround for a limitation in bz2 module's BZ2File
- class which (unlike gzip.GzipFile) has no support for
- a file object argument.
- """
-
- blocksize = 16 * 1024
-
- def __init__(self, fileobj, mode):
- self.fileobj = fileobj
- self.mode = mode
- self.init()
-
- def init(self):
- import bz2
- self.pos = 0
- if self.mode == "r":
- self.bz2obj = bz2.BZ2Decompressor()
- self.fileobj.seek(0)
- self.buf = ""
- else:
- self.bz2obj = bz2.BZ2Compressor()
-
- def read(self, size):
- b = [self.buf]
- x = len(self.buf)
- while x < size:
- try:
- raw = self.fileobj.read(self.blocksize)
- data = self.bz2obj.decompress(raw)
- b.append(data)
- except EOFError:
- break
- x += len(data)
- self.buf = "".join(b)
-
- buf = self.buf[:size]
- self.buf = self.buf[size:]
- self.pos += len(buf)
- return buf
-
- def seek(self, pos):
- if pos < self.pos:
- self.init()
- self.read(pos - self.pos)
-
- def tell(self):
- return self.pos
-
- def write(self, data):
- self.pos += len(data)
- raw = self.bz2obj.compress(data)
- self.fileobj.write(raw)
-
- def close(self):
- if self.mode == "w":
- raw = self.bz2obj.flush()
- self.fileobj.write(raw)
- self.fileobj.close()
-# class _BZ2Proxy
-
-#------------------------
-# Extraction file object
-#------------------------
-class _FileInFile(object):
- """A thin wrapper around an existing file object that
- provides a part of its data as an individual file
- object.
- """
-
- def __init__(self, fileobj, offset, size, sparse=None):
- self.fileobj = fileobj
- self.offset = offset
- self.size = size
- self.sparse = sparse
- self.position = 0
-
- def tell(self):
- """Return the current file position.
- """
- return self.position
-
- def seek(self, position):
- """Seek to a position in the file.
- """
- self.position = position
-
- def read(self, size=None):
- """Read data from the file.
- """
- if size is None:
- size = self.size - self.position
- else:
- size = min(size, self.size - self.position)
-
- if self.sparse is None:
- return self.readnormal(size)
- else:
- return self.readsparse(size)
-
- def readnormal(self, size):
- """Read operation for regular files.
- """
- self.fileobj.seek(self.offset + self.position)
- self.position += size
- return self.fileobj.read(size)
-
- def readsparse(self, size):
- """Read operation for sparse files.
- """
- data = []
- while size > 0:
- buf = self.readsparsesection(size)
- if not buf:
- break
- size -= len(buf)
- data.append(buf)
- return "".join(data)
-
- def readsparsesection(self, size):
- """Read a single section of a sparse file.
- """
- section = self.sparse.find(self.position)
-
- if section is None:
- return ""
-
- size = min(size, section.offset + section.size - self.position)
-
- if isinstance(section, _data):
- realpos = section.realpos + self.position - section.offset
- self.fileobj.seek(self.offset + realpos)
- self.position += size
- return self.fileobj.read(size)
- else:
- self.position += size
- return NUL * size
-#class _FileInFile
-
-
-class ExFileObject(object):
- """File-like object for reading an archive member.
- Is returned by TarFile.extractfile().
- """
- blocksize = 1024
-
- def __init__(self, tarfile, tarinfo):
- self.fileobj = _FileInFile(tarfile.fileobj,
- tarinfo.offset_data,
- tarinfo.size,
- getattr(tarinfo, "sparse", None))
- self.name = tarinfo.name
- self.mode = "r"
- self.closed = False
- self.size = tarinfo.size
-
- self.position = 0
- self.buffer = ""
-
- def read(self, size=None):
- """Read at most size bytes from the file. If size is not
- present or None, read all data until EOF is reached.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- buf = ""
- if self.buffer:
- if size is None:
- buf = self.buffer
- self.buffer = ""
- else:
- buf = self.buffer[:size]
- self.buffer = self.buffer[size:]
-
- if size is None:
- buf += self.fileobj.read()
- else:
- buf += self.fileobj.read(size - len(buf))
-
- self.position += len(buf)
- return buf
-
- def readline(self, size=-1):
- """Read one entire line from the file. If size is present
- and non-negative, return a string with at most that
- size, which may be an incomplete line.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- if "\n" in self.buffer:
- pos = self.buffer.find("\n") + 1
- else:
- buffers = [self.buffer]
- while True:
- buf = self.fileobj.read(self.blocksize)
- buffers.append(buf)
- if not buf or "\n" in buf:
- self.buffer = "".join(buffers)
- pos = self.buffer.find("\n") + 1
- if pos == 0:
- # no newline found.
- pos = len(self.buffer)
- break
-
- if size != -1:
- pos = min(size, pos)
-
- buf = self.buffer[:pos]
- self.buffer = self.buffer[pos:]
- self.position += len(buf)
- return buf
-
- def readlines(self):
- """Return a list with all remaining lines.
- """
- result = []
- while True:
- line = self.readline()
- if not line: break
- result.append(line)
- return result
-
- def tell(self):
- """Return the current file position.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- return self.position
-
- def seek(self, pos, whence=os.SEEK_SET):
- """Seek to a position in the file.
- """
- if self.closed:
- raise ValueError("I/O operation on closed file")
-
- if whence == os.SEEK_SET:
- self.position = min(max(pos, 0), self.size)
- elif whence == os.SEEK_CUR:
- if pos < 0:
- self.position = max(self.position + pos, 0)
- else:
- self.position = min(self.position + pos, self.size)
- elif whence == os.SEEK_END:
- self.position = max(min(self.size + pos, self.size), 0)
- else:
- raise ValueError("Invalid argument")
-
- self.buffer = ""
- self.fileobj.seek(self.position)
-
- def close(self):
- """Close the file object.
- """
- self.closed = True
-
- def __iter__(self):
- """Get an iterator over the file's lines.
- """
- while True:
- line = self.readline()
- if not line:
- break
- yield line
-#class ExFileObject
-
-#------------------
-# Exported Classes
-#------------------
-class TarInfo(object):
- """Informational class which holds the details about an
- archive member given by a tar header block.
- TarInfo objects are returned by TarFile.getmember(),
- TarFile.getmembers() and TarFile.gettarinfo() and are
- usually created internally.
- """
-
- def __init__(self, name=""):
- """Construct a TarInfo object. name is the optional name
- of the member.
- """
- self.name = name # member name (dirnames must end with '/')
- self.mode = 0666 # file permissions
- self.uid = 0 # user id
- self.gid = 0 # group id
- self.size = 0 # file size
- self.mtime = 0 # modification time
- self.chksum = 0 # header checksum
- self.type = REGTYPE # member type
- self.linkname = "" # link name
- self.uname = "user" # user name
- self.gname = "group" # group name
- self.devmajor = 0 # device major number
- self.devminor = 0 # device minor number
-
- self.offset = 0 # the tar header starts here
- self.offset_data = 0 # the file's data starts here
-
- def __repr__(self):
- return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
-
- @classmethod
- def frombuf(cls, buf):
- """Construct a TarInfo object from a 512 byte string buffer.
- """
- if len(buf) != BLOCKSIZE:
- raise ValueError("truncated header")
- if buf.count(NUL) == BLOCKSIZE:
- raise ValueError("empty header")
-
- tarinfo = cls()
- tarinfo.buf = buf
- tarinfo.name = buf[0:100].rstrip(NUL)
- tarinfo.mode = nti(buf[100:108])
- tarinfo.uid = nti(buf[108:116])
- tarinfo.gid = nti(buf[116:124])
- tarinfo.size = nti(buf[124:136])
- tarinfo.mtime = nti(buf[136:148])
- tarinfo.chksum = nti(buf[148:156])
- tarinfo.type = buf[156:157]
- tarinfo.linkname = buf[157:257].rstrip(NUL)
- tarinfo.uname = buf[265:297].rstrip(NUL)
- tarinfo.gname = buf[297:329].rstrip(NUL)
- tarinfo.devmajor = nti(buf[329:337])
- tarinfo.devminor = nti(buf[337:345])
- prefix = buf[345:500].rstrip(NUL)
-
- if prefix and not tarinfo.issparse():
- tarinfo.name = prefix + "/" + tarinfo.name
-
- if tarinfo.chksum not in calc_chksums(buf):
- raise ValueError("invalid header")
- return tarinfo
-
- def tobuf(self, posix=False):
- """Return a tar header as a string of 512 byte blocks.
- """
- buf = ""
- type = self.type
- prefix = ""
-
- if self.name.endswith("/"):
- type = DIRTYPE
-
- if type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
- # Prevent "././@LongLink" from being normalized.
- name = self.name
- else:
- name = normpath(self.name)
-
- if type == DIRTYPE:
- # directories should end with '/'
- name += "/"
-
- linkname = self.linkname
- if linkname:
- # if linkname is empty we end up with a '.'
- linkname = normpath(linkname)
-
- if posix:
- if self.size > MAXSIZE_MEMBER:
- raise ValueError("file is too large (>= 8 GB)")
-
- if len(self.linkname) > LENGTH_LINK:
- raise ValueError("linkname is too long (>%d)" % (LENGTH_LINK))
-
- if len(name) > LENGTH_NAME:
- prefix = name[:LENGTH_PREFIX + 1]
- while prefix and prefix[-1] != "/":
- prefix = prefix[:-1]
-
- name = name[len(prefix):]
- prefix = prefix[:-1]
-
- if not prefix or len(name) > LENGTH_NAME:
- raise ValueError("name is too long")
-
- else:
- if len(self.linkname) > LENGTH_LINK:
- buf += self._create_gnulong(self.linkname, GNUTYPE_LONGLINK)
-
- if len(name) > LENGTH_NAME:
- buf += self._create_gnulong(name, GNUTYPE_LONGNAME)
-
- parts = [
- stn(name, 100),
- itn(self.mode & 07777, 8, posix),
- itn(self.uid, 8, posix),
- itn(self.gid, 8, posix),
- itn(self.size, 12, posix),
- itn(self.mtime, 12, posix),
- " ", # checksum field
- type,
- stn(self.linkname, 100),
- stn(MAGIC, 6),
- stn(VERSION, 2),
- stn(self.uname, 32),
- stn(self.gname, 32),
- itn(self.devmajor, 8, posix),
- itn(self.devminor, 8, posix),
- stn(prefix, 155)
- ]
-
- buf += struct.pack("%ds" % BLOCKSIZE, "".join(parts))
- chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
- buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
- self.buf = buf
- return buf
-
- def _create_gnulong(self, name, type):
- """Create a GNU longname/longlink header from name.
- It consists of an extended tar header, with the length
- of the longname as size, followed by data blocks,
- which contain the longname as a null terminated string.
- """
- name += NUL
-
- tarinfo = self.__class__()
- tarinfo.name = "././@LongLink"
- tarinfo.type = type
- tarinfo.mode = 0
- tarinfo.size = len(name)
-
- # create extended header
- buf = tarinfo.tobuf()
- # create name blocks
- buf += name
- blocks, remainder = divmod(len(name), BLOCKSIZE)
- if remainder > 0:
- buf += (BLOCKSIZE - remainder) * NUL
- return buf
-
- def isreg(self):
- return self.type in REGULAR_TYPES
- def isfile(self):
- return self.isreg()
- def isdir(self):
- return self.type == DIRTYPE
- def issym(self):
- return self.type == SYMTYPE
- def islnk(self):
- return self.type == LNKTYPE
- def ischr(self):
- return self.type == CHRTYPE
- def isblk(self):
- return self.type == BLKTYPE
- def isfifo(self):
- return self.type == FIFOTYPE
- def issparse(self):
- return self.type == GNUTYPE_SPARSE
- def isdev(self):
- return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
-# class TarInfo
-
-class TarFile(object):
- """The TarFile Class provides an interface to tar archives.
- """
-
- debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
-
- dereference = False # If true, add content of linked file to the
- # tar file, else the link.
-
- ignore_zeros = False # If true, skips empty or invalid blocks and
- # continues processing.
-
- errorlevel = 0 # If 0, fatal errors only appear in debug
- # messages (if debug >= 0). If > 0, errors
- # are passed to the caller as exceptions.
-
- posix = False # If True, generates POSIX.1-1990-compliant
- # archives (no GNU extensions!)
-
- fileobject = ExFileObject
-
- def __init__(self, name=None, mode="r", fileobj=None):
- """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
- read from an existing archive, 'a' to append data to an existing
- file or 'w' to create a new file overwriting an existing one. `mode'
- defaults to 'r'.
- If `fileobj' is given, it is used for reading or writing data. If it
- can be determined, `mode' is overridden by `fileobj's mode.
- `fileobj' is not closed, when TarFile is closed.
- """
- self.name = os.path.abspath(name)
-
- if len(mode) > 1 or mode not in "raw":
- raise ValueError("mode must be 'r', 'a' or 'w'")
- self._mode = mode
- self.mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
-
- if not fileobj:
- fileobj = file(self.name, self.mode)
- self._extfileobj = False
- else:
- if self.name is None and hasattr(fileobj, "name"):
- self.name = os.path.abspath(fileobj.name)
- if hasattr(fileobj, "mode"):
- self.mode = fileobj.mode
- self._extfileobj = True
- self.fileobj = fileobj
-
- # Init datastructures
- self.closed = False
- self.members = [] # list of members as TarInfo objects
- self._loaded = False # flag if all members have been read
- self.offset = 0L # current position in the archive file
- self.inodes = {} # dictionary caching the inodes of
- # archive members already added
-
- if self._mode == "r":
- self.firstmember = None
- self.firstmember = self.next()
-
- if self._mode == "a":
- # Move to the end of the archive,
- # before the first empty block.
- self.firstmember = None
- while True:
- try:
- tarinfo = self.next()
- except ReadError:
- self.fileobj.seek(0)
- break
- if tarinfo is None:
- self.fileobj.seek(- BLOCKSIZE, 1)
- break
-
- if self._mode in "aw":
- self._loaded = True
-
- #--------------------------------------------------------------------------
- # Below are the classmethods which act as alternate constructors to the
- # TarFile class. The open() method is the only one that is needed for
- # public use; it is the "super"-constructor and is able to select an
- # adequate "sub"-constructor for a particular compression using the mapping
- # from OPEN_METH.
- #
- # This concept allows one to subclass TarFile without losing the comfort of
- # the super-constructor. A sub-constructor is registered and made available
- # by adding it to the mapping in OPEN_METH.
-
- @classmethod
- def open(cls, name=None, mode="r", fileobj=None, bufsize=20*512):
- """Open a tar archive for reading, writing or appending. Return
- an appropriate TarFile class.
-
- mode:
- 'r' or 'r:*' open for reading with transparent compression
- 'r:' open for reading exclusively uncompressed
- 'r:gz' open for reading with gzip compression
- 'r:bz2' open for reading with bzip2 compression
- 'a' or 'a:' open for appending
- 'w' or 'w:' open for writing without compression
- 'w:gz' open for writing with gzip compression
- 'w:bz2' open for writing with bzip2 compression
-
- 'r|*' open a stream of tar blocks with transparent compression
- 'r|' open an uncompressed stream of tar blocks for reading
- 'r|gz' open a gzip compressed stream of tar blocks
- 'r|bz2' open a bzip2 compressed stream of tar blocks
- 'w|' open an uncompressed stream for writing
- 'w|gz' open a gzip compressed stream for writing
- 'w|bz2' open a bzip2 compressed stream for writing
- """
-
- if not name and not fileobj:
- raise ValueError("nothing to open")
-
- if mode in ("r", "r:*"):
- # Find out which *open() is appropriate for opening the file.
- for comptype in cls.OPEN_METH:
- func = getattr(cls, cls.OPEN_METH[comptype])
- if fileobj is not None:
- saved_pos = fileobj.tell()
- try:
- return func(name, "r", fileobj)
- except (ReadError, CompressionError):
- if fileobj is not None:
- fileobj.seek(saved_pos)
- continue
- raise ReadError("file could not be opened successfully")
-
- elif ":" in mode:
- filemode, comptype = mode.split(":", 1)
- filemode = filemode or "r"
- comptype = comptype or "tar"
-
- # Select the *open() function according to
- # given compression.
- if comptype in cls.OPEN_METH:
- func = getattr(cls, cls.OPEN_METH[comptype])
- else:
- raise CompressionError("unknown compression type %r" % comptype)
- return func(name, filemode, fileobj)
-
- elif "|" in mode:
- filemode, comptype = mode.split("|", 1)
- filemode = filemode or "r"
- comptype = comptype or "tar"
-
- if filemode not in "rw":
- raise ValueError("mode must be 'r' or 'w'")
-
- t = cls(name, filemode,
- _Stream(name, filemode, comptype, fileobj, bufsize))
- t._extfileobj = False
- return t
-
- elif mode in "aw":
- return cls.taropen(name, mode, fileobj)
-
- raise ValueError("undiscernible mode")
-
- @classmethod
- def taropen(cls, name, mode="r", fileobj=None):
- """Open uncompressed tar archive name for reading or writing.
- """
- if len(mode) > 1 or mode not in "raw":
- raise ValueError("mode must be 'r', 'a' or 'w'")
- return cls(name, mode, fileobj)
-
- @classmethod
- def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9):
- """Open gzip compressed tar archive name for reading or writing.
- Appending is not allowed.
- """
- if len(mode) > 1 or mode not in "rw":
- raise ValueError("mode must be 'r' or 'w'")
-
- try:
- import gzip
- gzip.GzipFile
- except (ImportError, AttributeError):
- raise CompressionError("gzip module is not available")
-
- if fileobj is None:
- fileobj = file(name, mode + "b")
-
- try:
- t = cls.taropen(name, mode,
- gzip.GzipFile(name, mode, compresslevel, fileobj))
- except IOError:
- raise ReadError("not a gzip file")
- t._extfileobj = False
- return t
-
- @classmethod
- def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9):
- """Open bzip2 compressed tar archive name for reading or writing.
- Appending is not allowed.
- """
- if len(mode) > 1 or mode not in "rw":
- raise ValueError("mode must be 'r' or 'w'.")
-
- try:
- import bz2
- except ImportError:
- raise CompressionError("bz2 module is not available")
-
- if fileobj is not None:
- fileobj = _BZ2Proxy(fileobj, mode)
- else:
- fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
-
- try:
- t = cls.taropen(name, mode, fileobj)
- except IOError:
- raise ReadError("not a bzip2 file")
- t._extfileobj = False
- return t
-
- # All *open() methods are registered here.
- OPEN_METH = {
- "tar": "taropen", # uncompressed tar
- "gz": "gzopen", # gzip compressed tar
- "bz2": "bz2open" # bzip2 compressed tar
- }
-
- #--------------------------------------------------------------------------
- # The public methods which TarFile provides:
-
- def close(self):
- """Close the TarFile. In write-mode, two finishing zero blocks are
- appended to the archive.
- """
- if self.closed:
- return
-
- if self._mode in "aw":
- self.fileobj.write(NUL * (BLOCKSIZE * 2))
- self.offset += (BLOCKSIZE * 2)
- # fill up the end with zero-blocks
- # (like option -b20 for tar does)
- blocks, remainder = divmod(self.offset, RECORDSIZE)
- if remainder > 0:
- self.fileobj.write(NUL * (RECORDSIZE - remainder))
-
- if not self._extfileobj:
- self.fileobj.close()
- self.closed = True
-
- def getmember(self, name):
- """Return a TarInfo object for member `name'. If `name' can not be
- found in the archive, KeyError is raised. If a member occurs more
- than once in the archive, its last occurence is assumed to be the
- most up-to-date version.
- """
- tarinfo = self._getmember(name)
- if tarinfo is None:
- raise KeyError("filename %r not found" % name)
- return tarinfo
-
- def getmembers(self):
- """Return the members of the archive as a list of TarInfo objects. The
- list has the same order as the members in the archive.
- """
- self._check()
- if not self._loaded: # if we want to obtain a list of
- self._load() # all members, we first have to
- # scan the whole archive.
- return self.members
-
- def getnames(self):
- """Return the members of the archive as a list of their names. It has
- the same order as the list returned by getmembers().
- """
- return [tarinfo.name for tarinfo in self.getmembers()]
-
- def gettarinfo(self, name=None, arcname=None, fileobj=None):
- """Create a TarInfo object for either the file `name' or the file
- object `fileobj' (using os.fstat on its file descriptor). You can
- modify some of the TarInfo's attributes before you add it using
- addfile(). If given, `arcname' specifies an alternative name for the
- file in the archive.
- """
- self._check("aw")
-
- # When fileobj is given, replace name by
- # fileobj's real name.
- if fileobj is not None:
- name = fileobj.name
-
- # Building the name of the member in the archive.
- # Backward slashes are converted to forward slashes,
- # Absolute paths are turned to relative paths.
- if arcname is None:
- arcname = name
- arcname = normpath(arcname)
- drv, arcname = os.path.splitdrive(arcname)
- while arcname[0:1] == "/":
- arcname = arcname[1:]
-
- # Now, fill the TarInfo object with
- # information specific for the file.
- tarinfo = TarInfo()
-
- # Use os.stat or os.lstat, depending on platform
- # and if symlinks shall be resolved.
- if fileobj is None:
- if hasattr(os, "lstat") and not self.dereference:
- statres = os.lstat(name)
- else:
- statres = os.stat(name)
- else:
- statres = os.fstat(fileobj.fileno())
- linkname = ""
-
- stmd = statres.st_mode
- if stat.S_ISREG(stmd):
- inode = (statres.st_ino, statres.st_dev)
- if not self.dereference and \
- statres.st_nlink > 1 and inode in self.inodes:
- # Is it a hardlink to an already
- # archived file?
- type = LNKTYPE
- linkname = self.inodes[inode]
- else:
- # The inode is added only if its valid.
- # For win32 it is always 0.
- type = REGTYPE
- if inode[0]:
- self.inodes[inode] = arcname
- elif stat.S_ISDIR(stmd):
- type = DIRTYPE
- if arcname[-1:] != "/":
- arcname += "/"
- elif stat.S_ISFIFO(stmd):
- type = FIFOTYPE
- elif stat.S_ISLNK(stmd):
- type = SYMTYPE
- linkname = os.readlink(name)
- elif stat.S_ISCHR(stmd):
- type = CHRTYPE
- elif stat.S_ISBLK(stmd):
- type = BLKTYPE
- else:
- return None
-
- # Fill the TarInfo object with all
- # information we can get.
- tarinfo.name = arcname
- tarinfo.mode = stmd
- tarinfo.uid = statres.st_uid
- tarinfo.gid = statres.st_gid
- if stat.S_ISREG(stmd):
- tarinfo.size = statres.st_size
- else:
- tarinfo.size = 0L
- tarinfo.mtime = statres.st_mtime
- tarinfo.type = type
- tarinfo.linkname = linkname
- if pwd:
- try:
- tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
- except KeyError:
- pass
- if grp:
- try:
- tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
- except KeyError:
- pass
-
- if type in (CHRTYPE, BLKTYPE):
- if hasattr(os, "major") and hasattr(os, "minor"):
- tarinfo.devmajor = os.major(statres.st_rdev)
- tarinfo.devminor = os.minor(statres.st_rdev)
- return tarinfo
-
- def list(self, verbose=True):
- """Print a table of contents to sys.stdout. If `verbose' is False, only
- the names of the members are printed. If it is True, an `ls -l'-like
- output is produced.
- """
- self._check()
-
- for tarinfo in self:
- if verbose:
- print filemode(tarinfo.mode),
- print "%s/%s" % (tarinfo.uname or tarinfo.uid,
- tarinfo.gname or tarinfo.gid),
- if tarinfo.ischr() or tarinfo.isblk():
- print "%10s" % ("%d,%d" \
- % (tarinfo.devmajor, tarinfo.devminor)),
- else:
- print "%10d" % tarinfo.size,
- print "%d-%02d-%02d %02d:%02d:%02d" \
- % time.localtime(tarinfo.mtime)[:6],
-
- print tarinfo.name,
-
- if verbose:
- if tarinfo.issym():
- print "->", tarinfo.linkname,
- if tarinfo.islnk():
- print "link to", tarinfo.linkname,
- print
-
- def add(self, name, arcname=None, recursive=True):
- """Add the file `name' to the archive. `name' may be any type of file
- (directory, fifo, symbolic link, etc.). If given, `arcname'
- specifies an alternative name for the file in the archive.
- Directories are added recursively by default. This can be avoided by
- setting `recursive' to False.
- """
- self._check("aw")
-
- if arcname is None:
- arcname = name
-
- # Skip if somebody tries to archive the archive...
- if self.name is not None and os.path.abspath(name) == self.name:
- self._dbg(2, "tarfile: Skipped %r" % name)
- return
-
- # Special case: The user wants to add the current
- # working directory.
- if name == ".":
- if recursive:
- if arcname == ".":
- arcname = ""
- for f in os.listdir("."):
- self.add(f, os.path.join(arcname, f))
- return
-
- self._dbg(1, name)
-
- # Create a TarInfo object from the file.
- tarinfo = self.gettarinfo(name, arcname)
-
- if tarinfo is None:
- self._dbg(1, "tarfile: Unsupported type %r" % name)
- return
-
- # Append the tar header and data to the archive.
- if tarinfo.isreg():
- f = file(name, "rb")
- self.addfile(tarinfo, f)
- f.close()
-
- elif tarinfo.isdir():
- self.addfile(tarinfo)
- if recursive:
- for f in os.listdir(name):
- self.add(os.path.join(name, f), os.path.join(arcname, f))
-
- else:
- self.addfile(tarinfo)
-
- def addfile(self, tarinfo, fileobj=None):
- """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
- given, tarinfo.size bytes are read from it and added to the archive.
- You can create TarInfo objects using gettarinfo().
- On Windows platforms, `fileobj' should always be opened with mode
- 'rb' to avoid irritation about the file size.
- """
- self._check("aw")
-
- tarinfo = copy.copy(tarinfo)
-
- buf = tarinfo.tobuf(self.posix)
- self.fileobj.write(buf)
- self.offset += len(buf)
-
- # If there's data to follow, append it.
- if fileobj is not None:
- copyfileobj(fileobj, self.fileobj, tarinfo.size)
- blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
- if remainder > 0:
- self.fileobj.write(NUL * (BLOCKSIZE - remainder))
- blocks += 1
- self.offset += blocks * BLOCKSIZE
-
- self.members.append(tarinfo)
-
- def extractall(self, path=".", members=None):
- """Extract all members from the archive to the current working
- directory and set owner, modification time and permissions on
- directories afterwards. `path' specifies a different directory
- to extract to. `members' is optional and must be a subset of the
- list returned by getmembers().
- """
- directories = []
-
- if members is None:
- members = self
-
- for tarinfo in members:
- if tarinfo.isdir():
- # Extract directory with a safe mode, so that
- # all files below can be extracted as well.
- try:
- os.makedirs(os.path.join(path, tarinfo.name), 0777)
- except EnvironmentError:
- pass
- directories.append(tarinfo)
- else:
- self.extract(tarinfo, path)
-
- # Reverse sort directories.
- directories.sort(lambda a, b: cmp(a.name, b.name))
- directories.reverse()
-
- # Set correct owner, mtime and filemode on directories.
- for tarinfo in directories:
- path = os.path.join(path, tarinfo.name)
- try:
- self.chown(tarinfo, path)
- self.utime(tarinfo, path)
- self.chmod(tarinfo, path)
- except ExtractError, e:
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
- def extract(self, member, path=""):
- """Extract a member from the archive to the current working directory,
- using its full name. Its file information is extracted as accurately
- as possible. `member' may be a filename or a TarInfo object. You can
- specify a different directory using `path'.
- """
- self._check("r")
-
- if isinstance(member, TarInfo):
- tarinfo = member
- else:
- tarinfo = self.getmember(member)
-
- # Prepare the link target for makelink().
- if tarinfo.islnk():
- tarinfo._link_target = os.path.join(path, tarinfo.linkname)
-
- try:
- self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
- except EnvironmentError, e:
- if self.errorlevel > 0:
- raise
- else:
- if e.filename is None:
- self._dbg(1, "tarfile: %s" % e.strerror)
- else:
- self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
- except ExtractError, e:
- if self.errorlevel > 1:
- raise
- else:
- self._dbg(1, "tarfile: %s" % e)
-
- def extractfile(self, member):
- """Extract a member from the archive as a file object. `member' may be
- a filename or a TarInfo object. If `member' is a regular file, a
- file-like object is returned. If `member' is a link, a file-like
- object is constructed from the link's target. If `member' is none of
- the above, None is returned.
- The file-like object is read-only and provides the following
- methods: read(), readline(), readlines(), seek() and tell()
- """
- self._check("r")
-
- if isinstance(member, TarInfo):
- tarinfo = member
- else:
- tarinfo = self.getmember(member)
-
- if tarinfo.isreg():
- return self.fileobject(self, tarinfo)
-
- elif tarinfo.type not in SUPPORTED_TYPES:
- # If a member's type is unknown, it is treated as a
- # regular file.
- return self.fileobject(self, tarinfo)
-
- elif tarinfo.islnk() or tarinfo.issym():
- if isinstance(self.fileobj, _Stream):
- # A small but ugly workaround for the case that someone tries
- # to extract a (sym)link as a file-object from a non-seekable
- # stream of tar blocks.
- raise StreamError("cannot extract (sym)link as file object")
- else:
- # A (sym)link's file object is its target's file object.
- return self.extractfile(self._getmember(tarinfo.linkname,
- tarinfo))
- else:
- # If there's no data associated with the member (directory, chrdev,
- # blkdev, etc.), return None instead of a file object.
- return None
-
- def _extract_member(self, tarinfo, targetpath):
- """Extract the TarInfo object tarinfo to a physical
- file called targetpath.
- """
- # Fetch the TarInfo object for the given name
- # and build the destination pathname, replacing
- # forward slashes to platform specific separators.
- if targetpath[-1:] == "/":
- targetpath = targetpath[:-1]
- targetpath = os.path.normpath(targetpath)
-
- # Create all upper directories.
- upperdirs = os.path.dirname(targetpath)
- if upperdirs and not os.path.exists(upperdirs):
- ti = TarInfo()
- ti.name = upperdirs
- ti.type = DIRTYPE
- ti.mode = 0777
- ti.mtime = tarinfo.mtime
- ti.uid = tarinfo.uid
- ti.gid = tarinfo.gid
- ti.uname = tarinfo.uname
- ti.gname = tarinfo.gname
- try:
- self._extract_member(ti, ti.name)
- except:
- pass
-
- if tarinfo.islnk() or tarinfo.issym():
- self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
- else:
- self._dbg(1, tarinfo.name)
-
- if tarinfo.isreg():
- self.makefile(tarinfo, targetpath)
- elif tarinfo.isdir():
- self.makedir(tarinfo, targetpath)
- elif tarinfo.isfifo():
- self.makefifo(tarinfo, targetpath)
- elif tarinfo.ischr() or tarinfo.isblk():
- self.makedev(tarinfo, targetpath)
- elif tarinfo.islnk() or tarinfo.issym():
- self.makelink(tarinfo, targetpath)
- elif tarinfo.type not in SUPPORTED_TYPES:
- self.makeunknown(tarinfo, targetpath)
- else:
- self.makefile(tarinfo, targetpath)
-
- self.chown(tarinfo, targetpath)
- if not tarinfo.issym():
- self.chmod(tarinfo, targetpath)
- self.utime(tarinfo, targetpath)
-
- #--------------------------------------------------------------------------
- # Below are the different file methods. They are called via
- # _extract_member() when extract() is called. They can be replaced in a
- # subclass to implement other functionality.
-
- def makedir(self, tarinfo, targetpath):
- """Make a directory called targetpath.
- """
- try:
- os.mkdir(targetpath)
- except EnvironmentError, e:
- if e.errno != errno.EEXIST:
- raise
-
- def makefile(self, tarinfo, targetpath):
- """Make a file called targetpath.
- """
- source = self.extractfile(tarinfo)
- target = file(targetpath, "wb")
- copyfileobj(source, target)
- source.close()
- target.close()
-
- def makeunknown(self, tarinfo, targetpath):
- """Make a file from a TarInfo object with an unknown type
- at targetpath.
- """
- self.makefile(tarinfo, targetpath)
- self._dbg(1, "tarfile: Unknown file type %r, " \
- "extracted as regular file." % tarinfo.type)
-
- def makefifo(self, tarinfo, targetpath):
- """Make a fifo called targetpath.
- """
- if hasattr(os, "mkfifo"):
- os.mkfifo(targetpath)
- else:
- raise ExtractError("fifo not supported by system")
-
- def makedev(self, tarinfo, targetpath):
- """Make a character or block device called targetpath.
- """
- if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
- raise ExtractError("special devices not supported by system")
-
- mode = tarinfo.mode
- if tarinfo.isblk():
- mode |= stat.S_IFBLK
- else:
- mode |= stat.S_IFCHR
-
- os.mknod(targetpath, mode,
- os.makedev(tarinfo.devmajor, tarinfo.devminor))
-
- def makelink(self, tarinfo, targetpath):
- """Make a (symbolic) link called targetpath. If it cannot be created
- (platform limitation), we try to make a copy of the referenced file
- instead of a link.
- """
- linkpath = tarinfo.linkname
- try:
- if tarinfo.issym():
- os.symlink(linkpath, targetpath)
- else:
- # See extract().
- os.link(tarinfo._link_target, targetpath)
- except AttributeError:
- if tarinfo.issym():
- linkpath = os.path.join(os.path.dirname(tarinfo.name),
- linkpath)
- linkpath = normpath(linkpath)
-
- try:
- self._extract_member(self.getmember(linkpath), targetpath)
- except (EnvironmentError, KeyError), e:
- linkpath = os.path.normpath(linkpath)
- try:
- shutil.copy2(linkpath, targetpath)
- except EnvironmentError, e:
- raise IOError("link could not be created")
-
- def chown(self, tarinfo, targetpath):
- """Set owner of targetpath according to tarinfo.
- """
- if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
- # We have to be root to do so.
- try:
- g = grp.getgrnam(tarinfo.gname)[2]
- except KeyError:
- try:
- g = grp.getgrgid(tarinfo.gid)[2]
- except KeyError:
- g = os.getgid()
- try:
- u = pwd.getpwnam(tarinfo.uname)[2]
- except KeyError:
- try:
- u = pwd.getpwuid(tarinfo.uid)[2]
- except KeyError:
- u = os.getuid()
- try:
- if tarinfo.issym() and hasattr(os, "lchown"):
- os.lchown(targetpath, u, g)
- else:
- if sys.platform != "os2emx":
- os.chown(targetpath, u, g)
- except EnvironmentError, e:
- raise ExtractError("could not change owner")
-
- def chmod(self, tarinfo, targetpath):
- """Set file permissions of targetpath according to tarinfo.
- """
- if hasattr(os, 'chmod'):
- try:
- os.chmod(targetpath, tarinfo.mode)
- except EnvironmentError, e:
- raise ExtractError("could not change mode")
-
- def utime(self, tarinfo, targetpath):
- """Set modification time of targetpath according to tarinfo.
- """
- if not hasattr(os, 'utime'):
- return
- if sys.platform == "win32" and tarinfo.isdir():
- # According to msdn.microsoft.com, it is an error (EACCES)
- # to use utime() on directories.
- return
- try:
- os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
- except EnvironmentError, e:
- raise ExtractError("could not change modification time")
-
- #--------------------------------------------------------------------------
- def next(self):
- """Return the next member of the archive as a TarInfo object, when
- TarFile is opened for reading. Return None if there is no more
- available.
- """
- self._check("ra")
- if self.firstmember is not None:
- m = self.firstmember
- self.firstmember = None
- return m
-
- # Read the next block.
- self.fileobj.seek(self.offset)
- while True:
- buf = self.fileobj.read(BLOCKSIZE)
- if not buf:
- return None
-
- try:
- tarinfo = TarInfo.frombuf(buf)
-
- # Set the TarInfo object's offset to the current position of the
- # TarFile and set self.offset to the position where the data blocks
- # should begin.
- tarinfo.offset = self.offset
- self.offset += BLOCKSIZE
-
- tarinfo = self.proc_member(tarinfo)
-
- except ValueError, e:
- if self.ignore_zeros:
- self._dbg(2, "0x%X: empty or invalid block: %s" %
- (self.offset, e))
- self.offset += BLOCKSIZE
- continue
- else:
- if self.offset == 0:
- raise ReadError("empty, unreadable or compressed "
- "file: %s" % e)
- return None
- break
-
- # Some old tar programs represent a directory as a regular
- # file with a trailing slash.
- if tarinfo.isreg() and tarinfo.name.endswith("/"):
- tarinfo.type = DIRTYPE
-
- # Directory names should have a '/' at the end.
- if tarinfo.isdir():
- tarinfo.name += "/"
-
- self.members.append(tarinfo)
- return tarinfo
-
- #--------------------------------------------------------------------------
- # The following are methods that are called depending on the type of a
- # member. The entry point is proc_member() which is called with a TarInfo
- # object created from the header block from the current offset. The
- # proc_member() method can be overridden in a subclass to add custom
- # proc_*() methods. A proc_*() method MUST implement the following
- # operations:
- # 1. Set tarinfo.offset_data to the position where the data blocks begin,
- # if there is data that follows.
- # 2. Set self.offset to the position where the next member's header will
- # begin.
- # 3. Return tarinfo or another valid TarInfo object.
- def proc_member(self, tarinfo):
- """Choose the right processing method for tarinfo depending
- on its type and call it.
- """
- if tarinfo.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
- return self.proc_gnulong(tarinfo)
- elif tarinfo.type == GNUTYPE_SPARSE:
- return self.proc_sparse(tarinfo)
- else:
- return self.proc_builtin(tarinfo)
-
- def proc_builtin(self, tarinfo):
- """Process a builtin type member or an unknown member
- which will be treated as a regular file.
- """
- tarinfo.offset_data = self.offset
- if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
- # Skip the following data blocks.
- self.offset += self._block(tarinfo.size)
- return tarinfo
-
- def proc_gnulong(self, tarinfo):
- """Process the blocks that hold a GNU longname
- or longlink member.
- """
- buf = ""
- count = tarinfo.size
- while count > 0:
- block = self.fileobj.read(BLOCKSIZE)
- buf += block
- self.offset += BLOCKSIZE
- count -= BLOCKSIZE
-
- # Fetch the next header and process it.
- b = self.fileobj.read(BLOCKSIZE)
- t = TarInfo.frombuf(b)
- t.offset = self.offset
- self.offset += BLOCKSIZE
- next = self.proc_member(t)
-
- # Patch the TarInfo object from the next header with
- # the longname information.
- next.offset = tarinfo.offset
- if tarinfo.type == GNUTYPE_LONGNAME:
- next.name = buf.rstrip(NUL)
- elif tarinfo.type == GNUTYPE_LONGLINK:
- next.linkname = buf.rstrip(NUL)
-
- return next
-
- def proc_sparse(self, tarinfo):
- """Process a GNU sparse header plus extra headers.
- """
- buf = tarinfo.buf
- sp = _ringbuffer()
- pos = 386
- lastpos = 0L
- realpos = 0L
- # There are 4 possible sparse structs in the
- # first header.
- for i in xrange(4):
- try:
- offset = nti(buf[pos:pos + 12])
- numbytes = nti(buf[pos + 12:pos + 24])
- except ValueError:
- break
- if offset > lastpos:
- sp.append(_hole(lastpos, offset - lastpos))
- sp.append(_data(offset, numbytes, realpos))
- realpos += numbytes
- lastpos = offset + numbytes
- pos += 24
-
- isextended = ord(buf[482])
- origsize = nti(buf[483:495])
-
- # If the isextended flag is given,
- # there are extra headers to process.
- while isextended == 1:
- buf = self.fileobj.read(BLOCKSIZE)
- self.offset += BLOCKSIZE
- pos = 0
- for i in xrange(21):
- try:
- offset = nti(buf[pos:pos + 12])
- numbytes = nti(buf[pos + 12:pos + 24])
- except ValueError:
- break
- if offset > lastpos:
- sp.append(_hole(lastpos, offset - lastpos))
- sp.append(_data(offset, numbytes, realpos))
- realpos += numbytes
- lastpos = offset + numbytes
- pos += 24
- isextended = ord(buf[504])
-
- if lastpos < origsize:
- sp.append(_hole(lastpos, origsize - lastpos))
-
- tarinfo.sparse = sp
-
- tarinfo.offset_data = self.offset
- self.offset += self._block(tarinfo.size)
- tarinfo.size = origsize
-
- return tarinfo
-
- #--------------------------------------------------------------------------
- # Little helper methods:
-
- def _block(self, count):
- """Round up a byte count by BLOCKSIZE and return it,
- e.g. _block(834) => 1024.
- """
- blocks, remainder = divmod(count, BLOCKSIZE)
- if remainder:
- blocks += 1
- return blocks * BLOCKSIZE
-
- def _getmember(self, name, tarinfo=None):
- """Find an archive member by name from bottom to top.
- If tarinfo is given, it is used as the starting point.
- """
- # Ensure that all members have been loaded.
- members = self.getmembers()
-
- if tarinfo is None:
- end = len(members)
- else:
- end = members.index(tarinfo)
-
- for i in xrange(end - 1, -1, -1):
- if name == members[i].name:
- return members[i]
-
- def _load(self):
- """Read through the entire archive file and look for readable
- members.
- """
- while True:
- tarinfo = self.next()
- if tarinfo is None:
- break
- self._loaded = True
-
- def _check(self, mode=None):
- """Check if TarFile is still open, and if the operation's mode
- corresponds to TarFile's mode.
- """
- if self.closed:
- raise IOError("%s is closed" % self.__class__.__name__)
- if mode is not None and self._mode not in mode:
- raise IOError("bad operation for mode %r" % self._mode)
-
- def __iter__(self):
- """Provide an iterator object.
- """
- if self._loaded:
- return iter(self.members)
- else:
- return TarIter(self)
-
- def _dbg(self, level, msg):
- """Write debugging output to sys.stderr.
- """
- if level <= self.debug:
- print >> sys.stderr, msg
-# class TarFile
-
-class TarIter:
- """Iterator Class.
-
- for tarinfo in TarFile(...):
- suite...
- """
-
- def __init__(self, tarfile):
- """Construct a TarIter object.
- """
- self.tarfile = tarfile
- self.index = 0
- def __iter__(self):
- """Return iterator object.
- """
- return self
- def next(self):
- """Return the next item using TarFile's next() method.
- When all members have been read, set TarFile as _loaded.
- """
- # Fix for SF #1100429: Under rare circumstances it can
- # happen that getmembers() is called during iteration,
- # which will cause TarIter to stop prematurely.
- if not self.tarfile._loaded:
- tarinfo = self.tarfile.next()
- if not tarinfo:
- self.tarfile._loaded = True
- raise StopIteration
- else:
- try:
- tarinfo = self.tarfile.members[self.index]
- except IndexError:
- raise StopIteration
- self.index += 1
- return tarinfo
-
-# Helper classes for sparse file support
-class _section:
- """Base class for _data and _hole.
- """
- def __init__(self, offset, size):
- self.offset = offset
- self.size = size
- def __contains__(self, offset):
- return self.offset <= offset < self.offset + self.size
-
-class _data(_section):
- """Represent a data section in a sparse file.
- """
- def __init__(self, offset, size, realpos):
- _section.__init__(self, offset, size)
- self.realpos = realpos
-
-class _hole(_section):
- """Represent a hole section in a sparse file.
- """
- pass
-
-class _ringbuffer(list):
- """Ringbuffer class which increases performance
- over a regular list.
- """
- def __init__(self):
- self.idx = 0
- def find(self, offset):
- idx = self.idx
- while True:
- item = self[idx]
- if offset in item:
- break
- idx += 1
- if idx == len(self):
- idx = 0
- if idx == self.idx:
- # End of File
- return None
- self.idx = idx
- return item
-
-#---------------------------------------------
-# zipfile compatible TarFile class
-#---------------------------------------------
-TAR_PLAIN = 0 # zipfile.ZIP_STORED
-TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
-class TarFileCompat:
- """TarFile class compatible with standard module zipfile's
- ZipFile class.
- """
- def __init__(self, file, mode="r", compression=TAR_PLAIN):
- if compression == TAR_PLAIN:
- self.tarfile = TarFile.taropen(file, mode)
- elif compression == TAR_GZIPPED:
- self.tarfile = TarFile.gzopen(file, mode)
- else:
- raise ValueError("unknown compression constant")
- if mode[0:1] == "r":
- members = self.tarfile.getmembers()
- for m in members:
- m.filename = m.name
- m.file_size = m.size
- m.date_time = time.gmtime(m.mtime)[:6]
- def namelist(self):
- return map(lambda m: m.name, self.infolist())
- def infolist(self):
- return filter(lambda m: m.type in REGULAR_TYPES,
- self.tarfile.getmembers())
- def printdir(self):
- self.tarfile.list()
- def testzip(self):
- return
- def getinfo(self, name):
- return self.tarfile.getmember(name)
- def read(self, name):
- return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
- def write(self, filename, arcname=None, compress_type=None):
- self.tarfile.add(filename, arcname)
- def writestr(self, zinfo, bytes):
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- import calendar
- zinfo.name = zinfo.filename
- zinfo.size = zinfo.file_size
- zinfo.mtime = calendar.timegm(zinfo.date_time)
- self.tarfile.addfile(zinfo, StringIO(bytes))
- def close(self):
- self.tarfile.close()
-#class TarFileCompat
-
-#--------------------
-# exported functions
-#--------------------
-def is_tarfile(name):
- """Return True if name points to a tar archive that we
- are able to handle, else return False.
- """
- try:
- t = open(name)
- t.close()
- return True
- except TarError:
- return False
-
-open = TarFile.open
diff --git a/sys/lib/python/telnetlib.py b/sys/lib/python/telnetlib.py
deleted file mode 100644
index a13e85cc9..000000000
--- a/sys/lib/python/telnetlib.py
+++ /dev/null
@@ -1,669 +0,0 @@
-r"""TELNET client class.
-
-Based on RFC 854: TELNET Protocol Specification, by J. Postel and
-J. Reynolds
-
-Example:
-
->>> from telnetlib import Telnet
->>> tn = Telnet('www.python.org', 79) # connect to finger port
->>> tn.write('guido\r\n')
->>> print tn.read_all()
-Login Name TTY Idle When Where
-guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
-
->>>
-
-Note that read_all() won't read until eof -- it just reads some data
--- but it guarantees to read at least one byte unless EOF is hit.
-
-It is possible to pass a Telnet object to select.select() in order to
-wait until more data is available. Note that in this case,
-read_eager() may return '' even if there was data on the socket,
-because the protocol negotiation may have eaten the data. This is why
-EOFError is needed in some cases to distinguish between "no data" and
-"connection closed" (since the socket also appears ready for reading
-when it is closed).
-
-To do:
-- option negotiation
-- timeout should be intrinsic to the connection object instead of an
- option on one of the read calls only
-
-"""
-
-
-# Imported modules
-import sys
-import socket
-import select
-
-__all__ = ["Telnet"]
-
-# Tunable parameters
-DEBUGLEVEL = 0
-
-# Telnet protocol defaults
-TELNET_PORT = 23
-
-# Telnet protocol characters (don't change)
-IAC = chr(255) # "Interpret As Command"
-DONT = chr(254)
-DO = chr(253)
-WONT = chr(252)
-WILL = chr(251)
-theNULL = chr(0)
-
-SE = chr(240) # Subnegotiation End
-NOP = chr(241) # No Operation
-DM = chr(242) # Data Mark
-BRK = chr(243) # Break
-IP = chr(244) # Interrupt process
-AO = chr(245) # Abort output
-AYT = chr(246) # Are You There
-EC = chr(247) # Erase Character
-EL = chr(248) # Erase Line
-GA = chr(249) # Go Ahead
-SB = chr(250) # Subnegotiation Begin
-
-
-# Telnet protocol options code (don't change)
-# These ones all come from arpa/telnet.h
-BINARY = chr(0) # 8-bit data path
-ECHO = chr(1) # echo
-RCP = chr(2) # prepare to reconnect
-SGA = chr(3) # suppress go ahead
-NAMS = chr(4) # approximate message size
-STATUS = chr(5) # give status
-TM = chr(6) # timing mark
-RCTE = chr(7) # remote controlled transmission and echo
-NAOL = chr(8) # negotiate about output line width
-NAOP = chr(9) # negotiate about output page size
-NAOCRD = chr(10) # negotiate about CR disposition
-NAOHTS = chr(11) # negotiate about horizontal tabstops
-NAOHTD = chr(12) # negotiate about horizontal tab disposition
-NAOFFD = chr(13) # negotiate about formfeed disposition
-NAOVTS = chr(14) # negotiate about vertical tab stops
-NAOVTD = chr(15) # negotiate about vertical tab disposition
-NAOLFD = chr(16) # negotiate about output LF disposition
-XASCII = chr(17) # extended ascii character set
-LOGOUT = chr(18) # force logout
-BM = chr(19) # byte macro
-DET = chr(20) # data entry terminal
-SUPDUP = chr(21) # supdup protocol
-SUPDUPOUTPUT = chr(22) # supdup output
-SNDLOC = chr(23) # send location
-TTYPE = chr(24) # terminal type
-EOR = chr(25) # end or record
-TUID = chr(26) # TACACS user identification
-OUTMRK = chr(27) # output marking
-TTYLOC = chr(28) # terminal location number
-VT3270REGIME = chr(29) # 3270 regime
-X3PAD = chr(30) # X.3 PAD
-NAWS = chr(31) # window size
-TSPEED = chr(32) # terminal speed
-LFLOW = chr(33) # remote flow control
-LINEMODE = chr(34) # Linemode option
-XDISPLOC = chr(35) # X Display Location
-OLD_ENVIRON = chr(36) # Old - Environment variables
-AUTHENTICATION = chr(37) # Authenticate
-ENCRYPT = chr(38) # Encryption option
-NEW_ENVIRON = chr(39) # New - Environment variables
-# the following ones come from
-# http://www.iana.org/assignments/telnet-options
-# Unfortunately, that document does not assign identifiers
-# to all of them, so we are making them up
-TN3270E = chr(40) # TN3270E
-XAUTH = chr(41) # XAUTH
-CHARSET = chr(42) # CHARSET
-RSP = chr(43) # Telnet Remote Serial Port
-COM_PORT_OPTION = chr(44) # Com Port Control Option
-SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
-TLS = chr(46) # Telnet Start TLS
-KERMIT = chr(47) # KERMIT
-SEND_URL = chr(48) # SEND-URL
-FORWARD_X = chr(49) # FORWARD_X
-PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
-SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
-PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
-EXOPL = chr(255) # Extended-Options-List
-NOOPT = chr(0)
-
-class Telnet:
-
- """Telnet interface class.
-
- An instance of this class represents a connection to a telnet
- server. The instance is initially not connected; the open()
- method must be used to establish a connection. Alternatively, the
- host name and optional port number can be passed to the
- constructor, too.
-
- Don't try to reopen an already connected instance.
-
- This class has many read_*() methods. Note that some of them
- raise EOFError when the end of the connection is read, because
- they can return an empty string for other reasons. See the
- individual doc strings.
-
- read_until(expected, [timeout])
- Read until the expected string has been seen, or a timeout is
- hit (default is no timeout); may block.
-
- read_all()
- Read all data until EOF; may block.
-
- read_some()
- Read at least one byte or EOF; may block.
-
- read_very_eager()
- Read all data available already queued or on the socket,
- without blocking.
-
- read_eager()
- Read either data already queued or some data available on the
- socket, without blocking.
-
- read_lazy()
- Read all data in the raw queue (processing it first), without
- doing any socket I/O.
-
- read_very_lazy()
- Reads all data in the cooked queue, without doing any socket
- I/O.
-
- read_sb_data()
- Reads available data between SB ... SE sequence. Don't block.
-
- set_option_negotiation_callback(callback)
- Each time a telnet option is read on the input flow, this callback
- (if set) is called with the following parameters :
- callback(telnet socket, command, option)
- option will be chr(0) when there is no option.
- No other action is done afterwards by telnetlib.
-
- """
-
- def __init__(self, host=None, port=0):
- """Constructor.
-
- When called without arguments, create an unconnected instance.
- With a hostname argument, it connects the instance; a port
- number is optional.
-
- """
- self.debuglevel = DEBUGLEVEL
- self.host = host
- self.port = port
- self.sock = None
- self.rawq = ''
- self.irawq = 0
- self.cookedq = ''
- self.eof = 0
- self.iacseq = '' # Buffer for IAC sequence.
- self.sb = 0 # flag for SB and SE sequence.
- self.sbdataq = ''
- self.option_callback = None
- if host is not None:
- self.open(host, port)
-
- def open(self, host, port=0):
- """Connect to a host.
-
- The optional second argument is the port number, which
- defaults to the standard telnet port (23).
-
- Don't try to reopen an already connected instance.
-
- """
- self.eof = 0
- if not port:
- port = TELNET_PORT
- self.host = host
- self.port = port
- msg = "getaddrinfo returns an empty list"
- for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- try:
- self.sock = socket.socket(af, socktype, proto)
- self.sock.connect(sa)
- except socket.error, msg:
- if self.sock:
- self.sock.close()
- self.sock = None
- continue
- break
- if not self.sock:
- raise socket.error, msg
-
- def __del__(self):
- """Destructor -- close the connection."""
- self.close()
-
- def msg(self, msg, *args):
- """Print a debug message, when the debug level is > 0.
-
- If extra arguments are present, they are substituted in the
- message using the standard string formatting operator.
-
- """
- if self.debuglevel > 0:
- print 'Telnet(%s,%d):' % (self.host, self.port),
- if args:
- print msg % args
- else:
- print msg
-
- def set_debuglevel(self, debuglevel):
- """Set the debug level.
-
- The higher it is, the more debug output you get (on sys.stdout).
-
- """
- self.debuglevel = debuglevel
-
- def close(self):
- """Close the connection."""
- if self.sock:
- self.sock.close()
- self.sock = 0
- self.eof = 1
- self.iacseq = ''
- self.sb = 0
-
- def get_socket(self):
- """Return the socket object used internally."""
- return self.sock
-
- def fileno(self):
- """Return the fileno() of the socket object used internally."""
- return self.sock.fileno()
-
- def write(self, buffer):
- """Write a string to the socket, doubling any IAC characters.
-
- Can block if the connection is blocked. May raise
- socket.error if the connection is closed.
-
- """
- if IAC in buffer:
- buffer = buffer.replace(IAC, IAC+IAC)
- self.msg("send %r", buffer)
- self.sock.sendall(buffer)
-
- def read_until(self, match, timeout=None):
- """Read until a given string is encountered or until timeout.
-
- When no match is found, return whatever is available instead,
- possibly the empty string. Raise EOFError if the connection
- is closed and no cooked data is available.
-
- """
- n = len(match)
- self.process_rawq()
- i = self.cookedq.find(match)
- if i >= 0:
- i = i+n
- buf = self.cookedq[:i]
- self.cookedq = self.cookedq[i:]
- return buf
- s_reply = ([self], [], [])
- s_args = s_reply
- if timeout is not None:
- s_args = s_args + (timeout,)
- from time import time
- time_start = time()
- while not self.eof and select.select(*s_args) == s_reply:
- i = max(0, len(self.cookedq)-n)
- self.fill_rawq()
- self.process_rawq()
- i = self.cookedq.find(match, i)
- if i >= 0:
- i = i+n
- buf = self.cookedq[:i]
- self.cookedq = self.cookedq[i:]
- return buf
- if timeout is not None:
- elapsed = time() - time_start
- if elapsed >= timeout:
- break
- s_args = s_reply + (timeout-elapsed,)
- return self.read_very_lazy()
-
- def read_all(self):
- """Read all data until EOF; block until connection closed."""
- self.process_rawq()
- while not self.eof:
- self.fill_rawq()
- self.process_rawq()
- buf = self.cookedq
- self.cookedq = ''
- return buf
-
- def read_some(self):
- """Read at least one byte of cooked data unless EOF is hit.
-
- Return '' if EOF is hit. Block if no data is immediately
- available.
-
- """
- self.process_rawq()
- while not self.cookedq and not self.eof:
- self.fill_rawq()
- self.process_rawq()
- buf = self.cookedq
- self.cookedq = ''
- return buf
-
- def read_very_eager(self):
- """Read everything that's possible without blocking in I/O (eager).
-
- Raise EOFError if connection closed and no cooked data
- available. Return '' if no cooked data available otherwise.
- Don't block unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- while not self.eof and self.sock_avail():
- self.fill_rawq()
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_eager(self):
- """Read readily available data.
-
- Raise EOFError if connection closed and no cooked data
- available. Return '' if no cooked data available otherwise.
- Don't block unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- while not self.cookedq and not self.eof and self.sock_avail():
- self.fill_rawq()
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_lazy(self):
- """Process and return data that's already in the queues (lazy).
-
- Raise EOFError if connection closed and no data available.
- Return '' if no cooked data available otherwise. Don't block
- unless in the midst of an IAC sequence.
-
- """
- self.process_rawq()
- return self.read_very_lazy()
-
- def read_very_lazy(self):
- """Return any data available in the cooked queue (very lazy).
-
- Raise EOFError if connection closed and no data available.
- Return '' if no cooked data available otherwise. Don't block.
-
- """
- buf = self.cookedq
- self.cookedq = ''
- if not buf and self.eof and not self.rawq:
- raise EOFError, 'telnet connection closed'
- return buf
-
- def read_sb_data(self):
- """Return any data available in the SB ... SE queue.
-
- Return '' if no SB ... SE available. Should only be called
- after seeing a SB or SE command. When a new SB command is
- found, old unread SB data will be discarded. Don't block.
-
- """
- buf = self.sbdataq
- self.sbdataq = ''
- return buf
-
- def set_option_negotiation_callback(self, callback):
- """Provide a callback function called after each receipt of a telnet option."""
- self.option_callback = callback
-
- def process_rawq(self):
- """Transfer from raw queue to cooked queue.
-
- Set self.eof when connection is closed. Don't block unless in
- the midst of an IAC sequence.
-
- """
- buf = ['', '']
- try:
- while self.rawq:
- c = self.rawq_getchar()
- if not self.iacseq:
- if c == theNULL:
- continue
- if c == "\021":
- continue
- if c != IAC:
- buf[self.sb] = buf[self.sb] + c
- continue
- else:
- self.iacseq += c
- elif len(self.iacseq) == 1:
- # 'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
- if c in (DO, DONT, WILL, WONT):
- self.iacseq += c
- continue
-
- self.iacseq = ''
- if c == IAC:
- buf[self.sb] = buf[self.sb] + c
- else:
- if c == SB: # SB ... SE start.
- self.sb = 1
- self.sbdataq = ''
- elif c == SE:
- self.sb = 0
- self.sbdataq = self.sbdataq + buf[1]
- buf[1] = ''
- if self.option_callback:
- # Callback is supposed to look into
- # the sbdataq
- self.option_callback(self.sock, c, NOOPT)
- else:
- # We can't offer automatic processing of
- # suboptions. Alas, we should not get any
- # unless we did a WILL/DO before.
- self.msg('IAC %d not recognized' % ord(c))
- elif len(self.iacseq) == 2:
- cmd = self.iacseq[1]
- self.iacseq = ''
- opt = c
- if cmd in (DO, DONT):
- self.msg('IAC %s %d',
- cmd == DO and 'DO' or 'DONT', ord(opt))
- if self.option_callback:
- self.option_callback(self.sock, cmd, opt)
- else:
- self.sock.sendall(IAC + WONT + opt)
- elif cmd in (WILL, WONT):
- self.msg('IAC %s %d',
- cmd == WILL and 'WILL' or 'WONT', ord(opt))
- if self.option_callback:
- self.option_callback(self.sock, cmd, opt)
- else:
- self.sock.sendall(IAC + DONT + opt)
- except EOFError: # raised by self.rawq_getchar()
- self.iacseq = '' # Reset on EOF
- self.sb = 0
- pass
- self.cookedq = self.cookedq + buf[0]
- self.sbdataq = self.sbdataq + buf[1]
-
- def rawq_getchar(self):
- """Get next char from raw queue.
-
- Block if no data is immediately available. Raise EOFError
- when connection is closed.
-
- """
- if not self.rawq:
- self.fill_rawq()
- if self.eof:
- raise EOFError
- c = self.rawq[self.irawq]
- self.irawq = self.irawq + 1
- if self.irawq >= len(self.rawq):
- self.rawq = ''
- self.irawq = 0
- return c
-
- def fill_rawq(self):
- """Fill raw queue from exactly one recv() system call.
-
- Block if no data is immediately available. Set self.eof when
- connection is closed.
-
- """
- if self.irawq >= len(self.rawq):
- self.rawq = ''
- self.irawq = 0
- # The buffer size should be fairly small so as to avoid quadratic
- # behavior in process_rawq() above
- buf = self.sock.recv(50)
- self.msg("recv %r", buf)
- self.eof = (not buf)
- self.rawq = self.rawq + buf
-
- def sock_avail(self):
- """Test whether data is available on the socket."""
- return select.select([self], [], [], 0) == ([self], [], [])
-
- def interact(self):
- """Interaction function, emulates a very dumb telnet client."""
- if sys.platform == "win32":
- self.mt_interact()
- return
- while 1:
- rfd, wfd, xfd = select.select([self, sys.stdin], [], [])
- if self in rfd:
- try:
- text = self.read_eager()
- except EOFError:
- print '*** Connection closed by remote host ***'
- break
- if text:
- sys.stdout.write(text)
- sys.stdout.flush()
- if sys.stdin in rfd:
- line = sys.stdin.readline()
- if not line:
- break
- self.write(line)
-
- def mt_interact(self):
- """Multithreaded version of interact()."""
- import thread
- thread.start_new_thread(self.listener, ())
- while 1:
- line = sys.stdin.readline()
- if not line:
- break
- self.write(line)
-
- def listener(self):
- """Helper for mt_interact() -- this executes in the other thread."""
- while 1:
- try:
- data = self.read_eager()
- except EOFError:
- print '*** Connection closed by remote host ***'
- return
- if data:
- sys.stdout.write(data)
- else:
- sys.stdout.flush()
-
- def expect(self, list, timeout=None):
- """Read until one from a list of a regular expressions matches.
-
- The first argument is a list of regular expressions, either
- compiled (re.RegexObject instances) or uncompiled (strings).
- The optional second argument is a timeout, in seconds; default
- is no timeout.
-
- Return a tuple of three items: the index in the list of the
- first regular expression that matches; the match object
- returned; and the text read up till and including the match.
-
- If EOF is read and no text was read, raise EOFError.
- Otherwise, when nothing matches, return (-1, None, text) where
- text is the text received so far (may be the empty string if a
- timeout happened).
-
- If a regular expression ends with a greedy match (e.g. '.*')
- or if more than one expression can match the same input, the
- results are undeterministic, and may depend on the I/O timing.
-
- """
- re = None
- list = list[:]
- indices = range(len(list))
- for i in indices:
- if not hasattr(list[i], "search"):
- if not re: import re
- list[i] = re.compile(list[i])
- if timeout is not None:
- from time import time
- time_start = time()
- while 1:
- self.process_rawq()
- for i in indices:
- m = list[i].search(self.cookedq)
- if m:
- e = m.end()
- text = self.cookedq[:e]
- self.cookedq = self.cookedq[e:]
- return (i, m, text)
- if self.eof:
- break
- if timeout is not None:
- elapsed = time() - time_start
- if elapsed >= timeout:
- break
- s_args = ([self.fileno()], [], [], timeout-elapsed)
- r, w, x = select.select(*s_args)
- if not r:
- break
- self.fill_rawq()
- text = self.read_very_lazy()
- if not text and self.eof:
- raise EOFError
- return (-1, None, text)
-
-
-def test():
- """Test program for telnetlib.
-
- Usage: python telnetlib.py [-d] ... [host [port]]
-
- Default host is localhost; default port is 23.
-
- """
- debuglevel = 0
- while sys.argv[1:] and sys.argv[1] == '-d':
- debuglevel = debuglevel+1
- del sys.argv[1]
- host = 'localhost'
- if sys.argv[1:]:
- host = sys.argv[1]
- port = 0
- if sys.argv[2:]:
- portstr = sys.argv[2]
- try:
- port = int(portstr)
- except ValueError:
- port = socket.getservbyname(portstr, 'tcp')
- tn = Telnet()
- tn.set_debuglevel(debuglevel)
- tn.open(host, port)
- tn.interact()
- tn.close()
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/tempfile.py b/sys/lib/python/tempfile.py
deleted file mode 100644
index 2e8cd6d7d..000000000
--- a/sys/lib/python/tempfile.py
+++ /dev/null
@@ -1,472 +0,0 @@
-"""Temporary files.
-
-This module provides generic, low- and high-level interfaces for
-creating temporary files and directories. The interfaces listed
-as "safe" just below can be used without fear of race conditions.
-Those listed as "unsafe" cannot, and are provided for backward
-compatibility only.
-
-This module also provides some data items to the user:
-
- TMP_MAX - maximum number of names that will be tried before
- giving up.
- template - the default prefix for all temporary names.
- You may change this to control the default prefix.
- tempdir - If this is set to a string before the first use of
- any routine from this module, it will be considered as
- another candidate location to store temporary files.
-"""
-
-__all__ = [
- "NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
- "mkstemp", "mkdtemp", # low level safe interfaces
- "mktemp", # deprecated unsafe interface
- "TMP_MAX", "gettempprefix", # constants
- "tempdir", "gettempdir"
- ]
-
-
-# Imports.
-
-import os as _os
-import errno as _errno
-from random import Random as _Random
-
-if _os.name == 'mac':
- import Carbon.Folder as _Folder
- import Carbon.Folders as _Folders
-
-try:
- import fcntl as _fcntl
-except ImportError:
- def _set_cloexec(fd):
- pass
-else:
- def _set_cloexec(fd):
- try:
- flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
- except IOError:
- pass
- else:
- # flags read successfully, modify
- flags |= _fcntl.FD_CLOEXEC
- _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
-
-
-try:
- import thread as _thread
-except ImportError:
- import dummy_thread as _thread
-_allocate_lock = _thread.allocate_lock
-
-_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
-if hasattr(_os, 'O_NOINHERIT'):
- _text_openflags |= _os.O_NOINHERIT
-if hasattr(_os, 'O_NOFOLLOW'):
- _text_openflags |= _os.O_NOFOLLOW
-
-_bin_openflags = _text_openflags
-if hasattr(_os, 'O_BINARY'):
- _bin_openflags |= _os.O_BINARY
-
-if hasattr(_os, 'TMP_MAX'):
- TMP_MAX = _os.TMP_MAX
-else:
- TMP_MAX = 10000
-
-template = "tmp"
-
-tempdir = None
-
-# Internal routines.
-
-_once_lock = _allocate_lock()
-
-if hasattr(_os, "lstat"):
- _stat = _os.lstat
-elif hasattr(_os, "stat"):
- _stat = _os.stat
-else:
- # Fallback. All we need is something that raises os.error if the
- # file doesn't exist.
- def _stat(fn):
- try:
- f = open(fn)
- except IOError:
- raise _os.error
- f.close()
-
-def _exists(fn):
- try:
- _stat(fn)
- except _os.error:
- return False
- else:
- return True
-
-class _RandomNameSequence:
- """An instance of _RandomNameSequence generates an endless
- sequence of unpredictable strings which can safely be incorporated
- into file names. Each string is six characters long. Multiple
- threads can safely use the same instance at the same time.
-
- _RandomNameSequence is an iterator."""
-
- characters = ("abcdefghijklmnopqrstuvwxyz" +
- "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
- "0123456789-_")
-
- def __init__(self):
- self.mutex = _allocate_lock()
- self.rng = _Random()
- self.normcase = _os.path.normcase
-
- def __iter__(self):
- return self
-
- def next(self):
- m = self.mutex
- c = self.characters
- choose = self.rng.choice
-
- m.acquire()
- try:
- letters = [choose(c) for dummy in "123456"]
- finally:
- m.release()
-
- return self.normcase(''.join(letters))
-
-def _candidate_tempdir_list():
- """Generate a list of candidate temporary directories which
- _get_default_tempdir will try."""
-
- dirlist = []
-
- # First, try the environment.
- for envname in 'TMPDIR', 'TEMP', 'TMP':
- dirname = _os.getenv(envname)
- if dirname: dirlist.append(dirname)
-
- # Failing that, try OS-specific locations.
- if _os.name == 'mac':
- try:
- fsr = _Folder.FSFindFolder(_Folders.kOnSystemDisk,
- _Folders.kTemporaryFolderType, 1)
- dirname = fsr.as_pathname()
- dirlist.append(dirname)
- except _Folder.error:
- pass
- elif _os.name == 'riscos':
- dirname = _os.getenv('Wimp$ScrapDir')
- if dirname: dirlist.append(dirname)
- elif _os.name == 'nt':
- dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
- else:
- dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
-
- # As a last resort, the current directory.
- try:
- dirlist.append(_os.getcwd())
- except (AttributeError, _os.error):
- dirlist.append(_os.curdir)
-
- return dirlist
-
-def _get_default_tempdir():
- """Calculate the default directory to use for temporary files.
- This routine should be called exactly once.
-
- We determine whether or not a candidate temp dir is usable by
- trying to create and write to a file in that directory. If this
- is successful, the test file is deleted. To prevent denial of
- service, the name of the test file must be randomized."""
-
- namer = _RandomNameSequence()
- dirlist = _candidate_tempdir_list()
- flags = _text_openflags
-
- for dir in dirlist:
- if dir != _os.curdir:
- dir = _os.path.normcase(_os.path.abspath(dir))
- # Try only a few names per directory.
- for seq in xrange(100):
- name = namer.next()
- filename = _os.path.join(dir, name)
- try:
- fd = _os.open(filename, flags, 0600)
- fp = _os.fdopen(fd, 'w')
- fp.write('blat')
- fp.close()
- _os.unlink(filename)
- del fp, fd
- return dir
- except (OSError, IOError), e:
- if e[0] != _errno.EEXIST:
- break # no point trying more names in this directory
- pass
- raise IOError, (_errno.ENOENT,
- ("No usable temporary directory found in %s" % dirlist))
-
-_name_sequence = None
-
-def _get_candidate_names():
- """Common setup sequence for all user-callable interfaces."""
-
- global _name_sequence
- if _name_sequence is None:
- _once_lock.acquire()
- try:
- if _name_sequence is None:
- _name_sequence = _RandomNameSequence()
- finally:
- _once_lock.release()
- return _name_sequence
-
-
-def _mkstemp_inner(dir, pre, suf, flags):
- """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
-
- names = _get_candidate_names()
-
- for seq in xrange(TMP_MAX):
- name = names.next()
- file = _os.path.join(dir, pre + name + suf)
- try:
- fd = _os.open(file, flags, 0600)
- _set_cloexec(fd)
- return (fd, _os.path.abspath(file))
- except OSError, e:
- if e.errno == _errno.EEXIST:
- continue # try again
- raise
-
- raise IOError, (_errno.EEXIST, "No usable temporary file name found")
-
-
-# User visible interfaces.
-
-def gettempprefix():
- """Accessor for tempdir.template."""
- return template
-
-tempdir = None
-
-def gettempdir():
- """Accessor for tempdir.tempdir."""
- global tempdir
- if tempdir is None:
- _once_lock.acquire()
- try:
- if tempdir is None:
- tempdir = _get_default_tempdir()
- finally:
- _once_lock.release()
- return tempdir
-
-def mkstemp(suffix="", prefix=template, dir=None, text=False):
- """mkstemp([suffix, [prefix, [dir, [text]]]])
- User-callable function to create and return a unique temporary
- file. The return value is a pair (fd, name) where fd is the
- file descriptor returned by os.open, and name is the filename.
-
- If 'suffix' is specified, the file name will end with that suffix,
- otherwise there will be no suffix.
-
- If 'prefix' is specified, the file name will begin with that prefix,
- otherwise a default prefix is used.
-
- If 'dir' is specified, the file will be created in that directory,
- otherwise a default directory is used.
-
- If 'text' is specified and true, the file is opened in text
- mode. Else (the default) the file is opened in binary mode. On
- some operating systems, this makes no difference.
-
- The file is readable and writable only by the creating user ID.
- If the operating system uses permission bits to indicate whether a
- file is executable, the file is executable by no one. The file
- descriptor is not inherited by children of this process.
-
- Caller is responsible for deleting the file when done with it.
- """
-
- if dir is None:
- dir = gettempdir()
-
- if text:
- flags = _text_openflags
- else:
- flags = _bin_openflags
-
- return _mkstemp_inner(dir, prefix, suffix, flags)
-
-
-def mkdtemp(suffix="", prefix=template, dir=None):
- """mkdtemp([suffix, [prefix, [dir]]])
- User-callable function to create and return a unique temporary
- directory. The return value is the pathname of the directory.
-
- Arguments are as for mkstemp, except that the 'text' argument is
- not accepted.
-
- The directory is readable, writable, and searchable only by the
- creating user.
-
- Caller is responsible for deleting the directory when done with it.
- """
-
- if dir is None:
- dir = gettempdir()
-
- names = _get_candidate_names()
-
- for seq in xrange(TMP_MAX):
- name = names.next()
- file = _os.path.join(dir, prefix + name + suffix)
- try:
- _os.mkdir(file, 0700)
- return file
- except OSError, e:
- if e.errno == _errno.EEXIST:
- continue # try again
- raise
-
- raise IOError, (_errno.EEXIST, "No usable temporary directory name found")
-
-def mktemp(suffix="", prefix=template, dir=None):
- """mktemp([suffix, [prefix, [dir]]])
- User-callable function to return a unique temporary file name. The
- file is not created.
-
- Arguments are as for mkstemp, except that the 'text' argument is
- not accepted.
-
- This function is unsafe and should not be used. The file name
- refers to a file that did not exist at some point, but by the time
- you get around to creating it, someone else may have beaten you to
- the punch.
- """
-
-## from warnings import warn as _warn
-## _warn("mktemp is a potential security risk to your program",
-## RuntimeWarning, stacklevel=2)
-
- if dir is None:
- dir = gettempdir()
-
- names = _get_candidate_names()
- for seq in xrange(TMP_MAX):
- name = names.next()
- file = _os.path.join(dir, prefix + name + suffix)
- if not _exists(file):
- return file
-
- raise IOError, (_errno.EEXIST, "No usable temporary filename found")
-
-class _TemporaryFileWrapper:
- """Temporary file wrapper
-
- This class provides a wrapper around files opened for
- temporary use. In particular, it seeks to automatically
- remove the file when it is no longer needed.
- """
-
- def __init__(self, file, name):
- self.file = file
- self.name = name
- self.close_called = False
-
- def __getattr__(self, name):
- file = self.__dict__['file']
- a = getattr(file, name)
- if type(a) != type(0):
- setattr(self, name, a)
- return a
-
- # NT provides delete-on-close as a primitive, so we don't need
- # the wrapper to do anything special. We still use it so that
- # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
- if _os.name != 'nt':
-
- # Cache the unlinker so we don't get spurious errors at
- # shutdown when the module-level "os" is None'd out. Note
- # that this must be referenced as self.unlink, because the
- # name TemporaryFileWrapper may also get None'd out before
- # __del__ is called.
- unlink = _os.unlink
-
- def close(self):
- if not self.close_called:
- self.close_called = True
- self.file.close()
- self.unlink(self.name)
-
- def __del__(self):
- self.close()
-
-def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="",
- prefix=template, dir=None):
- """Create and return a temporary file.
- Arguments:
- 'prefix', 'suffix', 'dir' -- as for mkstemp.
- 'mode' -- the mode argument to os.fdopen (default "w+b").
- 'bufsize' -- the buffer size argument to os.fdopen (default -1).
- The file is created as mkstemp() would do it.
-
- Returns an object with a file-like interface; the name of the file
- is accessible as file.name. The file will be automatically deleted
- when it is closed.
- """
-
- if dir is None:
- dir = gettempdir()
-
- if 'b' in mode:
- flags = _bin_openflags
- else:
- flags = _text_openflags
-
- # Setting O_TEMPORARY in the flags causes the OS to delete
- # the file when it is closed. This is only supported by Windows.
- if _os.name == 'nt':
- flags |= _os.O_TEMPORARY
-
- (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
- file = _os.fdopen(fd, mode, bufsize)
- return _TemporaryFileWrapper(file, name)
-
-if _os.name != 'posix' or _os.sys.platform == 'cygwin':
- # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
- # while it is open.
- TemporaryFile = NamedTemporaryFile
-
-else:
- def TemporaryFile(mode='w+b', bufsize=-1, suffix="",
- prefix=template, dir=None):
- """Create and return a temporary file.
- Arguments:
- 'prefix', 'suffix', 'dir' -- as for mkstemp.
- 'mode' -- the mode argument to os.fdopen (default "w+b").
- 'bufsize' -- the buffer size argument to os.fdopen (default -1).
- The file is created as mkstemp() would do it.
-
- Returns an object with a file-like interface. The file has no
- name, and will cease to exist when it is closed.
- """
-
- if dir is None:
- dir = gettempdir()
-
- if 'b' in mode:
- flags = _bin_openflags
- else:
- flags = _text_openflags
-
- (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
- try:
- _os.unlink(name)
- return _os.fdopen(fd, mode, bufsize)
- except:
- _os.close(fd)
- raise
diff --git a/sys/lib/python/textwrap.py b/sys/lib/python/textwrap.py
deleted file mode 100644
index d3bd7c7e0..000000000
--- a/sys/lib/python/textwrap.py
+++ /dev/null
@@ -1,374 +0,0 @@
-"""Text wrapping and filling.
-"""
-
-# Copyright (C) 1999-2001 Gregory P. Ward.
-# Copyright (C) 2002, 2003 Python Software Foundation.
-# Written by Greg Ward <gward@python.net>
-
-__revision__ = "$Id: textwrap.py 46863 2006-06-11 19:42:51Z tim.peters $"
-
-import string, re
-
-# Do the right thing with boolean values for all known Python versions
-# (so this module can be copied to projects that don't depend on Python
-# 2.3, e.g. Optik and Docutils).
-try:
- True, False
-except NameError:
- (True, False) = (1, 0)
-
-__all__ = ['TextWrapper', 'wrap', 'fill']
-
-# Hardcode the recognized whitespace characters to the US-ASCII
-# whitespace characters. The main reason for doing this is that in
-# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
-# that character winds up in string.whitespace. Respecting
-# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
-# same as any other whitespace char, which is clearly wrong (it's a
-# *non-breaking* space), 2) possibly cause problems with Unicode,
-# since 0xa0 is not in range(128).
-_whitespace = '\t\n\x0b\x0c\r '
-
-class TextWrapper:
- """
- Object for wrapping/filling text. The public interface consists of
- the wrap() and fill() methods; the other methods are just there for
- subclasses to override in order to tweak the default behaviour.
- If you want to completely replace the main wrapping algorithm,
- you'll probably have to override _wrap_chunks().
-
- Several instance attributes control various aspects of wrapping:
- width (default: 70)
- the maximum width of wrapped lines (unless break_long_words
- is false)
- initial_indent (default: "")
- string that will be prepended to the first line of wrapped
- output. Counts towards the line's width.
- subsequent_indent (default: "")
- string that will be prepended to all lines save the first
- of wrapped output; also counts towards each line's width.
- expand_tabs (default: true)
- Expand tabs in input text to spaces before further processing.
- Each tab will become 1 .. 8 spaces, depending on its position in
- its line. If false, each tab is treated as a single character.
- replace_whitespace (default: true)
- Replace all whitespace characters in the input text by spaces
- after tab expansion. Note that if expand_tabs is false and
- replace_whitespace is true, every tab will be converted to a
- single space!
- fix_sentence_endings (default: false)
- Ensure that sentence-ending punctuation is always followed
- by two spaces. Off by default because the algorithm is
- (unavoidably) imperfect.
- break_long_words (default: true)
- Break words longer than 'width'. If false, those words will not
- be broken, and some lines might be longer than 'width'.
- """
-
- whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
-
- unicode_whitespace_trans = {}
- uspace = ord(u' ')
- for x in map(ord, _whitespace):
- unicode_whitespace_trans[x] = uspace
-
- # This funky little regex is just the trick for splitting
- # text up into word-wrappable chunks. E.g.
- # "Hello there -- you goof-ball, use the -b option!"
- # splits into
- # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
- # (after stripping out empty strings).
- wordsep_re = re.compile(
- r'(\s+|' # any whitespace
- r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
- r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
-
- # XXX this is not locale- or charset-aware -- string.lowercase
- # is US-ASCII only (and therefore English-only)
- sentence_end_re = re.compile(r'[%s]' # lowercase letter
- r'[\.\!\?]' # sentence-ending punct.
- r'[\"\']?' # optional end-of-quote
- % string.lowercase)
-
-
- def __init__(self,
- width=70,
- initial_indent="",
- subsequent_indent="",
- expand_tabs=True,
- replace_whitespace=True,
- fix_sentence_endings=False,
- break_long_words=True):
- self.width = width
- self.initial_indent = initial_indent
- self.subsequent_indent = subsequent_indent
- self.expand_tabs = expand_tabs
- self.replace_whitespace = replace_whitespace
- self.fix_sentence_endings = fix_sentence_endings
- self.break_long_words = break_long_words
-
-
- # -- Private methods -----------------------------------------------
- # (possibly useful for subclasses to override)
-
- def _munge_whitespace(self, text):
- """_munge_whitespace(text : string) -> string
-
- Munge whitespace in text: expand tabs and convert all other
- whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
- becomes " foo bar baz".
- """
- if self.expand_tabs:
- text = text.expandtabs()
- if self.replace_whitespace:
- if isinstance(text, str):
- text = text.translate(self.whitespace_trans)
- elif isinstance(text, unicode):
- text = text.translate(self.unicode_whitespace_trans)
- return text
-
-
- def _split(self, text):
- """_split(text : string) -> [string]
-
- Split the text to wrap into indivisible chunks. Chunks are
- not quite the same as words; see wrap_chunks() for full
- details. As an example, the text
- Look, goof-ball -- use the -b option!
- breaks into the following chunks:
- 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
- 'use', ' ', 'the', ' ', '-b', ' ', 'option!'
- """
- chunks = self.wordsep_re.split(text)
- chunks = filter(None, chunks)
- return chunks
-
- def _fix_sentence_endings(self, chunks):
- """_fix_sentence_endings(chunks : [string])
-
- Correct for sentence endings buried in 'chunks'. Eg. when the
- original text contains "... foo.\nBar ...", munge_whitespace()
- and split() will convert that to [..., "foo.", " ", "Bar", ...]
- which has one too few spaces; this method simply changes the one
- space to two.
- """
- i = 0
- pat = self.sentence_end_re
- while i < len(chunks)-1:
- if chunks[i+1] == " " and pat.search(chunks[i]):
- chunks[i+1] = " "
- i += 2
- else:
- i += 1
-
- def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
- """_handle_long_word(chunks : [string],
- cur_line : [string],
- cur_len : int, width : int)
-
- Handle a chunk of text (most likely a word, not whitespace) that
- is too long to fit in any line.
- """
- space_left = max(width - cur_len, 1)
-
- # If we're allowed to break long words, then do so: put as much
- # of the next chunk onto the current line as will fit.
- if self.break_long_words:
- cur_line.append(reversed_chunks[-1][:space_left])
- reversed_chunks[-1] = reversed_chunks[-1][space_left:]
-
- # Otherwise, we have to preserve the long word intact. Only add
- # it to the current line if there's nothing already there --
- # that minimizes how much we violate the width constraint.
- elif not cur_line:
- cur_line.append(reversed_chunks.pop())
-
- # If we're not allowed to break long words, and there's already
- # text on the current line, do nothing. Next time through the
- # main loop of _wrap_chunks(), we'll wind up here again, but
- # cur_len will be zero, so the next line will be entirely
- # devoted to the long word that we can't handle right now.
-
- def _wrap_chunks(self, chunks):
- """_wrap_chunks(chunks : [string]) -> [string]
-
- Wrap a sequence of text chunks and return a list of lines of
- length 'self.width' or less. (If 'break_long_words' is false,
- some lines may be longer than this.) Chunks correspond roughly
- to words and the whitespace between them: each chunk is
- indivisible (modulo 'break_long_words'), but a line break can
- come between any two chunks. Chunks should not have internal
- whitespace; ie. a chunk is either all whitespace or a "word".
- Whitespace chunks will be removed from the beginning and end of
- lines, but apart from that whitespace is preserved.
- """
- lines = []
- if self.width <= 0:
- raise ValueError("invalid width %r (must be > 0)" % self.width)
-
- # Arrange in reverse order so items can be efficiently popped
- # from a stack of chucks.
- chunks.reverse()
-
- while chunks:
-
- # Start the list of chunks that will make up the current line.
- # cur_len is just the length of all the chunks in cur_line.
- cur_line = []
- cur_len = 0
-
- # Figure out which static string will prefix this line.
- if lines:
- indent = self.subsequent_indent
- else:
- indent = self.initial_indent
-
- # Maximum width for this line.
- width = self.width - len(indent)
-
- # First chunk on line is whitespace -- drop it, unless this
- # is the very beginning of the text (ie. no lines started yet).
- if chunks[-1].strip() == '' and lines:
- del chunks[-1]
-
- while chunks:
- l = len(chunks[-1])
-
- # Can at least squeeze this chunk onto the current line.
- if cur_len + l <= width:
- cur_line.append(chunks.pop())
- cur_len += l
-
- # Nope, this line is full.
- else:
- break
-
- # The current line is full, and the next chunk is too big to
- # fit on *any* line (not just this one).
- if chunks and len(chunks[-1]) > width:
- self._handle_long_word(chunks, cur_line, cur_len, width)
-
- # If the last chunk on this line is all whitespace, drop it.
- if cur_line and cur_line[-1].strip() == '':
- del cur_line[-1]
-
- # Convert current line back to a string and store it in list
- # of all lines (return value).
- if cur_line:
- lines.append(indent + ''.join(cur_line))
-
- return lines
-
-
- # -- Public interface ----------------------------------------------
-
- def wrap(self, text):
- """wrap(text : string) -> [string]
-
- Reformat the single paragraph in 'text' so it fits in lines of
- no more than 'self.width' columns, and return a list of wrapped
- lines. Tabs in 'text' are expanded with string.expandtabs(),
- and all other whitespace characters (including newline) are
- converted to space.
- """
- text = self._munge_whitespace(text)
- chunks = self._split(text)
- if self.fix_sentence_endings:
- self._fix_sentence_endings(chunks)
- return self._wrap_chunks(chunks)
-
- def fill(self, text):
- """fill(text : string) -> string
-
- Reformat the single paragraph in 'text' to fit in lines of no
- more than 'self.width' columns, and return a new string
- containing the entire wrapped paragraph.
- """
- return "\n".join(self.wrap(text))
-
-
-# -- Convenience interface ---------------------------------------------
-
-def wrap(text, width=70, **kwargs):
- """Wrap a single paragraph of text, returning a list of wrapped lines.
-
- Reformat the single paragraph in 'text' so it fits in lines of no
- more than 'width' columns, and return a list of wrapped lines. By
- default, tabs in 'text' are expanded with string.expandtabs(), and
- all other whitespace characters (including newline) are converted to
- space. See TextWrapper class for available keyword args to customize
- wrapping behaviour.
- """
- w = TextWrapper(width=width, **kwargs)
- return w.wrap(text)
-
-def fill(text, width=70, **kwargs):
- """Fill a single paragraph of text, returning a new string.
-
- Reformat the single paragraph in 'text' to fit in lines of no more
- than 'width' columns, and return a new string containing the entire
- wrapped paragraph. As with wrap(), tabs are expanded and other
- whitespace characters converted to space. See TextWrapper class for
- available keyword args to customize wrapping behaviour.
- """
- w = TextWrapper(width=width, **kwargs)
- return w.fill(text)
-
-
-# -- Loosely related functionality -------------------------------------
-
-_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
-_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
-
-def dedent(text):
- """Remove any common leading whitespace from every line in `text`.
-
- This can be used to make triple-quoted strings line up with the left
- edge of the display, while still presenting them in the source code
- in indented form.
-
- Note that tabs and spaces are both treated as whitespace, but they
- are not equal: the lines " hello" and "\thello" are
- considered to have no common leading whitespace. (This behaviour is
- new in Python 2.5; older versions of this module incorrectly
- expanded tabs before searching for common leading whitespace.)
- """
- # Look for the longest leading string of spaces and tabs common to
- # all lines.
- margin = None
- text = _whitespace_only_re.sub('', text)
- indents = _leading_whitespace_re.findall(text)
- for indent in indents:
- if margin is None:
- margin = indent
-
- # Current line more deeply indented than previous winner:
- # no change (previous winner is still on top).
- elif indent.startswith(margin):
- pass
-
- # Current line consistent with and no deeper than previous winner:
- # it's the new winner.
- elif margin.startswith(indent):
- margin = indent
-
- # Current line and previous winner have no common whitespace:
- # there is no margin.
- else:
- margin = ""
- break
-
- # sanity check (testing/debugging only)
- if 0 and margin:
- for line in text.split("\n"):
- assert not line or line.startswith(margin), \
- "line = %r, margin = %r" % (line, margin)
-
- if margin:
- text = re.sub(r'(?m)^' + margin, '', text)
- return text
-
-if __name__ == "__main__":
- #print dedent("\tfoo\n\tbar")
- #print dedent(" \thello there\n \t how are you?")
- print dedent("Hello there.\n This is indented.")
diff --git a/sys/lib/python/this.py b/sys/lib/python/this.py
deleted file mode 100644
index 37754b785..000000000
--- a/sys/lib/python/this.py
+++ /dev/null
@@ -1,28 +0,0 @@
-s = """Gur Mra bs Clguba, ol Gvz Crgref
-
-Ornhgvshy vf orggre guna htyl.
-Rkcyvpvg vf orggre guna vzcyvpvg.
-Fvzcyr vf orggre guna pbzcyrk.
-Pbzcyrk vf orggre guna pbzcyvpngrq.
-Syng vf orggre guna arfgrq.
-Fcnefr vf orggre guna qrafr.
-Ernqnovyvgl pbhagf.
-Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
-Nygubhtu cenpgvpnyvgl orngf chevgl.
-Reebef fubhyq arire cnff fvyragyl.
-Hayrff rkcyvpvgyl fvyraprq.
-Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
-Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
-Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
-Abj vf orggre guna arire.
-Nygubhtu arire vf bsgra orggre guna *evtug* abj.
-Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
-Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
-Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
-
-d = {}
-for c in (65, 97):
- for i in range(26):
- d[chr(i+c)] = chr((i+13) % 26 + c)
-
-print "".join([d.get(c, c) for c in s])
diff --git a/sys/lib/python/threading.py b/sys/lib/python/threading.py
deleted file mode 100644
index fecd3cc30..000000000
--- a/sys/lib/python/threading.py
+++ /dev/null
@@ -1,816 +0,0 @@
-"""Thread module emulating a subset of Java's threading model."""
-
-import sys as _sys
-
-try:
- import thread
-except ImportError:
- del _sys.modules[__name__]
- raise
-
-from time import time as _time, sleep as _sleep
-from traceback import format_exc as _format_exc
-from collections import deque
-
-# Rename some stuff so "from threading import *" is safe
-__all__ = ['activeCount', 'Condition', 'currentThread', 'enumerate', 'Event',
- 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
- 'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
-
-_start_new_thread = thread.start_new_thread
-_allocate_lock = thread.allocate_lock
-_get_ident = thread.get_ident
-ThreadError = thread.error
-del thread
-
-
-# Debug support (adapted from ihooks.py).
-# All the major classes here derive from _Verbose. We force that to
-# be a new-style class so that all the major classes here are new-style.
-# This helps debugging (type(instance) is more revealing for instances
-# of new-style classes).
-
-_VERBOSE = False
-
-if __debug__:
-
- class _Verbose(object):
-
- def __init__(self, verbose=None):
- if verbose is None:
- verbose = _VERBOSE
- self.__verbose = verbose
-
- def _note(self, format, *args):
- if self.__verbose:
- format = format % args
- format = "%s: %s\n" % (
- currentThread().getName(), format)
- _sys.stderr.write(format)
-
-else:
- # Disable this when using "python -O"
- class _Verbose(object):
- def __init__(self, verbose=None):
- pass
- def _note(self, *args):
- pass
-
-# Support for profile and trace hooks
-
-_profile_hook = None
-_trace_hook = None
-
-def setprofile(func):
- global _profile_hook
- _profile_hook = func
-
-def settrace(func):
- global _trace_hook
- _trace_hook = func
-
-# Synchronization classes
-
-Lock = _allocate_lock
-
-def RLock(*args, **kwargs):
- return _RLock(*args, **kwargs)
-
-class _RLock(_Verbose):
-
- def __init__(self, verbose=None):
- _Verbose.__init__(self, verbose)
- self.__block = _allocate_lock()
- self.__owner = None
- self.__count = 0
-
- def __repr__(self):
- return "<%s(%s, %d)>" % (
- self.__class__.__name__,
- self.__owner and self.__owner.getName(),
- self.__count)
-
- def acquire(self, blocking=1):
- me = currentThread()
- if self.__owner is me:
- self.__count = self.__count + 1
- if __debug__:
- self._note("%s.acquire(%s): recursive success", self, blocking)
- return 1
- rc = self.__block.acquire(blocking)
- if rc:
- self.__owner = me
- self.__count = 1
- if __debug__:
- self._note("%s.acquire(%s): initial success", self, blocking)
- else:
- if __debug__:
- self._note("%s.acquire(%s): failure", self, blocking)
- return rc
-
- __enter__ = acquire
-
- def release(self):
- me = currentThread()
- assert self.__owner is me, "release() of un-acquire()d lock"
- self.__count = count = self.__count - 1
- if not count:
- self.__owner = None
- self.__block.release()
- if __debug__:
- self._note("%s.release(): final release", self)
- else:
- if __debug__:
- self._note("%s.release(): non-final release", self)
-
- def __exit__(self, t, v, tb):
- self.release()
-
- # Internal methods used by condition variables
-
- def _acquire_restore(self, (count, owner)):
- self.__block.acquire()
- self.__count = count
- self.__owner = owner
- if __debug__:
- self._note("%s._acquire_restore()", self)
-
- def _release_save(self):
- if __debug__:
- self._note("%s._release_save()", self)
- count = self.__count
- self.__count = 0
- owner = self.__owner
- self.__owner = None
- self.__block.release()
- return (count, owner)
-
- def _is_owned(self):
- return self.__owner is currentThread()
-
-
-def Condition(*args, **kwargs):
- return _Condition(*args, **kwargs)
-
-class _Condition(_Verbose):
-
- def __init__(self, lock=None, verbose=None):
- _Verbose.__init__(self, verbose)
- if lock is None:
- lock = RLock()
- self.__lock = lock
- # Export the lock's acquire() and release() methods
- self.acquire = lock.acquire
- self.release = lock.release
- # If the lock defines _release_save() and/or _acquire_restore(),
- # these override the default implementations (which just call
- # release() and acquire() on the lock). Ditto for _is_owned().
- try:
- self._release_save = lock._release_save
- except AttributeError:
- pass
- try:
- self._acquire_restore = lock._acquire_restore
- except AttributeError:
- pass
- try:
- self._is_owned = lock._is_owned
- except AttributeError:
- pass
- self.__waiters = []
-
- def __enter__(self):
- return self.__lock.__enter__()
-
- def __exit__(self, *args):
- return self.__lock.__exit__(*args)
-
- def __repr__(self):
- return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
-
- def _release_save(self):
- self.__lock.release() # No state to save
-
- def _acquire_restore(self, x):
- self.__lock.acquire() # Ignore saved state
-
- def _is_owned(self):
- # Return True if lock is owned by currentThread.
- # This method is called only if __lock doesn't have _is_owned().
- if self.__lock.acquire(0):
- self.__lock.release()
- return False
- else:
- return True
-
- def wait(self, timeout=None):
- assert self._is_owned(), "wait() of un-acquire()d lock"
- waiter = _allocate_lock()
- waiter.acquire()
- self.__waiters.append(waiter)
- saved_state = self._release_save()
- try: # restore state no matter what (e.g., KeyboardInterrupt)
- if timeout is None:
- waiter.acquire()
- if __debug__:
- self._note("%s.wait(): got it", self)
- else:
- # Balancing act: We can't afford a pure busy loop, so we
- # have to sleep; but if we sleep the whole timeout time,
- # we'll be unresponsive. The scheme here sleeps very
- # little at first, longer as time goes on, but never longer
- # than 20 times per second (or the timeout time remaining).
- endtime = _time() + timeout
- delay = 0.0005 # 500 us -> initial delay of 1 ms
- while True:
- gotit = waiter.acquire(0)
- if gotit:
- break
- remaining = endtime - _time()
- if remaining <= 0:
- break
- delay = min(delay * 2, remaining, .05)
- _sleep(delay)
- if not gotit:
- if __debug__:
- self._note("%s.wait(%s): timed out", self, timeout)
- try:
- self.__waiters.remove(waiter)
- except ValueError:
- pass
- else:
- if __debug__:
- self._note("%s.wait(%s): got it", self, timeout)
- finally:
- self._acquire_restore(saved_state)
-
- def notify(self, n=1):
- assert self._is_owned(), "notify() of un-acquire()d lock"
- __waiters = self.__waiters
- waiters = __waiters[:n]
- if not waiters:
- if __debug__:
- self._note("%s.notify(): no waiters", self)
- return
- self._note("%s.notify(): notifying %d waiter%s", self, n,
- n!=1 and "s" or "")
- for waiter in waiters:
- waiter.release()
- try:
- __waiters.remove(waiter)
- except ValueError:
- pass
-
- def notifyAll(self):
- self.notify(len(self.__waiters))
-
-
-def Semaphore(*args, **kwargs):
- return _Semaphore(*args, **kwargs)
-
-class _Semaphore(_Verbose):
-
- # After Tim Peters' semaphore class, but not quite the same (no maximum)
-
- def __init__(self, value=1, verbose=None):
- assert value >= 0, "Semaphore initial value must be >= 0"
- _Verbose.__init__(self, verbose)
- self.__cond = Condition(Lock())
- self.__value = value
-
- def acquire(self, blocking=1):
- rc = False
- self.__cond.acquire()
- while self.__value == 0:
- if not blocking:
- break
- if __debug__:
- self._note("%s.acquire(%s): blocked waiting, value=%s",
- self, blocking, self.__value)
- self.__cond.wait()
- else:
- self.__value = self.__value - 1
- if __debug__:
- self._note("%s.acquire: success, value=%s",
- self, self.__value)
- rc = True
- self.__cond.release()
- return rc
-
- __enter__ = acquire
-
- def release(self):
- self.__cond.acquire()
- self.__value = self.__value + 1
- if __debug__:
- self._note("%s.release: success, value=%s",
- self, self.__value)
- self.__cond.notify()
- self.__cond.release()
-
- def __exit__(self, t, v, tb):
- self.release()
-
-
-def BoundedSemaphore(*args, **kwargs):
- return _BoundedSemaphore(*args, **kwargs)
-
-class _BoundedSemaphore(_Semaphore):
- """Semaphore that checks that # releases is <= # acquires"""
- def __init__(self, value=1, verbose=None):
- _Semaphore.__init__(self, value, verbose)
- self._initial_value = value
-
- def release(self):
- if self._Semaphore__value >= self._initial_value:
- raise ValueError, "Semaphore released too many times"
- return _Semaphore.release(self)
-
-
-def Event(*args, **kwargs):
- return _Event(*args, **kwargs)
-
-class _Event(_Verbose):
-
- # After Tim Peters' event class (without is_posted())
-
- def __init__(self, verbose=None):
- _Verbose.__init__(self, verbose)
- self.__cond = Condition(Lock())
- self.__flag = False
-
- def isSet(self):
- return self.__flag
-
- def set(self):
- self.__cond.acquire()
- try:
- self.__flag = True
- self.__cond.notifyAll()
- finally:
- self.__cond.release()
-
- def clear(self):
- self.__cond.acquire()
- try:
- self.__flag = False
- finally:
- self.__cond.release()
-
- def wait(self, timeout=None):
- self.__cond.acquire()
- try:
- if not self.__flag:
- self.__cond.wait(timeout)
- finally:
- self.__cond.release()
-
-# Helper to generate new thread names
-_counter = 0
-def _newname(template="Thread-%d"):
- global _counter
- _counter = _counter + 1
- return template % _counter
-
-# Active thread administration
-_active_limbo_lock = _allocate_lock()
-_active = {} # maps thread id to Thread object
-_limbo = {}
-
-
-# Main class for threads
-
-class Thread(_Verbose):
-
- __initialized = False
- # Need to store a reference to sys.exc_info for printing
- # out exceptions when a thread tries to use a global var. during interp.
- # shutdown and thus raises an exception about trying to perform some
- # operation on/with a NoneType
- __exc_info = _sys.exc_info
-
- def __init__(self, group=None, target=None, name=None,
- args=(), kwargs=None, verbose=None):
- assert group is None, "group argument must be None for now"
- _Verbose.__init__(self, verbose)
- if kwargs is None:
- kwargs = {}
- self.__target = target
- self.__name = str(name or _newname())
- self.__args = args
- self.__kwargs = kwargs
- self.__daemonic = self._set_daemon()
- self.__started = False
- self.__stopped = False
- self.__block = Condition(Lock())
- self.__initialized = True
- # sys.stderr is not stored in the class like
- # sys.exc_info since it can be changed between instances
- self.__stderr = _sys.stderr
-
- def _set_daemon(self):
- # Overridden in _MainThread and _DummyThread
- return currentThread().isDaemon()
-
- def __repr__(self):
- assert self.__initialized, "Thread.__init__() was not called"
- status = "initial"
- if self.__started:
- status = "started"
- if self.__stopped:
- status = "stopped"
- if self.__daemonic:
- status = status + " daemon"
- return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
-
- def start(self):
- assert self.__initialized, "Thread.__init__() not called"
- assert not self.__started, "thread already started"
- if __debug__:
- self._note("%s.start(): starting thread", self)
- _active_limbo_lock.acquire()
- _limbo[self] = self
- _active_limbo_lock.release()
- _start_new_thread(self.__bootstrap, ())
- self.__started = True
- _sleep(0.000001) # 1 usec, to let the thread run (Solaris hack)
-
- def run(self):
- if self.__target:
- self.__target(*self.__args, **self.__kwargs)
-
- def __bootstrap(self):
- try:
- self.__started = True
- _active_limbo_lock.acquire()
- _active[_get_ident()] = self
- del _limbo[self]
- _active_limbo_lock.release()
- if __debug__:
- self._note("%s.__bootstrap(): thread started", self)
-
- if _trace_hook:
- self._note("%s.__bootstrap(): registering trace hook", self)
- _sys.settrace(_trace_hook)
- if _profile_hook:
- self._note("%s.__bootstrap(): registering profile hook", self)
- _sys.setprofile(_profile_hook)
-
- try:
- self.run()
- except SystemExit:
- if __debug__:
- self._note("%s.__bootstrap(): raised SystemExit", self)
- except:
- if __debug__:
- self._note("%s.__bootstrap(): unhandled exception", self)
- # If sys.stderr is no more (most likely from interpreter
- # shutdown) use self.__stderr. Otherwise still use sys (as in
- # _sys) in case sys.stderr was redefined since the creation of
- # self.
- if _sys:
- _sys.stderr.write("Exception in thread %s:\n%s\n" %
- (self.getName(), _format_exc()))
- else:
- # Do the best job possible w/o a huge amt. of code to
- # approximate a traceback (code ideas from
- # Lib/traceback.py)
- exc_type, exc_value, exc_tb = self.__exc_info()
- try:
- print>>self.__stderr, (
- "Exception in thread " + self.getName() +
- " (most likely raised during interpreter shutdown):")
- print>>self.__stderr, (
- "Traceback (most recent call last):")
- while exc_tb:
- print>>self.__stderr, (
- ' File "%s", line %s, in %s' %
- (exc_tb.tb_frame.f_code.co_filename,
- exc_tb.tb_lineno,
- exc_tb.tb_frame.f_code.co_name))
- exc_tb = exc_tb.tb_next
- print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
- # Make sure that exc_tb gets deleted since it is a memory
- # hog; deleting everything else is just for thoroughness
- finally:
- del exc_type, exc_value, exc_tb
- else:
- if __debug__:
- self._note("%s.__bootstrap(): normal return", self)
- finally:
- self.__stop()
- try:
- self.__delete()
- except:
- pass
-
- def __stop(self):
- self.__block.acquire()
- self.__stopped = True
- self.__block.notifyAll()
- self.__block.release()
-
- def __delete(self):
- "Remove current thread from the dict of currently running threads."
-
- # Notes about running with dummy_thread:
- #
- # Must take care to not raise an exception if dummy_thread is being
- # used (and thus this module is being used as an instance of
- # dummy_threading). dummy_thread.get_ident() always returns -1 since
- # there is only one thread if dummy_thread is being used. Thus
- # len(_active) is always <= 1 here, and any Thread instance created
- # overwrites the (if any) thread currently registered in _active.
- #
- # An instance of _MainThread is always created by 'threading'. This
- # gets overwritten the instant an instance of Thread is created; both
- # threads return -1 from dummy_thread.get_ident() and thus have the
- # same key in the dict. So when the _MainThread instance created by
- # 'threading' tries to clean itself up when atexit calls this method
- # it gets a KeyError if another Thread instance was created.
- #
- # This all means that KeyError from trying to delete something from
- # _active if dummy_threading is being used is a red herring. But
- # since it isn't if dummy_threading is *not* being used then don't
- # hide the exception.
-
- _active_limbo_lock.acquire()
- try:
- try:
- del _active[_get_ident()]
- except KeyError:
- if 'dummy_threading' not in _sys.modules:
- raise
- finally:
- _active_limbo_lock.release()
-
- def join(self, timeout=None):
- assert self.__initialized, "Thread.__init__() not called"
- assert self.__started, "cannot join thread before it is started"
- assert self is not currentThread(), "cannot join current thread"
- if __debug__:
- if not self.__stopped:
- self._note("%s.join(): waiting until thread stops", self)
- self.__block.acquire()
- try:
- if timeout is None:
- while not self.__stopped:
- self.__block.wait()
- if __debug__:
- self._note("%s.join(): thread stopped", self)
- else:
- deadline = _time() + timeout
- while not self.__stopped:
- delay = deadline - _time()
- if delay <= 0:
- if __debug__:
- self._note("%s.join(): timed out", self)
- break
- self.__block.wait(delay)
- else:
- if __debug__:
- self._note("%s.join(): thread stopped", self)
- finally:
- self.__block.release()
-
- def getName(self):
- assert self.__initialized, "Thread.__init__() not called"
- return self.__name
-
- def setName(self, name):
- assert self.__initialized, "Thread.__init__() not called"
- self.__name = str(name)
-
- def isAlive(self):
- assert self.__initialized, "Thread.__init__() not called"
- return self.__started and not self.__stopped
-
- def isDaemon(self):
- assert self.__initialized, "Thread.__init__() not called"
- return self.__daemonic
-
- def setDaemon(self, daemonic):
- assert self.__initialized, "Thread.__init__() not called"
- assert not self.__started, "cannot set daemon status of active thread"
- self.__daemonic = daemonic
-
-# The timer class was contributed by Itamar Shtull-Trauring
-
-def Timer(*args, **kwargs):
- return _Timer(*args, **kwargs)
-
-class _Timer(Thread):
- """Call a function after a specified number of seconds:
-
- t = Timer(30.0, f, args=[], kwargs={})
- t.start()
- t.cancel() # stop the timer's action if it's still waiting
- """
-
- def __init__(self, interval, function, args=[], kwargs={}):
- Thread.__init__(self)
- self.interval = interval
- self.function = function
- self.args = args
- self.kwargs = kwargs
- self.finished = Event()
-
- def cancel(self):
- """Stop the timer if it hasn't finished yet"""
- self.finished.set()
-
- def run(self):
- self.finished.wait(self.interval)
- if not self.finished.isSet():
- self.function(*self.args, **self.kwargs)
- self.finished.set()
-
-# Special thread class to represent the main thread
-# This is garbage collected through an exit handler
-
-class _MainThread(Thread):
-
- def __init__(self):
- Thread.__init__(self, name="MainThread")
- self._Thread__started = True
- _active_limbo_lock.acquire()
- _active[_get_ident()] = self
- _active_limbo_lock.release()
-
- def _set_daemon(self):
- return False
-
- def _exitfunc(self):
- self._Thread__stop()
- t = _pickSomeNonDaemonThread()
- if t:
- if __debug__:
- self._note("%s: waiting for other threads", self)
- while t:
- t.join()
- t = _pickSomeNonDaemonThread()
- if __debug__:
- self._note("%s: exiting", self)
- self._Thread__delete()
-
-def _pickSomeNonDaemonThread():
- for t in enumerate():
- if not t.isDaemon() and t.isAlive():
- return t
- return None
-
-
-# Dummy thread class to represent threads not started here.
-# These aren't garbage collected when they die, nor can they be waited for.
-# If they invoke anything in threading.py that calls currentThread(), they
-# leave an entry in the _active dict forever after.
-# Their purpose is to return *something* from currentThread().
-# They are marked as daemon threads so we won't wait for them
-# when we exit (conform previous semantics).
-
-class _DummyThread(Thread):
-
- def __init__(self):
- Thread.__init__(self, name=_newname("Dummy-%d"))
-
- # Thread.__block consumes an OS-level locking primitive, which
- # can never be used by a _DummyThread. Since a _DummyThread
- # instance is immortal, that's bad, so release this resource.
- del self._Thread__block
-
- self._Thread__started = True
- _active_limbo_lock.acquire()
- _active[_get_ident()] = self
- _active_limbo_lock.release()
-
- def _set_daemon(self):
- return True
-
- def join(self, timeout=None):
- assert False, "cannot join a dummy thread"
-
-
-# Global API functions
-
-def currentThread():
- try:
- return _active[_get_ident()]
- except KeyError:
- ##print "currentThread(): no current thread for", _get_ident()
- return _DummyThread()
-
-def activeCount():
- _active_limbo_lock.acquire()
- count = len(_active) + len(_limbo)
- _active_limbo_lock.release()
- return count
-
-def enumerate():
- _active_limbo_lock.acquire()
- active = _active.values() + _limbo.values()
- _active_limbo_lock.release()
- return active
-
-from thread import stack_size
-
-# Create the main thread object,
-# and make it available for the interpreter
-# (Py_Main) as threading._shutdown.
-
-_shutdown = _MainThread()._exitfunc
-
-# get thread-local implementation, either from the thread
-# module, or from the python fallback
-
-try:
- from thread import _local as local
-except ImportError:
- from _threading_local import local
-
-
-# Self-test code
-
-def _test():
-
- class BoundedQueue(_Verbose):
-
- def __init__(self, limit):
- _Verbose.__init__(self)
- self.mon = RLock()
- self.rc = Condition(self.mon)
- self.wc = Condition(self.mon)
- self.limit = limit
- self.queue = deque()
-
- def put(self, item):
- self.mon.acquire()
- while len(self.queue) >= self.limit:
- self._note("put(%s): queue full", item)
- self.wc.wait()
- self.queue.append(item)
- self._note("put(%s): appended, length now %d",
- item, len(self.queue))
- self.rc.notify()
- self.mon.release()
-
- def get(self):
- self.mon.acquire()
- while not self.queue:
- self._note("get(): queue empty")
- self.rc.wait()
- item = self.queue.popleft()
- self._note("get(): got %s, %d left", item, len(self.queue))
- self.wc.notify()
- self.mon.release()
- return item
-
- class ProducerThread(Thread):
-
- def __init__(self, queue, quota):
- Thread.__init__(self, name="Producer")
- self.queue = queue
- self.quota = quota
-
- def run(self):
- from random import random
- counter = 0
- while counter < self.quota:
- counter = counter + 1
- self.queue.put("%s.%d" % (self.getName(), counter))
- _sleep(random() * 0.00001)
-
-
- class ConsumerThread(Thread):
-
- def __init__(self, queue, count):
- Thread.__init__(self, name="Consumer")
- self.queue = queue
- self.count = count
-
- def run(self):
- while self.count > 0:
- item = self.queue.get()
- print item
- self.count = self.count - 1
-
- NP = 3
- QL = 4
- NI = 5
-
- Q = BoundedQueue(QL)
- P = []
- for i in range(NP):
- t = ProducerThread(Q, NI)
- t.setName("Producer-%d" % (i+1))
- P.append(t)
- C = ConsumerThread(Q, NI*NP)
- for t in P:
- t.start()
- _sleep(0.000001)
- C.start()
- for t in P:
- t.join()
- C.join()
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/timeit.py b/sys/lib/python/timeit.py
deleted file mode 100644
index 8c0f7a539..000000000
--- a/sys/lib/python/timeit.py
+++ /dev/null
@@ -1,285 +0,0 @@
-#! /usr/bin/env python
-
-"""Tool for measuring execution time of small code snippets.
-
-This module avoids a number of common traps for measuring execution
-times. See also Tim Peters' introduction to the Algorithms chapter in
-the Python Cookbook, published by O'Reilly.
-
-Library usage: see the Timer class.
-
-Command line usage:
- python timeit.py [-n N] [-r N] [-s S] [-t] [-c] [-h] [statement]
-
-Options:
- -n/--number N: how many times to execute 'statement' (default: see below)
- -r/--repeat N: how many times to repeat the timer (default 3)
- -s/--setup S: statement to be executed once initially (default 'pass')
- -t/--time: use time.time() (default on Unix)
- -c/--clock: use time.clock() (default on Windows)
- -v/--verbose: print raw timing results; repeat for more digits precision
- -h/--help: print this usage message and exit
- statement: statement to be timed (default 'pass')
-
-A multi-line statement may be given by specifying each line as a
-separate argument; indented lines are possible by enclosing an
-argument in quotes and using leading spaces. Multiple -s options are
-treated similarly.
-
-If -n is not given, a suitable number of loops is calculated by trying
-successive powers of 10 until the total time is at least 0.2 seconds.
-
-The difference in default timer function is because on Windows,
-clock() has microsecond granularity but time()'s granularity is 1/60th
-of a second; on Unix, clock() has 1/100th of a second granularity and
-time() is much more precise. On either platform, the default timer
-functions measure wall clock time, not the CPU time. This means that
-other processes running on the same computer may interfere with the
-timing. The best thing to do when accurate timing is necessary is to
-repeat the timing a few times and use the best time. The -r option is
-good for this; the default of 3 repetitions is probably enough in most
-cases. On Unix, you can use clock() to measure CPU time.
-
-Note: there is a certain baseline overhead associated with executing a
-pass statement. The code here doesn't try to hide it, but you should
-be aware of it. The baseline overhead can be measured by invoking the
-program without arguments.
-
-The baseline overhead differs between Python versions! Also, to
-fairly compare older Python versions to Python 2.3, you may want to
-use python -O for the older versions to avoid timing SET_LINENO
-instructions.
-"""
-
-import gc
-import sys
-import time
-try:
- import itertools
-except ImportError:
- # Must be an older Python version (see timeit() below)
- itertools = None
-
-__all__ = ["Timer"]
-
-dummy_src_name = "<timeit-src>"
-default_number = 1000000
-default_repeat = 3
-
-if sys.platform == "win32":
- # On Windows, the best timer is time.clock()
- default_timer = time.clock
-else:
- # On most other platforms the best timer is time.time()
- default_timer = time.time
-
-# Don't change the indentation of the template; the reindent() calls
-# in Timer.__init__() depend on setup being indented 4 spaces and stmt
-# being indented 8 spaces.
-template = """
-def inner(_it, _timer):
- %(setup)s
- _t0 = _timer()
- for _i in _it:
- %(stmt)s
- _t1 = _timer()
- return _t1 - _t0
-"""
-
-def reindent(src, indent):
- """Helper to reindent a multi-line statement."""
- return src.replace("\n", "\n" + " "*indent)
-
-class Timer:
- """Class for timing execution speed of small code snippets.
-
- The constructor takes a statement to be timed, an additional
- statement used for setup, and a timer function. Both statements
- default to 'pass'; the timer function is platform-dependent (see
- module doc string).
-
- To measure the execution time of the first statement, use the
- timeit() method. The repeat() method is a convenience to call
- timeit() multiple times and return a list of results.
-
- The statements may contain newlines, as long as they don't contain
- multi-line string literals.
- """
-
- def __init__(self, stmt="pass", setup="pass", timer=default_timer):
- """Constructor. See class doc string."""
- self.timer = timer
- stmt = reindent(stmt, 8)
- setup = reindent(setup, 4)
- src = template % {'stmt': stmt, 'setup': setup}
- self.src = src # Save for traceback display
- code = compile(src, dummy_src_name, "exec")
- ns = {}
- exec code in globals(), ns
- self.inner = ns["inner"]
-
- def print_exc(self, file=None):
- """Helper to print a traceback from the timed code.
-
- Typical use:
-
- t = Timer(...) # outside the try/except
- try:
- t.timeit(...) # or t.repeat(...)
- except:
- t.print_exc()
-
- The advantage over the standard traceback is that source lines
- in the compiled template will be displayed.
-
- The optional file argument directs where the traceback is
- sent; it defaults to sys.stderr.
- """
- import linecache, traceback
- linecache.cache[dummy_src_name] = (len(self.src),
- None,
- self.src.split("\n"),
- dummy_src_name)
- traceback.print_exc(file=file)
-
- def timeit(self, number=default_number):
- """Time 'number' executions of the main statement.
-
- To be precise, this executes the setup statement once, and
- then returns the time it takes to execute the main statement
- a number of times, as a float measured in seconds. The
- argument is the number of times through the loop, defaulting
- to one million. The main statement, the setup statement and
- the timer function to be used are passed to the constructor.
- """
- if itertools:
- it = itertools.repeat(None, number)
- else:
- it = [None] * number
- gcold = gc.isenabled()
- gc.disable()
- timing = self.inner(it, self.timer)
- if gcold:
- gc.enable()
- return timing
-
- def repeat(self, repeat=default_repeat, number=default_number):
- """Call timeit() a few times.
-
- This is a convenience function that calls the timeit()
- repeatedly, returning a list of results. The first argument
- specifies how many times to call timeit(), defaulting to 3;
- the second argument specifies the timer argument, defaulting
- to one million.
-
- Note: it's tempting to calculate mean and standard deviation
- from the result vector and report these. However, this is not
- very useful. In a typical case, the lowest value gives a
- lower bound for how fast your machine can run the given code
- snippet; higher values in the result vector are typically not
- caused by variability in Python's speed, but by other
- processes interfering with your timing accuracy. So the min()
- of the result is probably the only number you should be
- interested in. After that, you should look at the entire
- vector and apply common sense rather than statistics.
- """
- r = []
- for i in range(repeat):
- t = self.timeit(number)
- r.append(t)
- return r
-
-def main(args=None):
- """Main program, used when run as a script.
-
- The optional argument specifies the command line to be parsed,
- defaulting to sys.argv[1:].
-
- The return value is an exit code to be passed to sys.exit(); it
- may be None to indicate success.
-
- When an exception happens during timing, a traceback is printed to
- stderr and the return value is 1. Exceptions at other times
- (including the template compilation) are not caught.
- """
- if args is None:
- args = sys.argv[1:]
- import getopt
- try:
- opts, args = getopt.getopt(args, "n:s:r:tcvh",
- ["number=", "setup=", "repeat=",
- "time", "clock", "verbose", "help"])
- except getopt.error, err:
- print err
- print "use -h/--help for command line help"
- return 2
- timer = default_timer
- stmt = "\n".join(args) or "pass"
- number = 0 # auto-determine
- setup = []
- repeat = default_repeat
- verbose = 0
- precision = 3
- for o, a in opts:
- if o in ("-n", "--number"):
- number = int(a)
- if o in ("-s", "--setup"):
- setup.append(a)
- if o in ("-r", "--repeat"):
- repeat = int(a)
- if repeat <= 0:
- repeat = 1
- if o in ("-t", "--time"):
- timer = time.time
- if o in ("-c", "--clock"):
- timer = time.clock
- if o in ("-v", "--verbose"):
- if verbose:
- precision += 1
- verbose += 1
- if o in ("-h", "--help"):
- print __doc__,
- return 0
- setup = "\n".join(setup) or "pass"
- # Include the current directory, so that local imports work (sys.path
- # contains the directory of this script, rather than the current
- # directory)
- import os
- sys.path.insert(0, os.curdir)
- t = Timer(stmt, setup, timer)
- if number == 0:
- # determine number so that 0.2 <= total time < 2.0
- for i in range(1, 10):
- number = 10**i
- try:
- x = t.timeit(number)
- except:
- t.print_exc()
- return 1
- if verbose:
- print "%d loops -> %.*g secs" % (number, precision, x)
- if x >= 0.2:
- break
- try:
- r = t.repeat(repeat, number)
- except:
- t.print_exc()
- return 1
- best = min(r)
- if verbose:
- print "raw times:", " ".join(["%.*g" % (precision, x) for x in r])
- print "%d loops," % number,
- usec = best * 1e6 / number
- if usec < 1000:
- print "best of %d: %.*g usec per loop" % (repeat, precision, usec)
- else:
- msec = usec / 1000
- if msec < 1000:
- print "best of %d: %.*g msec per loop" % (repeat, precision, msec)
- else:
- sec = msec / 1000
- print "best of %d: %.*g sec per loop" % (repeat, precision, sec)
- return None
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/sys/lib/python/toaiff.py b/sys/lib/python/toaiff.py
deleted file mode 100644
index 3c8a02ba7..000000000
--- a/sys/lib/python/toaiff.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""Convert "arbitrary" sound files to AIFF (Apple and SGI's audio format).
-
-Input may be compressed.
-Uncompressed file type may be AIFF, WAV, VOC, 8SVX, NeXT/Sun, and others.
-An exception is raised if the file is not of a recognized type.
-Returned filename is either the input filename or a temporary filename;
-in the latter case the caller must ensure that it is removed.
-Other temporary files used are removed by the function.
-"""
-
-import os
-import tempfile
-import pipes
-import sndhdr
-
-__all__ = ["error", "toaiff"]
-
-table = {}
-
-t = pipes.Template()
-t.append('sox -t au - -t aiff -r 8000 -', '--')
-table['au'] = t
-
-# XXX The following is actually sub-optimal.
-# XXX The HCOM sampling rate can be 22k, 22k/2, 22k/3 or 22k/4.
-# XXX We must force the output sampling rate else the SGI won't play
-# XXX files sampled at 5.5k or 7.333k; however this means that files
-# XXX sampled at 11k are unnecessarily expanded.
-# XXX Similar comments apply to some other file types.
-t = pipes.Template()
-t.append('sox -t hcom - -t aiff -r 22050 -', '--')
-table['hcom'] = t
-
-t = pipes.Template()
-t.append('sox -t voc - -t aiff -r 11025 -', '--')
-table['voc'] = t
-
-t = pipes.Template()
-t.append('sox -t wav - -t aiff -', '--')
-table['wav'] = t
-
-t = pipes.Template()
-t.append('sox -t 8svx - -t aiff -r 16000 -', '--')
-table['8svx'] = t
-
-t = pipes.Template()
-t.append('sox -t sndt - -t aiff -r 16000 -', '--')
-table['sndt'] = t
-
-t = pipes.Template()
-t.append('sox -t sndr - -t aiff -r 16000 -', '--')
-table['sndr'] = t
-
-uncompress = pipes.Template()
-uncompress.append('uncompress', '--')
-
-
-class error(Exception):
- pass
-
-def toaiff(filename):
- temps = []
- ret = None
- try:
- ret = _toaiff(filename, temps)
- finally:
- for temp in temps[:]:
- if temp != ret:
- try:
- os.unlink(temp)
- except os.error:
- pass
- temps.remove(temp)
- return ret
-
-def _toaiff(filename, temps):
- if filename[-2:] == '.Z':
- (fd, fname) = tempfile.mkstemp()
- os.close(fd)
- temps.append(fname)
- sts = uncompress.copy(filename, fname)
- if sts:
- raise error, filename + ': uncompress failed'
- else:
- fname = filename
- try:
- ftype = sndhdr.whathdr(fname)
- if ftype:
- ftype = ftype[0] # All we're interested in
- except IOError, msg:
- if type(msg) == type(()) and len(msg) == 2 and \
- type(msg[0]) == type(0) and type(msg[1]) == type(''):
- msg = msg[1]
- if type(msg) != type(''):
- msg = repr(msg)
- raise error, filename + ': ' + msg
- if ftype == 'aiff':
- return fname
- if ftype is None or not ftype in table:
- raise error, '%s: unsupported audio file type %r' % (filename, ftype)
- (fd, temp) = tempfile.mkstemp()
- os.close(fd)
- temps.append(temp)
- sts = table[ftype].copy(fname, temp)
- if sts:
- raise error, filename + ': conversion to aiff failed'
- return temp
diff --git a/sys/lib/python/token.py b/sys/lib/python/token.py
deleted file mode 100755
index c4db6c511..000000000
--- a/sys/lib/python/token.py
+++ /dev/null
@@ -1,141 +0,0 @@
-#! /usr/bin/env python
-
-"""Token constants (from "token.h")."""
-
-# This file is automatically generated; please don't muck it up!
-#
-# To update the symbols in this file, 'cd' to the top directory of
-# the python source tree after building the interpreter and run:
-#
-# python Lib/token.py
-
-#--start constants--
-ENDMARKER = 0
-NAME = 1
-NUMBER = 2
-STRING = 3
-NEWLINE = 4
-INDENT = 5
-DEDENT = 6
-LPAR = 7
-RPAR = 8
-LSQB = 9
-RSQB = 10
-COLON = 11
-COMMA = 12
-SEMI = 13
-PLUS = 14
-MINUS = 15
-STAR = 16
-SLASH = 17
-VBAR = 18
-AMPER = 19
-LESS = 20
-GREATER = 21
-EQUAL = 22
-DOT = 23
-PERCENT = 24
-BACKQUOTE = 25
-LBRACE = 26
-RBRACE = 27
-EQEQUAL = 28
-NOTEQUAL = 29
-LESSEQUAL = 30
-GREATEREQUAL = 31
-TILDE = 32
-CIRCUMFLEX = 33
-LEFTSHIFT = 34
-RIGHTSHIFT = 35
-DOUBLESTAR = 36
-PLUSEQUAL = 37
-MINEQUAL = 38
-STAREQUAL = 39
-SLASHEQUAL = 40
-PERCENTEQUAL = 41
-AMPEREQUAL = 42
-VBAREQUAL = 43
-CIRCUMFLEXEQUAL = 44
-LEFTSHIFTEQUAL = 45
-RIGHTSHIFTEQUAL = 46
-DOUBLESTAREQUAL = 47
-DOUBLESLASH = 48
-DOUBLESLASHEQUAL = 49
-AT = 50
-OP = 51
-ERRORTOKEN = 52
-N_TOKENS = 53
-NT_OFFSET = 256
-#--end constants--
-
-tok_name = {}
-for _name, _value in globals().items():
- if type(_value) is type(0):
- tok_name[_value] = _name
-
-
-def ISTERMINAL(x):
- return x < NT_OFFSET
-
-def ISNONTERMINAL(x):
- return x >= NT_OFFSET
-
-def ISEOF(x):
- return x == ENDMARKER
-
-
-def main():
- import re
- import sys
- args = sys.argv[1:]
- inFileName = args and args[0] or "Include/token.h"
- outFileName = "Lib/token.py"
- if len(args) > 1:
- outFileName = args[1]
- try:
- fp = open(inFileName)
- except IOError, err:
- sys.stdout.write("I/O error: %s\n" % str(err))
- sys.exit(1)
- lines = fp.read().split("\n")
- fp.close()
- prog = re.compile(
- "#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
- re.IGNORECASE)
- tokens = {}
- for line in lines:
- match = prog.match(line)
- if match:
- name, val = match.group(1, 2)
- val = int(val)
- tokens[val] = name # reverse so we can sort them...
- keys = tokens.keys()
- keys.sort()
- # load the output skeleton from the target:
- try:
- fp = open(outFileName)
- except IOError, err:
- sys.stderr.write("I/O error: %s\n" % str(err))
- sys.exit(2)
- format = fp.read().split("\n")
- fp.close()
- try:
- start = format.index("#--start constants--") + 1
- end = format.index("#--end constants--")
- except ValueError:
- sys.stderr.write("target does not contain format markers")
- sys.exit(3)
- lines = []
- for val in keys:
- lines.append("%s = %d" % (tokens[val], val))
- format[start:end] = lines
- try:
- fp = open(outFileName, 'w')
- except IOError, err:
- sys.stderr.write("I/O error: %s\n" % str(err))
- sys.exit(4)
- fp.write("\n".join(format))
- fp.close()
-
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/tokenize.py b/sys/lib/python/tokenize.py
deleted file mode 100644
index a9be4cfe0..000000000
--- a/sys/lib/python/tokenize.py
+++ /dev/null
@@ -1,345 +0,0 @@
-"""Tokenization help for Python programs.
-
-generate_tokens(readline) is a generator that breaks a stream of
-text into Python tokens. It accepts a readline-like method which is called
-repeatedly to get the next line of input (or "" for EOF). It generates
-5-tuples with these members:
-
- the token type (see token.py)
- the token (a string)
- the starting (row, column) indices of the token (a 2-tuple of ints)
- the ending (row, column) indices of the token (a 2-tuple of ints)
- the original line (string)
-
-It is designed to match the working of the Python tokenizer exactly, except
-that it produces COMMENT tokens for comments and gives type OP for all
-operators
-
-Older entry points
- tokenize_loop(readline, tokeneater)
- tokenize(readline, tokeneater=printtoken)
-are the same, except instead of generating tokens, tokeneater is a callback
-function to which the 5 fields described above are passed as 5 arguments,
-each time a new token is found."""
-
-__author__ = 'Ka-Ping Yee <ping@lfw.org>'
-__credits__ = \
- 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
-
-import string, re
-from token import *
-
-import token
-__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
- "generate_tokens", "NL", "untokenize"]
-del x
-del token
-
-COMMENT = N_TOKENS
-tok_name[COMMENT] = 'COMMENT'
-NL = N_TOKENS + 1
-tok_name[NL] = 'NL'
-N_TOKENS += 2
-
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
-
-Whitespace = r'[ \f\t]*'
-Comment = r'#[^\r\n]*'
-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'[a-zA-Z_]\w*'
-
-Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
-Octnumber = r'0[0-7]*[lL]?'
-Decnumber = r'[1-9]\d*[lL]?'
-Intnumber = group(Hexnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?\d+'
-Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
-Expfloat = r'\d+' + Exponent
-Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
-Number = group(Imagnumber, Floatnumber, Intnumber)
-
-# Tail end of ' string.
-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
-# Tail end of " string.
-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
-# Tail end of ''' string.
-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
-# Tail end of """ string.
-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
-# Single-line ' or " string.
-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
-
-# Because of leftmost-then-longest match semantics, be sure to put the
-# longest operators first (e.g., if = came before ==, == would get
-# recognized as two instances of =).
-Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
- r"//=?",
- r"[+\-*/%&|^=<>]=?",
- r"~")
-
-Bracket = '[][(){}]'
-Special = group(r'\r?\n', r'[:;.,`@]')
-Funny = group(Operator, Bracket, Special)
-
-PlainToken = group(Number, Funny, String, Name)
-Token = Ignore + PlainToken
-
-# First (or only) line of ' or " string.
-ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
- group("'", r'\\\r?\n'),
- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
- group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
-PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
-
-tokenprog, pseudoprog, single3prog, double3prog = map(
- re.compile, (Token, PseudoToken, Single3, Double3))
-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
- "'''": single3prog, '"""': double3prog,
- "r'''": single3prog, 'r"""': double3prog,
- "u'''": single3prog, 'u"""': double3prog,
- "ur'''": single3prog, 'ur"""': double3prog,
- "R'''": single3prog, 'R"""': double3prog,
- "U'''": single3prog, 'U"""': double3prog,
- "uR'''": single3prog, 'uR"""': double3prog,
- "Ur'''": single3prog, 'Ur"""': double3prog,
- "UR'''": single3prog, 'UR"""': double3prog,
- 'r': None, 'R': None, 'u': None, 'U': None}
-
-triple_quoted = {}
-for t in ("'''", '"""',
- "r'''", 'r"""', "R'''", 'R"""',
- "u'''", 'u"""', "U'''", 'U"""',
- "ur'''", 'ur"""', "Ur'''", 'Ur"""',
- "uR'''", 'uR"""', "UR'''", 'UR"""'):
- triple_quoted[t] = t
-single_quoted = {}
-for t in ("'", '"',
- "r'", 'r"', "R'", 'R"',
- "u'", 'u"', "U'", 'U"',
- "ur'", 'ur"', "Ur'", 'Ur"',
- "uR'", 'uR"', "UR'", 'UR"' ):
- single_quoted[t] = t
-
-tabsize = 8
-
-class TokenError(Exception): pass
-
-class StopTokenizing(Exception): pass
-
-def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
- print "%d,%d-%d,%d:\t%s\t%s" % \
- (srow, scol, erow, ecol, tok_name[type], repr(token))
-
-def tokenize(readline, tokeneater=printtoken):
- """
- The tokenize() function accepts two parameters: one representing the
- input stream, and one providing an output mechanism for tokenize().
-
- The first parameter, readline, must be a callable object which provides
- the same interface as the readline() method of built-in file objects.
- Each call to the function should return one line of input as a string.
-
- The second parameter, tokeneater, must also be a callable object. It is
- called once for each token, with five arguments, corresponding to the
- tuples generated by generate_tokens().
- """
- try:
- tokenize_loop(readline, tokeneater)
- except StopTokenizing:
- pass
-
-# backwards compatible interface
-def tokenize_loop(readline, tokeneater):
- for token_info in generate_tokens(readline):
- tokeneater(*token_info)
-
-
-def untokenize(iterable):
- """Transform tokens back into Python source code.
-
- Each element returned by the iterable must be a token sequence
- with at least two elements, a token number and token value.
-
- Round-trip invariant:
- # Output text will tokenize the back to the input
- t1 = [tok[:2] for tok in generate_tokens(f.readline)]
- newcode = untokenize(t1)
- readline = iter(newcode.splitlines(1)).next
- t2 = [tok[:2] for tokin generate_tokens(readline)]
- assert t1 == t2
- """
-
- startline = False
- indents = []
- toks = []
- toks_append = toks.append
- for tok in iterable:
- toknum, tokval = tok[:2]
-
- if toknum in (NAME, NUMBER):
- tokval += ' '
-
- if toknum == INDENT:
- indents.append(tokval)
- continue
- elif toknum == DEDENT:
- indents.pop()
- continue
- elif toknum in (NEWLINE, COMMENT, NL):
- startline = True
- elif startline and indents:
- toks_append(indents[-1])
- startline = False
- toks_append(tokval)
- return ''.join(toks)
-
-
-def generate_tokens(readline):
- """
- The generate_tokens() generator requires one argment, readline, which
- must be a callable object which provides the same interface as the
- readline() method of built-in file objects. Each call to the function
- should return one line of input as a string. Alternately, readline
- can be a callable function terminating with StopIteration:
- readline = open(myfile).next # Example of alternate readline
-
- The generator produces 5-tuples with these members: the token type; the
- token string; a 2-tuple (srow, scol) of ints specifying the row and
- column where the token begins in the source; a 2-tuple (erow, ecol) of
- ints specifying the row and column where the token ends in the source;
- and the line on which the token was found. The line passed is the
- logical line; continuation lines are included.
- """
- lnum = parenlev = continued = 0
- namechars, numchars = string.ascii_letters + '_', '0123456789'
- contstr, needcont = '', 0
- contline = None
- indents = [0]
-
- while 1: # loop over lines in stream
- try:
- line = readline()
- except StopIteration:
- line = ''
- lnum = lnum + 1
- pos, max = 0, len(line)
-
- if contstr: # continued string
- if not line:
- raise TokenError, ("EOF in multi-line string", strstart)
- endmatch = endprog.match(line)
- if endmatch:
- pos = end = endmatch.end(0)
- yield (STRING, contstr + line[:end],
- strstart, (lnum, end), contline + line)
- contstr, needcont = '', 0
- contline = None
- elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
- yield (ERRORTOKEN, contstr + line,
- strstart, (lnum, len(line)), contline)
- contstr = ''
- contline = None
- continue
- else:
- contstr = contstr + line
- contline = contline + line
- continue
-
- elif parenlev == 0 and not continued: # new statement
- if not line: break
- column = 0
- while pos < max: # measure leading whitespace
- if line[pos] == ' ': column = column + 1
- elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
- elif line[pos] == '\f': column = 0
- else: break
- pos = pos + 1
- if pos == max: break
-
- if line[pos] in '#\r\n': # skip comments or blank lines
- yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
- (lnum, pos), (lnum, len(line)), line)
- continue
-
- if column > indents[-1]: # count indents or dedents
- indents.append(column)
- yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
- while column < indents[-1]:
- if column not in indents:
- raise IndentationError(
- "unindent does not match any outer indentation level",
- ("<tokenize>", lnum, pos, line))
- indents = indents[:-1]
- yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
-
- else: # continued statement
- if not line:
- raise TokenError, ("EOF in multi-line statement", (lnum, 0))
- continued = 0
-
- while pos < max:
- pseudomatch = pseudoprog.match(line, pos)
- if pseudomatch: # scan for tokens
- start, end = pseudomatch.span(1)
- spos, epos, pos = (lnum, start), (lnum, end), end
- token, initial = line[start:end], line[start]
-
- if initial in numchars or \
- (initial == '.' and token != '.'): # ordinary number
- yield (NUMBER, token, spos, epos, line)
- elif initial in '\r\n':
- yield (parenlev > 0 and NL or NEWLINE,
- token, spos, epos, line)
- elif initial == '#':
- yield (COMMENT, token, spos, epos, line)
- elif token in triple_quoted:
- endprog = endprogs[token]
- endmatch = endprog.match(line, pos)
- if endmatch: # all on one line
- pos = endmatch.end(0)
- token = line[start:pos]
- yield (STRING, token, spos, (lnum, pos), line)
- else:
- strstart = (lnum, start) # multiple lines
- contstr = line[start:]
- contline = line
- break
- elif initial in single_quoted or \
- token[:2] in single_quoted or \
- token[:3] in single_quoted:
- if token[-1] == '\n': # continued string
- strstart = (lnum, start)
- endprog = (endprogs[initial] or endprogs[token[1]] or
- endprogs[token[2]])
- contstr, needcont = line[start:], 1
- contline = line
- break
- else: # ordinary string
- yield (STRING, token, spos, epos, line)
- elif initial in namechars: # ordinary name
- yield (NAME, token, spos, epos, line)
- elif initial == '\\': # continued stmt
- continued = 1
- else:
- if initial in '([{': parenlev = parenlev + 1
- elif initial in ')]}': parenlev = parenlev - 1
- yield (OP, token, spos, epos, line)
- else:
- yield (ERRORTOKEN, line[pos],
- (lnum, pos), (lnum, pos+1), line)
- pos = pos + 1
-
- for indent in indents[1:]: # pop remaining indent levels
- yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
- yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
-
-if __name__ == '__main__': # testing
- import sys
- if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
- else: tokenize(sys.stdin.readline)
diff --git a/sys/lib/python/trace.py b/sys/lib/python/trace.py
deleted file mode 100644
index 364e3f7bc..000000000
--- a/sys/lib/python/trace.py
+++ /dev/null
@@ -1,792 +0,0 @@
-#!/usr/bin/env python
-
-# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
-# err... reserved and offered to the public under the terms of the
-# Python 2.2 license.
-# Author: Zooko O'Whielacronx
-# http://zooko.com/
-# mailto:zooko@zooko.com
-#
-# Copyright 2000, Mojam Media, Inc., all rights reserved.
-# Author: Skip Montanaro
-#
-# Copyright 1999, Bioreason, Inc., all rights reserved.
-# Author: Andrew Dalke
-#
-# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
-# Author: Skip Montanaro
-#
-# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
-#
-#
-# Permission to use, copy, modify, and distribute this Python software and
-# its associated documentation for any purpose without fee is hereby
-# granted, provided that the above copyright notice appears in all copies,
-# and that both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of neither Automatrix,
-# Bioreason or Mojam Media be used in advertising or publicity pertaining to
-# distribution of the software without specific, written prior permission.
-#
-"""program/module to trace Python program or function execution
-
-Sample use, command line:
- trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
- trace.py -t --ignore-dir '$prefix' spam.py eggs
- trace.py --trackcalls spam.py eggs
-
-Sample use, programmatically
- import sys
-
- # create a Trace object, telling it what to ignore, and whether to
- # do tracing or line-counting or both.
- tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
- count=1)
- # run the new command using the given tracer
- tracer.run('main()')
- # make a report, placing output in /tmp
- r = tracer.results()
- r.write_results(show_missing=True, coverdir="/tmp")
-"""
-
-import linecache
-import os
-import re
-import sys
-import threading
-import token
-import tokenize
-import types
-import gc
-
-try:
- import cPickle
- pickle = cPickle
-except ImportError:
- import pickle
-
-def usage(outfile):
- outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]
-
-Meta-options:
---help Display this help then exit.
---version Output version information then exit.
-
-Otherwise, exactly one of the following three options must be given:
--t, --trace Print each line to sys.stdout before it is executed.
--c, --count Count the number of times each line is executed
- and write the counts to <module>.cover for each
- module executed, in the module's directory.
- See also `--coverdir', `--file', `--no-report' below.
--l, --listfuncs Keep track of which functions are executed at least
- once and write the results to sys.stdout after the
- program exits.
--T, --trackcalls Keep track of caller/called pairs and write the
- results to sys.stdout after the program exits.
--r, --report Generate a report from a counts file; do not execute
- any code. `--file' must specify the results file to
- read, which must have been created in a previous run
- with `--count --file=FILE'.
-
-Modifiers:
--f, --file=<file> File to accumulate counts over several runs.
--R, --no-report Do not generate the coverage report files.
- Useful if you want to accumulate over several runs.
--C, --coverdir=<dir> Directory where the report files. The coverage
- report for <package>.<module> is written to file
- <dir>/<package>/<module>.cover.
--m, --missing Annotate executable lines that were not executed
- with '>>>>>> '.
--s, --summary Write a brief summary on stdout for each file.
- (Can only be used with --count or --report.)
-
-Filters, may be repeated multiple times:
---ignore-module=<mod> Ignore the given module and its submodules
- (if it is a package).
---ignore-dir=<dir> Ignore files in the given directory (multiple
- directories can be joined by os.pathsep).
-""" % sys.argv[0])
-
-PRAGMA_NOCOVER = "#pragma NO COVER"
-
-# Simple rx to find lines with no code.
-rx_blank = re.compile(r'^\s*(#.*)?$')
-
-class Ignore:
- def __init__(self, modules = None, dirs = None):
- self._mods = modules or []
- self._dirs = dirs or []
-
- self._dirs = map(os.path.normpath, self._dirs)
- self._ignore = { '<string>': 1 }
-
- def names(self, filename, modulename):
- if self._ignore.has_key(modulename):
- return self._ignore[modulename]
-
- # haven't seen this one before, so see if the module name is
- # on the ignore list. Need to take some care since ignoring
- # "cmp" musn't mean ignoring "cmpcache" but ignoring
- # "Spam" must also mean ignoring "Spam.Eggs".
- for mod in self._mods:
- if mod == modulename: # Identical names, so ignore
- self._ignore[modulename] = 1
- return 1
- # check if the module is a proper submodule of something on
- # the ignore list
- n = len(mod)
- # (will not overflow since if the first n characters are the
- # same and the name has not already occurred, then the size
- # of "name" is greater than that of "mod")
- if mod == modulename[:n] and modulename[n] == '.':
- self._ignore[modulename] = 1
- return 1
-
- # Now check that __file__ isn't in one of the directories
- if filename is None:
- # must be a built-in, so we must ignore
- self._ignore[modulename] = 1
- return 1
-
- # Ignore a file when it contains one of the ignorable paths
- for d in self._dirs:
- # The '+ os.sep' is to ensure that d is a parent directory,
- # as compared to cases like:
- # d = "/usr/local"
- # filename = "/usr/local.py"
- # or
- # d = "/usr/local.py"
- # filename = "/usr/local.py"
- if filename.startswith(d + os.sep):
- self._ignore[modulename] = 1
- return 1
-
- # Tried the different ways, so we don't ignore this module
- self._ignore[modulename] = 0
- return 0
-
-def modname(path):
- """Return a plausible module name for the patch."""
-
- base = os.path.basename(path)
- filename, ext = os.path.splitext(base)
- return filename
-
-def fullmodname(path):
- """Return a plausible module name for the path."""
-
- # If the file 'path' is part of a package, then the filename isn't
- # enough to uniquely identify it. Try to do the right thing by
- # looking in sys.path for the longest matching prefix. We'll
- # assume that the rest is the package name.
-
- comparepath = os.path.normcase(path)
- longest = ""
- for dir in sys.path:
- dir = os.path.normcase(dir)
- if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
- if len(dir) > len(longest):
- longest = dir
-
- if longest:
- base = path[len(longest) + 1:]
- else:
- base = path
- base = base.replace(os.sep, ".")
- if os.altsep:
- base = base.replace(os.altsep, ".")
- filename, ext = os.path.splitext(base)
- return filename
-
-class CoverageResults:
- def __init__(self, counts=None, calledfuncs=None, infile=None,
- callers=None, outfile=None):
- self.counts = counts
- if self.counts is None:
- self.counts = {}
- self.counter = self.counts.copy() # map (filename, lineno) to count
- self.calledfuncs = calledfuncs
- if self.calledfuncs is None:
- self.calledfuncs = {}
- self.calledfuncs = self.calledfuncs.copy()
- self.callers = callers
- if self.callers is None:
- self.callers = {}
- self.callers = self.callers.copy()
- self.infile = infile
- self.outfile = outfile
- if self.infile:
- # Try to merge existing counts file.
- try:
- counts, calledfuncs, callers = \
- pickle.load(open(self.infile, 'rb'))
- self.update(self.__class__(counts, calledfuncs, callers))
- except (IOError, EOFError, ValueError), err:
- print >> sys.stderr, ("Skipping counts file %r: %s"
- % (self.infile, err))
-
- def update(self, other):
- """Merge in the data from another CoverageResults"""
- counts = self.counts
- calledfuncs = self.calledfuncs
- callers = self.callers
- other_counts = other.counts
- other_calledfuncs = other.calledfuncs
- other_callers = other.callers
-
- for key in other_counts.keys():
- counts[key] = counts.get(key, 0) + other_counts[key]
-
- for key in other_calledfuncs.keys():
- calledfuncs[key] = 1
-
- for key in other_callers.keys():
- callers[key] = 1
-
- def write_results(self, show_missing=True, summary=False, coverdir=None):
- """
- @param coverdir
- """
- if self.calledfuncs:
- print
- print "functions called:"
- calls = self.calledfuncs.keys()
- calls.sort()
- for filename, modulename, funcname in calls:
- print ("filename: %s, modulename: %s, funcname: %s"
- % (filename, modulename, funcname))
-
- if self.callers:
- print
- print "calling relationships:"
- calls = self.callers.keys()
- calls.sort()
- lastfile = lastcfile = ""
- for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
- if pfile != lastfile:
- print
- print "***", pfile, "***"
- lastfile = pfile
- lastcfile = ""
- if cfile != pfile and lastcfile != cfile:
- print " -->", cfile
- lastcfile = cfile
- print " %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)
-
- # turn the counts data ("(filename, lineno) = count") into something
- # accessible on a per-file basis
- per_file = {}
- for filename, lineno in self.counts.keys():
- lines_hit = per_file[filename] = per_file.get(filename, {})
- lines_hit[lineno] = self.counts[(filename, lineno)]
-
- # accumulate summary info, if needed
- sums = {}
-
- for filename, count in per_file.iteritems():
- # skip some "files" we don't care about...
- if filename == "<string>":
- continue
-
- if filename.endswith((".pyc", ".pyo")):
- filename = filename[:-1]
-
- if coverdir is None:
- dir = os.path.dirname(os.path.abspath(filename))
- modulename = modname(filename)
- else:
- dir = coverdir
- if not os.path.exists(dir):
- os.makedirs(dir)
- modulename = fullmodname(filename)
-
- # If desired, get a list of the line numbers which represent
- # executable content (returned as a dict for better lookup speed)
- if show_missing:
- lnotab = find_executable_linenos(filename)
- else:
- lnotab = {}
-
- source = linecache.getlines(filename)
- coverpath = os.path.join(dir, modulename + ".cover")
- n_hits, n_lines = self.write_results_file(coverpath, source,
- lnotab, count)
-
- if summary and n_lines:
- percent = int(100 * n_hits / n_lines)
- sums[modulename] = n_lines, percent, modulename, filename
-
- if summary and sums:
- mods = sums.keys()
- mods.sort()
- print "lines cov% module (path)"
- for m in mods:
- n_lines, percent, modulename, filename = sums[m]
- print "%5d %3d%% %s (%s)" % sums[m]
-
- if self.outfile:
- # try and store counts and module info into self.outfile
- try:
- pickle.dump((self.counts, self.calledfuncs, self.callers),
- open(self.outfile, 'wb'), 1)
- except IOError, err:
- print >> sys.stderr, "Can't save counts files because %s" % err
-
- def write_results_file(self, path, lines, lnotab, lines_hit):
- """Return a coverage results file in path."""
-
- try:
- outfile = open(path, "w")
- except IOError, err:
- print >> sys.stderr, ("trace: Could not open %r for writing: %s"
- "- skipping" % (path, err))
- return 0, 0
-
- n_lines = 0
- n_hits = 0
- for i, line in enumerate(lines):
- lineno = i + 1
- # do the blank/comment match to try to mark more lines
- # (help the reader find stuff that hasn't been covered)
- if lineno in lines_hit:
- outfile.write("%5d: " % lines_hit[lineno])
- n_hits += 1
- n_lines += 1
- elif rx_blank.match(line):
- outfile.write(" ")
- else:
- # lines preceded by no marks weren't hit
- # Highlight them if so indicated, unless the line contains
- # #pragma: NO COVER
- if lineno in lnotab and not PRAGMA_NOCOVER in lines[i]:
- outfile.write(">>>>>> ")
- n_lines += 1
- else:
- outfile.write(" ")
- outfile.write(lines[i].expandtabs(8))
- outfile.close()
-
- return n_hits, n_lines
-
-def find_lines_from_code(code, strs):
- """Return dict where keys are lines in the line number table."""
- linenos = {}
-
- line_increments = [ord(c) for c in code.co_lnotab[1::2]]
- table_length = len(line_increments)
- docstring = False
-
- lineno = code.co_firstlineno
- for li in line_increments:
- lineno += li
- if lineno not in strs:
- linenos[lineno] = 1
-
- return linenos
-
-def find_lines(code, strs):
- """Return lineno dict for all code objects reachable from code."""
- # get all of the lineno information from the code of this scope level
- linenos = find_lines_from_code(code, strs)
-
- # and check the constants for references to other code objects
- for c in code.co_consts:
- if isinstance(c, types.CodeType):
- # find another code object, so recurse into it
- linenos.update(find_lines(c, strs))
- return linenos
-
-def find_strings(filename):
- """Return a dict of possible docstring positions.
-
- The dict maps line numbers to strings. There is an entry for
- line that contains only a string or a part of a triple-quoted
- string.
- """
- d = {}
- # If the first token is a string, then it's the module docstring.
- # Add this special case so that the test in the loop passes.
- prev_ttype = token.INDENT
- f = open(filename)
- for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
- if ttype == token.STRING:
- if prev_ttype == token.INDENT:
- sline, scol = start
- eline, ecol = end
- for i in range(sline, eline + 1):
- d[i] = 1
- prev_ttype = ttype
- f.close()
- return d
-
-def find_executable_linenos(filename):
- """Return dict where keys are line numbers in the line number table."""
- try:
- prog = open(filename, "rU").read()
- except IOError, err:
- print >> sys.stderr, ("Not printing coverage data for %r: %s"
- % (filename, err))
- return {}
- code = compile(prog, filename, "exec")
- strs = find_strings(filename)
- return find_lines(code, strs)
-
-class Trace:
- def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
- ignoremods=(), ignoredirs=(), infile=None, outfile=None):
- """
- @param count true iff it should count number of times each
- line is executed
- @param trace true iff it should print out each line that is
- being counted
- @param countfuncs true iff it should just output a list of
- (filename, modulename, funcname,) for functions
- that were called at least once; This overrides
- `count' and `trace'
- @param ignoremods a list of the names of modules to ignore
- @param ignoredirs a list of the names of directories to ignore
- all of the (recursive) contents of
- @param infile file from which to read stored counts to be
- added into the results
- @param outfile file in which to write the results
- """
- self.infile = infile
- self.outfile = outfile
- self.ignore = Ignore(ignoremods, ignoredirs)
- self.counts = {} # keys are (filename, linenumber)
- self.blabbed = {} # for debugging
- self.pathtobasename = {} # for memoizing os.path.basename
- self.donothing = 0
- self.trace = trace
- self._calledfuncs = {}
- self._callers = {}
- self._caller_cache = {}
- if countcallers:
- self.globaltrace = self.globaltrace_trackcallers
- elif countfuncs:
- self.globaltrace = self.globaltrace_countfuncs
- elif trace and count:
- self.globaltrace = self.globaltrace_lt
- self.localtrace = self.localtrace_trace_and_count
- elif trace:
- self.globaltrace = self.globaltrace_lt
- self.localtrace = self.localtrace_trace
- elif count:
- self.globaltrace = self.globaltrace_lt
- self.localtrace = self.localtrace_count
- else:
- # Ahem -- do nothing? Okay.
- self.donothing = 1
-
- def run(self, cmd):
- import __main__
- dict = __main__.__dict__
- if not self.donothing:
- sys.settrace(self.globaltrace)
- threading.settrace(self.globaltrace)
- try:
- exec cmd in dict, dict
- finally:
- if not self.donothing:
- sys.settrace(None)
- threading.settrace(None)
-
- def runctx(self, cmd, globals=None, locals=None):
- if globals is None: globals = {}
- if locals is None: locals = {}
- if not self.donothing:
- sys.settrace(self.globaltrace)
- threading.settrace(self.globaltrace)
- try:
- exec cmd in globals, locals
- finally:
- if not self.donothing:
- sys.settrace(None)
- threading.settrace(None)
-
- def runfunc(self, func, *args, **kw):
- result = None
- if not self.donothing:
- sys.settrace(self.globaltrace)
- try:
- result = func(*args, **kw)
- finally:
- if not self.donothing:
- sys.settrace(None)
- return result
-
- def file_module_function_of(self, frame):
- code = frame.f_code
- filename = code.co_filename
- if filename:
- modulename = modname(filename)
- else:
- modulename = None
-
- funcname = code.co_name
- clsname = None
- if code in self._caller_cache:
- if self._caller_cache[code] is not None:
- clsname = self._caller_cache[code]
- else:
- self._caller_cache[code] = None
- ## use of gc.get_referrers() was suggested by Michael Hudson
- # all functions which refer to this code object
- funcs = [f for f in gc.get_referrers(code)
- if hasattr(f, "func_doc")]
- # require len(func) == 1 to avoid ambiguity caused by calls to
- # new.function(): "In the face of ambiguity, refuse the
- # temptation to guess."
- if len(funcs) == 1:
- dicts = [d for d in gc.get_referrers(funcs[0])
- if isinstance(d, dict)]
- if len(dicts) == 1:
- classes = [c for c in gc.get_referrers(dicts[0])
- if hasattr(c, "__bases__")]
- if len(classes) == 1:
- # ditto for new.classobj()
- clsname = str(classes[0])
- # cache the result - assumption is that new.* is
- # not called later to disturb this relationship
- # _caller_cache could be flushed if functions in
- # the new module get called.
- self._caller_cache[code] = clsname
- if clsname is not None:
- # final hack - module name shows up in str(cls), but we've already
- # computed module name, so remove it
- clsname = clsname.split(".")[1:]
- clsname = ".".join(clsname)
- funcname = "%s.%s" % (clsname, funcname)
-
- return filename, modulename, funcname
-
- def globaltrace_trackcallers(self, frame, why, arg):
- """Handler for call events.
-
- Adds information about who called who to the self._callers dict.
- """
- if why == 'call':
- # XXX Should do a better job of identifying methods
- this_func = self.file_module_function_of(frame)
- parent_func = self.file_module_function_of(frame.f_back)
- self._callers[(parent_func, this_func)] = 1
-
- def globaltrace_countfuncs(self, frame, why, arg):
- """Handler for call events.
-
- Adds (filename, modulename, funcname) to the self._calledfuncs dict.
- """
- if why == 'call':
- this_func = self.file_module_function_of(frame)
- self._calledfuncs[this_func] = 1
-
- def globaltrace_lt(self, frame, why, arg):
- """Handler for call events.
-
- If the code block being entered is to be ignored, returns `None',
- else returns self.localtrace.
- """
- if why == 'call':
- code = frame.f_code
- filename = frame.f_globals.get('__file__', None)
- if filename:
- # XXX modname() doesn't work right for packages, so
- # the ignore support won't work right for packages
- modulename = modname(filename)
- if modulename is not None:
- ignore_it = self.ignore.names(filename, modulename)
- if not ignore_it:
- if self.trace:
- print (" --- modulename: %s, funcname: %s"
- % (modulename, code.co_name))
- return self.localtrace
- else:
- return None
-
- def localtrace_trace_and_count(self, frame, why, arg):
- if why == "line":
- # record the file name and line number of every trace
- filename = frame.f_code.co_filename
- lineno = frame.f_lineno
- key = filename, lineno
- self.counts[key] = self.counts.get(key, 0) + 1
-
- bname = os.path.basename(filename)
- print "%s(%d): %s" % (bname, lineno,
- linecache.getline(filename, lineno)),
- return self.localtrace
-
- def localtrace_trace(self, frame, why, arg):
- if why == "line":
- # record the file name and line number of every trace
- filename = frame.f_code.co_filename
- lineno = frame.f_lineno
-
- bname = os.path.basename(filename)
- print "%s(%d): %s" % (bname, lineno,
- linecache.getline(filename, lineno)),
- return self.localtrace
-
- def localtrace_count(self, frame, why, arg):
- if why == "line":
- filename = frame.f_code.co_filename
- lineno = frame.f_lineno
- key = filename, lineno
- self.counts[key] = self.counts.get(key, 0) + 1
- return self.localtrace
-
- def results(self):
- return CoverageResults(self.counts, infile=self.infile,
- outfile=self.outfile,
- calledfuncs=self._calledfuncs,
- callers=self._callers)
-
-def _err_exit(msg):
- sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
- sys.exit(1)
-
-def main(argv=None):
- import getopt
-
- if argv is None:
- argv = sys.argv
- try:
- opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lT",
- ["help", "version", "trace", "count",
- "report", "no-report", "summary",
- "file=", "missing",
- "ignore-module=", "ignore-dir=",
- "coverdir=", "listfuncs",
- "trackcalls"])
-
- except getopt.error, msg:
- sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
- sys.stderr.write("Try `%s --help' for more information\n"
- % sys.argv[0])
- sys.exit(1)
-
- trace = 0
- count = 0
- report = 0
- no_report = 0
- counts_file = None
- missing = 0
- ignore_modules = []
- ignore_dirs = []
- coverdir = None
- summary = 0
- listfuncs = False
- countcallers = False
-
- for opt, val in opts:
- if opt == "--help":
- usage(sys.stdout)
- sys.exit(0)
-
- if opt == "--version":
- sys.stdout.write("trace 2.0\n")
- sys.exit(0)
-
- if opt == "-T" or opt == "--trackcalls":
- countcallers = True
- continue
-
- if opt == "-l" or opt == "--listfuncs":
- listfuncs = True
- continue
-
- if opt == "-t" or opt == "--trace":
- trace = 1
- continue
-
- if opt == "-c" or opt == "--count":
- count = 1
- continue
-
- if opt == "-r" or opt == "--report":
- report = 1
- continue
-
- if opt == "-R" or opt == "--no-report":
- no_report = 1
- continue
-
- if opt == "-f" or opt == "--file":
- counts_file = val
- continue
-
- if opt == "-m" or opt == "--missing":
- missing = 1
- continue
-
- if opt == "-C" or opt == "--coverdir":
- coverdir = val
- continue
-
- if opt == "-s" or opt == "--summary":
- summary = 1
- continue
-
- if opt == "--ignore-module":
- ignore_modules.append(val)
- continue
-
- if opt == "--ignore-dir":
- for s in val.split(os.pathsep):
- s = os.path.expandvars(s)
- # should I also call expanduser? (after all, could use $HOME)
-
- s = s.replace("$prefix",
- os.path.join(sys.prefix, "lib",
- "python" + sys.version[:3]))
- s = s.replace("$exec_prefix",
- os.path.join(sys.exec_prefix, "lib",
- "python" + sys.version[:3]))
- s = os.path.normpath(s)
- ignore_dirs.append(s)
- continue
-
- assert 0, "Should never get here"
-
- if listfuncs and (count or trace):
- _err_exit("cannot specify both --listfuncs and (--trace or --count)")
-
- if not (count or trace or report or listfuncs or countcallers):
- _err_exit("must specify one of --trace, --count, --report, "
- "--listfuncs, or --trackcalls")
-
- if report and no_report:
- _err_exit("cannot specify both --report and --no-report")
-
- if report and not counts_file:
- _err_exit("--report requires a --file")
-
- if no_report and len(prog_argv) == 0:
- _err_exit("missing name of file to run")
-
- # everything is ready
- if report:
- results = CoverageResults(infile=counts_file, outfile=counts_file)
- results.write_results(missing, summary=summary, coverdir=coverdir)
- else:
- sys.argv = prog_argv
- progname = prog_argv[0]
- sys.path[0] = os.path.split(progname)[0]
-
- t = Trace(count, trace, countfuncs=listfuncs,
- countcallers=countcallers, ignoremods=ignore_modules,
- ignoredirs=ignore_dirs, infile=counts_file,
- outfile=counts_file)
- try:
- t.run('execfile(%r)' % (progname,))
- except IOError, err:
- _err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
- except SystemExit:
- pass
-
- results = t.results()
-
- if not no_report:
- results.write_results(missing, summary=summary, coverdir=coverdir)
-
-if __name__=='__main__':
- main()
diff --git a/sys/lib/python/traceback.py b/sys/lib/python/traceback.py
deleted file mode 100644
index 31b825537..000000000
--- a/sys/lib/python/traceback.py
+++ /dev/null
@@ -1,312 +0,0 @@
-"""Extract, format and print information about Python stack traces."""
-
-import linecache
-import sys
-import types
-
-__all__ = ['extract_stack', 'extract_tb', 'format_exception',
- 'format_exception_only', 'format_list', 'format_stack',
- 'format_tb', 'print_exc', 'format_exc', 'print_exception',
- 'print_last', 'print_stack', 'print_tb', 'tb_lineno']
-
-def _print(file, str='', terminator='\n'):
- file.write(str+terminator)
-
-
-def print_list(extracted_list, file=None):
- """Print the list of tuples as returned by extract_tb() or
- extract_stack() as a formatted stack trace to the given file."""
- if file is None:
- file = sys.stderr
- for filename, lineno, name, line in extracted_list:
- _print(file,
- ' File "%s", line %d, in %s' % (filename,lineno,name))
- if line:
- _print(file, ' %s' % line.strip())
-
-def format_list(extracted_list):
- """Format a list of traceback entry tuples for printing.
-
- Given a list of tuples as returned by extract_tb() or
- extract_stack(), return a list of strings ready for printing.
- Each string in the resulting list corresponds to the item with the
- same index in the argument list. Each string ends in a newline;
- the strings may contain internal newlines as well, for those items
- whose source text line is not None.
- """
- list = []
- for filename, lineno, name, line in extracted_list:
- item = ' File "%s", line %d, in %s\n' % (filename,lineno,name)
- if line:
- item = item + ' %s\n' % line.strip()
- list.append(item)
- return list
-
-
-def print_tb(tb, limit=None, file=None):
- """Print up to 'limit' stack trace entries from the traceback 'tb'.
-
- If 'limit' is omitted or None, all entries are printed. If 'file'
- is omitted or None, the output goes to sys.stderr; otherwise
- 'file' should be an open file or file-like object with a write()
- method.
- """
- if file is None:
- file = sys.stderr
- if limit is None:
- if hasattr(sys, 'tracebacklimit'):
- limit = sys.tracebacklimit
- n = 0
- while tb is not None and (limit is None or n < limit):
- f = tb.tb_frame
- lineno = tb.tb_lineno
- co = f.f_code
- filename = co.co_filename
- name = co.co_name
- _print(file,
- ' File "%s", line %d, in %s' % (filename,lineno,name))
- linecache.checkcache(filename)
- line = linecache.getline(filename, lineno, f.f_globals)
- if line: _print(file, ' ' + line.strip())
- tb = tb.tb_next
- n = n+1
-
-def format_tb(tb, limit = None):
- """A shorthand for 'format_list(extract_stack(f, limit))."""
- return format_list(extract_tb(tb, limit))
-
-def extract_tb(tb, limit = None):
- """Return list of up to limit pre-processed entries from traceback.
-
- This is useful for alternate formatting of stack traces. If
- 'limit' is omitted or None, all entries are extracted. A
- pre-processed stack trace entry is a quadruple (filename, line
- number, function name, text) representing the information that is
- usually printed for a stack trace. The text is a string with
- leading and trailing whitespace stripped; if the source is not
- available it is None.
- """
- if limit is None:
- if hasattr(sys, 'tracebacklimit'):
- limit = sys.tracebacklimit
- list = []
- n = 0
- while tb is not None and (limit is None or n < limit):
- f = tb.tb_frame
- lineno = tb.tb_lineno
- co = f.f_code
- filename = co.co_filename
- name = co.co_name
- linecache.checkcache(filename)
- line = linecache.getline(filename, lineno, f.f_globals)
- if line: line = line.strip()
- else: line = None
- list.append((filename, lineno, name, line))
- tb = tb.tb_next
- n = n+1
- return list
-
-
-def print_exception(etype, value, tb, limit=None, file=None):
- """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.
-
- This differs from print_tb() in the following ways: (1) if
- traceback is not None, it prints a header "Traceback (most recent
- call last):"; (2) it prints the exception type and value after the
- stack trace; (3) if type is SyntaxError and value has the
- appropriate format, it prints the line where the syntax error
- occurred with a caret on the next line indicating the approximate
- position of the error.
- """
- if file is None:
- file = sys.stderr
- if tb:
- _print(file, 'Traceback (most recent call last):')
- print_tb(tb, limit, file)
- lines = format_exception_only(etype, value)
- for line in lines[:-1]:
- _print(file, line, ' ')
- _print(file, lines[-1], '')
-
-def format_exception(etype, value, tb, limit = None):
- """Format a stack trace and the exception information.
-
- The arguments have the same meaning as the corresponding arguments
- to print_exception(). The return value is a list of strings, each
- ending in a newline and some containing internal newlines. When
- these lines are concatenated and printed, exactly the same text is
- printed as does print_exception().
- """
- if tb:
- list = ['Traceback (most recent call last):\n']
- list = list + format_tb(tb, limit)
- else:
- list = []
- list = list + format_exception_only(etype, value)
- return list
-
-def format_exception_only(etype, value):
- """Format the exception part of a traceback.
-
- The arguments are the exception type and value such as given by
- sys.last_type and sys.last_value. The return value is a list of
- strings, each ending in a newline.
-
- Normally, the list contains a single string; however, for
- SyntaxError exceptions, it contains several lines that (when
- printed) display detailed information about where the syntax
- error occurred.
-
- The message indicating which exception occurred is always the last
- string in the list.
-
- """
-
- # An instance should not have a meaningful value parameter, but
- # sometimes does, particularly for string exceptions, such as
- # >>> raise string1, string2 # deprecated
- #
- # Clear these out first because issubtype(string1, SyntaxError)
- # would throw another exception and mask the original problem.
- if (isinstance(etype, BaseException) or
- isinstance(etype, types.InstanceType) or
- etype is None or type(etype) is str):
- return [_format_final_exc_line(etype, value)]
-
- stype = etype.__name__
-
- if not issubclass(etype, SyntaxError):
- return [_format_final_exc_line(stype, value)]
-
- # It was a syntax error; show exactly where the problem was found.
- lines = []
- try:
- msg, (filename, lineno, offset, badline) = value
- except Exception:
- pass
- else:
- filename = filename or "<string>"
- lines.append(' File "%s", line %d\n' % (filename, lineno))
- if badline is not None:
- lines.append(' %s\n' % badline.strip())
- if offset is not None:
- caretspace = badline[:offset].lstrip()
- # non-space whitespace (likes tabs) must be kept for alignment
- caretspace = ((c.isspace() and c or ' ') for c in caretspace)
- # only three spaces to account for offset1 == pos 0
- lines.append(' %s^\n' % ''.join(caretspace))
- value = msg
-
- lines.append(_format_final_exc_line(stype, value))
- return lines
-
-def _format_final_exc_line(etype, value):
- """Return a list of a single line -- normal case for format_exception_only"""
- valuestr = _some_str(value)
- if value is None or not valuestr:
- line = "%s\n" % etype
- else:
- line = "%s: %s\n" % (etype, valuestr)
- return line
-
-def _some_str(value):
- try:
- return str(value)
- except:
- return '<unprintable %s object>' % type(value).__name__
-
-
-def print_exc(limit=None, file=None):
- """Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'.
- (In fact, it uses sys.exc_info() to retrieve the same information
- in a thread-safe way.)"""
- if file is None:
- file = sys.stderr
- try:
- etype, value, tb = sys.exc_info()
- print_exception(etype, value, tb, limit, file)
- finally:
- etype = value = tb = None
-
-
-def format_exc(limit=None):
- """Like print_exc() but return a string."""
- try:
- etype, value, tb = sys.exc_info()
- return ''.join(format_exception(etype, value, tb, limit))
- finally:
- etype = value = tb = None
-
-
-def print_last(limit=None, file=None):
- """This is a shorthand for 'print_exception(sys.last_type,
- sys.last_value, sys.last_traceback, limit, file)'."""
- if file is None:
- file = sys.stderr
- print_exception(sys.last_type, sys.last_value, sys.last_traceback,
- limit, file)
-
-
-def print_stack(f=None, limit=None, file=None):
- """Print a stack trace from its invocation point.
-
- The optional 'f' argument can be used to specify an alternate
- stack frame at which to start. The optional 'limit' and 'file'
- arguments have the same meaning as for print_exception().
- """
- if f is None:
- try:
- raise ZeroDivisionError
- except ZeroDivisionError:
- f = sys.exc_info()[2].tb_frame.f_back
- print_list(extract_stack(f, limit), file)
-
-def format_stack(f=None, limit=None):
- """Shorthand for 'format_list(extract_stack(f, limit))'."""
- if f is None:
- try:
- raise ZeroDivisionError
- except ZeroDivisionError:
- f = sys.exc_info()[2].tb_frame.f_back
- return format_list(extract_stack(f, limit))
-
-def extract_stack(f=None, limit = None):
- """Extract the raw traceback from the current stack frame.
-
- The return value has the same format as for extract_tb(). The
- optional 'f' and 'limit' arguments have the same meaning as for
- print_stack(). Each item in the list is a quadruple (filename,
- line number, function name, text), and the entries are in order
- from oldest to newest stack frame.
- """
- if f is None:
- try:
- raise ZeroDivisionError
- except ZeroDivisionError:
- f = sys.exc_info()[2].tb_frame.f_back
- if limit is None:
- if hasattr(sys, 'tracebacklimit'):
- limit = sys.tracebacklimit
- list = []
- n = 0
- while f is not None and (limit is None or n < limit):
- lineno = f.f_lineno
- co = f.f_code
- filename = co.co_filename
- name = co.co_name
- linecache.checkcache(filename)
- line = linecache.getline(filename, lineno, f.f_globals)
- if line: line = line.strip()
- else: line = None
- list.append((filename, lineno, name, line))
- f = f.f_back
- n = n+1
- list.reverse()
- return list
-
-def tb_lineno(tb):
- """Calculate correct line number of traceback given in tb.
-
- Obsolete in 2.3.
- """
- return tb.tb_lineno
diff --git a/sys/lib/python/tty.py b/sys/lib/python/tty.py
deleted file mode 100644
index a72eb6755..000000000
--- a/sys/lib/python/tty.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Terminal utilities."""
-
-# Author: Steen Lumholt.
-
-from termios import *
-
-__all__ = ["setraw", "setcbreak"]
-
-# Indexes for termios list.
-IFLAG = 0
-OFLAG = 1
-CFLAG = 2
-LFLAG = 3
-ISPEED = 4
-OSPEED = 5
-CC = 6
-
-def setraw(fd, when=TCSAFLUSH):
- """Put terminal into a raw mode."""
- mode = tcgetattr(fd)
- mode[IFLAG] = mode[IFLAG] & ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON)
- mode[OFLAG] = mode[OFLAG] & ~(OPOST)
- mode[CFLAG] = mode[CFLAG] & ~(CSIZE | PARENB)
- mode[CFLAG] = mode[CFLAG] | CS8
- mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON | IEXTEN | ISIG)
- mode[CC][VMIN] = 1
- mode[CC][VTIME] = 0
- tcsetattr(fd, when, mode)
-
-def setcbreak(fd, when=TCSAFLUSH):
- """Put terminal into a cbreak mode."""
- mode = tcgetattr(fd)
- mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON)
- mode[CC][VMIN] = 1
- mode[CC][VTIME] = 0
- tcsetattr(fd, when, mode)
diff --git a/sys/lib/python/types.py b/sys/lib/python/types.py
deleted file mode 100644
index 6c8c2b26f..000000000
--- a/sys/lib/python/types.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""Define names for all type symbols known in the standard interpreter.
-
-Types that are part of optional modules (e.g. array) are not listed.
-"""
-import sys
-
-# Iterators in Python aren't a matter of type but of protocol. A large
-# and changing number of builtin types implement *some* flavor of
-# iterator. Don't check the type! Use hasattr to check for both
-# "__iter__" and "next" attributes instead.
-
-NoneType = type(None)
-TypeType = type
-ObjectType = object
-
-IntType = int
-LongType = long
-FloatType = float
-BooleanType = bool
-try:
- ComplexType = complex
-except NameError:
- pass
-
-StringType = str
-
-# StringTypes is already outdated. Instead of writing "type(x) in
-# types.StringTypes", you should use "isinstance(x, basestring)". But
-# we keep around for compatibility with Python 2.2.
-try:
- UnicodeType = unicode
- StringTypes = (StringType, UnicodeType)
-except NameError:
- StringTypes = (StringType,)
-
-BufferType = buffer
-
-TupleType = tuple
-ListType = list
-DictType = DictionaryType = dict
-
-def _f(): pass
-FunctionType = type(_f)
-LambdaType = type(lambda: None) # Same as FunctionType
-try:
- CodeType = type(_f.func_code)
-except RuntimeError:
- # Execution in restricted environment
- pass
-
-def _g():
- yield 1
-GeneratorType = type(_g())
-
-class _C:
- def _m(self): pass
-ClassType = type(_C)
-UnboundMethodType = type(_C._m) # Same as MethodType
-_x = _C()
-InstanceType = type(_x)
-MethodType = type(_x._m)
-
-BuiltinFunctionType = type(len)
-BuiltinMethodType = type([].append) # Same as BuiltinFunctionType
-
-ModuleType = type(sys)
-FileType = file
-XRangeType = xrange
-
-try:
- raise TypeError
-except TypeError:
- try:
- tb = sys.exc_info()[2]
- TracebackType = type(tb)
- FrameType = type(tb.tb_frame)
- except AttributeError:
- # In the restricted environment, exc_info returns (None, None,
- # None) Then, tb.tb_frame gives an attribute error
- pass
- tb = None; del tb
-
-SliceType = slice
-EllipsisType = type(Ellipsis)
-
-DictProxyType = type(TypeType.__dict__)
-NotImplementedType = type(NotImplemented)
-
-# Extension types defined in a C helper module. XXX There may be no
-# equivalent in implementations other than CPython, so it seems better to
-# leave them undefined then to set them to e.g. None.
-try:
- import _types
-except ImportError:
- pass
-else:
- GetSetDescriptorType = type(_types.Helper.getter)
- MemberDescriptorType = type(_types.Helper.member)
- del _types
-
-del sys, _f, _g, _C, _x # Not for export
diff --git a/sys/lib/python/unittest.py b/sys/lib/python/unittest.py
deleted file mode 100644
index cd91c2c5b..000000000
--- a/sys/lib/python/unittest.py
+++ /dev/null
@@ -1,816 +0,0 @@
-#!/usr/bin/env python
-'''
-Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's
-Smalltalk testing framework.
-
-This module contains the core framework classes that form the basis of
-specific test cases and suites (TestCase, TestSuite etc.), and also a
-text-based utility class for running the tests and reporting the results
- (TextTestRunner).
-
-Simple usage:
-
- import unittest
-
- class IntegerArithmenticTestCase(unittest.TestCase):
- def testAdd(self): ## test method names begin 'test*'
- self.assertEquals((1 + 2), 3)
- self.assertEquals(0 + 1, 1)
- def testMultiply(self):
- self.assertEquals((0 * 10), 0)
- self.assertEquals((5 * 8), 40)
-
- if __name__ == '__main__':
- unittest.main()
-
-Further information is available in the bundled documentation, and from
-
- http://pyunit.sourceforge.net/
-
-Copyright (c) 1999-2003 Steve Purcell
-This module is free software, and you may redistribute it and/or modify
-it under the same terms as Python itself, so long as this copyright message
-and disclaimer are retained in their original form.
-
-IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
-SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
-THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
-THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
-AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
-SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-'''
-
-__author__ = "Steve Purcell"
-__email__ = "stephen_purcell at yahoo dot com"
-__version__ = "#Revision: 1.63 $"[11:-2]
-
-import time
-import sys
-import traceback
-import os
-import types
-
-##############################################################################
-# Exported classes and functions
-##############################################################################
-__all__ = ['TestResult', 'TestCase', 'TestSuite', 'TextTestRunner',
- 'TestLoader', 'FunctionTestCase', 'main', 'defaultTestLoader']
-
-# Expose obsolete functions for backwards compatibility
-__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
-
-
-##############################################################################
-# Backward compatibility
-##############################################################################
-if sys.version_info[:2] < (2, 2):
- False, True = 0, 1
- def isinstance(obj, clsinfo):
- import __builtin__
- if type(clsinfo) in (tuple, list):
- for cls in clsinfo:
- if cls is type: cls = types.ClassType
- if __builtin__.isinstance(obj, cls):
- return 1
- return 0
- else: return __builtin__.isinstance(obj, clsinfo)
-
-
-##############################################################################
-# Test framework core
-##############################################################################
-
-# All classes defined herein are 'new-style' classes, allowing use of 'super()'
-__metaclass__ = type
-
-def _strclass(cls):
- return "%s.%s" % (cls.__module__, cls.__name__)
-
-__unittest = 1
-
-class TestResult:
- """Holder for test result information.
-
- Test results are automatically managed by the TestCase and TestSuite
- classes, and do not need to be explicitly manipulated by writers of tests.
-
- Each instance holds the total number of tests run, and collections of
- failures and errors that occurred among those test runs. The collections
- contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
- formatted traceback of the error that occurred.
- """
- def __init__(self):
- self.failures = []
- self.errors = []
- self.testsRun = 0
- self.shouldStop = 0
-
- def startTest(self, test):
- "Called when the given test is about to be run"
- self.testsRun = self.testsRun + 1
-
- def stopTest(self, test):
- "Called when the given test has been run"
- pass
-
- def addError(self, test, err):
- """Called when an error has occurred. 'err' is a tuple of values as
- returned by sys.exc_info().
- """
- self.errors.append((test, self._exc_info_to_string(err, test)))
-
- def addFailure(self, test, err):
- """Called when an error has occurred. 'err' is a tuple of values as
- returned by sys.exc_info()."""
- self.failures.append((test, self._exc_info_to_string(err, test)))
-
- def addSuccess(self, test):
- "Called when a test has completed successfully"
- pass
-
- def wasSuccessful(self):
- "Tells whether or not this result was a success"
- return len(self.failures) == len(self.errors) == 0
-
- def stop(self):
- "Indicates that the tests should be aborted"
- self.shouldStop = True
-
- def _exc_info_to_string(self, err, test):
- """Converts a sys.exc_info()-style tuple of values into a string."""
- exctype, value, tb = err
- # Skip test runner traceback levels
- while tb and self._is_relevant_tb_level(tb):
- tb = tb.tb_next
- if exctype is test.failureException:
- # Skip assert*() traceback levels
- length = self._count_relevant_tb_levels(tb)
- return ''.join(traceback.format_exception(exctype, value, tb, length))
- return ''.join(traceback.format_exception(exctype, value, tb))
-
- def _is_relevant_tb_level(self, tb):
- return tb.tb_frame.f_globals.has_key('__unittest')
-
- def _count_relevant_tb_levels(self, tb):
- length = 0
- while tb and not self._is_relevant_tb_level(tb):
- length += 1
- tb = tb.tb_next
- return length
-
- def __repr__(self):
- return "<%s run=%i errors=%i failures=%i>" % \
- (_strclass(self.__class__), self.testsRun, len(self.errors),
- len(self.failures))
-
-class TestCase:
- """A class whose instances are single test cases.
-
- By default, the test code itself should be placed in a method named
- 'runTest'.
-
- If the fixture may be used for many test cases, create as
- many test methods as are needed. When instantiating such a TestCase
- subclass, specify in the constructor arguments the name of the test method
- that the instance is to execute.
-
- Test authors should subclass TestCase for their own tests. Construction
- and deconstruction of the test's environment ('fixture') can be
- implemented by overriding the 'setUp' and 'tearDown' methods respectively.
-
- If it is necessary to override the __init__ method, the base class
- __init__ method must always be called. It is important that subclasses
- should not change the signature of their __init__ method, since instances
- of the classes are instantiated automatically by parts of the framework
- in order to be run.
- """
-
- # This attribute determines which exception will be raised when
- # the instance's assertion methods fail; test methods raising this
- # exception will be deemed to have 'failed' rather than 'errored'
-
- failureException = AssertionError
-
- def __init__(self, methodName='runTest'):
- """Create an instance of the class that will use the named test
- method when executed. Raises a ValueError if the instance does
- not have a method with the specified name.
- """
- try:
- self._testMethodName = methodName
- testMethod = getattr(self, methodName)
- self._testMethodDoc = testMethod.__doc__
- except AttributeError:
- raise ValueError, "no such test method in %s: %s" % \
- (self.__class__, methodName)
-
- def setUp(self):
- "Hook method for setting up the test fixture before exercising it."
- pass
-
- def tearDown(self):
- "Hook method for deconstructing the test fixture after testing it."
- pass
-
- def countTestCases(self):
- return 1
-
- def defaultTestResult(self):
- return TestResult()
-
- def shortDescription(self):
- """Returns a one-line description of the test, or None if no
- description has been provided.
-
- The default implementation of this method returns the first line of
- the specified test method's docstring.
- """
- doc = self._testMethodDoc
- return doc and doc.split("\n")[0].strip() or None
-
- def id(self):
- return "%s.%s" % (_strclass(self.__class__), self._testMethodName)
-
- def __str__(self):
- return "%s (%s)" % (self._testMethodName, _strclass(self.__class__))
-
- def __repr__(self):
- return "<%s testMethod=%s>" % \
- (_strclass(self.__class__), self._testMethodName)
-
- def run(self, result=None):
- if result is None: result = self.defaultTestResult()
- result.startTest(self)
- testMethod = getattr(self, self._testMethodName)
- try:
- try:
- self.setUp()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
- return
-
- ok = False
- try:
- testMethod()
- ok = True
- except self.failureException:
- result.addFailure(self, self._exc_info())
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
-
- try:
- self.tearDown()
- except KeyboardInterrupt:
- raise
- except:
- result.addError(self, self._exc_info())
- ok = False
- if ok: result.addSuccess(self)
- finally:
- result.stopTest(self)
-
- def __call__(self, *args, **kwds):
- return self.run(*args, **kwds)
-
- def debug(self):
- """Run the test without collecting errors in a TestResult"""
- self.setUp()
- getattr(self, self._testMethodName)()
- self.tearDown()
-
- def _exc_info(self):
- """Return a version of sys.exc_info() with the traceback frame
- minimised; usually the top level of the traceback frame is not
- needed.
- """
- exctype, excvalue, tb = sys.exc_info()
- if sys.platform[:4] == 'java': ## tracebacks look different in Jython
- return (exctype, excvalue, tb)
- return (exctype, excvalue, tb)
-
- def fail(self, msg=None):
- """Fail immediately, with the given message."""
- raise self.failureException, msg
-
- def failIf(self, expr, msg=None):
- "Fail the test if the expression is true."
- if expr: raise self.failureException, msg
-
- def failUnless(self, expr, msg=None):
- """Fail the test unless the expression is true."""
- if not expr: raise self.failureException, msg
-
- def failUnlessRaises(self, excClass, callableObj, *args, **kwargs):
- """Fail unless an exception of class excClass is thrown
- by callableObj when invoked with arguments args and keyword
- arguments kwargs. If a different type of exception is
- thrown, it will not be caught, and the test case will be
- deemed to have suffered an error, exactly as for an
- unexpected exception.
- """
- try:
- callableObj(*args, **kwargs)
- except excClass:
- return
- else:
- if hasattr(excClass,'__name__'): excName = excClass.__name__
- else: excName = str(excClass)
- raise self.failureException, "%s not raised" % excName
-
- def failUnlessEqual(self, first, second, msg=None):
- """Fail if the two objects are unequal as determined by the '=='
- operator.
- """
- if not first == second:
- raise self.failureException, \
- (msg or '%r != %r' % (first, second))
-
- def failIfEqual(self, first, second, msg=None):
- """Fail if the two objects are equal as determined by the '=='
- operator.
- """
- if first == second:
- raise self.failureException, \
- (msg or '%r == %r' % (first, second))
-
- def failUnlessAlmostEqual(self, first, second, places=7, msg=None):
- """Fail if the two objects are unequal as determined by their
- difference rounded to the given number of decimal places
- (default 7) and comparing to zero.
-
- Note that decimal places (from zero) are usually not the same
- as significant digits (measured from the most signficant digit).
- """
- if round(second-first, places) != 0:
- raise self.failureException, \
- (msg or '%r != %r within %r places' % (first, second, places))
-
- def failIfAlmostEqual(self, first, second, places=7, msg=None):
- """Fail if the two objects are equal as determined by their
- difference rounded to the given number of decimal places
- (default 7) and comparing to zero.
-
- Note that decimal places (from zero) are usually not the same
- as significant digits (measured from the most signficant digit).
- """
- if round(second-first, places) == 0:
- raise self.failureException, \
- (msg or '%r == %r within %r places' % (first, second, places))
-
- # Synonyms for assertion methods
-
- assertEqual = assertEquals = failUnlessEqual
-
- assertNotEqual = assertNotEquals = failIfEqual
-
- assertAlmostEqual = assertAlmostEquals = failUnlessAlmostEqual
-
- assertNotAlmostEqual = assertNotAlmostEquals = failIfAlmostEqual
-
- assertRaises = failUnlessRaises
-
- assert_ = assertTrue = failUnless
-
- assertFalse = failIf
-
-
-
-class TestSuite:
- """A test suite is a composite test consisting of a number of TestCases.
-
- For use, create an instance of TestSuite, then add test case instances.
- When all tests have been added, the suite can be passed to a test
- runner, such as TextTestRunner. It will run the individual test cases
- in the order in which they were added, aggregating the results. When
- subclassing, do not forget to call the base class constructor.
- """
- def __init__(self, tests=()):
- self._tests = []
- self.addTests(tests)
-
- def __repr__(self):
- return "<%s tests=%s>" % (_strclass(self.__class__), self._tests)
-
- __str__ = __repr__
-
- def __iter__(self):
- return iter(self._tests)
-
- def countTestCases(self):
- cases = 0
- for test in self._tests:
- cases += test.countTestCases()
- return cases
-
- def addTest(self, test):
- # sanity checks
- if not callable(test):
- raise TypeError("the test to add must be callable")
- if (isinstance(test, (type, types.ClassType)) and
- issubclass(test, (TestCase, TestSuite))):
- raise TypeError("TestCases and TestSuites must be instantiated "
- "before passing them to addTest()")
- self._tests.append(test)
-
- def addTests(self, tests):
- if isinstance(tests, basestring):
- raise TypeError("tests must be an iterable of tests, not a string")
- for test in tests:
- self.addTest(test)
-
- def run(self, result):
- for test in self._tests:
- if result.shouldStop:
- break
- test(result)
- return result
-
- def __call__(self, *args, **kwds):
- return self.run(*args, **kwds)
-
- def debug(self):
- """Run the tests without collecting errors in a TestResult"""
- for test in self._tests: test.debug()
-
-
-class FunctionTestCase(TestCase):
- """A test case that wraps a test function.
-
- This is useful for slipping pre-existing test functions into the
- PyUnit framework. Optionally, set-up and tidy-up functions can be
- supplied. As with TestCase, the tidy-up ('tearDown') function will
- always be called if the set-up ('setUp') function ran successfully.
- """
-
- def __init__(self, testFunc, setUp=None, tearDown=None,
- description=None):
- TestCase.__init__(self)
- self.__setUpFunc = setUp
- self.__tearDownFunc = tearDown
- self.__testFunc = testFunc
- self.__description = description
-
- def setUp(self):
- if self.__setUpFunc is not None:
- self.__setUpFunc()
-
- def tearDown(self):
- if self.__tearDownFunc is not None:
- self.__tearDownFunc()
-
- def runTest(self):
- self.__testFunc()
-
- def id(self):
- return self.__testFunc.__name__
-
- def __str__(self):
- return "%s (%s)" % (_strclass(self.__class__), self.__testFunc.__name__)
-
- def __repr__(self):
- return "<%s testFunc=%s>" % (_strclass(self.__class__), self.__testFunc)
-
- def shortDescription(self):
- if self.__description is not None: return self.__description
- doc = self.__testFunc.__doc__
- return doc and doc.split("\n")[0].strip() or None
-
-
-
-##############################################################################
-# Locating and loading tests
-##############################################################################
-
-class TestLoader:
- """This class is responsible for loading tests according to various
- criteria and returning them wrapped in a Test
- """
- testMethodPrefix = 'test'
- sortTestMethodsUsing = cmp
- suiteClass = TestSuite
-
- def loadTestsFromTestCase(self, testCaseClass):
- """Return a suite of all tests cases contained in testCaseClass"""
- if issubclass(testCaseClass, TestSuite):
- raise TypeError("Test cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?")
- testCaseNames = self.getTestCaseNames(testCaseClass)
- if not testCaseNames and hasattr(testCaseClass, 'runTest'):
- testCaseNames = ['runTest']
- return self.suiteClass(map(testCaseClass, testCaseNames))
-
- def loadTestsFromModule(self, module):
- """Return a suite of all tests cases contained in the given module"""
- tests = []
- for name in dir(module):
- obj = getattr(module, name)
- if (isinstance(obj, (type, types.ClassType)) and
- issubclass(obj, TestCase)):
- tests.append(self.loadTestsFromTestCase(obj))
- return self.suiteClass(tests)
-
- def loadTestsFromName(self, name, module=None):
- """Return a suite of all tests cases given a string specifier.
-
- The name may resolve either to a module, a test case class, a
- test method within a test case class, or a callable object which
- returns a TestCase or TestSuite instance.
-
- The method optionally resolves the names relative to a given module.
- """
- parts = name.split('.')
- if module is None:
- parts_copy = parts[:]
- while parts_copy:
- try:
- module = __import__('.'.join(parts_copy))
- break
- except ImportError:
- del parts_copy[-1]
- if not parts_copy: raise
- parts = parts[1:]
- obj = module
- for part in parts:
- parent, obj = obj, getattr(obj, part)
-
- if type(obj) == types.ModuleType:
- return self.loadTestsFromModule(obj)
- elif (isinstance(obj, (type, types.ClassType)) and
- issubclass(obj, TestCase)):
- return self.loadTestsFromTestCase(obj)
- elif type(obj) == types.UnboundMethodType:
- return parent(obj.__name__)
- elif isinstance(obj, TestSuite):
- return obj
- elif callable(obj):
- test = obj()
- if not isinstance(test, (TestCase, TestSuite)):
- raise ValueError, \
- "calling %s returned %s, not a test" % (obj,test)
- return test
- else:
- raise ValueError, "don't know how to make test from: %s" % obj
-
- def loadTestsFromNames(self, names, module=None):
- """Return a suite of all tests cases found using the given sequence
- of string specifiers. See 'loadTestsFromName()'.
- """
- suites = [self.loadTestsFromName(name, module) for name in names]
- return self.suiteClass(suites)
-
- def getTestCaseNames(self, testCaseClass):
- """Return a sorted sequence of method names found within testCaseClass
- """
- def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix):
- return attrname.startswith(prefix) and callable(getattr(testCaseClass, attrname))
- testFnNames = filter(isTestMethod, dir(testCaseClass))
- for baseclass in testCaseClass.__bases__:
- for testFnName in self.getTestCaseNames(baseclass):
- if testFnName not in testFnNames: # handle overridden methods
- testFnNames.append(testFnName)
- if self.sortTestMethodsUsing:
- testFnNames.sort(self.sortTestMethodsUsing)
- return testFnNames
-
-
-
-defaultTestLoader = TestLoader()
-
-
-##############################################################################
-# Patches for old functions: these functions should be considered obsolete
-##############################################################################
-
-def _makeLoader(prefix, sortUsing, suiteClass=None):
- loader = TestLoader()
- loader.sortTestMethodsUsing = sortUsing
- loader.testMethodPrefix = prefix
- if suiteClass: loader.suiteClass = suiteClass
- return loader
-
-def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
- return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
-
-def makeSuite(testCaseClass, prefix='test', sortUsing=cmp, suiteClass=TestSuite):
- return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
-
-def findTestCases(module, prefix='test', sortUsing=cmp, suiteClass=TestSuite):
- return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
-
-
-##############################################################################
-# Text UI
-##############################################################################
-
-class _WritelnDecorator:
- """Used to decorate file-like objects with a handy 'writeln' method"""
- def __init__(self,stream):
- self.stream = stream
-
- def __getattr__(self, attr):
- return getattr(self.stream,attr)
-
- def writeln(self, arg=None):
- if arg: self.write(arg)
- self.write('\n') # text-mode streams translate to \r\n if needed
-
-
-class _TextTestResult(TestResult):
- """A test result class that can print formatted text results to a stream.
-
- Used by TextTestRunner.
- """
- separator1 = '=' * 70
- separator2 = '-' * 70
-
- def __init__(self, stream, descriptions, verbosity):
- TestResult.__init__(self)
- self.stream = stream
- self.showAll = verbosity > 1
- self.dots = verbosity == 1
- self.descriptions = descriptions
-
- def getDescription(self, test):
- if self.descriptions:
- return test.shortDescription() or str(test)
- else:
- return str(test)
-
- def startTest(self, test):
- TestResult.startTest(self, test)
- if self.showAll:
- self.stream.write(self.getDescription(test))
- self.stream.write(" ... ")
-
- def addSuccess(self, test):
- TestResult.addSuccess(self, test)
- if self.showAll:
- self.stream.writeln("ok")
- elif self.dots:
- self.stream.write('.')
-
- def addError(self, test, err):
- TestResult.addError(self, test, err)
- if self.showAll:
- self.stream.writeln("ERROR")
- elif self.dots:
- self.stream.write('E')
-
- def addFailure(self, test, err):
- TestResult.addFailure(self, test, err)
- if self.showAll:
- self.stream.writeln("FAIL")
- elif self.dots:
- self.stream.write('F')
-
- def printErrors(self):
- if self.dots or self.showAll:
- self.stream.writeln()
- self.printErrorList('ERROR', self.errors)
- self.printErrorList('FAIL', self.failures)
-
- def printErrorList(self, flavour, errors):
- for test, err in errors:
- self.stream.writeln(self.separator1)
- self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
- self.stream.writeln(self.separator2)
- self.stream.writeln("%s" % err)
-
-
-class TextTestRunner:
- """A test runner class that displays results in textual form.
-
- It prints out the names of tests as they are run, errors as they
- occur, and a summary of the results at the end of the test run.
- """
- def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1):
- self.stream = _WritelnDecorator(stream)
- self.descriptions = descriptions
- self.verbosity = verbosity
-
- def _makeResult(self):
- return _TextTestResult(self.stream, self.descriptions, self.verbosity)
-
- def run(self, test):
- "Run the given test case or test suite."
- result = self._makeResult()
- startTime = time.time()
- test(result)
- stopTime = time.time()
- timeTaken = stopTime - startTime
- result.printErrors()
- self.stream.writeln(result.separator2)
- run = result.testsRun
- self.stream.writeln("Ran %d test%s in %.3fs" %
- (run, run != 1 and "s" or "", timeTaken))
- self.stream.writeln()
- if not result.wasSuccessful():
- self.stream.write("FAILED (")
- failed, errored = map(len, (result.failures, result.errors))
- if failed:
- self.stream.write("failures=%d" % failed)
- if errored:
- if failed: self.stream.write(", ")
- self.stream.write("errors=%d" % errored)
- self.stream.writeln(")")
- else:
- self.stream.writeln("OK")
- return result
-
-
-
-##############################################################################
-# Facilities for running tests from the command line
-##############################################################################
-
-class TestProgram:
- """A command-line program that runs a set of tests; this is primarily
- for making test modules conveniently executable.
- """
- USAGE = """\
-Usage: %(progName)s [options] [test] [...]
-
-Options:
- -h, --help Show this message
- -v, --verbose Verbose output
- -q, --quiet Minimal output
-
-Examples:
- %(progName)s - run default set of tests
- %(progName)s MyTestSuite - run suite 'MyTestSuite'
- %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
- %(progName)s MyTestCase - run all 'test*' test methods
- in MyTestCase
-"""
- def __init__(self, module='__main__', defaultTest=None,
- argv=None, testRunner=None, testLoader=defaultTestLoader):
- if type(module) == type(''):
- self.module = __import__(module)
- for part in module.split('.')[1:]:
- self.module = getattr(self.module, part)
- else:
- self.module = module
- if argv is None:
- argv = sys.argv
- self.verbosity = 1
- self.defaultTest = defaultTest
- self.testRunner = testRunner
- self.testLoader = testLoader
- self.progName = os.path.basename(argv[0])
- self.parseArgs(argv)
- self.runTests()
-
- def usageExit(self, msg=None):
- if msg: print msg
- print self.USAGE % self.__dict__
- sys.exit(2)
-
- def parseArgs(self, argv):
- import getopt
- try:
- options, args = getopt.getopt(argv[1:], 'hHvq',
- ['help','verbose','quiet'])
- for opt, value in options:
- if opt in ('-h','-H','--help'):
- self.usageExit()
- if opt in ('-q','--quiet'):
- self.verbosity = 0
- if opt in ('-v','--verbose'):
- self.verbosity = 2
- if len(args) == 0 and self.defaultTest is None:
- self.test = self.testLoader.loadTestsFromModule(self.module)
- return
- if len(args) > 0:
- self.testNames = args
- else:
- self.testNames = (self.defaultTest,)
- self.createTests()
- except getopt.error, msg:
- self.usageExit(msg)
-
- def createTests(self):
- self.test = self.testLoader.loadTestsFromNames(self.testNames,
- self.module)
-
- def runTests(self):
- if self.testRunner is None:
- self.testRunner = TextTestRunner(verbosity=self.verbosity)
- result = self.testRunner.run(self.test)
- sys.exit(not result.wasSuccessful())
-
-main = TestProgram
-
-
-##############################################################################
-# Executing this module from the command line
-##############################################################################
-
-if __name__ == "__main__":
- main(module=None)
diff --git a/sys/lib/python/urllib.py b/sys/lib/python/urllib.py
deleted file mode 100644
index 963187cfb..000000000
--- a/sys/lib/python/urllib.py
+++ /dev/null
@@ -1,1538 +0,0 @@
-"""Open an arbitrary URL.
-
-See the following document for more info on URLs:
-"Names and Addresses, URIs, URLs, URNs, URCs", at
-http://www.w3.org/pub/WWW/Addressing/Overview.html
-
-See also the HTTP spec (from which the error codes are derived):
-"HTTP - Hypertext Transfer Protocol", at
-http://www.w3.org/pub/WWW/Protocols/
-
-Related standards and specs:
-- RFC1808: the "relative URL" spec. (authoritative status)
-- RFC1738 - the "URL standard". (authoritative status)
-- RFC1630 - the "URI spec". (informational status)
-
-The object returned by URLopener().open(file) will differ per
-protocol. All you know is that is has methods read(), readline(),
-readlines(), fileno(), close() and info(). The read*(), fileno()
-and close() methods work like those of open files.
-The info() method returns a mimetools.Message object which can be
-used to query various info about the object, if available.
-(mimetools.Message objects are queried with the getheader() method.)
-"""
-
-import string
-import socket
-import os
-import time
-import sys
-from urlparse import urljoin as basejoin
-
-__all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve",
- "urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus",
- "urlencode", "url2pathname", "pathname2url", "splittag",
- "localhost", "thishost", "ftperrors", "basejoin", "unwrap",
- "splittype", "splithost", "splituser", "splitpasswd", "splitport",
- "splitnport", "splitquery", "splitattr", "splitvalue",
- "splitgophertype", "getproxies"]
-
-__version__ = '1.17' # XXX This version is not always updated :-(
-
-MAXFTPCACHE = 10 # Trim the ftp cache beyond this size
-
-# Helper for non-unix systems
-if os.name == 'mac':
- from macurl2path import url2pathname, pathname2url
-elif os.name == 'nt':
- from nturl2path import url2pathname, pathname2url
-elif os.name == 'riscos':
- from rourl2path import url2pathname, pathname2url
-else:
- def url2pathname(pathname):
- """OS-specific conversion from a relative URL of the 'file' scheme
- to a file system path; not recommended for general use."""
- return unquote(pathname)
-
- def pathname2url(pathname):
- """OS-specific conversion from a file system path to a relative URL
- of the 'file' scheme; not recommended for general use."""
- return quote(pathname)
-
-# This really consists of two pieces:
-# (1) a class which handles opening of all sorts of URLs
-# (plus assorted utilities etc.)
-# (2) a set of functions for parsing URLs
-# XXX Should these be separated out into different modules?
-
-
-# Shortcut for basic usage
-_urlopener = None
-def urlopen(url, data=None, proxies=None):
- """urlopen(url [, data]) -> open file-like object"""
- global _urlopener
- if proxies is not None:
- opener = FancyURLopener(proxies=proxies)
- elif not _urlopener:
- opener = FancyURLopener()
- _urlopener = opener
- else:
- opener = _urlopener
- if data is None:
- return opener.open(url)
- else:
- return opener.open(url, data)
-def urlretrieve(url, filename=None, reporthook=None, data=None):
- global _urlopener
- if not _urlopener:
- _urlopener = FancyURLopener()
- return _urlopener.retrieve(url, filename, reporthook, data)
-def urlcleanup():
- if _urlopener:
- _urlopener.cleanup()
-
-# exception raised when downloaded size does not match content-length
-class ContentTooShortError(IOError):
- def __init__(self, message, content):
- IOError.__init__(self, message)
- self.content = content
-
-ftpcache = {}
-class URLopener:
- """Class to open URLs.
- This is a class rather than just a subroutine because we may need
- more than one set of global protocol-specific options.
- Note -- this is a base class for those who don't want the
- automatic handling of errors type 302 (relocated) and 401
- (authorization needed)."""
-
- __tempfiles = None
-
- version = "Python-urllib/%s" % __version__
-
- # Constructor
- def __init__(self, proxies=None, **x509):
- if proxies is None:
- proxies = getproxies()
- assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
- self.proxies = proxies
- self.key_file = x509.get('key_file')
- self.cert_file = x509.get('cert_file')
- self.addheaders = [('User-Agent', self.version)]
- self.__tempfiles = []
- self.__unlink = os.unlink # See cleanup()
- self.tempcache = None
- # Undocumented feature: if you assign {} to tempcache,
- # it is used to cache files retrieved with
- # self.retrieve(). This is not enabled by default
- # since it does not work for changing documents (and I
- # haven't got the logic to check expiration headers
- # yet).
- self.ftpcache = ftpcache
- # Undocumented feature: you can use a different
- # ftp cache by assigning to the .ftpcache member;
- # in case you want logically independent URL openers
- # XXX This is not threadsafe. Bah.
-
- def __del__(self):
- self.close()
-
- def close(self):
- self.cleanup()
-
- def cleanup(self):
- # This code sometimes runs when the rest of this module
- # has already been deleted, so it can't use any globals
- # or import anything.
- if self.__tempfiles:
- for file in self.__tempfiles:
- try:
- self.__unlink(file)
- except OSError:
- pass
- del self.__tempfiles[:]
- if self.tempcache:
- self.tempcache.clear()
-
- def addheader(self, *args):
- """Add a header to be used by the HTTP interface only
- e.g. u.addheader('Accept', 'sound/basic')"""
- self.addheaders.append(args)
-
- # External interface
- def open(self, fullurl, data=None):
- """Use URLopener().open(file) instead of open(file, 'r')."""
- fullurl = unwrap(toBytes(fullurl))
- if self.tempcache and fullurl in self.tempcache:
- filename, headers = self.tempcache[fullurl]
- fp = open(filename, 'rb')
- return addinfourl(fp, headers, fullurl)
- urltype, url = splittype(fullurl)
- if not urltype:
- urltype = 'file'
- if urltype in self.proxies:
- proxy = self.proxies[urltype]
- urltype, proxyhost = splittype(proxy)
- host, selector = splithost(proxyhost)
- url = (host, fullurl) # Signal special case to open_*()
- else:
- proxy = None
- name = 'open_' + urltype
- self.type = urltype
- name = name.replace('-', '_')
- if not hasattr(self, name):
- if proxy:
- return self.open_unknown_proxy(proxy, fullurl, data)
- else:
- return self.open_unknown(fullurl, data)
- try:
- if data is None:
- return getattr(self, name)(url)
- else:
- return getattr(self, name)(url, data)
- except socket.error, msg:
- raise IOError, ('socket error', msg), sys.exc_info()[2]
-
- def open_unknown(self, fullurl, data=None):
- """Overridable interface to open unknown URL type."""
- type, url = splittype(fullurl)
- raise IOError, ('url error', 'unknown url type', type)
-
- def open_unknown_proxy(self, proxy, fullurl, data=None):
- """Overridable interface to open unknown URL type."""
- type, url = splittype(fullurl)
- raise IOError, ('url error', 'invalid proxy for %s' % type, proxy)
-
- # External interface
- def retrieve(self, url, filename=None, reporthook=None, data=None):
- """retrieve(url) returns (filename, headers) for a local object
- or (tempfilename, headers) for a remote object."""
- url = unwrap(toBytes(url))
- if self.tempcache and url in self.tempcache:
- return self.tempcache[url]
- type, url1 = splittype(url)
- if filename is None and (not type or type == 'file'):
- try:
- fp = self.open_local_file(url1)
- hdrs = fp.info()
- del fp
- return url2pathname(splithost(url1)[1]), hdrs
- except IOError, msg:
- pass
- fp = self.open(url, data)
- headers = fp.info()
- if filename:
- tfp = open(filename, 'wb')
- else:
- import tempfile
- garbage, path = splittype(url)
- garbage, path = splithost(path or "")
- path, garbage = splitquery(path or "")
- path, garbage = splitattr(path or "")
- suffix = os.path.splitext(path)[1]
- (fd, filename) = tempfile.mkstemp(suffix)
- self.__tempfiles.append(filename)
- tfp = os.fdopen(fd, 'wb')
- result = filename, headers
- if self.tempcache is not None:
- self.tempcache[url] = result
- bs = 1024*8
- size = -1
- read = 0
- blocknum = 0
- if reporthook:
- if "content-length" in headers:
- size = int(headers["Content-Length"])
- reporthook(blocknum, bs, size)
- while 1:
- block = fp.read(bs)
- if block == "":
- break
- read += len(block)
- tfp.write(block)
- blocknum += 1
- if reporthook:
- reporthook(blocknum, bs, size)
- fp.close()
- tfp.close()
- del fp
- del tfp
-
- # raise exception if actual size does not match content-length header
- if size >= 0 and read < size:
- raise ContentTooShortError("retrieval incomplete: got only %i out "
- "of %i bytes" % (read, size), result)
-
- return result
-
- # Each method named open_<type> knows how to open that type of URL
-
- def open_http(self, url, data=None):
- """Use HTTP protocol."""
- import httplib
- user_passwd = None
- proxy_passwd= None
- if isinstance(url, str):
- host, selector = splithost(url)
- if host:
- user_passwd, host = splituser(host)
- host = unquote(host)
- realhost = host
- else:
- host, selector = url
- # check whether the proxy contains authorization information
- proxy_passwd, host = splituser(host)
- # now we proceed with the url we want to obtain
- urltype, rest = splittype(selector)
- url = rest
- user_passwd = None
- if urltype.lower() != 'http':
- realhost = None
- else:
- realhost, rest = splithost(rest)
- if realhost:
- user_passwd, realhost = splituser(realhost)
- if user_passwd:
- selector = "%s://%s%s" % (urltype, realhost, rest)
- if proxy_bypass(realhost):
- host = realhost
-
- #print "proxy via http:", host, selector
- if not host: raise IOError, ('http error', 'no host given')
-
- if proxy_passwd:
- import base64
- proxy_auth = base64.b64encode(proxy_passwd).strip()
- else:
- proxy_auth = None
-
- if user_passwd:
- import base64
- auth = base64.b64encode(user_passwd).strip()
- else:
- auth = None
- h = httplib.HTTP(host)
- if data is not None:
- h.putrequest('POST', selector)
- h.putheader('Content-Type', 'application/x-www-form-urlencoded')
- h.putheader('Content-Length', '%d' % len(data))
- else:
- h.putrequest('GET', selector)
- if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
- if auth: h.putheader('Authorization', 'Basic %s' % auth)
- if realhost: h.putheader('Host', realhost)
- for args in self.addheaders: h.putheader(*args)
- h.endheaders()
- if data is not None:
- h.send(data)
- errcode, errmsg, headers = h.getreply()
- if errcode == -1:
- # something went wrong with the HTTP status line
- raise IOError, ('http protocol error', 0,
- 'got a bad status line', None)
- fp = h.getfile()
- if errcode == 200:
- return addinfourl(fp, headers, "http:" + url)
- else:
- if data is None:
- return self.http_error(url, fp, errcode, errmsg, headers)
- else:
- return self.http_error(url, fp, errcode, errmsg, headers, data)
-
- def http_error(self, url, fp, errcode, errmsg, headers, data=None):
- """Handle http errors.
- Derived class can override this, or provide specific handlers
- named http_error_DDD where DDD is the 3-digit error code."""
- # First check if there's a specific handler for this error
- name = 'http_error_%d' % errcode
- if hasattr(self, name):
- method = getattr(self, name)
- if data is None:
- result = method(url, fp, errcode, errmsg, headers)
- else:
- result = method(url, fp, errcode, errmsg, headers, data)
- if result: return result
- return self.http_error_default(url, fp, errcode, errmsg, headers)
-
- def http_error_default(self, url, fp, errcode, errmsg, headers):
- """Default error handler: close the connection and raise IOError."""
- void = fp.read()
- fp.close()
- raise IOError, ('http error', errcode, errmsg, headers)
-
- if hasattr(socket, "ssl"):
- def open_https(self, url, data=None):
- """Use HTTPS protocol."""
- import httplib
- user_passwd = None
- proxy_passwd = None
- if isinstance(url, str):
- host, selector = splithost(url)
- if host:
- user_passwd, host = splituser(host)
- host = unquote(host)
- realhost = host
- else:
- host, selector = url
- # here, we determine, whether the proxy contains authorization information
- proxy_passwd, host = splituser(host)
- urltype, rest = splittype(selector)
- url = rest
- user_passwd = None
- if urltype.lower() != 'https':
- realhost = None
- else:
- realhost, rest = splithost(rest)
- if realhost:
- user_passwd, realhost = splituser(realhost)
- if user_passwd:
- selector = "%s://%s%s" % (urltype, realhost, rest)
- #print "proxy via https:", host, selector
- if not host: raise IOError, ('https error', 'no host given')
- if proxy_passwd:
- import base64
- proxy_auth = base64.b64encode(proxy_passwd).strip()
- else:
- proxy_auth = None
- if user_passwd:
- import base64
- auth = base64.b64encode(user_passwd).strip()
- else:
- auth = None
- h = httplib.HTTPS(host, 0,
- key_file=self.key_file,
- cert_file=self.cert_file)
- if data is not None:
- h.putrequest('POST', selector)
- h.putheader('Content-Type',
- 'application/x-www-form-urlencoded')
- h.putheader('Content-Length', '%d' % len(data))
- else:
- h.putrequest('GET', selector)
- if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
- if auth: h.putheader('Authorization', 'Basic %s' % auth)
- if realhost: h.putheader('Host', realhost)
- for args in self.addheaders: h.putheader(*args)
- h.endheaders()
- if data is not None:
- h.send(data)
- errcode, errmsg, headers = h.getreply()
- if errcode == -1:
- # something went wrong with the HTTP status line
- raise IOError, ('http protocol error', 0,
- 'got a bad status line', None)
- fp = h.getfile()
- if errcode == 200:
- return addinfourl(fp, headers, "https:" + url)
- else:
- if data is None:
- return self.http_error(url, fp, errcode, errmsg, headers)
- else:
- return self.http_error(url, fp, errcode, errmsg, headers,
- data)
-
- def open_gopher(self, url):
- """Use Gopher protocol."""
- if not isinstance(url, str):
- raise IOError, ('gopher error', 'proxy support for gopher protocol currently not implemented')
- import gopherlib
- host, selector = splithost(url)
- if not host: raise IOError, ('gopher error', 'no host given')
- host = unquote(host)
- type, selector = splitgophertype(selector)
- selector, query = splitquery(selector)
- selector = unquote(selector)
- if query:
- query = unquote(query)
- fp = gopherlib.send_query(selector, query, host)
- else:
- fp = gopherlib.send_selector(selector, host)
- return addinfourl(fp, noheaders(), "gopher:" + url)
-
- def open_file(self, url):
- """Use local file or FTP depending on form of URL."""
- if not isinstance(url, str):
- raise IOError, ('file error', 'proxy support for file protocol currently not implemented')
- if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/':
- return self.open_ftp(url)
- else:
- return self.open_local_file(url)
-
- def open_local_file(self, url):
- """Use local file."""
- import mimetypes, mimetools, email.Utils
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- host, file = splithost(url)
- localname = url2pathname(file)
- try:
- stats = os.stat(localname)
- except OSError, e:
- raise IOError(e.errno, e.strerror, e.filename)
- size = stats.st_size
- modified = email.Utils.formatdate(stats.st_mtime, usegmt=True)
- mtype = mimetypes.guess_type(url)[0]
- headers = mimetools.Message(StringIO(
- 'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
- (mtype or 'text/plain', size, modified)))
- if not host:
- urlfile = file
- if file[:1] == '/':
- urlfile = 'file://' + file
- return addinfourl(open(localname, 'rb'),
- headers, urlfile)
- host, port = splitport(host)
- if not port \
- and socket.gethostbyname(host) in (localhost(), thishost()):
- urlfile = file
- if file[:1] == '/':
- urlfile = 'file://' + file
- return addinfourl(open(localname, 'rb'),
- headers, urlfile)
- raise IOError, ('local file error', 'not on local host')
-
- def open_ftp(self, url):
- """Use FTP protocol."""
- if not isinstance(url, str):
- raise IOError, ('ftp error', 'proxy support for ftp protocol currently not implemented')
- import mimetypes, mimetools
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- host, path = splithost(url)
- if not host: raise IOError, ('ftp error', 'no host given')
- host, port = splitport(host)
- user, host = splituser(host)
- if user: user, passwd = splitpasswd(user)
- else: passwd = None
- host = unquote(host)
- user = unquote(user or '')
- passwd = unquote(passwd or '')
- host = socket.gethostbyname(host)
- if not port:
- import ftplib
- port = ftplib.FTP_PORT
- else:
- port = int(port)
- path, attrs = splitattr(path)
- path = unquote(path)
- dirs = path.split('/')
- dirs, file = dirs[:-1], dirs[-1]
- if dirs and not dirs[0]: dirs = dirs[1:]
- if dirs and not dirs[0]: dirs[0] = '/'
- key = user, host, port, '/'.join(dirs)
- # XXX thread unsafe!
- if len(self.ftpcache) > MAXFTPCACHE:
- # Prune the cache, rather arbitrarily
- for k in self.ftpcache.keys():
- if k != key:
- v = self.ftpcache[k]
- del self.ftpcache[k]
- v.close()
- try:
- if not key in self.ftpcache:
- self.ftpcache[key] = \
- ftpwrapper(user, passwd, host, port, dirs)
- if not file: type = 'D'
- else: type = 'I'
- for attr in attrs:
- attr, value = splitvalue(attr)
- if attr.lower() == 'type' and \
- value in ('a', 'A', 'i', 'I', 'd', 'D'):
- type = value.upper()
- (fp, retrlen) = self.ftpcache[key].retrfile(file, type)
- mtype = mimetypes.guess_type("ftp:" + url)[0]
- headers = ""
- if mtype:
- headers += "Content-Type: %s\n" % mtype
- if retrlen is not None and retrlen >= 0:
- headers += "Content-Length: %d\n" % retrlen
- headers = mimetools.Message(StringIO(headers))
- return addinfourl(fp, headers, "ftp:" + url)
- except ftperrors(), msg:
- raise IOError, ('ftp error', msg), sys.exc_info()[2]
-
- def open_data(self, url, data=None):
- """Use "data" URL."""
- if not isinstance(url, str):
- raise IOError, ('data error', 'proxy support for data protocol currently not implemented')
- # ignore POSTed data
- #
- # syntax of data URLs:
- # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
- # mediatype := [ type "/" subtype ] *( ";" parameter )
- # data := *urlchar
- # parameter := attribute "=" value
- import mimetools
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- try:
- [type, data] = url.split(',', 1)
- except ValueError:
- raise IOError, ('data error', 'bad data URL')
- if not type:
- type = 'text/plain;charset=US-ASCII'
- semi = type.rfind(';')
- if semi >= 0 and '=' not in type[semi:]:
- encoding = type[semi+1:]
- type = type[:semi]
- else:
- encoding = ''
- msg = []
- msg.append('Date: %s'%time.strftime('%a, %d %b %Y %T GMT',
- time.gmtime(time.time())))
- msg.append('Content-type: %s' % type)
- if encoding == 'base64':
- import base64
- data = base64.decodestring(data)
- else:
- data = unquote(data)
- msg.append('Content-Length: %d' % len(data))
- msg.append('')
- msg.append(data)
- msg = '\n'.join(msg)
- f = StringIO(msg)
- headers = mimetools.Message(f, 0)
- #f.fileno = None # needed for addinfourl
- return addinfourl(f, headers, url)
-
-
-class FancyURLopener(URLopener):
- """Derived class with handlers for errors we can handle (perhaps)."""
-
- def __init__(self, *args, **kwargs):
- URLopener.__init__(self, *args, **kwargs)
- self.auth_cache = {}
- self.tries = 0
- self.maxtries = 10
-
- def http_error_default(self, url, fp, errcode, errmsg, headers):
- """Default error handling -- don't raise an exception."""
- return addinfourl(fp, headers, "http:" + url)
-
- def http_error_302(self, url, fp, errcode, errmsg, headers, data=None):
- """Error 302 -- relocated (temporarily)."""
- self.tries += 1
- if self.maxtries and self.tries >= self.maxtries:
- if hasattr(self, "http_error_500"):
- meth = self.http_error_500
- else:
- meth = self.http_error_default
- self.tries = 0
- return meth(url, fp, 500,
- "Internal Server Error: Redirect Recursion", headers)
- result = self.redirect_internal(url, fp, errcode, errmsg, headers,
- data)
- self.tries = 0
- return result
-
- def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
- if 'location' in headers:
- newurl = headers['location']
- elif 'uri' in headers:
- newurl = headers['uri']
- else:
- return
- void = fp.read()
- fp.close()
- # In case the server sent a relative URL, join with original:
- newurl = basejoin(self.type + ":" + url, newurl)
- return self.open(newurl)
-
- def http_error_301(self, url, fp, errcode, errmsg, headers, data=None):
- """Error 301 -- also relocated (permanently)."""
- return self.http_error_302(url, fp, errcode, errmsg, headers, data)
-
- def http_error_303(self, url, fp, errcode, errmsg, headers, data=None):
- """Error 303 -- also relocated (essentially identical to 302)."""
- return self.http_error_302(url, fp, errcode, errmsg, headers, data)
-
- def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
- """Error 307 -- relocated, but turn POST into error."""
- if data is None:
- return self.http_error_302(url, fp, errcode, errmsg, headers, data)
- else:
- return self.http_error_default(url, fp, errcode, errmsg, headers)
-
- def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
- """Error 401 -- authentication required.
- This function supports Basic authentication only."""
- if not 'www-authenticate' in headers:
- URLopener.http_error_default(self, url, fp,
- errcode, errmsg, headers)
- stuff = headers['www-authenticate']
- import re
- match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
- if not match:
- URLopener.http_error_default(self, url, fp,
- errcode, errmsg, headers)
- scheme, realm = match.groups()
- if scheme.lower() != 'basic':
- URLopener.http_error_default(self, url, fp,
- errcode, errmsg, headers)
- name = 'retry_' + self.type + '_basic_auth'
- if data is None:
- return getattr(self,name)(url, realm)
- else:
- return getattr(self,name)(url, realm, data)
-
- def http_error_407(self, url, fp, errcode, errmsg, headers, data=None):
- """Error 407 -- proxy authentication required.
- This function supports Basic authentication only."""
- if not 'proxy-authenticate' in headers:
- URLopener.http_error_default(self, url, fp,
- errcode, errmsg, headers)
- stuff = headers['proxy-authenticate']
- import re
- match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
- if not match:
- URLopener.http_error_default(self, url, fp,
- errcode, errmsg, headers)
- scheme, realm = match.groups()
- if scheme.lower() != 'basic':
- URLopener.http_error_default(self, url, fp,
- errcode, errmsg, headers)
- name = 'retry_proxy_' + self.type + '_basic_auth'
- if data is None:
- return getattr(self,name)(url, realm)
- else:
- return getattr(self,name)(url, realm, data)
-
- def retry_proxy_http_basic_auth(self, url, realm, data=None):
- host, selector = splithost(url)
- newurl = 'http://' + host + selector
- proxy = self.proxies['http']
- urltype, proxyhost = splittype(proxy)
- proxyhost, proxyselector = splithost(proxyhost)
- i = proxyhost.find('@') + 1
- proxyhost = proxyhost[i:]
- user, passwd = self.get_user_passwd(proxyhost, realm, i)
- if not (user or passwd): return None
- proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
- self.proxies['http'] = 'http://' + proxyhost + proxyselector
- if data is None:
- return self.open(newurl)
- else:
- return self.open(newurl, data)
-
- def retry_proxy_https_basic_auth(self, url, realm, data=None):
- host, selector = splithost(url)
- newurl = 'https://' + host + selector
- proxy = self.proxies['https']
- urltype, proxyhost = splittype(proxy)
- proxyhost, proxyselector = splithost(proxyhost)
- i = proxyhost.find('@') + 1
- proxyhost = proxyhost[i:]
- user, passwd = self.get_user_passwd(proxyhost, realm, i)
- if not (user or passwd): return None
- proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
- self.proxies['https'] = 'https://' + proxyhost + proxyselector
- if data is None:
- return self.open(newurl)
- else:
- return self.open(newurl, data)
-
- def retry_http_basic_auth(self, url, realm, data=None):
- host, selector = splithost(url)
- i = host.find('@') + 1
- host = host[i:]
- user, passwd = self.get_user_passwd(host, realm, i)
- if not (user or passwd): return None
- host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
- newurl = 'http://' + host + selector
- if data is None:
- return self.open(newurl)
- else:
- return self.open(newurl, data)
-
- def retry_https_basic_auth(self, url, realm, data=None):
- host, selector = splithost(url)
- i = host.find('@') + 1
- host = host[i:]
- user, passwd = self.get_user_passwd(host, realm, i)
- if not (user or passwd): return None
- host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
- newurl = 'https://' + host + selector
- if data is None:
- return self.open(newurl)
- else:
- return self.open(newurl, data)
-
- def get_user_passwd(self, host, realm, clear_cache = 0):
- key = realm + '@' + host.lower()
- if key in self.auth_cache:
- if clear_cache:
- del self.auth_cache[key]
- else:
- return self.auth_cache[key]
- user, passwd = self.prompt_user_passwd(host, realm)
- if user or passwd: self.auth_cache[key] = (user, passwd)
- return user, passwd
-
- def prompt_user_passwd(self, host, realm):
- """Override this in a GUI environment!"""
- import getpass
- try:
- user = raw_input("Enter username for %s at %s: " % (realm,
- host))
- passwd = getpass.getpass("Enter password for %s in %s at %s: " %
- (user, realm, host))
- return user, passwd
- except KeyboardInterrupt:
- print
- return None, None
-
-
-# Utility functions
-
-_localhost = None
-def localhost():
- """Return the IP address of the magic hostname 'localhost'."""
- global _localhost
- if _localhost is None:
- _localhost = socket.gethostbyname('localhost')
- return _localhost
-
-_thishost = None
-def thishost():
- """Return the IP address of the current host."""
- global _thishost
- if _thishost is None:
- _thishost = socket.gethostbyname(socket.gethostname())
- return _thishost
-
-_ftperrors = None
-def ftperrors():
- """Return the set of errors raised by the FTP class."""
- global _ftperrors
- if _ftperrors is None:
- import ftplib
- _ftperrors = ftplib.all_errors
- return _ftperrors
-
-_noheaders = None
-def noheaders():
- """Return an empty mimetools.Message object."""
- global _noheaders
- if _noheaders is None:
- import mimetools
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- _noheaders = mimetools.Message(StringIO(), 0)
- _noheaders.fp.close() # Recycle file descriptor
- return _noheaders
-
-
-# Utility classes
-
-class ftpwrapper:
- """Class used by open_ftp() for cache of open FTP connections."""
-
- def __init__(self, user, passwd, host, port, dirs):
- self.user = user
- self.passwd = passwd
- self.host = host
- self.port = port
- self.dirs = dirs
- self.init()
-
- def init(self):
- import ftplib
- self.busy = 0
- self.ftp = ftplib.FTP()
- self.ftp.connect(self.host, self.port)
- self.ftp.login(self.user, self.passwd)
- for dir in self.dirs:
- self.ftp.cwd(dir)
-
- def retrfile(self, file, type):
- import ftplib
- self.endtransfer()
- if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
- else: cmd = 'TYPE ' + type; isdir = 0
- try:
- self.ftp.voidcmd(cmd)
- except ftplib.all_errors:
- self.init()
- self.ftp.voidcmd(cmd)
- conn = None
- if file and not isdir:
- # Try to retrieve as a file
- try:
- cmd = 'RETR ' + file
- conn = self.ftp.ntransfercmd(cmd)
- except ftplib.error_perm, reason:
- if str(reason)[:3] != '550':
- raise IOError, ('ftp error', reason), sys.exc_info()[2]
- if not conn:
- # Set transfer mode to ASCII!
- self.ftp.voidcmd('TYPE A')
- # Try a directory listing
- if file: cmd = 'LIST ' + file
- else: cmd = 'LIST'
- conn = self.ftp.ntransfercmd(cmd)
- self.busy = 1
- # Pass back both a suitably decorated object and a retrieval length
- return (addclosehook(conn[0].makefile('rb'),
- self.endtransfer), conn[1])
- def endtransfer(self):
- if not self.busy:
- return
- self.busy = 0
- try:
- self.ftp.voidresp()
- except ftperrors():
- pass
-
- def close(self):
- self.endtransfer()
- try:
- self.ftp.close()
- except ftperrors():
- pass
-
-class addbase:
- """Base class for addinfo and addclosehook."""
-
- def __init__(self, fp):
- self.fp = fp
- self.read = self.fp.read
- self.readline = self.fp.readline
- if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines
- if hasattr(self.fp, "fileno"):
- self.fileno = self.fp.fileno
- else:
- self.fileno = lambda: None
- if hasattr(self.fp, "__iter__"):
- self.__iter__ = self.fp.__iter__
- if hasattr(self.fp, "next"):
- self.next = self.fp.next
-
- def __repr__(self):
- return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
- id(self), self.fp)
-
- def close(self):
- self.read = None
- self.readline = None
- self.readlines = None
- self.fileno = None
- if self.fp: self.fp.close()
- self.fp = None
-
-class addclosehook(addbase):
- """Class to add a close hook to an open file."""
-
- def __init__(self, fp, closehook, *hookargs):
- addbase.__init__(self, fp)
- self.closehook = closehook
- self.hookargs = hookargs
-
- def close(self):
- addbase.close(self)
- if self.closehook:
- self.closehook(*self.hookargs)
- self.closehook = None
- self.hookargs = None
-
-class addinfo(addbase):
- """class to add an info() method to an open file."""
-
- def __init__(self, fp, headers):
- addbase.__init__(self, fp)
- self.headers = headers
-
- def info(self):
- return self.headers
-
-class addinfourl(addbase):
- """class to add info() and geturl() methods to an open file."""
-
- def __init__(self, fp, headers, url):
- addbase.__init__(self, fp)
- self.headers = headers
- self.url = url
-
- def info(self):
- return self.headers
-
- def geturl(self):
- return self.url
-
-
-# Utilities to parse URLs (most of these return None for missing parts):
-# unwrap('<URL:type://host/path>') --> 'type://host/path'
-# splittype('type:opaquestring') --> 'type', 'opaquestring'
-# splithost('//host[:port]/path') --> 'host[:port]', '/path'
-# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
-# splitpasswd('user:passwd') -> 'user', 'passwd'
-# splitport('host:port') --> 'host', 'port'
-# splitquery('/path?query') --> '/path', 'query'
-# splittag('/path#tag') --> '/path', 'tag'
-# splitattr('/path;attr1=value1;attr2=value2;...') ->
-# '/path', ['attr1=value1', 'attr2=value2', ...]
-# splitvalue('attr=value') --> 'attr', 'value'
-# splitgophertype('/Xselector') --> 'X', 'selector'
-# unquote('abc%20def') -> 'abc def'
-# quote('abc def') -> 'abc%20def')
-
-try:
- unicode
-except NameError:
- def _is_unicode(x):
- return 0
-else:
- def _is_unicode(x):
- return isinstance(x, unicode)
-
-def toBytes(url):
- """toBytes(u"URL") --> 'URL'."""
- # Most URL schemes require ASCII. If that changes, the conversion
- # can be relaxed
- if _is_unicode(url):
- try:
- url = url.encode("ASCII")
- except UnicodeError:
- raise UnicodeError("URL " + repr(url) +
- " contains non-ASCII characters")
- return url
-
-def unwrap(url):
- """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
- url = url.strip()
- if url[:1] == '<' and url[-1:] == '>':
- url = url[1:-1].strip()
- if url[:4] == 'URL:': url = url[4:].strip()
- return url
-
-_typeprog = None
-def splittype(url):
- """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
- global _typeprog
- if _typeprog is None:
- import re
- _typeprog = re.compile('^([^/:]+):')
-
- match = _typeprog.match(url)
- if match:
- scheme = match.group(1)
- return scheme.lower(), url[len(scheme) + 1:]
- return None, url
-
-_hostprog = None
-def splithost(url):
- """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
- global _hostprog
- if _hostprog is None:
- import re
- _hostprog = re.compile('^//([^/?]*)(.*)$')
-
- match = _hostprog.match(url)
- if match: return match.group(1, 2)
- return None, url
-
-_userprog = None
-def splituser(host):
- """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
- global _userprog
- if _userprog is None:
- import re
- _userprog = re.compile('^(.*)@(.*)$')
-
- match = _userprog.match(host)
- if match: return map(unquote, match.group(1, 2))
- return None, host
-
-_passwdprog = None
-def splitpasswd(user):
- """splitpasswd('user:passwd') -> 'user', 'passwd'."""
- global _passwdprog
- if _passwdprog is None:
- import re
- _passwdprog = re.compile('^([^:]*):(.*)$')
-
- match = _passwdprog.match(user)
- if match: return match.group(1, 2)
- return user, None
-
-# splittag('/path#tag') --> '/path', 'tag'
-_portprog = None
-def splitport(host):
- """splitport('host:port') --> 'host', 'port'."""
- global _portprog
- if _portprog is None:
- import re
- _portprog = re.compile('^(.*):([0-9]+)$')
-
- match = _portprog.match(host)
- if match: return match.group(1, 2)
- return host, None
-
-_nportprog = None
-def splitnport(host, defport=-1):
- """Split host and port, returning numeric port.
- Return given default port if no ':' found; defaults to -1.
- Return numerical port if a valid number are found after ':'.
- Return None if ':' but not a valid number."""
- global _nportprog
- if _nportprog is None:
- import re
- _nportprog = re.compile('^(.*):(.*)$')
-
- match = _nportprog.match(host)
- if match:
- host, port = match.group(1, 2)
- try:
- if not port: raise ValueError, "no digits"
- nport = int(port)
- except ValueError:
- nport = None
- return host, nport
- return host, defport
-
-_queryprog = None
-def splitquery(url):
- """splitquery('/path?query') --> '/path', 'query'."""
- global _queryprog
- if _queryprog is None:
- import re
- _queryprog = re.compile('^(.*)\?([^?]*)$')
-
- match = _queryprog.match(url)
- if match: return match.group(1, 2)
- return url, None
-
-_tagprog = None
-def splittag(url):
- """splittag('/path#tag') --> '/path', 'tag'."""
- global _tagprog
- if _tagprog is None:
- import re
- _tagprog = re.compile('^(.*)#([^#]*)$')
-
- match = _tagprog.match(url)
- if match: return match.group(1, 2)
- return url, None
-
-def splitattr(url):
- """splitattr('/path;attr1=value1;attr2=value2;...') ->
- '/path', ['attr1=value1', 'attr2=value2', ...]."""
- words = url.split(';')
- return words[0], words[1:]
-
-_valueprog = None
-def splitvalue(attr):
- """splitvalue('attr=value') --> 'attr', 'value'."""
- global _valueprog
- if _valueprog is None:
- import re
- _valueprog = re.compile('^([^=]*)=(.*)$')
-
- match = _valueprog.match(attr)
- if match: return match.group(1, 2)
- return attr, None
-
-def splitgophertype(selector):
- """splitgophertype('/Xselector') --> 'X', 'selector'."""
- if selector[:1] == '/' and selector[1:2]:
- return selector[1], selector[2:]
- return None, selector
-
-_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
-_hextochr.update(('%02X' % i, chr(i)) for i in range(256))
-
-def unquote(s):
- """unquote('abc%20def') -> 'abc def'."""
- res = s.split('%')
- for i in xrange(1, len(res)):
- item = res[i]
- try:
- res[i] = _hextochr[item[:2]] + item[2:]
- except KeyError:
- res[i] = '%' + item
- except UnicodeDecodeError:
- res[i] = unichr(int(item[:2], 16)) + item[2:]
- return "".join(res)
-
-def unquote_plus(s):
- """unquote('%7e/abc+def') -> '~/abc def'"""
- s = s.replace('+', ' ')
- return unquote(s)
-
-always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
- 'abcdefghijklmnopqrstuvwxyz'
- '0123456789' '_.-')
-_safemaps = {}
-
-def quote(s, safe = '/'):
- """quote('abc def') -> 'abc%20def'
-
- Each part of a URL, e.g. the path info, the query, etc., has a
- different set of reserved characters that must be quoted.
-
- RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
- the following reserved characters.
-
- reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
- "$" | ","
-
- Each of these characters is reserved in some component of a URL,
- but not necessarily in all of them.
-
- By default, the quote function is intended for quoting the path
- section of a URL. Thus, it will not encode '/'. This character
- is reserved, but in typical usage the quote function is being
- called on a path where the existing slash characters are used as
- reserved characters.
- """
- cachekey = (safe, always_safe)
- try:
- safe_map = _safemaps[cachekey]
- except KeyError:
- safe += always_safe
- safe_map = {}
- for i in range(256):
- c = chr(i)
- safe_map[c] = (c in safe) and c or ('%%%02X' % i)
- _safemaps[cachekey] = safe_map
- res = map(safe_map.__getitem__, s)
- return ''.join(res)
-
-def quote_plus(s, safe = ''):
- """Quote the query fragment of a URL; replacing ' ' with '+'"""
- if ' ' in s:
- s = quote(s, safe + ' ')
- return s.replace(' ', '+')
- return quote(s, safe)
-
-def urlencode(query,doseq=0):
- """Encode a sequence of two-element tuples or dictionary into a URL query string.
-
- If any values in the query arg are sequences and doseq is true, each
- sequence element is converted to a separate parameter.
-
- If the query arg is a sequence of two-element tuples, the order of the
- parameters in the output will match the order of parameters in the
- input.
- """
-
- if hasattr(query,"items"):
- # mapping objects
- query = query.items()
- else:
- # it's a bother at times that strings and string-like objects are
- # sequences...
- try:
- # non-sequence items should not work with len()
- # non-empty strings will fail this
- if len(query) and not isinstance(query[0], tuple):
- raise TypeError
- # zero-length sequences of all types will get here and succeed,
- # but that's a minor nit - since the original implementation
- # allowed empty dicts that type of behavior probably should be
- # preserved for consistency
- except TypeError:
- ty,va,tb = sys.exc_info()
- raise TypeError, "not a valid non-string sequence or mapping object", tb
-
- l = []
- if not doseq:
- # preserve old behavior
- for k, v in query:
- k = quote_plus(str(k))
- v = quote_plus(str(v))
- l.append(k + '=' + v)
- else:
- for k, v in query:
- k = quote_plus(str(k))
- if isinstance(v, str):
- v = quote_plus(v)
- l.append(k + '=' + v)
- elif _is_unicode(v):
- # is there a reasonable way to convert to ASCII?
- # encode generates a string, but "replace" or "ignore"
- # lose information and "strict" can raise UnicodeError
- v = quote_plus(v.encode("ASCII","replace"))
- l.append(k + '=' + v)
- else:
- try:
- # is this a sufficient test for sequence-ness?
- x = len(v)
- except TypeError:
- # not a sequence
- v = quote_plus(str(v))
- l.append(k + '=' + v)
- else:
- # loop over the sequence
- for elt in v:
- l.append(k + '=' + quote_plus(str(elt)))
- return '&'.join(l)
-
-# Proxy handling
-def getproxies_environment():
- """Return a dictionary of scheme -> proxy server URL mappings.
-
- Scan the environment for variables named <scheme>_proxy;
- this seems to be the standard convention. If you need a
- different way, you can pass a proxies dictionary to the
- [Fancy]URLopener constructor.
-
- """
- proxies = {}
- for name, value in os.environ.items():
- name = name.lower()
- if value and name[-6:] == '_proxy':
- proxies[name[:-6]] = value
- return proxies
-
-if sys.platform == 'darwin':
- def getproxies_internetconfig():
- """Return a dictionary of scheme -> proxy server URL mappings.
-
- By convention the mac uses Internet Config to store
- proxies. An HTTP proxy, for instance, is stored under
- the HttpProxy key.
-
- """
- try:
- import ic
- except ImportError:
- return {}
-
- try:
- config = ic.IC()
- except ic.error:
- return {}
- proxies = {}
- # HTTP:
- if 'UseHTTPProxy' in config and config['UseHTTPProxy']:
- try:
- value = config['HTTPProxyHost']
- except ic.error:
- pass
- else:
- proxies['http'] = 'http://%s' % value
- # FTP: XXXX To be done.
- # Gopher: XXXX To be done.
- return proxies
-
- def proxy_bypass(x):
- return 0
-
- def getproxies():
- return getproxies_environment() or getproxies_internetconfig()
-
-elif os.name == 'nt':
- def getproxies_registry():
- """Return a dictionary of scheme -> proxy server URL mappings.
-
- Win32 uses the registry to store proxies.
-
- """
- proxies = {}
- try:
- import _winreg
- except ImportError:
- # Std module, so should be around - but you never know!
- return proxies
- try:
- internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
- r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
- proxyEnable = _winreg.QueryValueEx(internetSettings,
- 'ProxyEnable')[0]
- if proxyEnable:
- # Returned as Unicode but problems if not converted to ASCII
- proxyServer = str(_winreg.QueryValueEx(internetSettings,
- 'ProxyServer')[0])
- if '=' in proxyServer:
- # Per-protocol settings
- for p in proxyServer.split(';'):
- protocol, address = p.split('=', 1)
- # See if address has a type:// prefix
- import re
- if not re.match('^([^/:]+)://', address):
- address = '%s://%s' % (protocol, address)
- proxies[protocol] = address
- else:
- # Use one setting for all protocols
- if proxyServer[:5] == 'http:':
- proxies['http'] = proxyServer
- else:
- proxies['http'] = 'http://%s' % proxyServer
- proxies['ftp'] = 'ftp://%s' % proxyServer
- internetSettings.Close()
- except (WindowsError, ValueError, TypeError):
- # Either registry key not found etc, or the value in an
- # unexpected format.
- # proxies already set up to be empty so nothing to do
- pass
- return proxies
-
- def getproxies():
- """Return a dictionary of scheme -> proxy server URL mappings.
-
- Returns settings gathered from the environment, if specified,
- or the registry.
-
- """
- return getproxies_environment() or getproxies_registry()
-
- def proxy_bypass(host):
- try:
- import _winreg
- import re
- except ImportError:
- # Std modules, so should be around - but you never know!
- return 0
- try:
- internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
- r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
- proxyEnable = _winreg.QueryValueEx(internetSettings,
- 'ProxyEnable')[0]
- proxyOverride = str(_winreg.QueryValueEx(internetSettings,
- 'ProxyOverride')[0])
- # ^^^^ Returned as Unicode but problems if not converted to ASCII
- except WindowsError:
- return 0
- if not proxyEnable or not proxyOverride:
- return 0
- # try to make a host list from name and IP address.
- rawHost, port = splitport(host)
- host = [rawHost]
- try:
- addr = socket.gethostbyname(rawHost)
- if addr != rawHost:
- host.append(addr)
- except socket.error:
- pass
- try:
- fqdn = socket.getfqdn(rawHost)
- if fqdn != rawHost:
- host.append(fqdn)
- except socket.error:
- pass
- # make a check value list from the registry entry: replace the
- # '<local>' string by the localhost entry and the corresponding
- # canonical entry.
- proxyOverride = proxyOverride.split(';')
- i = 0
- while i < len(proxyOverride):
- if proxyOverride[i] == '<local>':
- proxyOverride[i:i+1] = ['localhost',
- '127.0.0.1',
- socket.gethostname(),
- socket.gethostbyname(
- socket.gethostname())]
- i += 1
- # print proxyOverride
- # now check if we match one of the registry values.
- for test in proxyOverride:
- test = test.replace(".", r"\.") # mask dots
- test = test.replace("*", r".*") # change glob sequence
- test = test.replace("?", r".") # change glob char
- for val in host:
- # print "%s <--> %s" %( test, val )
- if re.match(test, val, re.I):
- return 1
- return 0
-
-else:
- # By default use environment variables
- getproxies = getproxies_environment
-
- def proxy_bypass(host):
- return 0
-
-# Test and time quote() and unquote()
-def test1():
- s = ''
- for i in range(256): s = s + chr(i)
- s = s*4
- t0 = time.time()
- qs = quote(s)
- uqs = unquote(qs)
- t1 = time.time()
- if uqs != s:
- print 'Wrong!'
- print repr(s)
- print repr(qs)
- print repr(uqs)
- print round(t1 - t0, 3), 'sec'
-
-
-def reporthook(blocknum, blocksize, totalsize):
- # Report during remote transfers
- print "Block number: %d, Block size: %d, Total size: %d" % (
- blocknum, blocksize, totalsize)
-
-# Test program
-def test(args=[]):
- if not args:
- args = [
- '/etc/passwd',
- 'file:/etc/passwd',
- 'file://localhost/etc/passwd',
- 'ftp://ftp.gnu.org/pub/README',
-## 'gopher://gopher.micro.umn.edu/1/',
- 'http://www.python.org/index.html',
- ]
- if hasattr(URLopener, "open_https"):
- args.append('https://synergy.as.cmu.edu/~geek/')
- try:
- for url in args:
- print '-'*10, url, '-'*10
- fn, h = urlretrieve(url, None, reporthook)
- print fn
- if h:
- print '======'
- for k in h.keys(): print k + ':', h[k]
- print '======'
- fp = open(fn, 'rb')
- data = fp.read()
- del fp
- if '\r' in data:
- table = string.maketrans("", "")
- data = data.translate(table, "\r")
- print data
- fn, h = None, None
- print '-'*40
- finally:
- urlcleanup()
-
-def main():
- import getopt, sys
- try:
- opts, args = getopt.getopt(sys.argv[1:], "th")
- except getopt.error, msg:
- print msg
- print "Use -h for help"
- return
- t = 0
- for o, a in opts:
- if o == '-t':
- t = t + 1
- if o == '-h':
- print "Usage: python urllib.py [-t] [url ...]"
- print "-t runs self-test;",
- print "otherwise, contents of urls are printed"
- return
- if t:
- if t > 1:
- test1()
- test(args)
- else:
- if not args:
- print "Use -h for help"
- for url in args:
- print urlopen(url).read(),
-
-# Run test program when run as a script
-if __name__ == '__main__':
- main()
diff --git a/sys/lib/python/urllib2.py b/sys/lib/python/urllib2.py
deleted file mode 100644
index 4e926a154..000000000
--- a/sys/lib/python/urllib2.py
+++ /dev/null
@@ -1,1353 +0,0 @@
-"""An extensible library for opening URLs using a variety of protocols
-
-The simplest way to use this module is to call the urlopen function,
-which accepts a string containing a URL or a Request object (described
-below). It opens the URL and returns the results as file-like
-object; the returned object has some extra methods described below.
-
-The OpenerDirector manages a collection of Handler objects that do
-all the actual work. Each Handler implements a particular protocol or
-option. The OpenerDirector is a composite object that invokes the
-Handlers needed to open the requested URL. For example, the
-HTTPHandler performs HTTP GET and POST requests and deals with
-non-error returns. The HTTPRedirectHandler automatically deals with
-HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
-deals with digest authentication.
-
-urlopen(url, data=None) -- basic usage is the same as original
-urllib. pass the url and optionally data to post to an HTTP URL, and
-get a file-like object back. One difference is that you can also pass
-a Request instance instead of URL. Raises a URLError (subclass of
-IOError); for HTTP errors, raises an HTTPError, which can also be
-treated as a valid response.
-
-build_opener -- function that creates a new OpenerDirector instance.
-will install the default handlers. accepts one or more Handlers as
-arguments, either instances or Handler classes that it will
-instantiate. if one of the argument is a subclass of the default
-handler, the argument will be installed instead of the default.
-
-install_opener -- installs a new opener as the default opener.
-
-objects of interest:
-OpenerDirector --
-
-Request -- an object that encapsulates the state of a request. the
-state can be a simple as the URL. it can also include extra HTTP
-headers, e.g. a User-Agent.
-
-BaseHandler --
-
-exceptions:
-URLError-- a subclass of IOError, individual protocols have their own
-specific subclass
-
-HTTPError-- also a valid HTTP response, so you can treat an HTTP error
-as an exceptional event or valid response
-
-internals:
-BaseHandler and parent
-_call_chain conventions
-
-Example usage:
-
-import urllib2
-
-# set up authentication info
-authinfo = urllib2.HTTPBasicAuthHandler()
-authinfo.add_password('realm', 'host', 'username', 'password')
-
-proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
-
-# build a new opener that adds authentication and caching FTP handlers
-opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
-
-# install it
-urllib2.install_opener(opener)
-
-f = urllib2.urlopen('http://www.python.org/')
-
-
-"""
-
-# XXX issues:
-# If an authentication error handler that tries to perform
-# authentication for some reason but fails, how should the error be
-# signalled? The client needs to know the HTTP error code. But if
-# the handler knows that the problem was, e.g., that it didn't know
-# that hash algo that requested in the challenge, it would be good to
-# pass that information along to the client, too.
-# ftp errors aren't handled cleanly
-# check digest against correct (i.e. non-apache) implementation
-
-# Possible extensions:
-# complex proxies XXX not sure what exactly was meant by this
-# abstract factory for opener
-
-import base64
-import hashlib
-import httplib
-import mimetools
-import os
-import posixpath
-import random
-import re
-import socket
-import sys
-import time
-import urlparse
-import bisect
-
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-
-from urllib import (unwrap, unquote, splittype, splithost, quote,
- addinfourl, splitport, splitgophertype, splitquery,
- splitattr, ftpwrapper, noheaders, splituser, splitpasswd, splitvalue)
-
-# support for FileHandler, proxies via environment variables
-from urllib import localhost, url2pathname, getproxies
-
-# used in User-Agent header sent
-__version__ = sys.version[:3]
-
-_opener = None
-def urlopen(url, data=None):
- global _opener
- if _opener is None:
- _opener = build_opener()
- return _opener.open(url, data)
-
-def install_opener(opener):
- global _opener
- _opener = opener
-
-# do these error classes make sense?
-# make sure all of the IOError stuff is overridden. we just want to be
-# subtypes.
-
-class URLError(IOError):
- # URLError is a sub-type of IOError, but it doesn't share any of
- # the implementation. need to override __init__ and __str__.
- # It sets self.args for compatibility with other EnvironmentError
- # subclasses, but args doesn't have the typical format with errno in
- # slot 0 and strerror in slot 1. This may be better than nothing.
- def __init__(self, reason):
- self.args = reason,
- self.reason = reason
-
- def __str__(self):
- return '<urlopen error %s>' % self.reason
-
-class HTTPError(URLError, addinfourl):
- """Raised when HTTP error occurs, but also acts like non-error return"""
- __super_init = addinfourl.__init__
-
- def __init__(self, url, code, msg, hdrs, fp):
- self.code = code
- self.msg = msg
- self.hdrs = hdrs
- self.fp = fp
- self.filename = url
- # The addinfourl classes depend on fp being a valid file
- # object. In some cases, the HTTPError may not have a valid
- # file object. If this happens, the simplest workaround is to
- # not initialize the base classes.
- if fp is not None:
- self.__super_init(fp, hdrs, url)
-
- def __str__(self):
- return 'HTTP Error %s: %s' % (self.code, self.msg)
-
-class GopherError(URLError):
- pass
-
-# copied from cookielib.py
-_cut_port_re = re.compile(r":\d+$")
-def request_host(request):
- """Return request-host, as defined by RFC 2965.
-
- Variation from RFC: returned value is lowercased, for convenient
- comparison.
-
- """
- url = request.get_full_url()
- host = urlparse.urlparse(url)[1]
- if host == "":
- host = request.get_header("Host", "")
-
- # remove port, if present
- host = _cut_port_re.sub("", host, 1)
- return host.lower()
-
-class Request:
-
- def __init__(self, url, data=None, headers={},
- origin_req_host=None, unverifiable=False):
- # unwrap('<URL:type://host/path>') --> 'type://host/path'
- self.__original = unwrap(url)
- self.type = None
- # self.__r_type is what's left after doing the splittype
- self.host = None
- self.port = None
- self.data = data
- self.headers = {}
- for key, value in headers.items():
- self.add_header(key, value)
- self.unredirected_hdrs = {}
- if origin_req_host is None:
- origin_req_host = request_host(self)
- self.origin_req_host = origin_req_host
- self.unverifiable = unverifiable
-
- def __getattr__(self, attr):
- # XXX this is a fallback mechanism to guard against these
- # methods getting called in a non-standard order. this may be
- # too complicated and/or unnecessary.
- # XXX should the __r_XXX attributes be public?
- if attr[:12] == '_Request__r_':
- name = attr[12:]
- if hasattr(Request, 'get_' + name):
- getattr(self, 'get_' + name)()
- return getattr(self, attr)
- raise AttributeError, attr
-
- def get_method(self):
- if self.has_data():
- return "POST"
- else:
- return "GET"
-
- # XXX these helper methods are lame
-
- def add_data(self, data):
- self.data = data
-
- def has_data(self):
- return self.data is not None
-
- def get_data(self):
- return self.data
-
- def get_full_url(self):
- return self.__original
-
- def get_type(self):
- if self.type is None:
- self.type, self.__r_type = splittype(self.__original)
- if self.type is None:
- raise ValueError, "unknown url type: %s" % self.__original
- return self.type
-
- def get_host(self):
- if self.host is None:
- self.host, self.__r_host = splithost(self.__r_type)
- if self.host:
- self.host = unquote(self.host)
- return self.host
-
- def get_selector(self):
- return self.__r_host
-
- def set_proxy(self, host, type):
- self.host, self.type = host, type
- self.__r_host = self.__original
-
- def get_origin_req_host(self):
- return self.origin_req_host
-
- def is_unverifiable(self):
- return self.unverifiable
-
- def add_header(self, key, val):
- # useful for something like authentication
- self.headers[key.capitalize()] = val
-
- def add_unredirected_header(self, key, val):
- # will not be added to a redirected request
- self.unredirected_hdrs[key.capitalize()] = val
-
- def has_header(self, header_name):
- return (header_name in self.headers or
- header_name in self.unredirected_hdrs)
-
- def get_header(self, header_name, default=None):
- return self.headers.get(
- header_name,
- self.unredirected_hdrs.get(header_name, default))
-
- def header_items(self):
- hdrs = self.unredirected_hdrs.copy()
- hdrs.update(self.headers)
- return hdrs.items()
-
-class OpenerDirector:
- def __init__(self):
- client_version = "Python-urllib/%s" % __version__
- self.addheaders = [('User-agent', client_version)]
- # manage the individual handlers
- self.handlers = []
- self.handle_open = {}
- self.handle_error = {}
- self.process_response = {}
- self.process_request = {}
-
- def add_handler(self, handler):
- added = False
- for meth in dir(handler):
- if meth in ["redirect_request", "do_open", "proxy_open"]:
- # oops, coincidental match
- continue
-
- i = meth.find("_")
- protocol = meth[:i]
- condition = meth[i+1:]
-
- if condition.startswith("error"):
- j = condition.find("_") + i + 1
- kind = meth[j+1:]
- try:
- kind = int(kind)
- except ValueError:
- pass
- lookup = self.handle_error.get(protocol, {})
- self.handle_error[protocol] = lookup
- elif condition == "open":
- kind = protocol
- lookup = self.handle_open
- elif condition == "response":
- kind = protocol
- lookup = self.process_response
- elif condition == "request":
- kind = protocol
- lookup = self.process_request
- else:
- continue
-
- handlers = lookup.setdefault(kind, [])
- if handlers:
- bisect.insort(handlers, handler)
- else:
- handlers.append(handler)
- added = True
-
- if added:
- # XXX why does self.handlers need to be sorted?
- bisect.insort(self.handlers, handler)
- handler.add_parent(self)
-
- def close(self):
- # Only exists for backwards compatibility.
- pass
-
- def _call_chain(self, chain, kind, meth_name, *args):
- # Handlers raise an exception if no one else should try to handle
- # the request, or return None if they can't but another handler
- # could. Otherwise, they return the response.
- handlers = chain.get(kind, ())
- for handler in handlers:
- func = getattr(handler, meth_name)
-
- result = func(*args)
- if result is not None:
- return result
-
- def open(self, fullurl, data=None):
- # accept a URL or a Request object
- if isinstance(fullurl, basestring):
- req = Request(fullurl, data)
- else:
- req = fullurl
- if data is not None:
- req.add_data(data)
-
- protocol = req.get_type()
-
- # pre-process request
- meth_name = protocol+"_request"
- for processor in self.process_request.get(protocol, []):
- meth = getattr(processor, meth_name)
- req = meth(req)
-
- response = self._open(req, data)
-
- # post-process response
- meth_name = protocol+"_response"
- for processor in self.process_response.get(protocol, []):
- meth = getattr(processor, meth_name)
- response = meth(req, response)
-
- return response
-
- def _open(self, req, data=None):
- result = self._call_chain(self.handle_open, 'default',
- 'default_open', req)
- if result:
- return result
-
- protocol = req.get_type()
- result = self._call_chain(self.handle_open, protocol, protocol +
- '_open', req)
- if result:
- return result
-
- return self._call_chain(self.handle_open, 'unknown',
- 'unknown_open', req)
-
- def error(self, proto, *args):
- if proto in ('http', 'https'):
- # XXX http[s] protocols are special-cased
- dict = self.handle_error['http'] # https is not different than http
- proto = args[2] # YUCK!
- meth_name = 'http_error_%s' % proto
- http_err = 1
- orig_args = args
- else:
- dict = self.handle_error
- meth_name = proto + '_error'
- http_err = 0
- args = (dict, proto, meth_name) + args
- result = self._call_chain(*args)
- if result:
- return result
-
- if http_err:
- args = (dict, 'default', 'http_error_default') + orig_args
- return self._call_chain(*args)
-
-# XXX probably also want an abstract factory that knows when it makes
-# sense to skip a superclass in favor of a subclass and when it might
-# make sense to include both
-
-def build_opener(*handlers):
- """Create an opener object from a list of handlers.
-
- The opener will use several default handlers, including support
- for HTTP and FTP.
-
- If any of the handlers passed as arguments are subclasses of the
- default handlers, the default handlers will not be used.
- """
- import types
- def isclass(obj):
- return isinstance(obj, types.ClassType) or hasattr(obj, "__bases__")
-
- opener = OpenerDirector()
- default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
- HTTPDefaultErrorHandler, HTTPRedirectHandler,
- FTPHandler, FileHandler, HTTPErrorProcessor]
- if hasattr(httplib, 'HTTPS'):
- default_classes.append(HTTPSHandler)
- skip = []
- for klass in default_classes:
- for check in handlers:
- if isclass(check):
- if issubclass(check, klass):
- skip.append(klass)
- elif isinstance(check, klass):
- skip.append(klass)
- for klass in skip:
- default_classes.remove(klass)
-
- for klass in default_classes:
- opener.add_handler(klass())
-
- for h in handlers:
- if isclass(h):
- h = h()
- opener.add_handler(h)
- return opener
-
-class BaseHandler:
- handler_order = 500
-
- def add_parent(self, parent):
- self.parent = parent
-
- def close(self):
- # Only exists for backwards compatibility
- pass
-
- def __lt__(self, other):
- if not hasattr(other, "handler_order"):
- # Try to preserve the old behavior of having custom classes
- # inserted after default ones (works only for custom user
- # classes which are not aware of handler_order).
- return True
- return self.handler_order < other.handler_order
-
-
-class HTTPErrorProcessor(BaseHandler):
- """Process HTTP error responses."""
- handler_order = 1000 # after all other processing
-
- def http_response(self, request, response):
- code, msg, hdrs = response.code, response.msg, response.info()
-
- if code not in (200, 206):
- response = self.parent.error(
- 'http', request, response, code, msg, hdrs)
-
- return response
-
- https_response = http_response
-
-class HTTPDefaultErrorHandler(BaseHandler):
- def http_error_default(self, req, fp, code, msg, hdrs):
- raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
-
-class HTTPRedirectHandler(BaseHandler):
- # maximum number of redirections to any single URL
- # this is needed because of the state that cookies introduce
- max_repeats = 4
- # maximum total number of redirections (regardless of URL) before
- # assuming we're in a loop
- max_redirections = 10
-
- def redirect_request(self, req, fp, code, msg, headers, newurl):
- """Return a Request or None in response to a redirect.
-
- This is called by the http_error_30x methods when a
- redirection response is received. If a redirection should
- take place, return a new Request to allow http_error_30x to
- perform the redirect. Otherwise, raise HTTPError if no-one
- else should try to handle this url. Return None if you can't
- but another Handler might.
- """
- m = req.get_method()
- if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
- or code in (301, 302, 303) and m == "POST"):
- # Strictly (according to RFC 2616), 301 or 302 in response
- # to a POST MUST NOT cause a redirection without confirmation
- # from the user (of urllib2, in this case). In practice,
- # essentially all clients do redirect in this case, so we
- # do the same.
- # be conciliant with URIs containing a space
- newurl = newurl.replace(' ', '%20')
- return Request(newurl,
- headers=req.headers,
- origin_req_host=req.get_origin_req_host(),
- unverifiable=True)
- else:
- raise HTTPError(req.get_full_url(), code, msg, headers, fp)
-
- # Implementation note: To avoid the server sending us into an
- # infinite loop, the request object needs to track what URLs we
- # have already seen. Do this by adding a handler-specific
- # attribute to the Request object.
- def http_error_302(self, req, fp, code, msg, headers):
- # Some servers (incorrectly) return multiple Location headers
- # (so probably same goes for URI). Use first header.
- if 'location' in headers:
- newurl = headers.getheaders('location')[0]
- elif 'uri' in headers:
- newurl = headers.getheaders('uri')[0]
- else:
- return
- newurl = urlparse.urljoin(req.get_full_url(), newurl)
-
- # XXX Probably want to forget about the state of the current
- # request, although that might interact poorly with other
- # handlers that also use handler-specific request attributes
- new = self.redirect_request(req, fp, code, msg, headers, newurl)
- if new is None:
- return
-
- # loop detection
- # .redirect_dict has a key url if url was previously visited.
- if hasattr(req, 'redirect_dict'):
- visited = new.redirect_dict = req.redirect_dict
- if (visited.get(newurl, 0) >= self.max_repeats or
- len(visited) >= self.max_redirections):
- raise HTTPError(req.get_full_url(), code,
- self.inf_msg + msg, headers, fp)
- else:
- visited = new.redirect_dict = req.redirect_dict = {}
- visited[newurl] = visited.get(newurl, 0) + 1
-
- # Don't close the fp until we are sure that we won't use it
- # with HTTPError.
- fp.read()
- fp.close()
-
- return self.parent.open(new)
-
- http_error_301 = http_error_303 = http_error_307 = http_error_302
-
- inf_msg = "The HTTP server returned a redirect error that would " \
- "lead to an infinite loop.\n" \
- "The last 30x error message was:\n"
-
-
-def _parse_proxy(proxy):
- """Return (scheme, user, password, host/port) given a URL or an authority.
-
- If a URL is supplied, it must have an authority (host:port) component.
- According to RFC 3986, having an authority component means the URL must
- have two slashes after the scheme:
-
- >>> _parse_proxy('file:/ftp.example.com/')
- Traceback (most recent call last):
- ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
-
- The first three items of the returned tuple may be None.
-
- Examples of authority parsing:
-
- >>> _parse_proxy('proxy.example.com')
- (None, None, None, 'proxy.example.com')
- >>> _parse_proxy('proxy.example.com:3128')
- (None, None, None, 'proxy.example.com:3128')
-
- The authority component may optionally include userinfo (assumed to be
- username:password):
-
- >>> _parse_proxy('joe:password@proxy.example.com')
- (None, 'joe', 'password', 'proxy.example.com')
- >>> _parse_proxy('joe:password@proxy.example.com:3128')
- (None, 'joe', 'password', 'proxy.example.com:3128')
-
- Same examples, but with URLs instead:
-
- >>> _parse_proxy('http://proxy.example.com/')
- ('http', None, None, 'proxy.example.com')
- >>> _parse_proxy('http://proxy.example.com:3128/')
- ('http', None, None, 'proxy.example.com:3128')
- >>> _parse_proxy('http://joe:password@proxy.example.com/')
- ('http', 'joe', 'password', 'proxy.example.com')
- >>> _parse_proxy('http://joe:password@proxy.example.com:3128')
- ('http', 'joe', 'password', 'proxy.example.com:3128')
-
- Everything after the authority is ignored:
-
- >>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
- ('ftp', 'joe', 'password', 'proxy.example.com')
-
- Test for no trailing '/' case:
-
- >>> _parse_proxy('http://joe:password@proxy.example.com')
- ('http', 'joe', 'password', 'proxy.example.com')
-
- """
- scheme, r_scheme = splittype(proxy)
- if not r_scheme.startswith("/"):
- # authority
- scheme = None
- authority = proxy
- else:
- # URL
- if not r_scheme.startswith("//"):
- raise ValueError("proxy URL with no authority: %r" % proxy)
- # We have an authority, so for RFC 3986-compliant URLs (by ss 3.
- # and 3.3.), path is empty or starts with '/'
- end = r_scheme.find("/", 2)
- if end == -1:
- end = None
- authority = r_scheme[2:end]
- userinfo, hostport = splituser(authority)
- if userinfo is not None:
- user, password = splitpasswd(userinfo)
- else:
- user = password = None
- return scheme, user, password, hostport
-
-class ProxyHandler(BaseHandler):
- # Proxies must be in front
- handler_order = 100
-
- def __init__(self, proxies=None):
- if proxies is None:
- proxies = getproxies()
- assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
- self.proxies = proxies
- for type, url in proxies.items():
- setattr(self, '%s_open' % type,
- lambda r, proxy=url, type=type, meth=self.proxy_open: \
- meth(r, proxy, type))
-
- def proxy_open(self, req, proxy, type):
- orig_type = req.get_type()
- proxy_type, user, password, hostport = _parse_proxy(proxy)
- if proxy_type is None:
- proxy_type = orig_type
- if user and password:
- user_pass = '%s:%s' % (unquote(user), unquote(password))
- creds = base64.b64encode(user_pass).strip()
- req.add_header('Proxy-authorization', 'Basic ' + creds)
- hostport = unquote(hostport)
- req.set_proxy(hostport, proxy_type)
- if orig_type == proxy_type:
- # let other handlers take care of it
- return None
- else:
- # need to start over, because the other handlers don't
- # grok the proxy's URL type
- # e.g. if we have a constructor arg proxies like so:
- # {'http': 'ftp://proxy.example.com'}, we may end up turning
- # a request for http://acme.example.com/a into one for
- # ftp://proxy.example.com/a
- return self.parent.open(req)
-
-class HTTPPasswordMgr:
-
- def __init__(self):
- self.passwd = {}
-
- def add_password(self, realm, uri, user, passwd):
- # uri could be a single URI or a sequence
- if isinstance(uri, basestring):
- uri = [uri]
- if not realm in self.passwd:
- self.passwd[realm] = {}
- for default_port in True, False:
- reduced_uri = tuple(
- [self.reduce_uri(u, default_port) for u in uri])
- self.passwd[realm][reduced_uri] = (user, passwd)
-
- def find_user_password(self, realm, authuri):
- domains = self.passwd.get(realm, {})
- for default_port in True, False:
- reduced_authuri = self.reduce_uri(authuri, default_port)
- for uris, authinfo in domains.iteritems():
- for uri in uris:
- if self.is_suburi(uri, reduced_authuri):
- return authinfo
- return None, None
-
- def reduce_uri(self, uri, default_port=True):
- """Accept authority or URI and extract only the authority and path."""
- # note HTTP URLs do not have a userinfo component
- parts = urlparse.urlsplit(uri)
- if parts[1]:
- # URI
- scheme = parts[0]
- authority = parts[1]
- path = parts[2] or '/'
- else:
- # host or host:port
- scheme = None
- authority = uri
- path = '/'
- host, port = splitport(authority)
- if default_port and port is None and scheme is not None:
- dport = {"http": 80,
- "https": 443,
- }.get(scheme)
- if dport is not None:
- authority = "%s:%d" % (host, dport)
- return authority, path
-
- def is_suburi(self, base, test):
- """Check if test is below base in a URI tree
-
- Both args must be URIs in reduced form.
- """
- if base == test:
- return True
- if base[0] != test[0]:
- return False
- common = posixpath.commonprefix((base[1], test[1]))
- if len(common) == len(base[1]):
- return True
- return False
-
-
-class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
-
- def find_user_password(self, realm, authuri):
- user, password = HTTPPasswordMgr.find_user_password(self, realm,
- authuri)
- if user is not None:
- return user, password
- return HTTPPasswordMgr.find_user_password(self, None, authuri)
-
-
-class AbstractBasicAuthHandler:
-
- # XXX this allows for multiple auth-schemes, but will stupidly pick
- # the last one with a realm specified.
-
- rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', re.I)
-
- # XXX could pre-emptively send auth info already accepted (RFC 2617,
- # end of section 2, and section 1.2 immediately after "credentials"
- # production).
-
- def __init__(self, password_mgr=None):
- if password_mgr is None:
- password_mgr = HTTPPasswordMgr()
- self.passwd = password_mgr
- self.add_password = self.passwd.add_password
-
- def http_error_auth_reqed(self, authreq, host, req, headers):
- # host may be an authority (without userinfo) or a URL with an
- # authority
- # XXX could be multiple headers
- authreq = headers.get(authreq, None)
- if authreq:
- mo = AbstractBasicAuthHandler.rx.search(authreq)
- if mo:
- scheme, realm = mo.groups()
- if scheme.lower() == 'basic':
- return self.retry_http_basic_auth(host, req, realm)
-
- def retry_http_basic_auth(self, host, req, realm):
- user, pw = self.passwd.find_user_password(realm, host)
- if pw is not None:
- raw = "%s:%s" % (user, pw)
- auth = 'Basic %s' % base64.b64encode(raw).strip()
- if req.headers.get(self.auth_header, None) == auth:
- return None
- req.add_header(self.auth_header, auth)
- return self.parent.open(req)
- else:
- return None
-
-
-class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
-
- auth_header = 'Authorization'
-
- def http_error_401(self, req, fp, code, msg, headers):
- url = req.get_full_url()
- return self.http_error_auth_reqed('www-authenticate',
- url, req, headers)
-
-
-class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
-
- auth_header = 'Proxy-authorization'
-
- def http_error_407(self, req, fp, code, msg, headers):
- # http_error_auth_reqed requires that there is no userinfo component in
- # authority. Assume there isn't one, since urllib2 does not (and
- # should not, RFC 3986 s. 3.2.1) support requests for URLs containing
- # userinfo.
- authority = req.get_host()
- return self.http_error_auth_reqed('proxy-authenticate',
- authority, req, headers)
-
-
-def randombytes(n):
- """Return n random bytes."""
- # Use /dev/urandom if it is available. Fall back to random module
- # if not. It might be worthwhile to extend this function to use
- # other platform-specific mechanisms for getting random bytes.
- if os.path.exists("/dev/urandom"):
- f = open("/dev/urandom")
- s = f.read(n)
- f.close()
- return s
- else:
- L = [chr(random.randrange(0, 256)) for i in range(n)]
- return "".join(L)
-
-class AbstractDigestAuthHandler:
- # Digest authentication is specified in RFC 2617.
-
- # XXX The client does not inspect the Authentication-Info header
- # in a successful response.
-
- # XXX It should be possible to test this implementation against
- # a mock server that just generates a static set of challenges.
-
- # XXX qop="auth-int" supports is shaky
-
- def __init__(self, passwd=None):
- if passwd is None:
- passwd = HTTPPasswordMgr()
- self.passwd = passwd
- self.add_password = self.passwd.add_password
- self.retried = 0
- self.nonce_count = 0
-
- def reset_retry_count(self):
- self.retried = 0
-
- def http_error_auth_reqed(self, auth_header, host, req, headers):
- authreq = headers.get(auth_header, None)
- if self.retried > 5:
- # Don't fail endlessly - if we failed once, we'll probably
- # fail a second time. Hm. Unless the Password Manager is
- # prompting for the information. Crap. This isn't great
- # but it's better than the current 'repeat until recursion
- # depth exceeded' approach <wink>
- raise HTTPError(req.get_full_url(), 401, "digest auth failed",
- headers, None)
- else:
- self.retried += 1
- if authreq:
- scheme = authreq.split()[0]
- if scheme.lower() == 'digest':
- return self.retry_http_digest_auth(req, authreq)
-
- def retry_http_digest_auth(self, req, auth):
- token, challenge = auth.split(' ', 1)
- chal = parse_keqv_list(parse_http_list(challenge))
- auth = self.get_authorization(req, chal)
- if auth:
- auth_val = 'Digest %s' % auth
- if req.headers.get(self.auth_header, None) == auth_val:
- return None
- req.add_unredirected_header(self.auth_header, auth_val)
- resp = self.parent.open(req)
- return resp
-
- def get_cnonce(self, nonce):
- # The cnonce-value is an opaque
- # quoted string value provided by the client and used by both client
- # and server to avoid chosen plaintext attacks, to provide mutual
- # authentication, and to provide some message integrity protection.
- # This isn't a fabulous effort, but it's probably Good Enough.
- dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
- randombytes(8))).hexdigest()
- return dig[:16]
-
- def get_authorization(self, req, chal):
- try:
- realm = chal['realm']
- nonce = chal['nonce']
- qop = chal.get('qop')
- algorithm = chal.get('algorithm', 'MD5')
- # mod_digest doesn't send an opaque, even though it isn't
- # supposed to be optional
- opaque = chal.get('opaque', None)
- except KeyError:
- return None
-
- H, KD = self.get_algorithm_impls(algorithm)
- if H is None:
- return None
-
- user, pw = self.passwd.find_user_password(realm, req.get_full_url())
- if user is None:
- return None
-
- # XXX not implemented yet
- if req.has_data():
- entdig = self.get_entity_digest(req.get_data(), chal)
- else:
- entdig = None
-
- A1 = "%s:%s:%s" % (user, realm, pw)
- A2 = "%s:%s" % (req.get_method(),
- # XXX selector: what about proxies and full urls
- req.get_selector())
- if qop == 'auth':
- self.nonce_count += 1
- ncvalue = '%08x' % self.nonce_count
- cnonce = self.get_cnonce(nonce)
- noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
- respdig = KD(H(A1), noncebit)
- elif qop is None:
- respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
- else:
- # XXX handle auth-int.
- pass
-
- # XXX should the partial digests be encoded too?
-
- base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
- 'response="%s"' % (user, realm, nonce, req.get_selector(),
- respdig)
- if opaque:
- base += ', opaque="%s"' % opaque
- if entdig:
- base += ', digest="%s"' % entdig
- base += ', algorithm="%s"' % algorithm
- if qop:
- base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
- return base
-
- def get_algorithm_impls(self, algorithm):
- # lambdas assume digest modules are imported at the top level
- if algorithm == 'MD5':
- H = lambda x: hashlib.md5(x).hexdigest()
- elif algorithm == 'SHA':
- H = lambda x: hashlib.sha1(x).hexdigest()
- # XXX MD5-sess
- KD = lambda s, d: H("%s:%s" % (s, d))
- return H, KD
-
- def get_entity_digest(self, data, chal):
- # XXX not implemented yet
- return None
-
-
-class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
- """An authentication protocol defined by RFC 2069
-
- Digest authentication improves on basic authentication because it
- does not transmit passwords in the clear.
- """
-
- auth_header = 'Authorization'
- handler_order = 490 # before Basic auth
-
- def http_error_401(self, req, fp, code, msg, headers):
- host = urlparse.urlparse(req.get_full_url())[1]
- retry = self.http_error_auth_reqed('www-authenticate',
- host, req, headers)
- self.reset_retry_count()
- return retry
-
-
-class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
-
- auth_header = 'Proxy-Authorization'
- handler_order = 490 # before Basic auth
-
- def http_error_407(self, req, fp, code, msg, headers):
- host = req.get_host()
- retry = self.http_error_auth_reqed('proxy-authenticate',
- host, req, headers)
- self.reset_retry_count()
- return retry
-
-class AbstractHTTPHandler(BaseHandler):
-
- def __init__(self, debuglevel=0):
- self._debuglevel = debuglevel
-
- def set_http_debuglevel(self, level):
- self._debuglevel = level
-
- def do_request_(self, request):
- host = request.get_host()
- if not host:
- raise URLError('no host given')
-
- if request.has_data(): # POST
- data = request.get_data()
- if not request.has_header('Content-type'):
- request.add_unredirected_header(
- 'Content-type',
- 'application/x-www-form-urlencoded')
- if not request.has_header('Content-length'):
- request.add_unredirected_header(
- 'Content-length', '%d' % len(data))
-
- scheme, sel = splittype(request.get_selector())
- sel_host, sel_path = splithost(sel)
- if not request.has_header('Host'):
- request.add_unredirected_header('Host', sel_host or host)
- for name, value in self.parent.addheaders:
- name = name.capitalize()
- if not request.has_header(name):
- request.add_unredirected_header(name, value)
-
- return request
-
- def do_open(self, http_class, req):
- """Return an addinfourl object for the request, using http_class.
-
- http_class must implement the HTTPConnection API from httplib.
- The addinfourl return value is a file-like object. It also
- has methods and attributes including:
- - info(): return a mimetools.Message object for the headers
- - geturl(): return the original request URL
- - code: HTTP status code
- """
- host = req.get_host()
- if not host:
- raise URLError('no host given')
-
- h = http_class(host) # will parse host:port
- h.set_debuglevel(self._debuglevel)
-
- headers = dict(req.headers)
- headers.update(req.unredirected_hdrs)
- # We want to make an HTTP/1.1 request, but the addinfourl
- # class isn't prepared to deal with a persistent connection.
- # It will try to read all remaining data from the socket,
- # which will block while the server waits for the next request.
- # So make sure the connection gets closed after the (only)
- # request.
- headers["Connection"] = "close"
- headers = dict(
- (name.title(), val) for name, val in headers.items())
- try:
- h.request(req.get_method(), req.get_selector(), req.data, headers)
- r = h.getresponse()
- except socket.error, err: # XXX what error?
- raise URLError(err)
-
- # Pick apart the HTTPResponse object to get the addinfourl
- # object initialized properly.
-
- # Wrap the HTTPResponse object in socket's file object adapter
- # for Windows. That adapter calls recv(), so delegate recv()
- # to read(). This weird wrapping allows the returned object to
- # have readline() and readlines() methods.
-
- # XXX It might be better to extract the read buffering code
- # out of socket._fileobject() and into a base class.
-
- r.recv = r.read
- fp = socket._fileobject(r, close=True)
-
- resp = addinfourl(fp, r.msg, req.get_full_url())
- resp.code = r.status
- resp.msg = r.reason
- return resp
-
-
-class HTTPHandler(AbstractHTTPHandler):
-
- def http_open(self, req):
- return self.do_open(httplib.HTTPConnection, req)
-
- http_request = AbstractHTTPHandler.do_request_
-
-if hasattr(httplib, 'HTTPS'):
- class HTTPSHandler(AbstractHTTPHandler):
-
- def https_open(self, req):
- return self.do_open(httplib.HTTPSConnection, req)
-
- https_request = AbstractHTTPHandler.do_request_
-
-class HTTPCookieProcessor(BaseHandler):
- def __init__(self, cookiejar=None):
- import cookielib
- if cookiejar is None:
- cookiejar = cookielib.CookieJar()
- self.cookiejar = cookiejar
-
- def http_request(self, request):
- self.cookiejar.add_cookie_header(request)
- return request
-
- def http_response(self, request, response):
- self.cookiejar.extract_cookies(response, request)
- return response
-
- https_request = http_request
- https_response = http_response
-
-class UnknownHandler(BaseHandler):
- def unknown_open(self, req):
- type = req.get_type()
- raise URLError('unknown url type: %s' % type)
-
-def parse_keqv_list(l):
- """Parse list of key=value strings where keys are not duplicated."""
- parsed = {}
- for elt in l:
- k, v = elt.split('=', 1)
- if v[0] == '"' and v[-1] == '"':
- v = v[1:-1]
- parsed[k] = v
- return parsed
-
-def parse_http_list(s):
- """Parse lists as described by RFC 2068 Section 2.
-
- In particular, parse comma-separated lists where the elements of
- the list may include quoted-strings. A quoted-string could
- contain a comma. A non-quoted string could have quotes in the
- middle. Neither commas nor quotes count if they are escaped.
- Only double-quotes count, not single-quotes.
- """
- res = []
- part = ''
-
- escape = quote = False
- for cur in s:
- if escape:
- part += cur
- escape = False
- continue
- if quote:
- if cur == '\\':
- escape = True
- continue
- elif cur == '"':
- quote = False
- part += cur
- continue
-
- if cur == ',':
- res.append(part)
- part = ''
- continue
-
- if cur == '"':
- quote = True
-
- part += cur
-
- # append last part
- if part:
- res.append(part)
-
- return [part.strip() for part in res]
-
-class FileHandler(BaseHandler):
- # Use local file or FTP depending on form of URL
- def file_open(self, req):
- url = req.get_selector()
- if url[:2] == '//' and url[2:3] != '/':
- req.type = 'ftp'
- return self.parent.open(req)
- else:
- return self.open_local_file(req)
-
- # names for the localhost
- names = None
- def get_names(self):
- if FileHandler.names is None:
- try:
- FileHandler.names = (socket.gethostbyname('localhost'),
- socket.gethostbyname(socket.gethostname()))
- except socket.gaierror:
- FileHandler.names = (socket.gethostbyname('localhost'),)
- return FileHandler.names
-
- # not entirely sure what the rules are here
- def open_local_file(self, req):
- import email.Utils
- import mimetypes
- host = req.get_host()
- file = req.get_selector()
- localfile = url2pathname(file)
- stats = os.stat(localfile)
- size = stats.st_size
- modified = email.Utils.formatdate(stats.st_mtime, usegmt=True)
- mtype = mimetypes.guess_type(file)[0]
- headers = mimetools.Message(StringIO(
- 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
- (mtype or 'text/plain', size, modified)))
- if host:
- host, port = splitport(host)
- if not host or \
- (not port and socket.gethostbyname(host) in self.get_names()):
- return addinfourl(open(localfile, 'rb'),
- headers, 'file:'+file)
- raise URLError('file not on local host')
-
-class FTPHandler(BaseHandler):
- def ftp_open(self, req):
- import ftplib
- import mimetypes
- host = req.get_host()
- if not host:
- raise IOError, ('ftp error', 'no host given')
- host, port = splitport(host)
- if port is None:
- port = ftplib.FTP_PORT
- else:
- port = int(port)
-
- # username/password handling
- user, host = splituser(host)
- if user:
- user, passwd = splitpasswd(user)
- else:
- passwd = None
- host = unquote(host)
- user = unquote(user or '')
- passwd = unquote(passwd or '')
-
- try:
- host = socket.gethostbyname(host)
- except socket.error, msg:
- raise URLError(msg)
- path, attrs = splitattr(req.get_selector())
- dirs = path.split('/')
- dirs = map(unquote, dirs)
- dirs, file = dirs[:-1], dirs[-1]
- if dirs and not dirs[0]:
- dirs = dirs[1:]
- try:
- fw = self.connect_ftp(user, passwd, host, port, dirs)
- type = file and 'I' or 'D'
- for attr in attrs:
- attr, value = splitvalue(attr)
- if attr.lower() == 'type' and \
- value in ('a', 'A', 'i', 'I', 'd', 'D'):
- type = value.upper()
- fp, retrlen = fw.retrfile(file, type)
- headers = ""
- mtype = mimetypes.guess_type(req.get_full_url())[0]
- if mtype:
- headers += "Content-type: %s\n" % mtype
- if retrlen is not None and retrlen >= 0:
- headers += "Content-length: %d\n" % retrlen
- sf = StringIO(headers)
- headers = mimetools.Message(sf)
- return addinfourl(fp, headers, req.get_full_url())
- except ftplib.all_errors, msg:
- raise IOError, ('ftp error', msg), sys.exc_info()[2]
-
- def connect_ftp(self, user, passwd, host, port, dirs):
- fw = ftpwrapper(user, passwd, host, port, dirs)
-## fw.ftp.set_debuglevel(1)
- return fw
-
-class CacheFTPHandler(FTPHandler):
- # XXX would be nice to have pluggable cache strategies
- # XXX this stuff is definitely not thread safe
- def __init__(self):
- self.cache = {}
- self.timeout = {}
- self.soonest = 0
- self.delay = 60
- self.max_conns = 16
-
- def setTimeout(self, t):
- self.delay = t
-
- def setMaxConns(self, m):
- self.max_conns = m
-
- def connect_ftp(self, user, passwd, host, port, dirs):
- key = user, host, port, '/'.join(dirs)
- if key in self.cache:
- self.timeout[key] = time.time() + self.delay
- else:
- self.cache[key] = ftpwrapper(user, passwd, host, port, dirs)
- self.timeout[key] = time.time() + self.delay
- self.check_cache()
- return self.cache[key]
-
- def check_cache(self):
- # first check for old ones
- t = time.time()
- if self.soonest <= t:
- for k, v in self.timeout.items():
- if v < t:
- self.cache[k].close()
- del self.cache[k]
- del self.timeout[k]
- self.soonest = min(self.timeout.values())
-
- # then check the size
- if len(self.cache) == self.max_conns:
- for k, v in self.timeout.items():
- if v == self.soonest:
- del self.cache[k]
- del self.timeout[k]
- break
- self.soonest = min(self.timeout.values())
-
-class GopherHandler(BaseHandler):
- def gopher_open(self, req):
- # XXX can raise socket.error
- import gopherlib # this raises DeprecationWarning in 2.5
- host = req.get_host()
- if not host:
- raise GopherError('no host given')
- host = unquote(host)
- selector = req.get_selector()
- type, selector = splitgophertype(selector)
- selector, query = splitquery(selector)
- selector = unquote(selector)
- if query:
- query = unquote(query)
- fp = gopherlib.send_query(selector, query, host)
- else:
- fp = gopherlib.send_selector(selector, host)
- return addinfourl(fp, noheaders(), req.get_full_url())
diff --git a/sys/lib/python/urlparse.py b/sys/lib/python/urlparse.py
deleted file mode 100644
index eade040ff..000000000
--- a/sys/lib/python/urlparse.py
+++ /dev/null
@@ -1,375 +0,0 @@
-"""Parse (absolute and relative) URLs.
-
-See RFC 1808: "Relative Uniform Resource Locators", by R. Fielding,
-UC Irvine, June 1995.
-"""
-
-__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
- "urlsplit", "urlunsplit"]
-
-# A classification of schemes ('' means apply by default)
-uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
- 'wais', 'file', 'https', 'shttp', 'mms',
- 'prospero', 'rtsp', 'rtspu', '', 'sftp']
-uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
- 'imap', 'wais', 'file', 'mms', 'https', 'shttp',
- 'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
- 'svn', 'svn+ssh', 'sftp']
-non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
- 'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
-uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
- 'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
- 'mms', '', 'sftp']
-uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
- 'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
-uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
- 'nntp', 'wais', 'https', 'shttp', 'snews',
- 'file', 'prospero', '']
-
-# Characters valid in scheme names
-scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
- '0123456789'
- '+-.')
-
-MAX_CACHE_SIZE = 20
-_parse_cache = {}
-
-def clear_cache():
- """Clear the parse cache."""
- global _parse_cache
- _parse_cache = {}
-
-
-class BaseResult(tuple):
- """Base class for the parsed result objects.
-
- This provides the attributes shared by the two derived result
- objects as read-only properties. The derived classes are
- responsible for checking the right number of arguments were
- supplied to the constructor.
-
- """
-
- __slots__ = ()
-
- # Attributes that access the basic components of the URL:
-
- @property
- def scheme(self):
- return self[0]
-
- @property
- def netloc(self):
- return self[1]
-
- @property
- def path(self):
- return self[2]
-
- @property
- def query(self):
- return self[-2]
-
- @property
- def fragment(self):
- return self[-1]
-
- # Additional attributes that provide access to parsed-out portions
- # of the netloc:
-
- @property
- def username(self):
- netloc = self.netloc
- if "@" in netloc:
- userinfo = netloc.split("@", 1)[0]
- if ":" in userinfo:
- userinfo = userinfo.split(":", 1)[0]
- return userinfo
- return None
-
- @property
- def password(self):
- netloc = self.netloc
- if "@" in netloc:
- userinfo = netloc.split("@", 1)[0]
- if ":" in userinfo:
- return userinfo.split(":", 1)[1]
- return None
-
- @property
- def hostname(self):
- netloc = self.netloc
- if "@" in netloc:
- netloc = netloc.split("@", 1)[1]
- if ":" in netloc:
- netloc = netloc.split(":", 1)[0]
- return netloc.lower() or None
-
- @property
- def port(self):
- netloc = self.netloc
- if "@" in netloc:
- netloc = netloc.split("@", 1)[1]
- if ":" in netloc:
- port = netloc.split(":", 1)[1]
- return int(port, 10)
- return None
-
-
-class SplitResult(BaseResult):
-
- __slots__ = ()
-
- def __new__(cls, scheme, netloc, path, query, fragment):
- return BaseResult.__new__(
- cls, (scheme, netloc, path, query, fragment))
-
- def geturl(self):
- return urlunsplit(self)
-
-
-class ParseResult(BaseResult):
-
- __slots__ = ()
-
- def __new__(cls, scheme, netloc, path, params, query, fragment):
- return BaseResult.__new__(
- cls, (scheme, netloc, path, params, query, fragment))
-
- @property
- def params(self):
- return self[3]
-
- def geturl(self):
- return urlunparse(self)
-
-
-def urlparse(url, scheme='', allow_fragments=True):
- """Parse a URL into 6 components:
- <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
- Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
- Note that we don't break the components up in smaller bits
- (e.g. netloc is a single string) and we don't expand % escapes."""
- tuple = urlsplit(url, scheme, allow_fragments)
- scheme, netloc, url, query, fragment = tuple
- if scheme in uses_params and ';' in url:
- url, params = _splitparams(url)
- else:
- params = ''
- return ParseResult(scheme, netloc, url, params, query, fragment)
-
-def _splitparams(url):
- if '/' in url:
- i = url.find(';', url.rfind('/'))
- if i < 0:
- return url, ''
- else:
- i = url.find(';')
- return url[:i], url[i+1:]
-
-def _splitnetloc(url, start=0):
- for c in '/?#': # the order is important!
- delim = url.find(c, start)
- if delim >= 0:
- break
- else:
- delim = len(url)
- return url[start:delim], url[delim:]
-
-def urlsplit(url, scheme='', allow_fragments=True):
- """Parse a URL into 5 components:
- <scheme>://<netloc>/<path>?<query>#<fragment>
- Return a 5-tuple: (scheme, netloc, path, query, fragment).
- Note that we don't break the components up in smaller bits
- (e.g. netloc is a single string) and we don't expand % escapes."""
- allow_fragments = bool(allow_fragments)
- key = url, scheme, allow_fragments
- cached = _parse_cache.get(key, None)
- if cached:
- return cached
- if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
- clear_cache()
- netloc = query = fragment = ''
- i = url.find(':')
- if i > 0:
- if url[:i] == 'http': # optimize the common case
- scheme = url[:i].lower()
- url = url[i+1:]
- if url[:2] == '//':
- netloc, url = _splitnetloc(url, 2)
- if allow_fragments and '#' in url:
- url, fragment = url.split('#', 1)
- if '?' in url:
- url, query = url.split('?', 1)
- v = SplitResult(scheme, netloc, url, query, fragment)
- _parse_cache[key] = v
- return v
- for c in url[:i]:
- if c not in scheme_chars:
- break
- else:
- scheme, url = url[:i].lower(), url[i+1:]
- if scheme in uses_netloc and url[:2] == '//':
- netloc, url = _splitnetloc(url, 2)
- if allow_fragments and scheme in uses_fragment and '#' in url:
- url, fragment = url.split('#', 1)
- if scheme in uses_query and '?' in url:
- url, query = url.split('?', 1)
- v = SplitResult(scheme, netloc, url, query, fragment)
- _parse_cache[key] = v
- return v
-
-def urlunparse((scheme, netloc, url, params, query, fragment)):
- """Put a parsed URL back together again. This may result in a
- slightly different, but equivalent URL, if the URL that was parsed
- originally had redundant delimiters, e.g. a ? with an empty query
- (the draft states that these are equivalent)."""
- if params:
- url = "%s;%s" % (url, params)
- return urlunsplit((scheme, netloc, url, query, fragment))
-
-def urlunsplit((scheme, netloc, url, query, fragment)):
- if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
- if url and url[:1] != '/': url = '/' + url
- url = '//' + (netloc or '') + url
- if scheme:
- url = scheme + ':' + url
- if query:
- url = url + '?' + query
- if fragment:
- url = url + '#' + fragment
- return url
-
-def urljoin(base, url, allow_fragments=True):
- """Join a base URL and a possibly relative URL to form an absolute
- interpretation of the latter."""
- if not base:
- return url
- if not url:
- return base
- bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
- urlparse(base, '', allow_fragments)
- scheme, netloc, path, params, query, fragment = \
- urlparse(url, bscheme, allow_fragments)
- if scheme != bscheme or scheme not in uses_relative:
- return url
- if scheme in uses_netloc:
- if netloc:
- return urlunparse((scheme, netloc, path,
- params, query, fragment))
- netloc = bnetloc
- if path[:1] == '/':
- return urlunparse((scheme, netloc, path,
- params, query, fragment))
- if not (path or params or query):
- return urlunparse((scheme, netloc, bpath,
- bparams, bquery, fragment))
- segments = bpath.split('/')[:-1] + path.split('/')
- # XXX The stuff below is bogus in various ways...
- if segments[-1] == '.':
- segments[-1] = ''
- while '.' in segments:
- segments.remove('.')
- while 1:
- i = 1
- n = len(segments) - 1
- while i < n:
- if (segments[i] == '..'
- and segments[i-1] not in ('', '..')):
- del segments[i-1:i+1]
- break
- i = i+1
- else:
- break
- if segments == ['', '..']:
- segments[-1] = ''
- elif len(segments) >= 2 and segments[-1] == '..':
- segments[-2:] = ['']
- return urlunparse((scheme, netloc, '/'.join(segments),
- params, query, fragment))
-
-def urldefrag(url):
- """Removes any existing fragment from URL.
-
- Returns a tuple of the defragmented URL and the fragment. If
- the URL contained no fragments, the second element is the
- empty string.
- """
- if '#' in url:
- s, n, p, a, q, frag = urlparse(url)
- defrag = urlunparse((s, n, p, a, q, ''))
- return defrag, frag
- else:
- return url, ''
-
-
-test_input = """
- http://a/b/c/d
-
- g:h = <URL:g:h>
- http:g = <URL:http://a/b/c/g>
- http: = <URL:http://a/b/c/d>
- g = <URL:http://a/b/c/g>
- ./g = <URL:http://a/b/c/g>
- g/ = <URL:http://a/b/c/g/>
- /g = <URL:http://a/g>
- //g = <URL:http://g>
- ?y = <URL:http://a/b/c/d?y>
- g?y = <URL:http://a/b/c/g?y>
- g?y/./x = <URL:http://a/b/c/g?y/./x>
- . = <URL:http://a/b/c/>
- ./ = <URL:http://a/b/c/>
- .. = <URL:http://a/b/>
- ../ = <URL:http://a/b/>
- ../g = <URL:http://a/b/g>
- ../.. = <URL:http://a/>
- ../../g = <URL:http://a/g>
- ../../../g = <URL:http://a/../g>
- ./../g = <URL:http://a/b/g>
- ./g/. = <URL:http://a/b/c/g/>
- /./g = <URL:http://a/./g>
- g/./h = <URL:http://a/b/c/g/h>
- g/../h = <URL:http://a/b/c/h>
- http:g = <URL:http://a/b/c/g>
- http: = <URL:http://a/b/c/d>
- http:?y = <URL:http://a/b/c/d?y>
- http:g?y = <URL:http://a/b/c/g?y>
- http:g?y/./x = <URL:http://a/b/c/g?y/./x>
-"""
-
-def test():
- import sys
- base = ''
- if sys.argv[1:]:
- fn = sys.argv[1]
- if fn == '-':
- fp = sys.stdin
- else:
- fp = open(fn)
- else:
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
- fp = StringIO(test_input)
- while 1:
- line = fp.readline()
- if not line: break
- words = line.split()
- if not words:
- continue
- url = words[0]
- parts = urlparse(url)
- print '%-10s : %s' % (url, parts)
- abs = urljoin(base, url)
- if not base:
- base = abs
- wrapped = '<URL:%s>' % abs
- print '%-10s = %s' % (url, wrapped)
- if len(words) == 3 and words[1] == '=':
- if wrapped != words[2]:
- print 'EXPECTED', words[2], '!!!!!!!!!!'
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/user.py b/sys/lib/python/user.py
deleted file mode 100644
index e550e52ad..000000000
--- a/sys/lib/python/user.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""Hook to allow user-specified customization code to run.
-
-As a policy, Python doesn't run user-specified code on startup of
-Python programs (interactive sessions execute the script specified in
-the PYTHONSTARTUP environment variable if it exists).
-
-However, some programs or sites may find it convenient to allow users
-to have a standard customization file, which gets run when a program
-requests it. This module implements such a mechanism. A program
-that wishes to use the mechanism must execute the statement
-
- import user
-
-The user module looks for a file .pythonrc.py in the user's home
-directory and if it can be opened, execfile()s it in its own global
-namespace. Errors during this phase are not caught; that's up to the
-program that imports the user module, if it wishes.
-
-The user's .pythonrc.py could conceivably test for sys.version if it
-wishes to do different things depending on the Python version.
-
-"""
-
-import os
-
-home = os.curdir # Default
-if 'HOME' in os.environ:
- home = os.environ['HOME']
-elif os.name == 'posix':
- home = os.path.expanduser("~/")
-elif os.name == 'nt': # Contributed by Jeff Bauer
- if 'HOMEPATH' in os.environ:
- if 'HOMEDRIVE' in os.environ:
- home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
- else:
- home = os.environ['HOMEPATH']
-
-pythonrc = os.path.join(home, ".pythonrc.py")
-try:
- f = open(pythonrc)
-except IOError:
- pass
-else:
- f.close()
- execfile(pythonrc)
diff --git a/sys/lib/python/uu.py b/sys/lib/python/uu.py
deleted file mode 100755
index da89f7298..000000000
--- a/sys/lib/python/uu.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#! /usr/bin/env python
-
-# Copyright 1994 by Lance Ellinghouse
-# Cathedral City, California Republic, United States of America.
-# All Rights Reserved
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose and without fee is hereby granted,
-# provided that the above copyright notice appear in all copies and that
-# both that copyright notice and this permission notice appear in
-# supporting documentation, and that the name of Lance Ellinghouse
-# not be used in advertising or publicity pertaining to distribution
-# of the software without specific, written prior permission.
-# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
-# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
-# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
-# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-#
-# Modified by Jack Jansen, CWI, July 1995:
-# - Use binascii module to do the actual line-by-line conversion
-# between ascii and binary. This results in a 1000-fold speedup. The C
-# version is still 5 times faster, though.
-# - Arguments more compliant with python standard
-
-"""Implementation of the UUencode and UUdecode functions.
-
-encode(in_file, out_file [,name, mode])
-decode(in_file [, out_file, mode])
-"""
-
-import binascii
-import os
-import sys
-
-__all__ = ["Error", "encode", "decode"]
-
-class Error(Exception):
- pass
-
-def encode(in_file, out_file, name=None, mode=None):
- """Uuencode file"""
- #
- # If in_file is a pathname open it and change defaults
- #
- if in_file == '-':
- in_file = sys.stdin
- elif isinstance(in_file, basestring):
- if name is None:
- name = os.path.basename(in_file)
- if mode is None:
- try:
- mode = os.stat(in_file).st_mode
- except AttributeError:
- pass
- in_file = open(in_file, 'rb')
- #
- # Open out_file if it is a pathname
- #
- if out_file == '-':
- out_file = sys.stdout
- elif isinstance(out_file, basestring):
- out_file = open(out_file, 'w')
- #
- # Set defaults for name and mode
- #
- if name is None:
- name = '-'
- if mode is None:
- mode = 0666
- #
- # Write the data
- #
- out_file.write('begin %o %s\n' % ((mode&0777),name))
- data = in_file.read(45)
- while len(data) > 0:
- out_file.write(binascii.b2a_uu(data))
- data = in_file.read(45)
- out_file.write(' \nend\n')
-
-
-def decode(in_file, out_file=None, mode=None, quiet=0):
- """Decode uuencoded file"""
- #
- # Open the input file, if needed.
- #
- if in_file == '-':
- in_file = sys.stdin
- elif isinstance(in_file, basestring):
- in_file = open(in_file)
- #
- # Read until a begin is encountered or we've exhausted the file
- #
- while True:
- hdr = in_file.readline()
- if not hdr:
- raise Error('No valid begin line found in input file')
- if not hdr.startswith('begin'):
- continue
- hdrfields = hdr.split(' ', 2)
- if len(hdrfields) == 3 and hdrfields[0] == 'begin':
- try:
- int(hdrfields[1], 8)
- break
- except ValueError:
- pass
- if out_file is None:
- out_file = hdrfields[2].rstrip()
- if os.path.exists(out_file):
- raise Error('Cannot overwrite existing file: %s' % out_file)
- if mode is None:
- mode = int(hdrfields[1], 8)
- #
- # Open the output file
- #
- opened = False
- if out_file == '-':
- out_file = sys.stdout
- elif isinstance(out_file, basestring):
- fp = open(out_file, 'wb')
- try:
- os.path.chmod(out_file, mode)
- except AttributeError:
- pass
- out_file = fp
- opened = True
- #
- # Main decoding loop
- #
- s = in_file.readline()
- while s and s.strip() != 'end':
- try:
- data = binascii.a2b_uu(s)
- except binascii.Error, v:
- # Workaround for broken uuencoders by /Fredrik Lundh
- nbytes = (((ord(s[0])-32) & 63) * 4 + 5) // 3
- data = binascii.a2b_uu(s[:nbytes])
- if not quiet:
- sys.stderr.write("Warning: %s\n" % v)
- out_file.write(data)
- s = in_file.readline()
- if not s:
- raise Error('Truncated input file')
- if opened:
- out_file.close()
-
-def test():
- """uuencode/uudecode main program"""
-
- import optparse
- parser = optparse.OptionParser(usage='usage: %prog [-d] [-t] [input [output]]')
- parser.add_option('-d', '--decode', dest='decode', help='Decode (instead of encode)?', default=False, action='store_true')
- parser.add_option('-t', '--text', dest='text', help='data is text, encoded format unix-compatible text?', default=False, action='store_true')
-
- (options, args) = parser.parse_args()
- if len(args) > 2:
- parser.error('incorrect number of arguments')
- sys.exit(1)
-
- input = sys.stdin
- output = sys.stdout
- if len(args) > 0:
- input = args[0]
- if len(args) > 1:
- output = args[1]
-
- if options.decode:
- if options.text:
- if isinstance(output, basestring):
- output = open(output, 'w')
- else:
- print sys.argv[0], ': cannot do -t to stdout'
- sys.exit(1)
- decode(input, output)
- else:
- if options.text:
- if isinstance(input, basestring):
- input = open(input, 'r')
- else:
- print sys.argv[0], ': cannot do -t from stdin'
- sys.exit(1)
- encode(input, output)
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/uuid.py b/sys/lib/python/uuid.py
deleted file mode 100644
index ae3da25ca..000000000
--- a/sys/lib/python/uuid.py
+++ /dev/null
@@ -1,541 +0,0 @@
-r"""UUID objects (universally unique identifiers) according to RFC 4122.
-
-This module provides immutable UUID objects (class UUID) and the functions
-uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
-UUIDs as specified in RFC 4122.
-
-If all you want is a unique ID, you should probably call uuid1() or uuid4().
-Note that uuid1() may compromise privacy since it creates a UUID containing
-the computer's network address. uuid4() creates a random UUID.
-
-Typical usage:
-
- >>> import uuid
-
- # make a UUID based on the host ID and current time
- >>> uuid.uuid1()
- UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
-
- # make a UUID using an MD5 hash of a namespace UUID and a name
- >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
- UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
-
- # make a random UUID
- >>> uuid.uuid4()
- UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
-
- # make a UUID using a SHA-1 hash of a namespace UUID and a name
- >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
- UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
-
- # make a UUID from a string of hex digits (braces and hyphens ignored)
- >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
-
- # convert a UUID to a string of hex digits in standard form
- >>> str(x)
- '00010203-0405-0607-0809-0a0b0c0d0e0f'
-
- # get the raw 16 bytes of the UUID
- >>> x.bytes
- '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
-
- # make a UUID from a 16-byte string
- >>> uuid.UUID(bytes=x.bytes)
- UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
-"""
-
-__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
-
-RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
- 'reserved for NCS compatibility', 'specified in RFC 4122',
- 'reserved for Microsoft compatibility', 'reserved for future definition']
-
-class UUID(object):
- """Instances of the UUID class represent UUIDs as specified in RFC 4122.
- UUID objects are immutable, hashable, and usable as dictionary keys.
- Converting a UUID to a string with str() yields something in the form
- '12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
- five possible forms: a similar string of hexadecimal digits, or a tuple
- of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
- 48-bit values respectively) as an argument named 'fields', or a string
- of 16 bytes (with all the integer fields in big-endian order) as an
- argument named 'bytes', or a string of 16 bytes (with the first three
- fields in little-endian order) as an argument named 'bytes_le', or a
- single 128-bit integer as an argument named 'int'.
-
- UUIDs have these read-only attributes:
-
- bytes the UUID as a 16-byte string (containing the six
- integer fields in big-endian byte order)
-
- bytes_le the UUID as a 16-byte string (with time_low, time_mid,
- and time_hi_version in little-endian byte order)
-
- fields a tuple of the six integer fields of the UUID,
- which are also available as six individual attributes
- and two derived attributes:
-
- time_low the first 32 bits of the UUID
- time_mid the next 16 bits of the UUID
- time_hi_version the next 16 bits of the UUID
- clock_seq_hi_variant the next 8 bits of the UUID
- clock_seq_low the next 8 bits of the UUID
- node the last 48 bits of the UUID
-
- time the 60-bit timestamp
- clock_seq the 14-bit sequence number
-
- hex the UUID as a 32-character hexadecimal string
-
- int the UUID as a 128-bit integer
-
- urn the UUID as a URN as specified in RFC 4122
-
- variant the UUID variant (one of the constants RESERVED_NCS,
- RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
-
- version the UUID version number (1 through 5, meaningful only
- when the variant is RFC_4122)
- """
-
- def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
- int=None, version=None):
- r"""Create a UUID from either a string of 32 hexadecimal digits,
- a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
- in little-endian order as the 'bytes_le' argument, a tuple of six
- integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
- 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
- the 'fields' argument, or a single 128-bit integer as the 'int'
- argument. When a string of hex digits is given, curly braces,
- hyphens, and a URN prefix are all optional. For example, these
- expressions all yield the same UUID:
-
- UUID('{12345678-1234-5678-1234-567812345678}')
- UUID('12345678123456781234567812345678')
- UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
- UUID(bytes='\x12\x34\x56\x78'*4)
- UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
- '\x12\x34\x56\x78\x12\x34\x56\x78')
- UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
- UUID(int=0x12345678123456781234567812345678)
-
- Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
- be given. The 'version' argument is optional; if given, the resulting
- UUID will have its variant and version set according to RFC 4122,
- overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
- """
-
- if [hex, bytes, bytes_le, fields, int].count(None) != 4:
- raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
- if hex is not None:
- hex = hex.replace('urn:', '').replace('uuid:', '')
- hex = hex.strip('{}').replace('-', '')
- if len(hex) != 32:
- raise ValueError('badly formed hexadecimal UUID string')
- int = long(hex, 16)
- if bytes_le is not None:
- if len(bytes_le) != 16:
- raise ValueError('bytes_le is not a 16-char string')
- bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
- bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
- bytes_le[8:])
- if bytes is not None:
- if len(bytes) != 16:
- raise ValueError('bytes is not a 16-char string')
- int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
- if fields is not None:
- if len(fields) != 6:
- raise ValueError('fields is not a 6-tuple')
- (time_low, time_mid, time_hi_version,
- clock_seq_hi_variant, clock_seq_low, node) = fields
- if not 0 <= time_low < 1<<32L:
- raise ValueError('field 1 out of range (need a 32-bit value)')
- if not 0 <= time_mid < 1<<16L:
- raise ValueError('field 2 out of range (need a 16-bit value)')
- if not 0 <= time_hi_version < 1<<16L:
- raise ValueError('field 3 out of range (need a 16-bit value)')
- if not 0 <= clock_seq_hi_variant < 1<<8L:
- raise ValueError('field 4 out of range (need an 8-bit value)')
- if not 0 <= clock_seq_low < 1<<8L:
- raise ValueError('field 5 out of range (need an 8-bit value)')
- if not 0 <= node < 1<<48L:
- raise ValueError('field 6 out of range (need a 48-bit value)')
- clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
- int = ((time_low << 96L) | (time_mid << 80L) |
- (time_hi_version << 64L) | (clock_seq << 48L) | node)
- if int is not None:
- if not 0 <= int < 1<<128L:
- raise ValueError('int is out of range (need a 128-bit value)')
- if version is not None:
- if not 1 <= version <= 5:
- raise ValueError('illegal version number')
- # Set the variant to RFC 4122.
- int &= ~(0xc000 << 48L)
- int |= 0x8000 << 48L
- # Set the version number.
- int &= ~(0xf000 << 64L)
- int |= version << 76L
- self.__dict__['int'] = int
-
- def __cmp__(self, other):
- if isinstance(other, UUID):
- return cmp(self.int, other.int)
- return NotImplemented
-
- def __hash__(self):
- return hash(self.int)
-
- def __int__(self):
- return self.int
-
- def __repr__(self):
- return 'UUID(%r)' % str(self)
-
- def __setattr__(self, name, value):
- raise TypeError('UUID objects are immutable')
-
- def __str__(self):
- hex = '%032x' % self.int
- return '%s-%s-%s-%s-%s' % (
- hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
-
- def get_bytes(self):
- bytes = ''
- for shift in range(0, 128, 8):
- bytes = chr((self.int >> shift) & 0xff) + bytes
- return bytes
-
- bytes = property(get_bytes)
-
- def get_bytes_le(self):
- bytes = self.bytes
- return (bytes[3] + bytes[2] + bytes[1] + bytes[0] +
- bytes[5] + bytes[4] + bytes[7] + bytes[6] + bytes[8:])
-
- bytes_le = property(get_bytes_le)
-
- def get_fields(self):
- return (self.time_low, self.time_mid, self.time_hi_version,
- self.clock_seq_hi_variant, self.clock_seq_low, self.node)
-
- fields = property(get_fields)
-
- def get_time_low(self):
- return self.int >> 96L
-
- time_low = property(get_time_low)
-
- def get_time_mid(self):
- return (self.int >> 80L) & 0xffff
-
- time_mid = property(get_time_mid)
-
- def get_time_hi_version(self):
- return (self.int >> 64L) & 0xffff
-
- time_hi_version = property(get_time_hi_version)
-
- def get_clock_seq_hi_variant(self):
- return (self.int >> 56L) & 0xff
-
- clock_seq_hi_variant = property(get_clock_seq_hi_variant)
-
- def get_clock_seq_low(self):
- return (self.int >> 48L) & 0xff
-
- clock_seq_low = property(get_clock_seq_low)
-
- def get_time(self):
- return (((self.time_hi_version & 0x0fffL) << 48L) |
- (self.time_mid << 32L) | self.time_low)
-
- time = property(get_time)
-
- def get_clock_seq(self):
- return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
- self.clock_seq_low)
-
- clock_seq = property(get_clock_seq)
-
- def get_node(self):
- return self.int & 0xffffffffffff
-
- node = property(get_node)
-
- def get_hex(self):
- return '%032x' % self.int
-
- hex = property(get_hex)
-
- def get_urn(self):
- return 'urn:uuid:' + str(self)
-
- urn = property(get_urn)
-
- def get_variant(self):
- if not self.int & (0x8000 << 48L):
- return RESERVED_NCS
- elif not self.int & (0x4000 << 48L):
- return RFC_4122
- elif not self.int & (0x2000 << 48L):
- return RESERVED_MICROSOFT
- else:
- return RESERVED_FUTURE
-
- variant = property(get_variant)
-
- def get_version(self):
- # The version bits are only meaningful for RFC 4122 UUIDs.
- if self.variant == RFC_4122:
- return int((self.int >> 76L) & 0xf)
-
- version = property(get_version)
-
-def _find_mac(command, args, hw_identifiers, get_index):
- import os
- for dir in ['', '/sbin/', '/usr/sbin']:
- executable = os.path.join(dir, command)
- if not os.path.exists(executable):
- continue
-
- try:
- # LC_ALL to get English output, 2>/dev/null to
- # prevent output on stderr
- cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
- pipe = os.popen(cmd)
- except IOError:
- continue
-
- for line in pipe:
- words = line.lower().split()
- for i in range(len(words)):
- if words[i] in hw_identifiers:
- return int(words[get_index(i)].replace(':', ''), 16)
- return None
-
-def _ifconfig_getnode():
- """Get the hardware address on Unix by running ifconfig."""
-
- # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
- for args in ('', '-a', '-av'):
- mac = _find_mac('ifconfig', args, ['hwaddr', 'ether'], lambda i: i+1)
- if mac:
- return mac
-
- import socket
- ip_addr = socket.gethostbyname(socket.gethostname())
-
- # Try getting the MAC addr from arp based on our IP address (Solaris).
- mac = _find_mac('arp', '-an', [ip_addr], lambda i: -1)
- if mac:
- return mac
-
- # This might work on HP-UX.
- mac = _find_mac('lanscan', '-ai', ['lan0'], lambda i: 0)
- if mac:
- return mac
-
- return None
-
-def _ipconfig_getnode():
- """Get the hardware address on Windows by running ipconfig.exe."""
- import os, re
- dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
- try:
- import ctypes
- buffer = ctypes.create_string_buffer(300)
- ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
- dirs.insert(0, buffer.value.decode('mbcs'))
- except:
- pass
- for dir in dirs:
- try:
- pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
- except IOError:
- continue
- for line in pipe:
- value = line.split(':')[-1].strip().lower()
- if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
- return int(value.replace('-', ''), 16)
-
-def _netbios_getnode():
- """Get the hardware address on Windows using NetBIOS calls.
- See http://support.microsoft.com/kb/118623 for details."""
- import win32wnet, netbios
- ncb = netbios.NCB()
- ncb.Command = netbios.NCBENUM
- ncb.Buffer = adapters = netbios.LANA_ENUM()
- adapters._pack()
- if win32wnet.Netbios(ncb) != 0:
- return
- adapters._unpack()
- for i in range(adapters.length):
- ncb.Reset()
- ncb.Command = netbios.NCBRESET
- ncb.Lana_num = ord(adapters.lana[i])
- if win32wnet.Netbios(ncb) != 0:
- continue
- ncb.Reset()
- ncb.Command = netbios.NCBASTAT
- ncb.Lana_num = ord(adapters.lana[i])
- ncb.Callname = '*'.ljust(16)
- ncb.Buffer = status = netbios.ADAPTER_STATUS()
- if win32wnet.Netbios(ncb) != 0:
- continue
- status._unpack()
- bytes = map(ord, status.adapter_address)
- return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
- (bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
-
-# Thanks to Thomas Heller for ctypes and for his help with its use here.
-
-# If ctypes is available, use it to find system routines for UUID generation.
-_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
-try:
- import ctypes, ctypes.util
- _buffer = ctypes.create_string_buffer(16)
-
- # The uuid_generate_* routines are provided by libuuid on at least
- # Linux and FreeBSD, and provided by libc on Mac OS X.
- for libname in ['uuid', 'c']:
- try:
- lib = ctypes.CDLL(ctypes.util.find_library(libname))
- except:
- continue
- if hasattr(lib, 'uuid_generate_random'):
- _uuid_generate_random = lib.uuid_generate_random
- if hasattr(lib, 'uuid_generate_time'):
- _uuid_generate_time = lib.uuid_generate_time
-
- # On Windows prior to 2000, UuidCreate gives a UUID containing the
- # hardware address. On Windows 2000 and later, UuidCreate makes a
- # random UUID and UuidCreateSequential gives a UUID containing the
- # hardware address. These routines are provided by the RPC runtime.
- # NOTE: at least on Tim's WinXP Pro SP2 desktop box, while the last
- # 6 bytes returned by UuidCreateSequential are fixed, they don't appear
- # to bear any relationship to the MAC address of any network device
- # on the box.
- try:
- lib = ctypes.windll.rpcrt4
- except:
- lib = None
- _UuidCreate = getattr(lib, 'UuidCreateSequential',
- getattr(lib, 'UuidCreate', None))
-except:
- pass
-
-def _unixdll_getnode():
- """Get the hardware address on Unix using ctypes."""
- _uuid_generate_time(_buffer)
- return UUID(bytes=_buffer.raw).node
-
-def _windll_getnode():
- """Get the hardware address on Windows using ctypes."""
- if _UuidCreate(_buffer) == 0:
- return UUID(bytes=_buffer.raw).node
-
-def _random_getnode():
- """Get a random node ID, with eighth bit set as suggested by RFC 4122."""
- import random
- return random.randrange(0, 1<<48L) | 0x010000000000L
-
-_node = None
-
-def getnode():
- """Get the hardware address as a 48-bit positive integer.
-
- The first time this runs, it may launch a separate program, which could
- be quite slow. If all attempts to obtain the hardware address fail, we
- choose a random 48-bit number with its eighth bit set to 1 as recommended
- in RFC 4122.
- """
-
- global _node
- if _node is not None:
- return _node
-
- import sys
- if sys.platform == 'win32':
- getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
- else:
- getters = [_unixdll_getnode, _ifconfig_getnode]
-
- for getter in getters + [_random_getnode]:
- try:
- _node = getter()
- except:
- continue
- if _node is not None:
- return _node
-
-_last_timestamp = None
-
-def uuid1(node=None, clock_seq=None):
- """Generate a UUID from a host ID, sequence number, and the current time.
- If 'node' is not given, getnode() is used to obtain the hardware
- address. If 'clock_seq' is given, it is used as the sequence number;
- otherwise a random 14-bit sequence number is chosen."""
-
- # When the system provides a version-1 UUID generator, use it (but don't
- # use UuidCreate here because its UUIDs don't conform to RFC 4122).
- if _uuid_generate_time and node is clock_seq is None:
- _uuid_generate_time(_buffer)
- return UUID(bytes=_buffer.raw)
-
- global _last_timestamp
- import time
- nanoseconds = int(time.time() * 1e9)
- # 0x01b21dd213814000 is the number of 100-ns intervals between the
- # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
- timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
- if timestamp <= _last_timestamp:
- timestamp = _last_timestamp + 1
- _last_timestamp = timestamp
- if clock_seq is None:
- import random
- clock_seq = random.randrange(1<<14L) # instead of stable storage
- time_low = timestamp & 0xffffffffL
- time_mid = (timestamp >> 32L) & 0xffffL
- time_hi_version = (timestamp >> 48L) & 0x0fffL
- clock_seq_low = clock_seq & 0xffL
- clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
- if node is None:
- node = getnode()
- return UUID(fields=(time_low, time_mid, time_hi_version,
- clock_seq_hi_variant, clock_seq_low, node), version=1)
-
-def uuid3(namespace, name):
- """Generate a UUID from the MD5 hash of a namespace UUID and a name."""
- import md5
- hash = md5.md5(namespace.bytes + name).digest()
- return UUID(bytes=hash[:16], version=3)
-
-def uuid4():
- """Generate a random UUID."""
-
- # When the system provides a version-4 UUID generator, use it.
- if _uuid_generate_random:
- _uuid_generate_random(_buffer)
- return UUID(bytes=_buffer.raw)
-
- # Otherwise, get randomness from urandom or the 'random' module.
- try:
- import os
- return UUID(bytes=os.urandom(16), version=4)
- except:
- import random
- bytes = [chr(random.randrange(256)) for i in range(16)]
- return UUID(bytes=bytes, version=4)
-
-def uuid5(namespace, name):
- """Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
- import sha
- hash = sha.sha(namespace.bytes + name).digest()
- return UUID(bytes=hash[:16], version=5)
-
-# The following standard UUIDs are for use with uuid3() or uuid5().
-
-NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
-NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
-NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
-NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
diff --git a/sys/lib/python/warnings.py b/sys/lib/python/warnings.py
deleted file mode 100644
index b7fac6997..000000000
--- a/sys/lib/python/warnings.py
+++ /dev/null
@@ -1,264 +0,0 @@
-"""Python part of the warnings subsystem."""
-
-# Note: function level imports should *not* be used
-# in this module as it may cause import lock deadlock.
-# See bug 683658.
-import sys, types
-import linecache
-
-__all__ = ["warn", "showwarning", "formatwarning", "filterwarnings",
- "resetwarnings"]
-
-# filters contains a sequence of filter 5-tuples
-# The components of the 5-tuple are:
-# - an action: error, ignore, always, default, module, or once
-# - a compiled regex that must match the warning message
-# - a class representing the warning category
-# - a compiled regex that must match the module that is being warned
-# - a line number for the line being warning, or 0 to mean any line
-# If either if the compiled regexs are None, match anything.
-filters = []
-defaultaction = "default"
-onceregistry = {}
-
-def warn(message, category=None, stacklevel=1):
- """Issue a warning, or maybe ignore it or raise an exception."""
- # Check if message is already a Warning object
- if isinstance(message, Warning):
- category = message.__class__
- # Check category argument
- if category is None:
- category = UserWarning
- assert issubclass(category, Warning)
- # Get context information
- try:
- caller = sys._getframe(stacklevel)
- except ValueError:
- globals = sys.__dict__
- lineno = 1
- else:
- globals = caller.f_globals
- lineno = caller.f_lineno
- if '__name__' in globals:
- module = globals['__name__']
- else:
- module = "<string>"
- filename = globals.get('__file__')
- if filename:
- fnl = filename.lower()
- if fnl.endswith((".pyc", ".pyo")):
- filename = filename[:-1]
- else:
- if module == "__main__":
- try:
- filename = sys.argv[0]
- except AttributeError:
- # embedded interpreters don't have sys.argv, see bug #839151
- filename = '__main__'
- if not filename:
- filename = module
- registry = globals.setdefault("__warningregistry__", {})
- warn_explicit(message, category, filename, lineno, module, registry,
- globals)
-
-def warn_explicit(message, category, filename, lineno,
- module=None, registry=None, module_globals=None):
- if module is None:
- module = filename or "<unknown>"
- if module[-3:].lower() == ".py":
- module = module[:-3] # XXX What about leading pathname?
- if registry is None:
- registry = {}
- if isinstance(message, Warning):
- text = str(message)
- category = message.__class__
- else:
- text = message
- message = category(message)
- key = (text, category, lineno)
- # Quick test for common case
- if registry.get(key):
- return
- # Search the filters
- for item in filters:
- action, msg, cat, mod, ln = item
- if ((msg is None or msg.match(text)) and
- issubclass(category, cat) and
- (mod is None or mod.match(module)) and
- (ln == 0 or lineno == ln)):
- break
- else:
- action = defaultaction
- # Early exit actions
- if action == "ignore":
- registry[key] = 1
- return
-
- # Prime the linecache for formatting, in case the
- # "file" is actually in a zipfile or something.
- linecache.getlines(filename, module_globals)
-
- if action == "error":
- raise message
- # Other actions
- if action == "once":
- registry[key] = 1
- oncekey = (text, category)
- if onceregistry.get(oncekey):
- return
- onceregistry[oncekey] = 1
- elif action == "always":
- pass
- elif action == "module":
- registry[key] = 1
- altkey = (text, category, 0)
- if registry.get(altkey):
- return
- registry[altkey] = 1
- elif action == "default":
- registry[key] = 1
- else:
- # Unrecognized actions are errors
- raise RuntimeError(
- "Unrecognized action (%r) in warnings.filters:\n %s" %
- (action, item))
- # Print message and context
- showwarning(message, category, filename, lineno)
-
-def showwarning(message, category, filename, lineno, file=None):
- """Hook to write a warning to a file; replace if you like."""
- if file is None:
- file = sys.stderr
- try:
- file.write(formatwarning(message, category, filename, lineno))
- except IOError:
- pass # the file (probably stderr) is invalid - this warning gets lost.
-
-def formatwarning(message, category, filename, lineno):
- """Function to format a warning the standard way."""
- s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
- line = linecache.getline(filename, lineno).strip()
- if line:
- s = s + " " + line + "\n"
- return s
-
-def filterwarnings(action, message="", category=Warning, module="", lineno=0,
- append=0):
- """Insert an entry into the list of warnings filters (at the front).
-
- Use assertions to check that all arguments have the right type."""
- import re
- assert action in ("error", "ignore", "always", "default", "module",
- "once"), "invalid action: %r" % (action,)
- assert isinstance(message, basestring), "message must be a string"
- assert isinstance(category, (type, types.ClassType)), \
- "category must be a class"
- assert issubclass(category, Warning), "category must be a Warning subclass"
- assert isinstance(module, basestring), "module must be a string"
- assert isinstance(lineno, int) and lineno >= 0, \
- "lineno must be an int >= 0"
- item = (action, re.compile(message, re.I), category,
- re.compile(module), lineno)
- if append:
- filters.append(item)
- else:
- filters.insert(0, item)
-
-def simplefilter(action, category=Warning, lineno=0, append=0):
- """Insert a simple entry into the list of warnings filters (at the front).
-
- A simple filter matches all modules and messages.
- """
- assert action in ("error", "ignore", "always", "default", "module",
- "once"), "invalid action: %r" % (action,)
- assert isinstance(lineno, int) and lineno >= 0, \
- "lineno must be an int >= 0"
- item = (action, None, category, None, lineno)
- if append:
- filters.append(item)
- else:
- filters.insert(0, item)
-
-def resetwarnings():
- """Clear the list of warning filters, so that no filters are active."""
- filters[:] = []
-
-class _OptionError(Exception):
- """Exception used by option processing helpers."""
- pass
-
-# Helper to process -W options passed via sys.warnoptions
-def _processoptions(args):
- for arg in args:
- try:
- _setoption(arg)
- except _OptionError, msg:
- print >>sys.stderr, "Invalid -W option ignored:", msg
-
-# Helper for _processoptions()
-def _setoption(arg):
- import re
- parts = arg.split(':')
- if len(parts) > 5:
- raise _OptionError("too many fields (max 5): %r" % (arg,))
- while len(parts) < 5:
- parts.append('')
- action, message, category, module, lineno = [s.strip()
- for s in parts]
- action = _getaction(action)
- message = re.escape(message)
- category = _getcategory(category)
- module = re.escape(module)
- if module:
- module = module + '$'
- if lineno:
- try:
- lineno = int(lineno)
- if lineno < 0:
- raise ValueError
- except (ValueError, OverflowError):
- raise _OptionError("invalid lineno %r" % (lineno,))
- else:
- lineno = 0
- filterwarnings(action, message, category, module, lineno)
-
-# Helper for _setoption()
-def _getaction(action):
- if not action:
- return "default"
- if action == "all": return "always" # Alias
- for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
- if a.startswith(action):
- return a
- raise _OptionError("invalid action: %r" % (action,))
-
-# Helper for _setoption()
-def _getcategory(category):
- import re
- if not category:
- return Warning
- if re.match("^[a-zA-Z0-9_]+$", category):
- try:
- cat = eval(category)
- except NameError:
- raise _OptionError("unknown warning category: %r" % (category,))
- else:
- i = category.rfind(".")
- module = category[:i]
- klass = category[i+1:]
- try:
- m = __import__(module, None, None, [klass])
- except ImportError:
- raise _OptionError("invalid module name: %r" % (module,))
- try:
- cat = getattr(m, klass)
- except AttributeError:
- raise _OptionError("unknown warning category: %r" % (category,))
- if not issubclass(cat, Warning):
- raise _OptionError("invalid warning category: %r" % (category,))
- return cat
-
-# Module initialization
-_processoptions(sys.warnoptions)
-simplefilter("ignore", category=PendingDeprecationWarning, append=1)
-simplefilter("ignore", category=ImportWarning, append=1)
diff --git a/sys/lib/python/wave.py b/sys/lib/python/wave.py
deleted file mode 100644
index b993b400c..000000000
--- a/sys/lib/python/wave.py
+++ /dev/null
@@ -1,499 +0,0 @@
-"""Stuff to parse WAVE files.
-
-Usage.
-
-Reading WAVE files:
- f = wave.open(file, 'r')
-where file is either the name of a file or an open file pointer.
-The open file pointer must have methods read(), seek(), and close().
-When the setpos() and rewind() methods are not used, the seek()
-method is not necessary.
-
-This returns an instance of a class with the following public methods:
- getnchannels() -- returns number of audio channels (1 for
- mono, 2 for stereo)
- getsampwidth() -- returns sample width in bytes
- getframerate() -- returns sampling frequency
- getnframes() -- returns number of audio frames
- getcomptype() -- returns compression type ('NONE' for linear samples)
- getcompname() -- returns human-readable version of
- compression type ('not compressed' linear samples)
- getparams() -- returns a tuple consisting of all of the
- above in the above order
- getmarkers() -- returns None (for compatibility with the
- aifc module)
- getmark(id) -- raises an error since the mark does not
- exist (for compatibility with the aifc module)
- readframes(n) -- returns at most n frames of audio
- rewind() -- rewind to the beginning of the audio stream
- setpos(pos) -- seek to the specified position
- tell() -- return the current position
- close() -- close the instance (make it unusable)
-The position returned by tell() and the position given to setpos()
-are compatible and have nothing to do with the actual position in the
-file.
-The close() method is called automatically when the class instance
-is destroyed.
-
-Writing WAVE files:
- f = wave.open(file, 'w')
-where file is either the name of a file or an open file pointer.
-The open file pointer must have methods write(), tell(), seek(), and
-close().
-
-This returns an instance of a class with the following public methods:
- setnchannels(n) -- set the number of channels
- setsampwidth(n) -- set the sample width
- setframerate(n) -- set the frame rate
- setnframes(n) -- set the number of frames
- setcomptype(type, name)
- -- set the compression type and the
- human-readable compression type
- setparams(tuple)
- -- set all parameters at once
- tell() -- return current position in output file
- writeframesraw(data)
- -- write audio frames without pathing up the
- file header
- writeframes(data)
- -- write audio frames and patch up the file header
- close() -- patch up the file header and close the
- output file
-You should set the parameters before the first writeframesraw or
-writeframes. The total number of frames does not need to be set,
-but when it is set to the correct value, the header does not have to
-be patched up.
-It is best to first set all parameters, perhaps possibly the
-compression type, and then write audio frames using writeframesraw.
-When all frames have been written, either call writeframes('') or
-close() to patch up the sizes in the header.
-The close() method is called automatically when the class instance
-is destroyed.
-"""
-
-import __builtin__
-
-__all__ = ["open", "openfp", "Error"]
-
-class Error(Exception):
- pass
-
-WAVE_FORMAT_PCM = 0x0001
-
-_array_fmts = None, 'b', 'h', None, 'l'
-
-# Determine endian-ness
-import struct
-if struct.pack("h", 1) == "\000\001":
- big_endian = 1
-else:
- big_endian = 0
-
-from chunk import Chunk
-
-class Wave_read:
- """Variables used in this class:
-
- These variables are available to the user though appropriate
- methods of this class:
- _file -- the open file with methods read(), close(), and seek()
- set through the __init__() method
- _nchannels -- the number of audio channels
- available through the getnchannels() method
- _nframes -- the number of audio frames
- available through the getnframes() method
- _sampwidth -- the number of bytes per audio sample
- available through the getsampwidth() method
- _framerate -- the sampling frequency
- available through the getframerate() method
- _comptype -- the AIFF-C compression type ('NONE' if AIFF)
- available through the getcomptype() method
- _compname -- the human-readable AIFF-C compression type
- available through the getcomptype() method
- _soundpos -- the position in the audio stream
- available through the tell() method, set through the
- setpos() method
-
- These variables are used internally only:
- _fmt_chunk_read -- 1 iff the FMT chunk has been read
- _data_seek_needed -- 1 iff positioned correctly in audio
- file for readframes()
- _data_chunk -- instantiation of a chunk class for the DATA chunk
- _framesize -- size of one frame in the file
- """
-
- def initfp(self, file):
- self._convert = None
- self._soundpos = 0
- self._file = Chunk(file, bigendian = 0)
- if self._file.getname() != 'RIFF':
- raise Error, 'file does not start with RIFF id'
- if self._file.read(4) != 'WAVE':
- raise Error, 'not a WAVE file'
- self._fmt_chunk_read = 0
- self._data_chunk = None
- while 1:
- self._data_seek_needed = 1
- try:
- chunk = Chunk(self._file, bigendian = 0)
- except EOFError:
- break
- chunkname = chunk.getname()
- if chunkname == 'fmt ':
- self._read_fmt_chunk(chunk)
- self._fmt_chunk_read = 1
- elif chunkname == 'data':
- if not self._fmt_chunk_read:
- raise Error, 'data chunk before fmt chunk'
- self._data_chunk = chunk
- self._nframes = chunk.chunksize // self._framesize
- self._data_seek_needed = 0
- break
- chunk.skip()
- if not self._fmt_chunk_read or not self._data_chunk:
- raise Error, 'fmt chunk and/or data chunk missing'
-
- def __init__(self, f):
- self._i_opened_the_file = None
- if isinstance(f, basestring):
- f = __builtin__.open(f, 'rb')
- self._i_opened_the_file = f
- # else, assume it is an open file object already
- try:
- self.initfp(f)
- except:
- if self._i_opened_the_file:
- f.close()
- raise
-
- def __del__(self):
- self.close()
- #
- # User visible methods.
- #
- def getfp(self):
- return self._file
-
- def rewind(self):
- self._data_seek_needed = 1
- self._soundpos = 0
-
- def close(self):
- if self._i_opened_the_file:
- self._i_opened_the_file.close()
- self._i_opened_the_file = None
- self._file = None
-
- def tell(self):
- return self._soundpos
-
- def getnchannels(self):
- return self._nchannels
-
- def getnframes(self):
- return self._nframes
-
- def getsampwidth(self):
- return self._sampwidth
-
- def getframerate(self):
- return self._framerate
-
- def getcomptype(self):
- return self._comptype
-
- def getcompname(self):
- return self._compname
-
- def getparams(self):
- return self.getnchannels(), self.getsampwidth(), \
- self.getframerate(), self.getnframes(), \
- self.getcomptype(), self.getcompname()
-
- def getmarkers(self):
- return None
-
- def getmark(self, id):
- raise Error, 'no marks'
-
- def setpos(self, pos):
- if pos < 0 or pos > self._nframes:
- raise Error, 'position not in range'
- self._soundpos = pos
- self._data_seek_needed = 1
-
- def readframes(self, nframes):
- if self._data_seek_needed:
- self._data_chunk.seek(0, 0)
- pos = self._soundpos * self._framesize
- if pos:
- self._data_chunk.seek(pos, 0)
- self._data_seek_needed = 0
- if nframes == 0:
- return ''
- if self._sampwidth > 1 and big_endian:
- # unfortunately the fromfile() method does not take
- # something that only looks like a file object, so
- # we have to reach into the innards of the chunk object
- import array
- chunk = self._data_chunk
- data = array.array(_array_fmts[self._sampwidth])
- nitems = nframes * self._nchannels
- if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
- nitems = (chunk.chunksize - chunk.size_read) / self._sampwidth
- data.fromfile(chunk.file.file, nitems)
- # "tell" data chunk how much was read
- chunk.size_read = chunk.size_read + nitems * self._sampwidth
- # do the same for the outermost chunk
- chunk = chunk.file
- chunk.size_read = chunk.size_read + nitems * self._sampwidth
- data.byteswap()
- data = data.tostring()
- else:
- data = self._data_chunk.read(nframes * self._framesize)
- if self._convert and data:
- data = self._convert(data)
- self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
- return data
-
- #
- # Internal methods.
- #
-
- def _read_fmt_chunk(self, chunk):
- wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<hhllh', chunk.read(14))
- if wFormatTag == WAVE_FORMAT_PCM:
- sampwidth = struct.unpack('<h', chunk.read(2))[0]
- self._sampwidth = (sampwidth + 7) // 8
- else:
- raise Error, 'unknown format: %r' % (wFormatTag,)
- self._framesize = self._nchannels * self._sampwidth
- self._comptype = 'NONE'
- self._compname = 'not compressed'
-
-class Wave_write:
- """Variables used in this class:
-
- These variables are user settable through appropriate methods
- of this class:
- _file -- the open file with methods write(), close(), tell(), seek()
- set through the __init__() method
- _comptype -- the AIFF-C compression type ('NONE' in AIFF)
- set through the setcomptype() or setparams() method
- _compname -- the human-readable AIFF-C compression type
- set through the setcomptype() or setparams() method
- _nchannels -- the number of audio channels
- set through the setnchannels() or setparams() method
- _sampwidth -- the number of bytes per audio sample
- set through the setsampwidth() or setparams() method
- _framerate -- the sampling frequency
- set through the setframerate() or setparams() method
- _nframes -- the number of audio frames written to the header
- set through the setnframes() or setparams() method
-
- These variables are used internally only:
- _datalength -- the size of the audio samples written to the header
- _nframeswritten -- the number of frames actually written
- _datawritten -- the size of the audio samples actually written
- """
-
- def __init__(self, f):
- self._i_opened_the_file = None
- if isinstance(f, basestring):
- f = __builtin__.open(f, 'wb')
- self._i_opened_the_file = f
- try:
- self.initfp(f)
- except:
- if self._i_opened_the_file:
- f.close()
- raise
-
- def initfp(self, file):
- self._file = file
- self._convert = None
- self._nchannels = 0
- self._sampwidth = 0
- self._framerate = 0
- self._nframes = 0
- self._nframeswritten = 0
- self._datawritten = 0
- self._datalength = 0
-
- def __del__(self):
- self.close()
-
- #
- # User visible methods.
- #
- def setnchannels(self, nchannels):
- if self._datawritten:
- raise Error, 'cannot change parameters after starting to write'
- if nchannels < 1:
- raise Error, 'bad # of channels'
- self._nchannels = nchannels
-
- def getnchannels(self):
- if not self._nchannels:
- raise Error, 'number of channels not set'
- return self._nchannels
-
- def setsampwidth(self, sampwidth):
- if self._datawritten:
- raise Error, 'cannot change parameters after starting to write'
- if sampwidth < 1 or sampwidth > 4:
- raise Error, 'bad sample width'
- self._sampwidth = sampwidth
-
- def getsampwidth(self):
- if not self._sampwidth:
- raise Error, 'sample width not set'
- return self._sampwidth
-
- def setframerate(self, framerate):
- if self._datawritten:
- raise Error, 'cannot change parameters after starting to write'
- if framerate <= 0:
- raise Error, 'bad frame rate'
- self._framerate = framerate
-
- def getframerate(self):
- if not self._framerate:
- raise Error, 'frame rate not set'
- return self._framerate
-
- def setnframes(self, nframes):
- if self._datawritten:
- raise Error, 'cannot change parameters after starting to write'
- self._nframes = nframes
-
- def getnframes(self):
- return self._nframeswritten
-
- def setcomptype(self, comptype, compname):
- if self._datawritten:
- raise Error, 'cannot change parameters after starting to write'
- if comptype not in ('NONE',):
- raise Error, 'unsupported compression type'
- self._comptype = comptype
- self._compname = compname
-
- def getcomptype(self):
- return self._comptype
-
- def getcompname(self):
- return self._compname
-
- def setparams(self, (nchannels, sampwidth, framerate, nframes, comptype, compname)):
- if self._datawritten:
- raise Error, 'cannot change parameters after starting to write'
- self.setnchannels(nchannels)
- self.setsampwidth(sampwidth)
- self.setframerate(framerate)
- self.setnframes(nframes)
- self.setcomptype(comptype, compname)
-
- def getparams(self):
- if not self._nchannels or not self._sampwidth or not self._framerate:
- raise Error, 'not all parameters set'
- return self._nchannels, self._sampwidth, self._framerate, \
- self._nframes, self._comptype, self._compname
-
- def setmark(self, id, pos, name):
- raise Error, 'setmark() not supported'
-
- def getmark(self, id):
- raise Error, 'no marks'
-
- def getmarkers(self):
- return None
-
- def tell(self):
- return self._nframeswritten
-
- def writeframesraw(self, data):
- self._ensure_header_written(len(data))
- nframes = len(data) // (self._sampwidth * self._nchannels)
- if self._convert:
- data = self._convert(data)
- if self._sampwidth > 1 and big_endian:
- import array
- data = array.array(_array_fmts[self._sampwidth], data)
- data.byteswap()
- data.tofile(self._file)
- self._datawritten = self._datawritten + len(data) * self._sampwidth
- else:
- self._file.write(data)
- self._datawritten = self._datawritten + len(data)
- self._nframeswritten = self._nframeswritten + nframes
-
- def writeframes(self, data):
- self.writeframesraw(data)
- if self._datalength != self._datawritten:
- self._patchheader()
-
- def close(self):
- if self._file:
- self._ensure_header_written(0)
- if self._datalength != self._datawritten:
- self._patchheader()
- self._file.flush()
- self._file = None
- if self._i_opened_the_file:
- self._i_opened_the_file.close()
- self._i_opened_the_file = None
-
- #
- # Internal methods.
- #
-
- def _ensure_header_written(self, datasize):
- if not self._datawritten:
- if not self._nchannels:
- raise Error, '# channels not specified'
- if not self._sampwidth:
- raise Error, 'sample width not specified'
- if not self._framerate:
- raise Error, 'sampling rate not specified'
- self._write_header(datasize)
-
- def _write_header(self, initlength):
- self._file.write('RIFF')
- if not self._nframes:
- self._nframes = initlength / (self._nchannels * self._sampwidth)
- self._datalength = self._nframes * self._nchannels * self._sampwidth
- self._form_length_pos = self._file.tell()
- self._file.write(struct.pack('<l4s4slhhllhh4s',
- 36 + self._datalength, 'WAVE', 'fmt ', 16,
- WAVE_FORMAT_PCM, self._nchannels, self._framerate,
- self._nchannels * self._framerate * self._sampwidth,
- self._nchannels * self._sampwidth,
- self._sampwidth * 8, 'data'))
- self._data_length_pos = self._file.tell()
- self._file.write(struct.pack('<l', self._datalength))
-
- def _patchheader(self):
- if self._datawritten == self._datalength:
- return
- curpos = self._file.tell()
- self._file.seek(self._form_length_pos, 0)
- self._file.write(struct.pack('<l', 36 + self._datawritten))
- self._file.seek(self._data_length_pos, 0)
- self._file.write(struct.pack('<l', self._datawritten))
- self._file.seek(curpos, 0)
- self._datalength = self._datawritten
-
-def open(f, mode=None):
- if mode is None:
- if hasattr(f, 'mode'):
- mode = f.mode
- else:
- mode = 'rb'
- if mode in ('r', 'rb'):
- return Wave_read(f)
- elif mode in ('w', 'wb'):
- return Wave_write(f)
- else:
- raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
-
-openfp = open # B/W compatibility
diff --git a/sys/lib/python/weakref.py b/sys/lib/python/weakref.py
deleted file mode 100644
index 4f6d757fe..000000000
--- a/sys/lib/python/weakref.py
+++ /dev/null
@@ -1,355 +0,0 @@
-"""Weak reference support for Python.
-
-This module is an implementation of PEP 205:
-
-http://python.sourceforge.net/peps/pep-0205.html
-"""
-
-# Naming convention: Variables named "wr" are weak reference objects;
-# they are called this instead of "ref" to avoid name collisions with
-# the module-global ref() function imported from _weakref.
-
-import UserDict
-
-from _weakref import (
- getweakrefcount,
- getweakrefs,
- ref,
- proxy,
- CallableProxyType,
- ProxyType,
- ReferenceType)
-
-from exceptions import ReferenceError
-
-
-ProxyTypes = (ProxyType, CallableProxyType)
-
-__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
- "WeakKeyDictionary", "ReferenceType", "ProxyType",
- "CallableProxyType", "ProxyTypes", "WeakValueDictionary"]
-
-
-class WeakValueDictionary(UserDict.UserDict):
- """Mapping class that references values weakly.
-
- Entries in the dictionary will be discarded when no strong
- reference to the value exists anymore
- """
- # We inherit the constructor without worrying about the input
- # dictionary; since it uses our .update() method, we get the right
- # checks (if the other dictionary is a WeakValueDictionary,
- # objects are unwrapped on the way out, and we always wrap on the
- # way in).
-
- def __init__(self, *args, **kw):
- def remove(wr, selfref=ref(self)):
- self = selfref()
- if self is not None:
- del self.data[wr.key]
- self._remove = remove
- UserDict.UserDict.__init__(self, *args, **kw)
-
- def __getitem__(self, key):
- o = self.data[key]()
- if o is None:
- raise KeyError, key
- else:
- return o
-
- def __contains__(self, key):
- try:
- o = self.data[key]()
- except KeyError:
- return False
- return o is not None
-
- def has_key(self, key):
- try:
- o = self.data[key]()
- except KeyError:
- return False
- return o is not None
-
- def __repr__(self):
- return "<WeakValueDictionary at %s>" % id(self)
-
- def __setitem__(self, key, value):
- self.data[key] = KeyedRef(value, self._remove, key)
-
- def copy(self):
- new = WeakValueDictionary()
- for key, wr in self.data.items():
- o = wr()
- if o is not None:
- new[key] = o
- return new
-
- def get(self, key, default=None):
- try:
- wr = self.data[key]
- except KeyError:
- return default
- else:
- o = wr()
- if o is None:
- # This should only happen
- return default
- else:
- return o
-
- def items(self):
- L = []
- for key, wr in self.data.items():
- o = wr()
- if o is not None:
- L.append((key, o))
- return L
-
- def iteritems(self):
- for wr in self.data.itervalues():
- value = wr()
- if value is not None:
- yield wr.key, value
-
- def iterkeys(self):
- return self.data.iterkeys()
-
- def __iter__(self):
- return self.data.iterkeys()
-
- def itervaluerefs(self):
- """Return an iterator that yields the weak references to the values.
-
- The references are not guaranteed to be 'live' at the time
- they are used, so the result of calling the references needs
- to be checked before being used. This can be used to avoid
- creating references that will cause the garbage collector to
- keep the values around longer than needed.
-
- """
- return self.data.itervalues()
-
- def itervalues(self):
- for wr in self.data.itervalues():
- obj = wr()
- if obj is not None:
- yield obj
-
- def popitem(self):
- while 1:
- key, wr = self.data.popitem()
- o = wr()
- if o is not None:
- return key, o
-
- def pop(self, key, *args):
- try:
- o = self.data.pop(key)()
- except KeyError:
- if args:
- return args[0]
- raise
- if o is None:
- raise KeyError, key
- else:
- return o
-
- def setdefault(self, key, default=None):
- try:
- wr = self.data[key]
- except KeyError:
- self.data[key] = KeyedRef(default, self._remove, key)
- return default
- else:
- return wr()
-
- def update(self, dict=None, **kwargs):
- d = self.data
- if dict is not None:
- if not hasattr(dict, "items"):
- dict = type({})(dict)
- for key, o in dict.items():
- d[key] = KeyedRef(o, self._remove, key)
- if len(kwargs):
- self.update(kwargs)
-
- def valuerefs(self):
- """Return a list of weak references to the values.
-
- The references are not guaranteed to be 'live' at the time
- they are used, so the result of calling the references needs
- to be checked before being used. This can be used to avoid
- creating references that will cause the garbage collector to
- keep the values around longer than needed.
-
- """
- return self.data.values()
-
- def values(self):
- L = []
- for wr in self.data.values():
- o = wr()
- if o is not None:
- L.append(o)
- return L
-
-
-class KeyedRef(ref):
- """Specialized reference that includes a key corresponding to the value.
-
- This is used in the WeakValueDictionary to avoid having to create
- a function object for each key stored in the mapping. A shared
- callback object can use the 'key' attribute of a KeyedRef instead
- of getting a reference to the key from an enclosing scope.
-
- """
-
- __slots__ = "key",
-
- def __new__(type, ob, callback, key):
- self = ref.__new__(type, ob, callback)
- self.key = key
- return self
-
- def __init__(self, ob, callback, key):
- super(KeyedRef, self).__init__(ob, callback)
-
-
-class WeakKeyDictionary(UserDict.UserDict):
- """ Mapping class that references keys weakly.
-
- Entries in the dictionary will be discarded when there is no
- longer a strong reference to the key. This can be used to
- associate additional data with an object owned by other parts of
- an application without adding attributes to those objects. This
- can be especially useful with objects that override attribute
- accesses.
- """
-
- def __init__(self, dict=None):
- self.data = {}
- def remove(k, selfref=ref(self)):
- self = selfref()
- if self is not None:
- del self.data[k]
- self._remove = remove
- if dict is not None: self.update(dict)
-
- def __delitem__(self, key):
- del self.data[ref(key)]
-
- def __getitem__(self, key):
- return self.data[ref(key)]
-
- def __repr__(self):
- return "<WeakKeyDictionary at %s>" % id(self)
-
- def __setitem__(self, key, value):
- self.data[ref(key, self._remove)] = value
-
- def copy(self):
- new = WeakKeyDictionary()
- for key, value in self.data.items():
- o = key()
- if o is not None:
- new[o] = value
- return new
-
- def get(self, key, default=None):
- return self.data.get(ref(key),default)
-
- def has_key(self, key):
- try:
- wr = ref(key)
- except TypeError:
- return 0
- return wr in self.data
-
- def __contains__(self, key):
- try:
- wr = ref(key)
- except TypeError:
- return 0
- return wr in self.data
-
- def items(self):
- L = []
- for key, value in self.data.items():
- o = key()
- if o is not None:
- L.append((o, value))
- return L
-
- def iteritems(self):
- for wr, value in self.data.iteritems():
- key = wr()
- if key is not None:
- yield key, value
-
- def iterkeyrefs(self):
- """Return an iterator that yields the weak references to the keys.
-
- The references are not guaranteed to be 'live' at the time
- they are used, so the result of calling the references needs
- to be checked before being used. This can be used to avoid
- creating references that will cause the garbage collector to
- keep the keys around longer than needed.
-
- """
- return self.data.iterkeys()
-
- def iterkeys(self):
- for wr in self.data.iterkeys():
- obj = wr()
- if obj is not None:
- yield obj
-
- def __iter__(self):
- return self.iterkeys()
-
- def itervalues(self):
- return self.data.itervalues()
-
- def keyrefs(self):
- """Return a list of weak references to the keys.
-
- The references are not guaranteed to be 'live' at the time
- they are used, so the result of calling the references needs
- to be checked before being used. This can be used to avoid
- creating references that will cause the garbage collector to
- keep the keys around longer than needed.
-
- """
- return self.data.keys()
-
- def keys(self):
- L = []
- for wr in self.data.keys():
- o = wr()
- if o is not None:
- L.append(o)
- return L
-
- def popitem(self):
- while 1:
- key, value = self.data.popitem()
- o = key()
- if o is not None:
- return o, value
-
- def pop(self, key, *args):
- return self.data.pop(ref(key), *args)
-
- def setdefault(self, key, default=None):
- return self.data.setdefault(ref(key, self._remove),default)
-
- def update(self, dict=None, **kwargs):
- d = self.data
- if dict is not None:
- if not hasattr(dict, "items"):
- dict = type({})(dict)
- for key, value in dict.items():
- d[ref(key, self._remove)] = value
- if len(kwargs):
- self.update(kwargs)
diff --git a/sys/lib/python/webbrowser.py b/sys/lib/python/webbrowser.py
deleted file mode 100644
index b71ef8d7d..000000000
--- a/sys/lib/python/webbrowser.py
+++ /dev/null
@@ -1,651 +0,0 @@
-#! /usr/bin/env python
-"""Interfaces for launching and remotely controlling Web browsers."""
-
-import os
-import shlex
-import sys
-import stat
-import subprocess
-import time
-
-__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
-
-class Error(Exception):
- pass
-
-_browsers = {} # Dictionary of available browser controllers
-_tryorder = [] # Preference order of available browsers
-
-def register(name, klass, instance=None, update_tryorder=1):
- """Register a browser connector and, optionally, connection."""
- _browsers[name.lower()] = [klass, instance]
- if update_tryorder > 0:
- _tryorder.append(name)
- elif update_tryorder < 0:
- _tryorder.insert(0, name)
-
-def get(using=None):
- """Return a browser launcher instance appropriate for the environment."""
- if using is not None:
- alternatives = [using]
- else:
- alternatives = _tryorder
- for browser in alternatives:
- if '%s' in browser:
- # User gave us a command line, split it into name and args
- browser = shlex.split(browser)
- if browser[-1] == '&':
- return BackgroundBrowser(browser[:-1])
- else:
- return GenericBrowser(browser)
- else:
- # User gave us a browser name or path.
- try:
- command = _browsers[browser.lower()]
- except KeyError:
- command = _synthesize(browser)
- if command[1] is not None:
- return command[1]
- elif command[0] is not None:
- return command[0]()
- raise Error("could not locate runnable browser")
-
-# Please note: the following definition hides a builtin function.
-# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
-# instead of "from webbrowser import *".
-
-def open(url, new=0, autoraise=1):
- for name in _tryorder:
- browser = get(name)
- if browser.open(url, new, autoraise):
- return True
- return False
-
-def open_new(url):
- return open(url, 1)
-
-def open_new_tab(url):
- return open(url, 2)
-
-
-def _synthesize(browser, update_tryorder=1):
- """Attempt to synthesize a controller base on existing controllers.
-
- This is useful to create a controller when a user specifies a path to
- an entry in the BROWSER environment variable -- we can copy a general
- controller to operate using a specific installation of the desired
- browser in this way.
-
- If we can't create a controller in this way, or if there is no
- executable for the requested browser, return [None, None].
-
- """
- cmd = browser.split()[0]
- if not _iscommand(cmd):
- return [None, None]
- name = os.path.basename(cmd)
- try:
- command = _browsers[name.lower()]
- except KeyError:
- return [None, None]
- # now attempt to clone to fit the new name:
- controller = command[1]
- if controller and name.lower() == controller.basename:
- import copy
- controller = copy.copy(controller)
- controller.name = browser
- controller.basename = os.path.basename(browser)
- register(browser, None, controller, update_tryorder)
- return [None, controller]
- return [None, None]
-
-
-if sys.platform[:3] == "win":
- def _isexecutable(cmd):
- cmd = cmd.lower()
- if os.path.isfile(cmd) and cmd.endswith((".exe", ".bat")):
- return True
- for ext in ".exe", ".bat":
- if os.path.isfile(cmd + ext):
- return True
- return False
-else:
- def _isexecutable(cmd):
- if os.path.isfile(cmd):
- mode = os.stat(cmd)[stat.ST_MODE]
- if mode & stat.S_IXUSR or mode & stat.S_IXGRP or mode & stat.S_IXOTH:
- return True
- return False
-
-def _iscommand(cmd):
- """Return True if cmd is executable or can be found on the executable
- search path."""
- if _isexecutable(cmd):
- return True
- path = os.environ.get("PATH")
- if not path:
- return False
- for d in path.split(os.pathsep):
- exe = os.path.join(d, cmd)
- if _isexecutable(exe):
- return True
- return False
-
-
-# General parent classes
-
-class BaseBrowser(object):
- """Parent class for all browsers. Do not use directly."""
-
- args = ['%s']
-
- def __init__(self, name=""):
- self.name = name
- self.basename = name
-
- def open(self, url, new=0, autoraise=1):
- raise NotImplementedError
-
- def open_new(self, url):
- return self.open(url, 1)
-
- def open_new_tab(self, url):
- return self.open(url, 2)
-
-
-class GenericBrowser(BaseBrowser):
- """Class for all browsers started with a command
- and without remote functionality."""
-
- def __init__(self, name):
- if isinstance(name, basestring):
- self.name = name
- else:
- # name should be a list with arguments
- self.name = name[0]
- self.args = name[1:]
- self.basename = os.path.basename(self.name)
-
- def open(self, url, new=0, autoraise=1):
- cmdline = [self.name] + [arg.replace("%s", url)
- for arg in self.args]
- try:
- if sys.platform[:3] == 'win':
- p = subprocess.Popen(cmdline)
- else:
- p = subprocess.Popen(cmdline, close_fds=True)
- return not p.wait()
- except OSError:
- return False
-
-
-class BackgroundBrowser(GenericBrowser):
- """Class for all browsers which are to be started in the
- background."""
-
- def open(self, url, new=0, autoraise=1):
- cmdline = [self.name] + [arg.replace("%s", url)
- for arg in self.args]
- try:
- if sys.platform[:3] == 'win':
- p = subprocess.Popen(cmdline)
- else:
- setsid = getattr(os, 'setsid', None)
- if not setsid:
- setsid = getattr(os, 'setpgrp', None)
- p = subprocess.Popen(cmdline, close_fds=True, preexec_fn=setsid)
- return (p.poll() is None)
- except OSError:
- return False
-
-
-class UnixBrowser(BaseBrowser):
- """Parent class for all Unix browsers with remote functionality."""
-
- raise_opts = None
- remote_args = ['%action', '%s']
- remote_action = None
- remote_action_newwin = None
- remote_action_newtab = None
- background = False
- redirect_stdout = True
-
- def _invoke(self, args, remote, autoraise):
- raise_opt = []
- if remote and self.raise_opts:
- # use autoraise argument only for remote invocation
- autoraise = int(bool(autoraise))
- opt = self.raise_opts[autoraise]
- if opt: raise_opt = [opt]
-
- cmdline = [self.name] + raise_opt + args
-
- if remote or self.background:
- inout = file(os.devnull, "r+")
- else:
- # for TTY browsers, we need stdin/out
- inout = None
- # if possible, put browser in separate process group, so
- # keyboard interrupts don't affect browser as well as Python
- setsid = getattr(os, 'setsid', None)
- if not setsid:
- setsid = getattr(os, 'setpgrp', None)
-
- p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
- stdout=(self.redirect_stdout and inout or None),
- stderr=inout, preexec_fn=setsid)
- if remote:
- # wait five secons. If the subprocess is not finished, the
- # remote invocation has (hopefully) started a new instance.
- time.sleep(1)
- rc = p.poll()
- if rc is None:
- time.sleep(4)
- rc = p.poll()
- if rc is None:
- return True
- # if remote call failed, open() will try direct invocation
- return not rc
- elif self.background:
- if p.poll() is None:
- return True
- else:
- return False
- else:
- return not p.wait()
-
- def open(self, url, new=0, autoraise=1):
- if new == 0:
- action = self.remote_action
- elif new == 1:
- action = self.remote_action_newwin
- elif new == 2:
- if self.remote_action_newtab is None:
- action = self.remote_action_newwin
- else:
- action = self.remote_action_newtab
- else:
- raise Error("Bad 'new' parameter to open(); " +
- "expected 0, 1, or 2, got %s" % new)
-
- args = [arg.replace("%s", url).replace("%action", action)
- for arg in self.remote_args]
- success = self._invoke(args, True, autoraise)
- if not success:
- # remote invocation failed, try straight way
- args = [arg.replace("%s", url) for arg in self.args]
- return self._invoke(args, False, False)
- else:
- return True
-
-
-class Mozilla(UnixBrowser):
- """Launcher class for Mozilla/Netscape browsers."""
-
- raise_opts = ["-noraise", "-raise"]
-
- remote_args = ['-remote', 'openURL(%s%action)']
- remote_action = ""
- remote_action_newwin = ",new-window"
- remote_action_newtab = ",new-tab"
-
- background = True
-
-Netscape = Mozilla
-
-
-class Galeon(UnixBrowser):
- """Launcher class for Galeon/Epiphany browsers."""
-
- raise_opts = ["-noraise", ""]
- remote_args = ['%action', '%s']
- remote_action = "-n"
- remote_action_newwin = "-w"
-
- background = True
-
-
-class Opera(UnixBrowser):
- "Launcher class for Opera browser."
-
- raise_opts = ["", "-raise"]
-
- remote_args = ['-remote', 'openURL(%s%action)']
- remote_action = ""
- remote_action_newwin = ",new-window"
- remote_action_newtab = ",new-page"
- background = True
-
-
-class Elinks(UnixBrowser):
- "Launcher class for Elinks browsers."
-
- remote_args = ['-remote', 'openURL(%s%action)']
- remote_action = ""
- remote_action_newwin = ",new-window"
- remote_action_newtab = ",new-tab"
- background = False
-
- # elinks doesn't like its stdout to be redirected -
- # it uses redirected stdout as a signal to do -dump
- redirect_stdout = False
-
-
-class Konqueror(BaseBrowser):
- """Controller for the KDE File Manager (kfm, or Konqueror).
-
- See the output of ``kfmclient --commands``
- for more information on the Konqueror remote-control interface.
- """
-
- def open(self, url, new=0, autoraise=1):
- # XXX Currently I know no way to prevent KFM from opening a new win.
- if new == 2:
- action = "newTab"
- else:
- action = "openURL"
-
- devnull = file(os.devnull, "r+")
- # if possible, put browser in separate process group, so
- # keyboard interrupts don't affect browser as well as Python
- setsid = getattr(os, 'setsid', None)
- if not setsid:
- setsid = getattr(os, 'setpgrp', None)
-
- try:
- p = subprocess.Popen(["kfmclient", action, url],
- close_fds=True, stdin=devnull,
- stdout=devnull, stderr=devnull)
- except OSError:
- # fall through to next variant
- pass
- else:
- p.wait()
- # kfmclient's return code unfortunately has no meaning as it seems
- return True
-
- try:
- p = subprocess.Popen(["konqueror", "--silent", url],
- close_fds=True, stdin=devnull,
- stdout=devnull, stderr=devnull,
- preexec_fn=setsid)
- except OSError:
- # fall through to next variant
- pass
- else:
- if p.poll() is None:
- # Should be running now.
- return True
-
- try:
- p = subprocess.Popen(["kfm", "-d", url],
- close_fds=True, stdin=devnull,
- stdout=devnull, stderr=devnull,
- preexec_fn=setsid)
- except OSError:
- return False
- else:
- return (p.poll() is None)
-
-
-class Grail(BaseBrowser):
- # There should be a way to maintain a connection to Grail, but the
- # Grail remote control protocol doesn't really allow that at this
- # point. It probably never will!
- def _find_grail_rc(self):
- import glob
- import pwd
- import socket
- import tempfile
- tempdir = os.path.join(tempfile.gettempdir(),
- ".grail-unix")
- user = pwd.getpwuid(os.getuid())[0]
- filename = os.path.join(tempdir, user + "-*")
- maybes = glob.glob(filename)
- if not maybes:
- return None
- s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
- for fn in maybes:
- # need to PING each one until we find one that's live
- try:
- s.connect(fn)
- except socket.error:
- # no good; attempt to clean it out, but don't fail:
- try:
- os.unlink(fn)
- except IOError:
- pass
- else:
- return s
-
- def _remote(self, action):
- s = self._find_grail_rc()
- if not s:
- return 0
- s.send(action)
- s.close()
- return 1
-
- def open(self, url, new=0, autoraise=1):
- if new:
- ok = self._remote("LOADNEW " + url)
- else:
- ok = self._remote("LOAD " + url)
- return ok
-
-
-#
-# Platform support for Unix
-#
-
-# These are the right tests because all these Unix browsers require either
-# a console terminal or an X display to run.
-
-def register_X_browsers():
- # The default Gnome browser
- if _iscommand("gconftool-2"):
- # get the web browser string from gconftool
- gc = 'gconftool-2 -g /desktop/gnome/url-handlers/http/command 2>/dev/null'
- out = os.popen(gc)
- commd = out.read().strip()
- retncode = out.close()
-
- # if successful, register it
- if retncode is None and commd:
- register("gnome", None, BackgroundBrowser(commd.split()))
-
- # First, the Mozilla/Netscape browsers
- for browser in ("mozilla-firefox", "firefox",
- "mozilla-firebird", "firebird",
- "seamonkey", "mozilla", "netscape"):
- if _iscommand(browser):
- register(browser, None, Mozilla(browser))
-
- # Konqueror/kfm, the KDE browser.
- if _iscommand("kfm"):
- register("kfm", Konqueror, Konqueror("kfm"))
- elif _iscommand("konqueror"):
- register("konqueror", Konqueror, Konqueror("konqueror"))
-
- # Gnome's Galeon and Epiphany
- for browser in ("galeon", "epiphany"):
- if _iscommand(browser):
- register(browser, None, Galeon(browser))
-
- # Skipstone, another Gtk/Mozilla based browser
- if _iscommand("skipstone"):
- register("skipstone", None, BackgroundBrowser("skipstone"))
-
- # Opera, quite popular
- if _iscommand("opera"):
- register("opera", None, Opera("opera"))
-
- # Next, Mosaic -- old but still in use.
- if _iscommand("mosaic"):
- register("mosaic", None, BackgroundBrowser("mosaic"))
-
- # Grail, the Python browser. Does anybody still use it?
- if _iscommand("grail"):
- register("grail", Grail, None)
-
-# Prefer X browsers if present
-if os.environ.get("DISPLAY"):
- register_X_browsers()
-
-# Also try console browsers
-if os.environ.get("TERM"):
- # The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
- if _iscommand("links"):
- register("links", None, GenericBrowser("links"))
- if _iscommand("elinks"):
- register("elinks", None, Elinks("elinks"))
- # The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
- if _iscommand("lynx"):
- register("lynx", None, GenericBrowser("lynx"))
- # The w3m browser <http://w3m.sourceforge.net/>
- if _iscommand("w3m"):
- register("w3m", None, GenericBrowser("w3m"))
-
-#
-# Platform support for Windows
-#
-
-if sys.platform[:3] == "win":
- class WindowsDefault(BaseBrowser):
- def open(self, url, new=0, autoraise=1):
- os.startfile(url)
- return True # Oh, my...
-
- _tryorder = []
- _browsers = {}
- # Prefer mozilla/netscape/opera if present
- for browser in ("firefox", "firebird", "seamonkey", "mozilla",
- "netscape", "opera"):
- if _iscommand(browser):
- register(browser, None, BackgroundBrowser(browser))
- register("windows-default", WindowsDefault)
-
-#
-# Platform support for MacOS
-#
-
-try:
- import ic
-except ImportError:
- pass
-else:
- class InternetConfig(BaseBrowser):
- def open(self, url, new=0, autoraise=1):
- ic.launchurl(url)
- return True # Any way to get status?
-
- register("internet-config", InternetConfig, update_tryorder=-1)
-
-if sys.platform == 'darwin':
- # Adapted from patch submitted to SourceForge by Steven J. Burr
- class MacOSX(BaseBrowser):
- """Launcher class for Aqua browsers on Mac OS X
-
- Optionally specify a browser name on instantiation. Note that this
- will not work for Aqua browsers if the user has moved the application
- package after installation.
-
- If no browser is specified, the default browser, as specified in the
- Internet System Preferences panel, will be used.
- """
- def __init__(self, name):
- self.name = name
-
- def open(self, url, new=0, autoraise=1):
- assert "'" not in url
- # hack for local urls
- if not ':' in url:
- url = 'file:'+url
-
- # new must be 0 or 1
- new = int(bool(new))
- if self.name == "default":
- # User called open, open_new or get without a browser parameter
- script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
- else:
- # User called get and chose a browser
- if self.name == "OmniWeb":
- toWindow = ""
- else:
- # Include toWindow parameter of OpenURL command for browsers
- # that support it. 0 == new window; -1 == existing
- toWindow = "toWindow %d" % (new - 1)
- cmd = 'OpenURL "%s"' % url.replace('"', '%22')
- script = '''tell application "%s"
- activate
- %s %s
- end tell''' % (self.name, cmd, toWindow)
- # Open pipe to AppleScript through osascript command
- osapipe = os.popen("osascript", "w")
- if osapipe is None:
- return False
- # Write script to osascript's stdin
- osapipe.write(script)
- rc = osapipe.close()
- return not rc
-
- # Don't clear _tryorder or _browsers since OS X can use above Unix support
- # (but we prefer using the OS X specific stuff)
- register("MacOSX", None, MacOSX('default'), -1)
-
-
-#
-# Platform support for OS/2
-#
-
-if sys.platform[:3] == "os2" and _iscommand("netscape"):
- _tryorder = []
- _browsers = {}
- register("os2netscape", None,
- GenericBrowser(["start", "netscape", "%s"]), -1)
-
-
-# OK, now that we know what the default preference orders for each
-# platform are, allow user to override them with the BROWSER variable.
-if "BROWSER" in os.environ:
- _userchoices = os.environ["BROWSER"].split(os.pathsep)
- _userchoices.reverse()
-
- # Treat choices in same way as if passed into get() but do register
- # and prepend to _tryorder
- for cmdline in _userchoices:
- if cmdline != '':
- _synthesize(cmdline, -1)
- cmdline = None # to make del work if _userchoices was empty
- del cmdline
- del _userchoices
-
-# what to do if _tryorder is now empty?
-
-
-def main():
- import getopt
- usage = """Usage: %s [-n | -t] url
- -n: open new window
- -t: open new tab""" % sys.argv[0]
- try:
- opts, args = getopt.getopt(sys.argv[1:], 'ntd')
- except getopt.error, msg:
- print >>sys.stderr, msg
- print >>sys.stderr, usage
- sys.exit(1)
- new_win = 0
- for o, a in opts:
- if o == '-n': new_win = 1
- elif o == '-t': new_win = 2
- if len(args) <> 1:
- print >>sys.stderr, usage
- sys.exit(1)
-
- url = args[0]
- open(url, new_win)
-
- print "\a"
-
-if __name__ == "__main__":
- main()
diff --git a/sys/lib/python/whichdb.py b/sys/lib/python/whichdb.py
deleted file mode 100644
index f077f8753..000000000
--- a/sys/lib/python/whichdb.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# !/usr/bin/env python
-"""Guess which db package to use to open a db file."""
-
-import os
-import struct
-import sys
-
-try:
- import dbm
- _dbmerror = dbm.error
-except ImportError:
- dbm = None
- # just some sort of valid exception which might be raised in the
- # dbm test
- _dbmerror = IOError
-
-def whichdb(filename):
- """Guess which db package to use to open a db file.
-
- Return values:
-
- - None if the database file can't be read;
- - empty string if the file can be read but can't be recognized
- - the module name (e.g. "dbm" or "gdbm") if recognized.
-
- Importing the given module may still fail, and opening the
- database using that module may still fail.
- """
-
- # Check for dbm first -- this has a .pag and a .dir file
- try:
- f = open(filename + os.extsep + "pag", "rb")
- f.close()
- # dbm linked with gdbm on OS/2 doesn't have .dir file
- if not (dbm.library == "GNU gdbm" and sys.platform == "os2emx"):
- f = open(filename + os.extsep + "dir", "rb")
- f.close()
- return "dbm"
- except IOError:
- # some dbm emulations based on Berkeley DB generate a .db file
- # some do not, but they should be caught by the dbhash checks
- try:
- f = open(filename + os.extsep + "db", "rb")
- f.close()
- # guarantee we can actually open the file using dbm
- # kind of overkill, but since we are dealing with emulations
- # it seems like a prudent step
- if dbm is not None:
- d = dbm.open(filename)
- d.close()
- return "dbm"
- except (IOError, _dbmerror):
- pass
-
- # Check for dumbdbm next -- this has a .dir and a .dat file
- try:
- # First check for presence of files
- os.stat(filename + os.extsep + "dat")
- size = os.stat(filename + os.extsep + "dir").st_size
- # dumbdbm files with no keys are empty
- if size == 0:
- return "dumbdbm"
- f = open(filename + os.extsep + "dir", "rb")
- try:
- if f.read(1) in ("'", '"'):
- return "dumbdbm"
- finally:
- f.close()
- except (OSError, IOError):
- pass
-
- # See if the file exists, return None if not
- try:
- f = open(filename, "rb")
- except IOError:
- return None
-
- # Read the start of the file -- the magic number
- s16 = f.read(16)
- f.close()
- s = s16[0:4]
-
- # Return "" if not at least 4 bytes
- if len(s) != 4:
- return ""
-
- # Convert to 4-byte int in native byte order -- return "" if impossible
- try:
- (magic,) = struct.unpack("=l", s)
- except struct.error:
- return ""
-
- # Check for GNU dbm
- if magic == 0x13579ace:
- return "gdbm"
-
- # Check for old Berkeley db hash file format v2
- if magic in (0x00061561, 0x61150600):
- return "bsddb185"
-
- # Later versions of Berkeley db hash file have a 12-byte pad in
- # front of the file type
- try:
- (magic,) = struct.unpack("=l", s16[-4:])
- except struct.error:
- return ""
-
- # Check for BSD hash
- if magic in (0x00061561, 0x61150600):
- return "dbhash"
-
- # Unknown
- return ""
-
-if __name__ == "__main__":
- for filename in sys.argv[1:]:
- print whichdb(filename) or "UNKNOWN", filename
diff --git a/sys/lib/python/wsgiref.egg-info b/sys/lib/python/wsgiref.egg-info
deleted file mode 100644
index c0b7893c3..000000000
--- a/sys/lib/python/wsgiref.egg-info
+++ /dev/null
@@ -1,8 +0,0 @@
-Metadata-Version: 1.0
-Name: wsgiref
-Version: 0.1.2
-Summary: WSGI (PEP 333) Reference Library
-Author: Phillip J. Eby
-Author-email: web-sig@python.org
-License: PSF or ZPL
-Platform: UNKNOWN
diff --git a/sys/lib/python/wsgiref/__init__.py b/sys/lib/python/wsgiref/__init__.py
deleted file mode 100644
index 46c579f8e..000000000
--- a/sys/lib/python/wsgiref/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""wsgiref -- a WSGI (PEP 333) Reference Library
-
-Current Contents:
-
-* util -- Miscellaneous useful functions and wrappers
-
-* headers -- Manage response headers
-
-* handlers -- base classes for server/gateway implementations
-
-* simple_server -- a simple BaseHTTPServer that supports WSGI
-
-* validate -- validation wrapper that sits between an app and a server
- to detect errors in either
-
-To-Do:
-
-* cgi_gateway -- Run WSGI apps under CGI (pending a deployment standard)
-
-* cgi_wrapper -- Run CGI apps under WSGI
-
-* router -- a simple middleware component that handles URL traversal
-"""
diff --git a/sys/lib/python/wsgiref/handlers.py b/sys/lib/python/wsgiref/handlers.py
deleted file mode 100644
index 099371b07..000000000
--- a/sys/lib/python/wsgiref/handlers.py
+++ /dev/null
@@ -1,492 +0,0 @@
-"""Base classes for server/gateway implementations"""
-
-from types import StringType
-from util import FileWrapper, guess_scheme, is_hop_by_hop
-from headers import Headers
-
-import sys, os, time
-
-__all__ = ['BaseHandler', 'SimpleHandler', 'BaseCGIHandler', 'CGIHandler']
-
-try:
- dict
-except NameError:
- def dict(items):
- d = {}
- for k,v in items:
- d[k] = v
- return d
-
-try:
- True
- False
-except NameError:
- True = not None
- False = not True
-
-
-# Weekday and month names for HTTP date/time formatting; always English!
-_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
-_monthname = [None, # Dummy so we can use 1-based month numbers
- "Jan", "Feb", "Mar", "Apr", "May", "Jun",
- "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
-
-def format_date_time(timestamp):
- year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
- return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
- _weekdayname[wd], day, _monthname[month], year, hh, mm, ss
- )
-
-
-
-class BaseHandler:
- """Manage the invocation of a WSGI application"""
-
- # Configuration parameters; can override per-subclass or per-instance
- wsgi_version = (1,0)
- wsgi_multithread = True
- wsgi_multiprocess = True
- wsgi_run_once = False
-
- origin_server = True # We are transmitting direct to client
- http_version = "1.0" # Version that should be used for response
- server_software = None # String name of server software, if any
-
- # os_environ is used to supply configuration from the OS environment:
- # by default it's a copy of 'os.environ' as of import time, but you can
- # override this in e.g. your __init__ method.
- os_environ = dict(os.environ.items())
-
- # Collaborator classes
- wsgi_file_wrapper = FileWrapper # set to None to disable
- headers_class = Headers # must be a Headers-like class
-
- # Error handling (also per-subclass or per-instance)
- traceback_limit = None # Print entire traceback to self.get_stderr()
- error_status = "500 Dude, this is whack!"
- error_headers = [('Content-Type','text/plain')]
- error_body = "A server error occurred. Please contact the administrator."
-
- # State variables (don't mess with these)
- status = result = None
- headers_sent = False
- headers = None
- bytes_sent = 0
-
-
-
-
-
-
-
-
- def run(self, application):
- """Invoke the application"""
- # Note to self: don't move the close()! Asynchronous servers shouldn't
- # call close() from finish_response(), so if you close() anywhere but
- # the double-error branch here, you'll break asynchronous servers by
- # prematurely closing. Async servers must return from 'run()' without
- # closing if there might still be output to iterate over.
- try:
- self.setup_environ()
- self.result = application(self.environ, self.start_response)
- self.finish_response()
- except:
- try:
- self.handle_error()
- except:
- # If we get an error handling an error, just give up already!
- self.close()
- raise # ...and let the actual server figure it out.
-
-
- def setup_environ(self):
- """Set up the environment for one request"""
-
- env = self.environ = self.os_environ.copy()
- self.add_cgi_vars()
-
- env['wsgi.input'] = self.get_stdin()
- env['wsgi.errors'] = self.get_stderr()
- env['wsgi.version'] = self.wsgi_version
- env['wsgi.run_once'] = self.wsgi_run_once
- env['wsgi.url_scheme'] = self.get_scheme()
- env['wsgi.multithread'] = self.wsgi_multithread
- env['wsgi.multiprocess'] = self.wsgi_multiprocess
-
- if self.wsgi_file_wrapper is not None:
- env['wsgi.file_wrapper'] = self.wsgi_file_wrapper
-
- if self.origin_server and self.server_software:
- env.setdefault('SERVER_SOFTWARE',self.server_software)
-
-
- def finish_response(self):
- """Send any iterable data, then close self and the iterable
-
- Subclasses intended for use in asynchronous servers will
- want to redefine this method, such that it sets up callbacks
- in the event loop to iterate over the data, and to call
- 'self.close()' once the response is finished.
- """
- if not self.result_is_file() or not self.sendfile():
- for data in self.result:
- self.write(data)
- self.finish_content()
- self.close()
-
-
- def get_scheme(self):
- """Return the URL scheme being used"""
- return guess_scheme(self.environ)
-
-
- def set_content_length(self):
- """Compute Content-Length or switch to chunked encoding if possible"""
- try:
- blocks = len(self.result)
- except (TypeError,AttributeError,NotImplementedError):
- pass
- else:
- if blocks==1:
- self.headers['Content-Length'] = str(self.bytes_sent)
- return
- # XXX Try for chunked encoding if origin server and client is 1.1
-
-
- def cleanup_headers(self):
- """Make any necessary header changes or defaults
-
- Subclasses can extend this to add other defaults.
- """
- if not self.headers.has_key('Content-Length'):
- self.set_content_length()
-
- def start_response(self, status, headers,exc_info=None):
- """'start_response()' callable as specified by PEP 333"""
-
- if exc_info:
- try:
- if self.headers_sent:
- # Re-raise original exception if headers sent
- raise exc_info[0], exc_info[1], exc_info[2]
- finally:
- exc_info = None # avoid dangling circular ref
- elif self.headers is not None:
- raise AssertionError("Headers already set!")
-
- assert type(status) is StringType,"Status must be a string"
- assert len(status)>=4,"Status must be at least 4 characters"
- assert int(status[:3]),"Status message must begin w/3-digit code"
- assert status[3]==" ", "Status message must have a space after code"
- if __debug__:
- for name,val in headers:
- assert type(name) is StringType,"Header names must be strings"
- assert type(val) is StringType,"Header values must be strings"
- assert not is_hop_by_hop(name),"Hop-by-hop headers not allowed"
- self.status = status
- self.headers = self.headers_class(headers)
- return self.write
-
-
- def send_preamble(self):
- """Transmit version/status/date/server, via self._write()"""
- if self.origin_server:
- if self.client_is_modern():
- self._write('HTTP/%s %s\r\n' % (self.http_version,self.status))
- if not self.headers.has_key('Date'):
- self._write(
- 'Date: %s\r\n' % format_date_time(time.time())
- )
- if self.server_software and not self.headers.has_key('Server'):
- self._write('Server: %s\r\n' % self.server_software)
- else:
- self._write('Status: %s\r\n' % self.status)
-
- def write(self, data):
- """'write()' callable as specified by PEP 333"""
-
- assert type(data) is StringType,"write() argument must be string"
-
- if not self.status:
- raise AssertionError("write() before start_response()")
-
- elif not self.headers_sent:
- # Before the first output, send the stored headers
- self.bytes_sent = len(data) # make sure we know content-length
- self.send_headers()
- else:
- self.bytes_sent += len(data)
-
- # XXX check Content-Length and truncate if too many bytes written?
- self._write(data)
- self._flush()
-
-
- def sendfile(self):
- """Platform-specific file transmission
-
- Override this method in subclasses to support platform-specific
- file transmission. It is only called if the application's
- return iterable ('self.result') is an instance of
- 'self.wsgi_file_wrapper'.
-
- This method should return a true value if it was able to actually
- transmit the wrapped file-like object using a platform-specific
- approach. It should return a false value if normal iteration
- should be used instead. An exception can be raised to indicate
- that transmission was attempted, but failed.
-
- NOTE: this method should call 'self.send_headers()' if
- 'self.headers_sent' is false and it is going to attempt direct
- transmission of the file.
- """
- return False # No platform-specific transmission by default
-
-
- def finish_content(self):
- """Ensure headers and content have both been sent"""
- if not self.headers_sent:
- self.headers['Content-Length'] = "0"
- self.send_headers()
- else:
- pass # XXX check if content-length was too short?
-
- def close(self):
- """Close the iterable (if needed) and reset all instance vars
-
- Subclasses may want to also drop the client connection.
- """
- try:
- if hasattr(self.result,'close'):
- self.result.close()
- finally:
- self.result = self.headers = self.status = self.environ = None
- self.bytes_sent = 0; self.headers_sent = False
-
-
- def send_headers(self):
- """Transmit headers to the client, via self._write()"""
- self.cleanup_headers()
- self.headers_sent = True
- if not self.origin_server or self.client_is_modern():
- self.send_preamble()
- self._write(str(self.headers))
-
-
- def result_is_file(self):
- """True if 'self.result' is an instance of 'self.wsgi_file_wrapper'"""
- wrapper = self.wsgi_file_wrapper
- return wrapper is not None and isinstance(self.result,wrapper)
-
-
- def client_is_modern(self):
- """True if client can accept status and headers"""
- return self.environ['SERVER_PROTOCOL'].upper() != 'HTTP/0.9'
-
-
- def log_exception(self,exc_info):
- """Log the 'exc_info' tuple in the server log
-
- Subclasses may override to retarget the output or change its format.
- """
- try:
- from traceback import print_exception
- stderr = self.get_stderr()
- print_exception(
- exc_info[0], exc_info[1], exc_info[2],
- self.traceback_limit, stderr
- )
- stderr.flush()
- finally:
- exc_info = None
-
- def handle_error(self):
- """Log current error, and send error output to client if possible"""
- self.log_exception(sys.exc_info())
- if not self.headers_sent:
- self.result = self.error_output(self.environ, self.start_response)
- self.finish_response()
- # XXX else: attempt advanced recovery techniques for HTML or text?
-
- def error_output(self, environ, start_response):
- """WSGI mini-app to create error output
-
- By default, this just uses the 'error_status', 'error_headers',
- and 'error_body' attributes to generate an output page. It can
- be overridden in a subclass to dynamically generate diagnostics,
- choose an appropriate message for the user's preferred language, etc.
-
- Note, however, that it's not recommended from a security perspective to
- spit out diagnostics to any old user; ideally, you should have to do
- something special to enable diagnostic output, which is why we don't
- include any here!
- """
- start_response(self.error_status,self.error_headers[:],sys.exc_info())
- return [self.error_body]
-
-
- # Pure abstract methods; *must* be overridden in subclasses
-
- def _write(self,data):
- """Override in subclass to buffer data for send to client
-
- It's okay if this method actually transmits the data; BaseHandler
- just separates write and flush operations for greater efficiency
- when the underlying system actually has such a distinction.
- """
- raise NotImplementedError
-
- def _flush(self):
- """Override in subclass to force sending of recent '_write()' calls
-
- It's okay if this method is a no-op (i.e., if '_write()' actually
- sends the data.
- """
- raise NotImplementedError
-
- def get_stdin(self):
- """Override in subclass to return suitable 'wsgi.input'"""
- raise NotImplementedError
-
- def get_stderr(self):
- """Override in subclass to return suitable 'wsgi.errors'"""
- raise NotImplementedError
-
- def add_cgi_vars(self):
- """Override in subclass to insert CGI variables in 'self.environ'"""
- raise NotImplementedError
-
-
-
-
-
-
-
-
-
-
-
-class SimpleHandler(BaseHandler):
- """Handler that's just initialized with streams, environment, etc.
-
- This handler subclass is intended for synchronous HTTP/1.0 origin servers,
- and handles sending the entire response output, given the correct inputs.
-
- Usage::
-
- handler = SimpleHandler(
- inp,out,err,env, multithread=False, multiprocess=True
- )
- handler.run(app)"""
-
- def __init__(self,stdin,stdout,stderr,environ,
- multithread=True, multiprocess=False
- ):
- self.stdin = stdin
- self.stdout = stdout
- self.stderr = stderr
- self.base_env = environ
- self.wsgi_multithread = multithread
- self.wsgi_multiprocess = multiprocess
-
- def get_stdin(self):
- return self.stdin
-
- def get_stderr(self):
- return self.stderr
-
- def add_cgi_vars(self):
- self.environ.update(self.base_env)
-
- def _write(self,data):
- self.stdout.write(data)
- self._write = self.stdout.write
-
- def _flush(self):
- self.stdout.flush()
- self._flush = self.stdout.flush
-
-
-class BaseCGIHandler(SimpleHandler):
-
- """CGI-like systems using input/output/error streams and environ mapping
-
- Usage::
-
- handler = BaseCGIHandler(inp,out,err,env)
- handler.run(app)
-
- This handler class is useful for gateway protocols like ReadyExec and
- FastCGI, that have usable input/output/error streams and an environment
- mapping. It's also the base class for CGIHandler, which just uses
- sys.stdin, os.environ, and so on.
-
- The constructor also takes keyword arguments 'multithread' and
- 'multiprocess' (defaulting to 'True' and 'False' respectively) to control
- the configuration sent to the application. It sets 'origin_server' to
- False (to enable CGI-like output), and assumes that 'wsgi.run_once' is
- False.
- """
-
- origin_server = False
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class CGIHandler(BaseCGIHandler):
-
- """CGI-based invocation via sys.stdin/stdout/stderr and os.environ
-
- Usage::
-
- CGIHandler().run(app)
-
- The difference between this class and BaseCGIHandler is that it always
- uses 'wsgi.run_once' of 'True', 'wsgi.multithread' of 'False', and
- 'wsgi.multiprocess' of 'True'. It does not take any initialization
- parameters, but always uses 'sys.stdin', 'os.environ', and friends.
-
- If you need to override any of these parameters, use BaseCGIHandler
- instead.
- """
-
- wsgi_run_once = True
-
- def __init__(self):
- BaseCGIHandler.__init__(
- self, sys.stdin, sys.stdout, sys.stderr, dict(os.environ.items()),
- multithread=False, multiprocess=True
- )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
diff --git a/sys/lib/python/wsgiref/headers.py b/sys/lib/python/wsgiref/headers.py
deleted file mode 100644
index 016eb86f9..000000000
--- a/sys/lib/python/wsgiref/headers.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""Manage HTTP Response Headers
-
-Much of this module is red-handedly pilfered from email.Message in the stdlib,
-so portions are Copyright (C) 2001,2002 Python Software Foundation, and were
-written by Barry Warsaw.
-"""
-
-from types import ListType, TupleType
-
-# Regular expression that matches `special' characters in parameters, the
-# existance of which force quoting of the parameter value.
-import re
-tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
-
-def _formatparam(param, value=None, quote=1):
- """Convenience function to format and return a key=value pair.
-
- This will quote the value if needed or if quote is true.
- """
- if value is not None and len(value) > 0:
- if quote or tspecials.search(value):
- value = value.replace('\\', '\\\\').replace('"', r'\"')
- return '%s="%s"' % (param, value)
- else:
- return '%s=%s' % (param, value)
- else:
- return param
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class Headers:
-
- """Manage a collection of HTTP response headers"""
-
- def __init__(self,headers):
- if type(headers) is not ListType:
- raise TypeError("Headers must be a list of name/value tuples")
- self._headers = headers
-
- def __len__(self):
- """Return the total number of headers, including duplicates."""
- return len(self._headers)
-
- def __setitem__(self, name, val):
- """Set the value of a header."""
- del self[name]
- self._headers.append((name, val))
-
- def __delitem__(self,name):
- """Delete all occurrences of a header, if present.
-
- Does *not* raise an exception if the header is missing.
- """
- name = name.lower()
- self._headers[:] = [kv for kv in self._headers if kv[0].lower()<>name]
-
- def __getitem__(self,name):
- """Get the first header value for 'name'
-
- Return None if the header is missing instead of raising an exception.
-
- Note that if the header appeared multiple times, the first exactly which
- occurrance gets returned is undefined. Use getall() to get all
- the values matching a header field name.
- """
- return self.get(name)
-
-
-
-
-
- def has_key(self, name):
- """Return true if the message contains the header."""
- return self.get(name) is not None
-
- __contains__ = has_key
-
-
- def get_all(self, name):
- """Return a list of all the values for the named field.
-
- These will be sorted in the order they appeared in the original header
- list or were added to this instance, and may contain duplicates. Any
- fields deleted and re-inserted are always appended to the header list.
- If no fields exist with the given name, returns an empty list.
- """
- name = name.lower()
- return [kv[1] for kv in self._headers if kv[0].lower()==name]
-
-
- def get(self,name,default=None):
- """Get the first header value for 'name', or return 'default'"""
- name = name.lower()
- for k,v in self._headers:
- if k.lower()==name:
- return v
- return default
-
-
- def keys(self):
- """Return a list of all the header field names.
-
- These will be sorted in the order they appeared in the original header
- list, or were added to this instance, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return [k for k, v in self._headers]
-
-
-
-
- def values(self):
- """Return a list of all header values.
-
- These will be sorted in the order they appeared in the original header
- list, or were added to this instance, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return [v for k, v in self._headers]
-
- def items(self):
- """Get all the header fields and values.
-
- These will be sorted in the order they were in the original header
- list, or were added to this instance, and may contain duplicates.
- Any fields deleted and re-inserted are always appended to the header
- list.
- """
- return self._headers[:]
-
- def __repr__(self):
- return "Headers(%s)" % `self._headers`
-
- def __str__(self):
- """str() returns the formatted headers, complete with end line,
- suitable for direct HTTP transmission."""
- return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
-
- def setdefault(self,name,value):
- """Return first matching header value for 'name', or 'value'
-
- If there is no header named 'name', add a new header with name 'name'
- and value 'value'."""
- result = self.get(name)
- if result is None:
- self._headers.append((name,value))
- return value
- else:
- return result
-
-
- def add_header(self, _name, _value, **_params):
- """Extended header setting.
-
- _name is the header field to add. keyword arguments can be used to set
- additional parameters for the header field, with underscores converted
- to dashes. Normally the parameter will be added as key="value" unless
- value is None, in which case only the key will be added.
-
- Example:
-
- h.add_header('content-disposition', 'attachment', filename='bud.gif')
-
- Note that unlike the corresponding 'email.Message' method, this does
- *not* handle '(charset, language, value)' tuples: all values must be
- strings or None.
- """
- parts = []
- if _value is not None:
- parts.append(_value)
- for k, v in _params.items():
- if v is None:
- parts.append(k.replace('_', '-'))
- else:
- parts.append(_formatparam(k.replace('_', '-'), v))
- self._headers.append((_name, "; ".join(parts)))
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
diff --git a/sys/lib/python/wsgiref/simple_server.py b/sys/lib/python/wsgiref/simple_server.py
deleted file mode 100644
index 95996cc2f..000000000
--- a/sys/lib/python/wsgiref/simple_server.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""BaseHTTPServer that implements the Python WSGI protocol (PEP 333, rev 1.21)
-
-This is both an example of how WSGI can be implemented, and a basis for running
-simple web applications on a local machine, such as might be done when testing
-or debugging an application. It has not been reviewed for security issues,
-however, and we strongly recommend that you use a "real" web server for
-production use.
-
-For example usage, see the 'if __name__=="__main__"' block at the end of the
-module. See also the BaseHTTPServer module docs for other API information.
-"""
-
-from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
-import urllib, sys
-from wsgiref.handlers import SimpleHandler
-
-__version__ = "0.1"
-__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
-
-
-server_version = "WSGIServer/" + __version__
-sys_version = "Python/" + sys.version.split()[0]
-software_version = server_version + ' ' + sys_version
-
-
-class ServerHandler(SimpleHandler):
-
- server_software = software_version
-
- def close(self):
- try:
- self.request_handler.log_request(
- self.status.split(' ',1)[0], self.bytes_sent
- )
- finally:
- SimpleHandler.close(self)
-
-
-
-
-
-class WSGIServer(HTTPServer):
-
- """BaseHTTPServer that implements the Python WSGI protocol"""
-
- application = None
-
- def server_bind(self):
- """Override server_bind to store the server name."""
- HTTPServer.server_bind(self)
- self.setup_environ()
-
- def setup_environ(self):
- # Set up base environment
- env = self.base_environ = {}
- env['SERVER_NAME'] = self.server_name
- env['GATEWAY_INTERFACE'] = 'CGI/1.1'
- env['SERVER_PORT'] = str(self.server_port)
- env['REMOTE_HOST']=''
- env['CONTENT_LENGTH']=''
- env['SCRIPT_NAME'] = ''
-
- def get_app(self):
- return self.application
-
- def set_app(self,application):
- self.application = application
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-class WSGIRequestHandler(BaseHTTPRequestHandler):
-
- server_version = "WSGIServer/" + __version__
-
- def get_environ(self):
- env = self.server.base_environ.copy()
- env['SERVER_PROTOCOL'] = self.request_version
- env['REQUEST_METHOD'] = self.command
- if '?' in self.path:
- path,query = self.path.split('?',1)
- else:
- path,query = self.path,''
-
- env['PATH_INFO'] = urllib.unquote(path)
- env['QUERY_STRING'] = query
-
- host = self.address_string()
- if host != self.client_address[0]:
- env['REMOTE_HOST'] = host
- env['REMOTE_ADDR'] = self.client_address[0]
-
- if self.headers.typeheader is None:
- env['CONTENT_TYPE'] = self.headers.type
- else:
- env['CONTENT_TYPE'] = self.headers.typeheader
-
- length = self.headers.getheader('content-length')
- if length:
- env['CONTENT_LENGTH'] = length
-
- for h in self.headers.headers:
- k,v = h.split(':',1)
- k=k.replace('-','_').upper(); v=v.strip()
- if k in env:
- continue # skip content length, type,etc.
- if 'HTTP_'+k in env:
- env['HTTP_'+k] += ','+v # comma-separate multiple headers
- else:
- env['HTTP_'+k] = v
- return env
-
- def get_stderr(self):
- return sys.stderr
-
- def handle(self):
- """Handle a single HTTP request"""
-
- self.raw_requestline = self.rfile.readline()
- if not self.parse_request(): # An error code has been sent, just exit
- return
-
- handler = ServerHandler(
- self.rfile, self.wfile, self.get_stderr(), self.get_environ()
- )
- handler.request_handler = self # backpointer for logging
- handler.run(self.server.get_app())
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def demo_app(environ,start_response):
- from StringIO import StringIO
- stdout = StringIO()
- print >>stdout, "Hello world!"
- print >>stdout
- h = environ.items(); h.sort()
- for k,v in h:
- print >>stdout, k,'=',`v`
- start_response("200 OK", [('Content-Type','text/plain')])
- return [stdout.getvalue()]
-
-
-def make_server(
- host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
-):
- """Create a new WSGI server listening on `host` and `port` for `app`"""
- server = server_class((host, port), handler_class)
- server.set_app(app)
- return server
-
-
-if __name__ == '__main__':
- httpd = make_server('', 8000, demo_app)
- sa = httpd.socket.getsockname()
- print "Serving HTTP on", sa[0], "port", sa[1], "..."
- import webbrowser
- webbrowser.open('http://localhost:8000/xyz?abc')
- httpd.handle_request() # serve one request, then exit
-
-
-
-
-
-
-
-
-
-
-
-
-#
diff --git a/sys/lib/python/wsgiref/util.py b/sys/lib/python/wsgiref/util.py
deleted file mode 100644
index 9009b876e..000000000
--- a/sys/lib/python/wsgiref/util.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""Miscellaneous WSGI-related Utilities"""
-
-import posixpath
-
-__all__ = [
- 'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
- 'shift_path_info', 'setup_testing_defaults',
-]
-
-
-class FileWrapper:
- """Wrapper to convert file-like objects to iterables"""
-
- def __init__(self, filelike, blksize=8192):
- self.filelike = filelike
- self.blksize = blksize
- if hasattr(filelike,'close'):
- self.close = filelike.close
-
- def __getitem__(self,key):
- data = self.filelike.read(self.blksize)
- if data:
- return data
- raise IndexError
-
- def __iter__(self):
- return self
-
- def next(self):
- data = self.filelike.read(self.blksize)
- if data:
- return data
- raise StopIteration
-
-
-
-
-
-
-
-
-def guess_scheme(environ):
- """Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
- """
- if environ.get("HTTPS") in ('yes','on','1'):
- return 'https'
- else:
- return 'http'
-
-def application_uri(environ):
- """Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
- url = environ['wsgi.url_scheme']+'://'
- from urllib import quote
-
- if environ.get('HTTP_HOST'):
- url += environ['HTTP_HOST']
- else:
- url += environ['SERVER_NAME']
-
- if environ['wsgi.url_scheme'] == 'https':
- if environ['SERVER_PORT'] != '443':
- url += ':' + environ['SERVER_PORT']
- else:
- if environ['SERVER_PORT'] != '80':
- url += ':' + environ['SERVER_PORT']
-
- url += quote(environ.get('SCRIPT_NAME') or '/')
- return url
-
-def request_uri(environ, include_query=1):
- """Return the full request URI, optionally including the query string"""
- url = application_uri(environ)
- from urllib import quote
- path_info = quote(environ.get('PATH_INFO',''))
- if not environ.get('SCRIPT_NAME'):
- url += path_info[1:]
- else:
- url += path_info
- if include_query and environ.get('QUERY_STRING'):
- url += '?' + environ['QUERY_STRING']
- return url
-
-def shift_path_info(environ):
- """Shift a name from PATH_INFO to SCRIPT_NAME, returning it
-
- If there are no remaining path segments in PATH_INFO, return None.
- Note: 'environ' is modified in-place; use a copy if you need to keep
- the original PATH_INFO or SCRIPT_NAME.
-
- Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
- '/' to SCRIPT_NAME, even though empty path segments are normally ignored,
- and SCRIPT_NAME doesn't normally end in a '/'. This is intentional
- behavior, to ensure that an application can tell the difference between
- '/x' and '/x/' when traversing to objects.
- """
- path_info = environ.get('PATH_INFO','')
- if not path_info:
- return None
-
- path_parts = path_info.split('/')
- path_parts[1:-1] = [p for p in path_parts[1:-1] if p and p<>'.']
- name = path_parts[1]
- del path_parts[1]
-
- script_name = environ.get('SCRIPT_NAME','')
- script_name = posixpath.normpath(script_name+'/'+name)
- if script_name.endswith('/'):
- script_name = script_name[:-1]
- if not name and not script_name.endswith('/'):
- script_name += '/'
-
- environ['SCRIPT_NAME'] = script_name
- environ['PATH_INFO'] = '/'.join(path_parts)
-
- # Special case: '/.' on PATH_INFO doesn't get stripped,
- # because we don't strip the last element of PATH_INFO
- # if there's only one path part left. Instead of fixing this
- # above, we fix it here so that PATH_INFO gets normalized to
- # an empty string in the environ.
- if name=='.':
- name = None
- return name
-
-def setup_testing_defaults(environ):
- """Update 'environ' with trivial defaults for testing purposes
-
- This adds various parameters required for WSGI, including HTTP_HOST,
- SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
- and all of the wsgi.* variables. It only supplies default values,
- and does not replace any existing settings for these variables.
-
- This routine is intended to make it easier for unit tests of WSGI
- servers and applications to set up dummy environments. It should *not*
- be used by actual WSGI servers or applications, since the data is fake!
- """
-
- environ.setdefault('SERVER_NAME','127.0.0.1')
- environ.setdefault('SERVER_PROTOCOL','HTTP/1.0')
-
- environ.setdefault('HTTP_HOST',environ['SERVER_NAME'])
- environ.setdefault('REQUEST_METHOD','GET')
-
- if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
- environ.setdefault('SCRIPT_NAME','')
- environ.setdefault('PATH_INFO','/')
-
- environ.setdefault('wsgi.version', (1,0))
- environ.setdefault('wsgi.run_once', 0)
- environ.setdefault('wsgi.multithread', 0)
- environ.setdefault('wsgi.multiprocess', 0)
-
- from StringIO import StringIO
- environ.setdefault('wsgi.input', StringIO(""))
- environ.setdefault('wsgi.errors', StringIO())
- environ.setdefault('wsgi.url_scheme',guess_scheme(environ))
-
- if environ['wsgi.url_scheme']=='http':
- environ.setdefault('SERVER_PORT', '80')
- elif environ['wsgi.url_scheme']=='https':
- environ.setdefault('SERVER_PORT', '443')
-
-
-
-
-_hoppish = {
- 'connection':1, 'keep-alive':1, 'proxy-authenticate':1,
- 'proxy-authorization':1, 'te':1, 'trailers':1, 'transfer-encoding':1,
- 'upgrade':1
-}.has_key
-
-def is_hop_by_hop(header_name):
- """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
- return _hoppish(header_name.lower())
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-#
diff --git a/sys/lib/python/wsgiref/validate.py b/sys/lib/python/wsgiref/validate.py
deleted file mode 100644
index 23ab9f83f..000000000
--- a/sys/lib/python/wsgiref/validate.py
+++ /dev/null
@@ -1,432 +0,0 @@
-# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
-# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
-# Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
-# Licensed to PSF under a Contributor Agreement
-"""
-Middleware to check for obedience to the WSGI specification.
-
-Some of the things this checks:
-
-* Signature of the application and start_response (including that
- keyword arguments are not used).
-
-* Environment checks:
-
- - Environment is a dictionary (and not a subclass).
-
- - That all the required keys are in the environment: REQUEST_METHOD,
- SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
- wsgi.multithread, wsgi.multiprocess, wsgi.run_once
-
- - That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
- environment (these headers should appear as CONTENT_LENGTH and
- CONTENT_TYPE).
-
- - Warns if QUERY_STRING is missing, as the cgi module acts
- unpredictably in that case.
-
- - That CGI-style variables (that don't contain a .) have
- (non-unicode) string values
-
- - That wsgi.version is a tuple
-
- - That wsgi.url_scheme is 'http' or 'https' (@@: is this too
- restrictive?)
-
- - Warns if the REQUEST_METHOD is not known (@@: probably too
- restrictive).
-
- - That SCRIPT_NAME and PATH_INFO are empty or start with /
-
- - That at least one of SCRIPT_NAME or PATH_INFO are set.
-
- - That CONTENT_LENGTH is a positive integer.
-
- - That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
- be '/').
-
- - That wsgi.input has the methods read, readline, readlines, and
- __iter__
-
- - That wsgi.errors has the methods flush, write, writelines
-
-* The status is a string, contains a space, starts with an integer,
- and that integer is in range (> 100).
-
-* That the headers is a list (not a subclass, not another kind of
- sequence).
-
-* That the items of the headers are tuples of strings.
-
-* That there is no 'status' header (that is used in CGI, but not in
- WSGI).
-
-* That the headers don't contain newlines or colons, end in _ or -, or
- contain characters codes below 037.
-
-* That Content-Type is given if there is content (CGI often has a
- default content type, but WSGI does not).
-
-* That no Content-Type is given when there is no content (@@: is this
- too restrictive?)
-
-* That the exc_info argument to start_response is a tuple or None.
-
-* That all calls to the writer are with strings, and no other methods
- on the writer are accessed.
-
-* That wsgi.input is used properly:
-
- - .read() is called with zero or one argument
-
- - That it returns a string
-
- - That readline, readlines, and __iter__ return strings
-
- - That .close() is not called
-
- - No other methods are provided
-
-* That wsgi.errors is used properly:
-
- - .write() and .writelines() is called with a string
-
- - That .close() is not called, and no other methods are provided.
-
-* The response iterator:
-
- - That it is not a string (it should be a list of a single string; a
- string will work, but perform horribly).
-
- - That .next() returns a string
-
- - That the iterator is not iterated over until start_response has
- been called (that can signal either a server or application
- error).
-
- - That .close() is called (doesn't raise exception, only prints to
- sys.stderr, because we only know it isn't called when the object
- is garbage collected).
-"""
-__all__ = ['validator']
-
-
-import re
-import sys
-from types import DictType, StringType, TupleType, ListType
-import warnings
-
-header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
-bad_header_value_re = re.compile(r'[\000-\037]')
-
-class WSGIWarning(Warning):
- """
- Raised in response to WSGI-spec-related warnings
- """
-
-def assert_(cond, *args):
- if not cond:
- raise AssertionError(*args)
-
-def validator(application):
-
- """
- When applied between a WSGI server and a WSGI application, this
- middleware will check for WSGI compliancy on a number of levels.
- This middleware does not modify the request or response in any
- way, but will throw an AssertionError if anything seems off
- (except for a failure to close the application iterator, which
- will be printed to stderr -- there's no way to throw an exception
- at that point).
- """
-
- def lint_app(*args, **kw):
- assert_(len(args) == 2, "Two arguments required")
- assert_(not kw, "No keyword arguments allowed")
- environ, start_response = args
-
- check_environ(environ)
-
- # We use this to check if the application returns without
- # calling start_response:
- start_response_started = []
-
- def start_response_wrapper(*args, **kw):
- assert_(len(args) == 2 or len(args) == 3, (
- "Invalid number of arguments: %s" % (args,)))
- assert_(not kw, "No keyword arguments allowed")
- status = args[0]
- headers = args[1]
- if len(args) == 3:
- exc_info = args[2]
- else:
- exc_info = None
-
- check_status(status)
- check_headers(headers)
- check_content_type(status, headers)
- check_exc_info(exc_info)
-
- start_response_started.append(None)
- return WriteWrapper(start_response(*args))
-
- environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
- environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
-
- iterator = application(environ, start_response_wrapper)
- assert_(iterator is not None and iterator != False,
- "The application must return an iterator, if only an empty list")
-
- check_iterator(iterator)
-
- return IteratorWrapper(iterator, start_response_started)
-
- return lint_app
-
-class InputWrapper:
-
- def __init__(self, wsgi_input):
- self.input = wsgi_input
-
- def read(self, *args):
- assert_(len(args) <= 1)
- v = self.input.read(*args)
- assert_(type(v) is type(""))
- return v
-
- def readline(self):
- v = self.input.readline()
- assert_(type(v) is type(""))
- return v
-
- def readlines(self, *args):
- assert_(len(args) <= 1)
- lines = self.input.readlines(*args)
- assert_(type(lines) is type([]))
- for line in lines:
- assert_(type(line) is type(""))
- return lines
-
- def __iter__(self):
- while 1:
- line = self.readline()
- if not line:
- return
- yield line
-
- def close(self):
- assert_(0, "input.close() must not be called")
-
-class ErrorWrapper:
-
- def __init__(self, wsgi_errors):
- self.errors = wsgi_errors
-
- def write(self, s):
- assert_(type(s) is type(""))
- self.errors.write(s)
-
- def flush(self):
- self.errors.flush()
-
- def writelines(self, seq):
- for line in seq:
- self.write(line)
-
- def close(self):
- assert_(0, "errors.close() must not be called")
-
-class WriteWrapper:
-
- def __init__(self, wsgi_writer):
- self.writer = wsgi_writer
-
- def __call__(self, s):
- assert_(type(s) is type(""))
- self.writer(s)
-
-class PartialIteratorWrapper:
-
- def __init__(self, wsgi_iterator):
- self.iterator = wsgi_iterator
-
- def __iter__(self):
- # We want to make sure __iter__ is called
- return IteratorWrapper(self.iterator, None)
-
-class IteratorWrapper:
-
- def __init__(self, wsgi_iterator, check_start_response):
- self.original_iterator = wsgi_iterator
- self.iterator = iter(wsgi_iterator)
- self.closed = False
- self.check_start_response = check_start_response
-
- def __iter__(self):
- return self
-
- def next(self):
- assert_(not self.closed,
- "Iterator read after closed")
- v = self.iterator.next()
- if self.check_start_response is not None:
- assert_(self.check_start_response,
- "The application returns and we started iterating over its body, but start_response has not yet been called")
- self.check_start_response = None
- return v
-
- def close(self):
- self.closed = True
- if hasattr(self.original_iterator, 'close'):
- self.original_iterator.close()
-
- def __del__(self):
- if not self.closed:
- sys.stderr.write(
- "Iterator garbage collected without being closed")
- assert_(self.closed,
- "Iterator garbage collected without being closed")
-
-def check_environ(environ):
- assert_(type(environ) is DictType,
- "Environment is not of the right type: %r (environment: %r)"
- % (type(environ), environ))
-
- for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
- 'wsgi.version', 'wsgi.input', 'wsgi.errors',
- 'wsgi.multithread', 'wsgi.multiprocess',
- 'wsgi.run_once']:
- assert_(key in environ,
- "Environment missing required key: %r" % (key,))
-
- for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
- assert_(key not in environ,
- "Environment should not have the key: %s "
- "(use %s instead)" % (key, key[5:]))
-
- if 'QUERY_STRING' not in environ:
- warnings.warn(
- 'QUERY_STRING is not in the WSGI environment; the cgi '
- 'module will use sys.argv when this variable is missing, '
- 'so application errors are more likely',
- WSGIWarning)
-
- for key in environ.keys():
- if '.' in key:
- # Extension, we don't care about its type
- continue
- assert_(type(environ[key]) is StringType,
- "Environmental variable %s is not a string: %r (value: %r)"
- % (key, type(environ[key]), environ[key]))
-
- assert_(type(environ['wsgi.version']) is TupleType,
- "wsgi.version should be a tuple (%r)" % (environ['wsgi.version'],))
- assert_(environ['wsgi.url_scheme'] in ('http', 'https'),
- "wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
-
- check_input(environ['wsgi.input'])
- check_errors(environ['wsgi.errors'])
-
- # @@: these need filling out:
- if environ['REQUEST_METHOD'] not in (
- 'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
- warnings.warn(
- "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
- WSGIWarning)
-
- assert_(not environ.get('SCRIPT_NAME')
- or environ['SCRIPT_NAME'].startswith('/'),
- "SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
- assert_(not environ.get('PATH_INFO')
- or environ['PATH_INFO'].startswith('/'),
- "PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
- if environ.get('CONTENT_LENGTH'):
- assert_(int(environ['CONTENT_LENGTH']) >= 0,
- "Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
-
- if not environ.get('SCRIPT_NAME'):
- assert_(environ.has_key('PATH_INFO'),
- "One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
- "should at least be '/' if SCRIPT_NAME is empty)")
- assert_(environ.get('SCRIPT_NAME') != '/',
- "SCRIPT_NAME cannot be '/'; it should instead be '', and "
- "PATH_INFO should be '/'")
-
-def check_input(wsgi_input):
- for attr in ['read', 'readline', 'readlines', '__iter__']:
- assert_(hasattr(wsgi_input, attr),
- "wsgi.input (%r) doesn't have the attribute %s"
- % (wsgi_input, attr))
-
-def check_errors(wsgi_errors):
- for attr in ['flush', 'write', 'writelines']:
- assert_(hasattr(wsgi_errors, attr),
- "wsgi.errors (%r) doesn't have the attribute %s"
- % (wsgi_errors, attr))
-
-def check_status(status):
- assert_(type(status) is StringType,
- "Status must be a string (not %r)" % status)
- # Implicitly check that we can turn it into an integer:
- status_code = status.split(None, 1)[0]
- assert_(len(status_code) == 3,
- "Status codes must be three characters: %r" % status_code)
- status_int = int(status_code)
- assert_(status_int >= 100, "Status code is invalid: %r" % status_int)
- if len(status) < 4 or status[3] != ' ':
- warnings.warn(
- "The status string (%r) should be a three-digit integer "
- "followed by a single space and a status explanation"
- % status, WSGIWarning)
-
-def check_headers(headers):
- assert_(type(headers) is ListType,
- "Headers (%r) must be of type list: %r"
- % (headers, type(headers)))
- header_names = {}
- for item in headers:
- assert_(type(item) is TupleType,
- "Individual headers (%r) must be of type tuple: %r"
- % (item, type(item)))
- assert_(len(item) == 2)
- name, value = item
- assert_(name.lower() != 'status',
- "The Status header cannot be used; it conflicts with CGI "
- "script, and HTTP status is not given through headers "
- "(value: %r)." % value)
- header_names[name.lower()] = None
- assert_('\n' not in name and ':' not in name,
- "Header names may not contain ':' or '\\n': %r" % name)
- assert_(header_re.search(name), "Bad header name: %r" % name)
- assert_(not name.endswith('-') and not name.endswith('_'),
- "Names may not end in '-' or '_': %r" % name)
- if bad_header_value_re.search(value):
- assert_(0, "Bad header value: %r (bad char: %r)"
- % (value, bad_header_value_re.search(value).group(0)))
-
-def check_content_type(status, headers):
- code = int(status.split(None, 1)[0])
- # @@: need one more person to verify this interpretation of RFC 2616
- # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
- NO_MESSAGE_BODY = (204, 304)
- for name, value in headers:
- if name.lower() == 'content-type':
- if code not in NO_MESSAGE_BODY:
- return
- assert_(0, ("Content-Type header found in a %s response, "
- "which must not return content.") % code)
- if code not in NO_MESSAGE_BODY:
- assert_(0, "No Content-Type header found in headers (%s)" % headers)
-
-def check_exc_info(exc_info):
- assert_(exc_info is None or type(exc_info) is type(()),
- "exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
- # More exc_info checks?
-
-def check_iterator(iterator):
- # Technically a string is legal, which is why it's a really bad
- # idea, because it may cause the response to be returned
- # character-by-character
- assert_(not isinstance(iterator, str),
- "You should not return a string as your application iterator, "
- "instead return a single-item list containing that string.")
diff --git a/sys/lib/python/xdrlib.py b/sys/lib/python/xdrlib.py
deleted file mode 100644
index b349eb9b7..000000000
--- a/sys/lib/python/xdrlib.py
+++ /dev/null
@@ -1,287 +0,0 @@
-"""Implements (a subset of) Sun XDR -- eXternal Data Representation.
-
-See: RFC 1014
-
-"""
-
-import struct
-try:
- from cStringIO import StringIO as _StringIO
-except ImportError:
- from StringIO import StringIO as _StringIO
-
-__all__ = ["Error", "Packer", "Unpacker", "ConversionError"]
-
-# exceptions
-class Error(Exception):
- """Exception class for this module. Use:
-
- except xdrlib.Error, var:
- # var has the Error instance for the exception
-
- Public ivars:
- msg -- contains the message
-
- """
- def __init__(self, msg):
- self.msg = msg
- def __repr__(self):
- return repr(self.msg)
- def __str__(self):
- return str(self.msg)
-
-
-class ConversionError(Error):
- pass
-
-
-
-class Packer:
- """Pack various data representations into a buffer."""
-
- def __init__(self):
- self.reset()
-
- def reset(self):
- self.__buf = _StringIO()
-
- def get_buffer(self):
- return self.__buf.getvalue()
- # backwards compatibility
- get_buf = get_buffer
-
- def pack_uint(self, x):
- self.__buf.write(struct.pack('>L', x))
-
- pack_int = pack_uint
- pack_enum = pack_int
-
- def pack_bool(self, x):
- if x: self.__buf.write('\0\0\0\1')
- else: self.__buf.write('\0\0\0\0')
-
- def pack_uhyper(self, x):
- self.pack_uint(x>>32 & 0xffffffffL)
- self.pack_uint(x & 0xffffffffL)
-
- pack_hyper = pack_uhyper
-
- def pack_float(self, x):
- try: self.__buf.write(struct.pack('>f', x))
- except struct.error, msg:
- raise ConversionError, msg
-
- def pack_double(self, x):
- try: self.__buf.write(struct.pack('>d', x))
- except struct.error, msg:
- raise ConversionError, msg
-
- def pack_fstring(self, n, s):
- if n < 0:
- raise ValueError, 'fstring size must be nonnegative'
- data = s[:n]
- n = ((n+3)//4)*4
- data = data + (n - len(data)) * '\0'
- self.__buf.write(data)
-
- pack_fopaque = pack_fstring
-
- def pack_string(self, s):
- n = len(s)
- self.pack_uint(n)
- self.pack_fstring(n, s)
-
- pack_opaque = pack_string
- pack_bytes = pack_string
-
- def pack_list(self, list, pack_item):
- for item in list:
- self.pack_uint(1)
- pack_item(item)
- self.pack_uint(0)
-
- def pack_farray(self, n, list, pack_item):
- if len(list) != n:
- raise ValueError, 'wrong array size'
- for item in list:
- pack_item(item)
-
- def pack_array(self, list, pack_item):
- n = len(list)
- self.pack_uint(n)
- self.pack_farray(n, list, pack_item)
-
-
-
-class Unpacker:
- """Unpacks various data representations from the given buffer."""
-
- def __init__(self, data):
- self.reset(data)
-
- def reset(self, data):
- self.__buf = data
- self.__pos = 0
-
- def get_position(self):
- return self.__pos
-
- def set_position(self, position):
- self.__pos = position
-
- def get_buffer(self):
- return self.__buf
-
- def done(self):
- if self.__pos < len(self.__buf):
- raise Error('unextracted data remains')
-
- def unpack_uint(self):
- i = self.__pos
- self.__pos = j = i+4
- data = self.__buf[i:j]
- if len(data) < 4:
- raise EOFError
- x = struct.unpack('>L', data)[0]
- try:
- return int(x)
- except OverflowError:
- return x
-
- def unpack_int(self):
- i = self.__pos
- self.__pos = j = i+4
- data = self.__buf[i:j]
- if len(data) < 4:
- raise EOFError
- return struct.unpack('>l', data)[0]
-
- unpack_enum = unpack_int
-
- def unpack_bool(self):
- return bool(self.unpack_int())
-
- def unpack_uhyper(self):
- hi = self.unpack_uint()
- lo = self.unpack_uint()
- return long(hi)<<32 | lo
-
- def unpack_hyper(self):
- x = self.unpack_uhyper()
- if x >= 0x8000000000000000L:
- x = x - 0x10000000000000000L
- return x
-
- def unpack_float(self):
- i = self.__pos
- self.__pos = j = i+4
- data = self.__buf[i:j]
- if len(data) < 4:
- raise EOFError
- return struct.unpack('>f', data)[0]
-
- def unpack_double(self):
- i = self.__pos
- self.__pos = j = i+8
- data = self.__buf[i:j]
- if len(data) < 8:
- raise EOFError
- return struct.unpack('>d', data)[0]
-
- def unpack_fstring(self, n):
- if n < 0:
- raise ValueError, 'fstring size must be nonnegative'
- i = self.__pos
- j = i + (n+3)//4*4
- if j > len(self.__buf):
- raise EOFError
- self.__pos = j
- return self.__buf[i:i+n]
-
- unpack_fopaque = unpack_fstring
-
- def unpack_string(self):
- n = self.unpack_uint()
- return self.unpack_fstring(n)
-
- unpack_opaque = unpack_string
- unpack_bytes = unpack_string
-
- def unpack_list(self, unpack_item):
- list = []
- while 1:
- x = self.unpack_uint()
- if x == 0: break
- if x != 1:
- raise ConversionError, '0 or 1 expected, got %r' % (x,)
- item = unpack_item()
- list.append(item)
- return list
-
- def unpack_farray(self, n, unpack_item):
- list = []
- for i in range(n):
- list.append(unpack_item())
- return list
-
- def unpack_array(self, unpack_item):
- n = self.unpack_uint()
- return self.unpack_farray(n, unpack_item)
-
-
-# test suite
-def _test():
- p = Packer()
- packtest = [
- (p.pack_uint, (9,)),
- (p.pack_bool, (True,)),
- (p.pack_bool, (False,)),
- (p.pack_uhyper, (45L,)),
- (p.pack_float, (1.9,)),
- (p.pack_double, (1.9,)),
- (p.pack_string, ('hello world',)),
- (p.pack_list, (range(5), p.pack_uint)),
- (p.pack_array, (['what', 'is', 'hapnin', 'doctor'], p.pack_string)),
- ]
- succeedlist = [1] * len(packtest)
- count = 0
- for method, args in packtest:
- print 'pack test', count,
- try:
- method(*args)
- print 'succeeded'
- except ConversionError, var:
- print 'ConversionError:', var.msg
- succeedlist[count] = 0
- count = count + 1
- data = p.get_buffer()
- # now verify
- up = Unpacker(data)
- unpacktest = [
- (up.unpack_uint, (), lambda x: x == 9),
- (up.unpack_bool, (), lambda x: x is True),
- (up.unpack_bool, (), lambda x: x is False),
- (up.unpack_uhyper, (), lambda x: x == 45L),
- (up.unpack_float, (), lambda x: 1.89 < x < 1.91),
- (up.unpack_double, (), lambda x: 1.89 < x < 1.91),
- (up.unpack_string, (), lambda x: x == 'hello world'),
- (up.unpack_list, (up.unpack_uint,), lambda x: x == range(5)),
- (up.unpack_array, (up.unpack_string,),
- lambda x: x == ['what', 'is', 'hapnin', 'doctor']),
- ]
- count = 0
- for method, args, pred in unpacktest:
- print 'unpack test', count,
- try:
- if succeedlist[count]:
- x = method(*args)
- print pred(x) and 'succeeded' or 'failed', ':', x
- else:
- print 'skipping'
- except ConversionError, var:
- print 'ConversionError:', var.msg
- count = count + 1
-
-
-if __name__ == '__main__':
- _test()
diff --git a/sys/lib/python/xml/__init__.py b/sys/lib/python/xml/__init__.py
deleted file mode 100644
index 5f79380f9..000000000
--- a/sys/lib/python/xml/__init__.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""Core XML support for Python.
-
-This package contains four sub-packages:
-
-dom -- The W3C Document Object Model. This supports DOM Level 1 +
- Namespaces.
-
-parsers -- Python wrappers for XML parsers (currently only supports Expat).
-
-sax -- The Simple API for XML, developed by XML-Dev, led by David
- Megginson and ported to Python by Lars Marius Garshol. This
- supports the SAX 2 API.
-
-etree -- The ElementTree XML library. This is a subset of the full
- ElementTree XML release.
-
-"""
-
-
-__all__ = ["dom", "parsers", "sax", "etree"]
-
-# When being checked-out without options, this has the form
-# "<dollar>Revision: x.y </dollar>"
-# When exported using -kv, it is "x.y".
-__version__ = "$Revision: 41660 $".split()[-2:][0]
-
-
-_MINIMUM_XMLPLUS_VERSION = (0, 8, 4)
-
-
-try:
- import _xmlplus
-except ImportError:
- pass
-else:
- try:
- v = _xmlplus.version_info
- except AttributeError:
- # _xmlplus is too old; ignore it
- pass
- else:
- if v >= _MINIMUM_XMLPLUS_VERSION:
- import sys
- _xmlplus.__path__.extend(__path__)
- sys.modules[__name__] = _xmlplus
- else:
- del v
diff --git a/sys/lib/python/xml/dom/NodeFilter.py b/sys/lib/python/xml/dom/NodeFilter.py
deleted file mode 100644
index fc052459d..000000000
--- a/sys/lib/python/xml/dom/NodeFilter.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# This is the Python mapping for interface NodeFilter from
-# DOM2-Traversal-Range. It contains only constants.
-
-class NodeFilter:
- """
- This is the DOM2 NodeFilter interface. It contains only constants.
- """
- FILTER_ACCEPT = 1
- FILTER_REJECT = 2
- FILTER_SKIP = 3
-
- SHOW_ALL = 0xFFFFFFFFL
- SHOW_ELEMENT = 0x00000001
- SHOW_ATTRIBUTE = 0x00000002
- SHOW_TEXT = 0x00000004
- SHOW_CDATA_SECTION = 0x00000008
- SHOW_ENTITY_REFERENCE = 0x00000010
- SHOW_ENTITY = 0x00000020
- SHOW_PROCESSING_INSTRUCTION = 0x00000040
- SHOW_COMMENT = 0x00000080
- SHOW_DOCUMENT = 0x00000100
- SHOW_DOCUMENT_TYPE = 0x00000200
- SHOW_DOCUMENT_FRAGMENT = 0x00000400
- SHOW_NOTATION = 0x00000800
-
- def acceptNode(self, node):
- raise NotImplementedError
diff --git a/sys/lib/python/xml/dom/__init__.py b/sys/lib/python/xml/dom/__init__.py
deleted file mode 100644
index 6363d0063..000000000
--- a/sys/lib/python/xml/dom/__init__.py
+++ /dev/null
@@ -1,139 +0,0 @@
-"""W3C Document Object Model implementation for Python.
-
-The Python mapping of the Document Object Model is documented in the
-Python Library Reference in the section on the xml.dom package.
-
-This package contains the following modules:
-
-minidom -- A simple implementation of the Level 1 DOM with namespace
- support added (based on the Level 2 specification) and other
- minor Level 2 functionality.
-
-pulldom -- DOM builder supporting on-demand tree-building for selected
- subtrees of the document.
-
-"""
-
-
-class Node:
- """Class giving the NodeType constants."""
-
- # DOM implementations may use this as a base class for their own
- # Node implementations. If they don't, the constants defined here
- # should still be used as the canonical definitions as they match
- # the values given in the W3C recommendation. Client code can
- # safely refer to these values in all tests of Node.nodeType
- # values.
-
- ELEMENT_NODE = 1
- ATTRIBUTE_NODE = 2
- TEXT_NODE = 3
- CDATA_SECTION_NODE = 4
- ENTITY_REFERENCE_NODE = 5
- ENTITY_NODE = 6
- PROCESSING_INSTRUCTION_NODE = 7
- COMMENT_NODE = 8
- DOCUMENT_NODE = 9
- DOCUMENT_TYPE_NODE = 10
- DOCUMENT_FRAGMENT_NODE = 11
- NOTATION_NODE = 12
-
-
-#ExceptionCode
-INDEX_SIZE_ERR = 1
-DOMSTRING_SIZE_ERR = 2
-HIERARCHY_REQUEST_ERR = 3
-WRONG_DOCUMENT_ERR = 4
-INVALID_CHARACTER_ERR = 5
-NO_DATA_ALLOWED_ERR = 6
-NO_MODIFICATION_ALLOWED_ERR = 7
-NOT_FOUND_ERR = 8
-NOT_SUPPORTED_ERR = 9
-INUSE_ATTRIBUTE_ERR = 10
-INVALID_STATE_ERR = 11
-SYNTAX_ERR = 12
-INVALID_MODIFICATION_ERR = 13
-NAMESPACE_ERR = 14
-INVALID_ACCESS_ERR = 15
-VALIDATION_ERR = 16
-
-
-class DOMException(Exception):
- """Abstract base class for DOM exceptions.
- Exceptions with specific codes are specializations of this class."""
-
- def __init__(self, *args, **kw):
- if self.__class__ is DOMException:
- raise RuntimeError(
- "DOMException should not be instantiated directly")
- Exception.__init__(self, *args, **kw)
-
- def _get_code(self):
- return self.code
-
-
-class IndexSizeErr(DOMException):
- code = INDEX_SIZE_ERR
-
-class DomstringSizeErr(DOMException):
- code = DOMSTRING_SIZE_ERR
-
-class HierarchyRequestErr(DOMException):
- code = HIERARCHY_REQUEST_ERR
-
-class WrongDocumentErr(DOMException):
- code = WRONG_DOCUMENT_ERR
-
-class InvalidCharacterErr(DOMException):
- code = INVALID_CHARACTER_ERR
-
-class NoDataAllowedErr(DOMException):
- code = NO_DATA_ALLOWED_ERR
-
-class NoModificationAllowedErr(DOMException):
- code = NO_MODIFICATION_ALLOWED_ERR
-
-class NotFoundErr(DOMException):
- code = NOT_FOUND_ERR
-
-class NotSupportedErr(DOMException):
- code = NOT_SUPPORTED_ERR
-
-class InuseAttributeErr(DOMException):
- code = INUSE_ATTRIBUTE_ERR
-
-class InvalidStateErr(DOMException):
- code = INVALID_STATE_ERR
-
-class SyntaxErr(DOMException):
- code = SYNTAX_ERR
-
-class InvalidModificationErr(DOMException):
- code = INVALID_MODIFICATION_ERR
-
-class NamespaceErr(DOMException):
- code = NAMESPACE_ERR
-
-class InvalidAccessErr(DOMException):
- code = INVALID_ACCESS_ERR
-
-class ValidationErr(DOMException):
- code = VALIDATION_ERR
-
-class UserDataHandler:
- """Class giving the operation constants for UserDataHandler.handle()."""
-
- # Based on DOM Level 3 (WD 9 April 2002)
-
- NODE_CLONED = 1
- NODE_IMPORTED = 2
- NODE_DELETED = 3
- NODE_RENAMED = 4
-
-XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
-XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
-XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
-EMPTY_NAMESPACE = None
-EMPTY_PREFIX = None
-
-from domreg import getDOMImplementation,registerDOMImplementation
diff --git a/sys/lib/python/xml/dom/domreg.py b/sys/lib/python/xml/dom/domreg.py
deleted file mode 100644
index 684c43601..000000000
--- a/sys/lib/python/xml/dom/domreg.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""Registration facilities for DOM. This module should not be used
-directly. Instead, the functions getDOMImplementation and
-registerDOMImplementation should be imported from xml.dom."""
-
-from xml.dom.minicompat import * # isinstance, StringTypes
-
-# This is a list of well-known implementations. Well-known names
-# should be published by posting to xml-sig@python.org, and are
-# subsequently recorded in this file.
-
-well_known_implementations = {
- 'minidom':'xml.dom.minidom',
- '4DOM': 'xml.dom.DOMImplementation',
- }
-
-# DOM implementations not officially registered should register
-# themselves with their
-
-registered = {}
-
-def registerDOMImplementation(name, factory):
- """registerDOMImplementation(name, factory)
-
- Register the factory function with the name. The factory function
- should return an object which implements the DOMImplementation
- interface. The factory function can either return the same object,
- or a new one (e.g. if that implementation supports some
- customization)."""
-
- registered[name] = factory
-
-def _good_enough(dom, features):
- "_good_enough(dom, features) -> Return 1 if the dom offers the features"
- for f,v in features:
- if not dom.hasFeature(f,v):
- return 0
- return 1
-
-def getDOMImplementation(name = None, features = ()):
- """getDOMImplementation(name = None, features = ()) -> DOM implementation.
-
- Return a suitable DOM implementation. The name is either
- well-known, the module name of a DOM implementation, or None. If
- it is not None, imports the corresponding module and returns
- DOMImplementation object if the import succeeds.
-
- If name is not given, consider the available implementations to
- find one with the required feature set. If no implementation can
- be found, raise an ImportError. The features list must be a sequence
- of (feature, version) pairs which are passed to hasFeature."""
-
- import os
- creator = None
- mod = well_known_implementations.get(name)
- if mod:
- mod = __import__(mod, {}, {}, ['getDOMImplementation'])
- return mod.getDOMImplementation()
- elif name:
- return registered[name]()
- elif os.environ.has_key("PYTHON_DOM"):
- return getDOMImplementation(name = os.environ["PYTHON_DOM"])
-
- # User did not specify a name, try implementations in arbitrary
- # order, returning the one that has the required features
- if isinstance(features, StringTypes):
- features = _parse_feature_string(features)
- for creator in registered.values():
- dom = creator()
- if _good_enough(dom, features):
- return dom
-
- for creator in well_known_implementations.keys():
- try:
- dom = getDOMImplementation(name = creator)
- except StandardError: # typically ImportError, or AttributeError
- continue
- if _good_enough(dom, features):
- return dom
-
- raise ImportError,"no suitable DOM implementation found"
-
-def _parse_feature_string(s):
- features = []
- parts = s.split()
- i = 0
- length = len(parts)
- while i < length:
- feature = parts[i]
- if feature[0] in "0123456789":
- raise ValueError, "bad feature name: %r" % (feature,)
- i = i + 1
- version = None
- if i < length:
- v = parts[i]
- if v[0] in "0123456789":
- i = i + 1
- version = v
- features.append((feature, version))
- return tuple(features)
diff --git a/sys/lib/python/xml/dom/expatbuilder.py b/sys/lib/python/xml/dom/expatbuilder.py
deleted file mode 100644
index a2f8a3383..000000000
--- a/sys/lib/python/xml/dom/expatbuilder.py
+++ /dev/null
@@ -1,983 +0,0 @@
-"""Facility to use the Expat parser to load a minidom instance
-from a string or file.
-
-This avoids all the overhead of SAX and pulldom to gain performance.
-"""
-
-# Warning!
-#
-# This module is tightly bound to the implementation details of the
-# minidom DOM and can't be used with other DOM implementations. This
-# is due, in part, to a lack of appropriate methods in the DOM (there is
-# no way to create Entity and Notation nodes via the DOM Level 2
-# interface), and for performance. The later is the cause of some fairly
-# cryptic code.
-#
-# Performance hacks:
-#
-# - .character_data_handler() has an extra case in which continuing
-# data is appended to an existing Text node; this can be a
-# speedup since pyexpat can break up character data into multiple
-# callbacks even though we set the buffer_text attribute on the
-# parser. This also gives us the advantage that we don't need a
-# separate normalization pass.
-#
-# - Determining that a node exists is done using an identity comparison
-# with None rather than a truth test; this avoids searching for and
-# calling any methods on the node object if it exists. (A rather
-# nice speedup is achieved this way as well!)
-
-from xml.dom import xmlbuilder, minidom, Node
-from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
-from xml.parsers import expat
-from xml.dom.minidom import _append_child, _set_attribute_node
-from xml.dom.NodeFilter import NodeFilter
-
-from xml.dom.minicompat import *
-
-TEXT_NODE = Node.TEXT_NODE
-CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
-DOCUMENT_NODE = Node.DOCUMENT_NODE
-
-FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
-FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
-FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
-FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
-
-theDOMImplementation = minidom.getDOMImplementation()
-
-# Expat typename -> TypeInfo
-_typeinfo_map = {
- "CDATA": minidom.TypeInfo(None, "cdata"),
- "ENUM": minidom.TypeInfo(None, "enumeration"),
- "ENTITY": minidom.TypeInfo(None, "entity"),
- "ENTITIES": minidom.TypeInfo(None, "entities"),
- "ID": minidom.TypeInfo(None, "id"),
- "IDREF": minidom.TypeInfo(None, "idref"),
- "IDREFS": minidom.TypeInfo(None, "idrefs"),
- "NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
- "NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
- }
-
-class ElementInfo(object):
- __slots__ = '_attr_info', '_model', 'tagName'
-
- def __init__(self, tagName, model=None):
- self.tagName = tagName
- self._attr_info = []
- self._model = model
-
- def __getstate__(self):
- return self._attr_info, self._model, self.tagName
-
- def __setstate__(self, state):
- self._attr_info, self._model, self.tagName = state
-
- def getAttributeType(self, aname):
- for info in self._attr_info:
- if info[1] == aname:
- t = info[-2]
- if t[0] == "(":
- return _typeinfo_map["ENUM"]
- else:
- return _typeinfo_map[info[-2]]
- return minidom._no_type
-
- def getAttributeTypeNS(self, namespaceURI, localName):
- return minidom._no_type
-
- def isElementContent(self):
- if self._model:
- type = self._model[0]
- return type not in (expat.model.XML_CTYPE_ANY,
- expat.model.XML_CTYPE_MIXED)
- else:
- return False
-
- def isEmpty(self):
- if self._model:
- return self._model[0] == expat.model.XML_CTYPE_EMPTY
- else:
- return False
-
- def isId(self, aname):
- for info in self._attr_info:
- if info[1] == aname:
- return info[-2] == "ID"
- return False
-
- def isIdNS(self, euri, ename, auri, aname):
- # not sure this is meaningful
- return self.isId((auri, aname))
-
-def _intern(builder, s):
- return builder._intern_setdefault(s, s)
-
-def _parse_ns_name(builder, name):
- assert ' ' in name
- parts = name.split(' ')
- intern = builder._intern_setdefault
- if len(parts) == 3:
- uri, localname, prefix = parts
- prefix = intern(prefix, prefix)
- qname = "%s:%s" % (prefix, localname)
- qname = intern(qname, qname)
- localname = intern(localname, localname)
- else:
- uri, localname = parts
- prefix = EMPTY_PREFIX
- qname = localname = intern(localname, localname)
- return intern(uri, uri), localname, prefix, qname
-
-
-class ExpatBuilder:
- """Document builder that uses Expat to build a ParsedXML.DOM document
- instance."""
-
- def __init__(self, options=None):
- if options is None:
- options = xmlbuilder.Options()
- self._options = options
- if self._options.filter is not None:
- self._filter = FilterVisibilityController(self._options.filter)
- else:
- self._filter = None
- # This *really* doesn't do anything in this case, so
- # override it with something fast & minimal.
- self._finish_start_element = id
- self._parser = None
- self.reset()
-
- def createParser(self):
- """Create a new parser object."""
- return expat.ParserCreate()
-
- def getParser(self):
- """Return the parser object, creating a new one if needed."""
- if not self._parser:
- self._parser = self.createParser()
- self._intern_setdefault = self._parser.intern.setdefault
- self._parser.buffer_text = True
- self._parser.ordered_attributes = True
- self._parser.specified_attributes = True
- self.install(self._parser)
- return self._parser
-
- def reset(self):
- """Free all data structures used during DOM construction."""
- self.document = theDOMImplementation.createDocument(
- EMPTY_NAMESPACE, None, None)
- self.curNode = self.document
- self._elem_info = self.document._elem_info
- self._cdata = False
-
- def install(self, parser):
- """Install the callbacks needed to build the DOM into the parser."""
- # This creates circular references!
- parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
- parser.StartElementHandler = self.first_element_handler
- parser.EndElementHandler = self.end_element_handler
- parser.ProcessingInstructionHandler = self.pi_handler
- if self._options.entities:
- parser.EntityDeclHandler = self.entity_decl_handler
- parser.NotationDeclHandler = self.notation_decl_handler
- if self._options.comments:
- parser.CommentHandler = self.comment_handler
- if self._options.cdata_sections:
- parser.StartCdataSectionHandler = self.start_cdata_section_handler
- parser.EndCdataSectionHandler = self.end_cdata_section_handler
- parser.CharacterDataHandler = self.character_data_handler_cdata
- else:
- parser.CharacterDataHandler = self.character_data_handler
- parser.ExternalEntityRefHandler = self.external_entity_ref_handler
- parser.XmlDeclHandler = self.xml_decl_handler
- parser.ElementDeclHandler = self.element_decl_handler
- parser.AttlistDeclHandler = self.attlist_decl_handler
-
- def parseFile(self, file):
- """Parse a document from a file object, returning the document
- node."""
- parser = self.getParser()
- first_buffer = True
- try:
- while 1:
- buffer = file.read(16*1024)
- if not buffer:
- break
- parser.Parse(buffer, 0)
- if first_buffer and self.document.documentElement:
- self._setup_subset(buffer)
- first_buffer = False
- parser.Parse("", True)
- except ParseEscape:
- pass
- doc = self.document
- self.reset()
- self._parser = None
- return doc
-
- def parseString(self, string):
- """Parse a document from a string, returning the document node."""
- parser = self.getParser()
- try:
- parser.Parse(string, True)
- self._setup_subset(string)
- except ParseEscape:
- pass
- doc = self.document
- self.reset()
- self._parser = None
- return doc
-
- def _setup_subset(self, buffer):
- """Load the internal subset if there might be one."""
- if self.document.doctype:
- extractor = InternalSubsetExtractor()
- extractor.parseString(buffer)
- subset = extractor.getSubset()
- self.document.doctype.internalSubset = subset
-
- def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
- has_internal_subset):
- doctype = self.document.implementation.createDocumentType(
- doctypeName, publicId, systemId)
- doctype.ownerDocument = self.document
- self.document.childNodes.append(doctype)
- self.document.doctype = doctype
- if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
- self.document.doctype = None
- del self.document.childNodes[-1]
- doctype = None
- self._parser.EntityDeclHandler = None
- self._parser.NotationDeclHandler = None
- if has_internal_subset:
- if doctype is not None:
- doctype.entities._seq = []
- doctype.notations._seq = []
- self._parser.CommentHandler = None
- self._parser.ProcessingInstructionHandler = None
- self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
-
- def end_doctype_decl_handler(self):
- if self._options.comments:
- self._parser.CommentHandler = self.comment_handler
- self._parser.ProcessingInstructionHandler = self.pi_handler
- if not (self._elem_info or self._filter):
- self._finish_end_element = id
-
- def pi_handler(self, target, data):
- node = self.document.createProcessingInstruction(target, data)
- _append_child(self.curNode, node)
- if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
- self.curNode.removeChild(node)
-
- def character_data_handler_cdata(self, data):
- childNodes = self.curNode.childNodes
- if self._cdata:
- if ( self._cdata_continue
- and childNodes[-1].nodeType == CDATA_SECTION_NODE):
- childNodes[-1].appendData(data)
- return
- node = self.document.createCDATASection(data)
- self._cdata_continue = True
- elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
- node = childNodes[-1]
- value = node.data + data
- d = node.__dict__
- d['data'] = d['nodeValue'] = value
- return
- else:
- node = minidom.Text()
- d = node.__dict__
- d['data'] = d['nodeValue'] = data
- d['ownerDocument'] = self.document
- _append_child(self.curNode, node)
-
- def character_data_handler(self, data):
- childNodes = self.curNode.childNodes
- if childNodes and childNodes[-1].nodeType == TEXT_NODE:
- node = childNodes[-1]
- d = node.__dict__
- d['data'] = d['nodeValue'] = node.data + data
- return
- node = minidom.Text()
- d = node.__dict__
- d['data'] = d['nodeValue'] = node.data + data
- d['ownerDocument'] = self.document
- _append_child(self.curNode, node)
-
- def entity_decl_handler(self, entityName, is_parameter_entity, value,
- base, systemId, publicId, notationName):
- if is_parameter_entity:
- # we don't care about parameter entities for the DOM
- return
- if not self._options.entities:
- return
- node = self.document._create_entity(entityName, publicId,
- systemId, notationName)
- if value is not None:
- # internal entity
- # node *should* be readonly, but we'll cheat
- child = self.document.createTextNode(value)
- node.childNodes.append(child)
- self.document.doctype.entities._seq.append(node)
- if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
- del self.document.doctype.entities._seq[-1]
-
- def notation_decl_handler(self, notationName, base, systemId, publicId):
- node = self.document._create_notation(notationName, publicId, systemId)
- self.document.doctype.notations._seq.append(node)
- if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT:
- del self.document.doctype.notations._seq[-1]
-
- def comment_handler(self, data):
- node = self.document.createComment(data)
- _append_child(self.curNode, node)
- if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
- self.curNode.removeChild(node)
-
- def start_cdata_section_handler(self):
- self._cdata = True
- self._cdata_continue = False
-
- def end_cdata_section_handler(self):
- self._cdata = False
- self._cdata_continue = False
-
- def external_entity_ref_handler(self, context, base, systemId, publicId):
- return 1
-
- def first_element_handler(self, name, attributes):
- if self._filter is None and not self._elem_info:
- self._finish_end_element = id
- self.getParser().StartElementHandler = self.start_element_handler
- self.start_element_handler(name, attributes)
-
- def start_element_handler(self, name, attributes):
- node = self.document.createElement(name)
- _append_child(self.curNode, node)
- self.curNode = node
-
- if attributes:
- for i in range(0, len(attributes), 2):
- a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
- None, EMPTY_PREFIX)
- value = attributes[i+1]
- d = a.childNodes[0].__dict__
- d['data'] = d['nodeValue'] = value
- d = a.__dict__
- d['value'] = d['nodeValue'] = value
- d['ownerDocument'] = self.document
- _set_attribute_node(node, a)
-
- if node is not self.document.documentElement:
- self._finish_start_element(node)
-
- def _finish_start_element(self, node):
- if self._filter:
- # To be general, we'd have to call isSameNode(), but this
- # is sufficient for minidom:
- if node is self.document.documentElement:
- return
- filt = self._filter.startContainer(node)
- if filt == FILTER_REJECT:
- # ignore this node & all descendents
- Rejecter(self)
- elif filt == FILTER_SKIP:
- # ignore this node, but make it's children become
- # children of the parent node
- Skipper(self)
- else:
- return
- self.curNode = node.parentNode
- node.parentNode.removeChild(node)
- node.unlink()
-
- # If this ever changes, Namespaces.end_element_handler() needs to
- # be changed to match.
- #
- def end_element_handler(self, name):
- curNode = self.curNode
- self.curNode = curNode.parentNode
- self._finish_end_element(curNode)
-
- def _finish_end_element(self, curNode):
- info = self._elem_info.get(curNode.tagName)
- if info:
- self._handle_white_text_nodes(curNode, info)
- if self._filter:
- if curNode is self.document.documentElement:
- return
- if self._filter.acceptNode(curNode) == FILTER_REJECT:
- self.curNode.removeChild(curNode)
- curNode.unlink()
-
- def _handle_white_text_nodes(self, node, info):
- if (self._options.whitespace_in_element_content
- or not info.isElementContent()):
- return
-
- # We have element type information and should remove ignorable
- # whitespace; identify for text nodes which contain only
- # whitespace.
- L = []
- for child in node.childNodes:
- if child.nodeType == TEXT_NODE and not child.data.strip():
- L.append(child)
-
- # Remove ignorable whitespace from the tree.
- for child in L:
- node.removeChild(child)
-
- def element_decl_handler(self, name, model):
- info = self._elem_info.get(name)
- if info is None:
- self._elem_info[name] = ElementInfo(name, model)
- else:
- assert info._model is None
- info._model = model
-
- def attlist_decl_handler(self, elem, name, type, default, required):
- info = self._elem_info.get(elem)
- if info is None:
- info = ElementInfo(elem)
- self._elem_info[elem] = info
- info._attr_info.append(
- [None, name, None, None, default, 0, type, required])
-
- def xml_decl_handler(self, version, encoding, standalone):
- self.document.version = version
- self.document.encoding = encoding
- # This is still a little ugly, thanks to the pyexpat API. ;-(
- if standalone >= 0:
- if standalone:
- self.document.standalone = True
- else:
- self.document.standalone = False
-
-
-# Don't include FILTER_INTERRUPT, since that's checked separately
-# where allowed.
-_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
-
-class FilterVisibilityController(object):
- """Wrapper around a DOMBuilderFilter which implements the checks
- to make the whatToShow filter attribute work."""
-
- __slots__ = 'filter',
-
- def __init__(self, filter):
- self.filter = filter
-
- def startContainer(self, node):
- mask = self._nodetype_mask[node.nodeType]
- if self.filter.whatToShow & mask:
- val = self.filter.startContainer(node)
- if val == FILTER_INTERRUPT:
- raise ParseEscape
- if val not in _ALLOWED_FILTER_RETURNS:
- raise ValueError, \
- "startContainer() returned illegal value: " + repr(val)
- return val
- else:
- return FILTER_ACCEPT
-
- def acceptNode(self, node):
- mask = self._nodetype_mask[node.nodeType]
- if self.filter.whatToShow & mask:
- val = self.filter.acceptNode(node)
- if val == FILTER_INTERRUPT:
- raise ParseEscape
- if val == FILTER_SKIP:
- # move all child nodes to the parent, and remove this node
- parent = node.parentNode
- for child in node.childNodes[:]:
- parent.appendChild(child)
- # node is handled by the caller
- return FILTER_REJECT
- if val not in _ALLOWED_FILTER_RETURNS:
- raise ValueError, \
- "acceptNode() returned illegal value: " + repr(val)
- return val
- else:
- return FILTER_ACCEPT
-
- _nodetype_mask = {
- Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
- Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
- Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
- Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
- Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
- Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
- Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
- Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
- Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
- Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
- Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
- Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
- }
-
-
-class FilterCrutch(object):
- __slots__ = '_builder', '_level', '_old_start', '_old_end'
-
- def __init__(self, builder):
- self._level = 0
- self._builder = builder
- parser = builder._parser
- self._old_start = parser.StartElementHandler
- self._old_end = parser.EndElementHandler
- parser.StartElementHandler = self.start_element_handler
- parser.EndElementHandler = self.end_element_handler
-
-class Rejecter(FilterCrutch):
- __slots__ = ()
-
- def __init__(self, builder):
- FilterCrutch.__init__(self, builder)
- parser = builder._parser
- for name in ("ProcessingInstructionHandler",
- "CommentHandler",
- "CharacterDataHandler",
- "StartCdataSectionHandler",
- "EndCdataSectionHandler",
- "ExternalEntityRefHandler",
- ):
- setattr(parser, name, None)
-
- def start_element_handler(self, *args):
- self._level = self._level + 1
-
- def end_element_handler(self, *args):
- if self._level == 0:
- # restore the old handlers
- parser = self._builder._parser
- self._builder.install(parser)
- parser.StartElementHandler = self._old_start
- parser.EndElementHandler = self._old_end
- else:
- self._level = self._level - 1
-
-class Skipper(FilterCrutch):
- __slots__ = ()
-
- def start_element_handler(self, *args):
- node = self._builder.curNode
- self._old_start(*args)
- if self._builder.curNode is not node:
- self._level = self._level + 1
-
- def end_element_handler(self, *args):
- if self._level == 0:
- # We're popping back out of the node we're skipping, so we
- # shouldn't need to do anything but reset the handlers.
- self._builder._parser.StartElementHandler = self._old_start
- self._builder._parser.EndElementHandler = self._old_end
- self._builder = None
- else:
- self._level = self._level - 1
- self._old_end(*args)
-
-
-# framework document used by the fragment builder.
-# Takes a string for the doctype, subset string, and namespace attrs string.
-
-_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
- "http://xml.python.org/entities/fragment-builder/internal"
-
-_FRAGMENT_BUILDER_TEMPLATE = (
- '''\
-<!DOCTYPE wrapper
- %%s [
- <!ENTITY fragment-builder-internal
- SYSTEM "%s">
-%%s
-]>
-<wrapper %%s
->&fragment-builder-internal;</wrapper>'''
- % _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
-
-
-class FragmentBuilder(ExpatBuilder):
- """Builder which constructs document fragments given XML source
- text and a context node.
-
- The context node is expected to provide information about the
- namespace declarations which are in scope at the start of the
- fragment.
- """
-
- def __init__(self, context, options=None):
- if context.nodeType == DOCUMENT_NODE:
- self.originalDocument = context
- self.context = context
- else:
- self.originalDocument = context.ownerDocument
- self.context = context
- ExpatBuilder.__init__(self, options)
-
- def reset(self):
- ExpatBuilder.reset(self)
- self.fragment = None
-
- def parseFile(self, file):
- """Parse a document fragment from a file object, returning the
- fragment node."""
- return self.parseString(file.read())
-
- def parseString(self, string):
- """Parse a document fragment from a string, returning the
- fragment node."""
- self._source = string
- parser = self.getParser()
- doctype = self.originalDocument.doctype
- ident = ""
- if doctype:
- subset = doctype.internalSubset or self._getDeclarations()
- if doctype.publicId:
- ident = ('PUBLIC "%s" "%s"'
- % (doctype.publicId, doctype.systemId))
- elif doctype.systemId:
- ident = 'SYSTEM "%s"' % doctype.systemId
- else:
- subset = ""
- nsattrs = self._getNSattrs() # get ns decls from node's ancestors
- document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
- try:
- parser.Parse(document, 1)
- except:
- self.reset()
- raise
- fragment = self.fragment
- self.reset()
-## self._parser = None
- return fragment
-
- def _getDeclarations(self):
- """Re-create the internal subset from the DocumentType node.
-
- This is only needed if we don't already have the
- internalSubset as a string.
- """
- doctype = self.context.ownerDocument.doctype
- s = ""
- if doctype:
- for i in range(doctype.notations.length):
- notation = doctype.notations.item(i)
- if s:
- s = s + "\n "
- s = "%s<!NOTATION %s" % (s, notation.nodeName)
- if notation.publicId:
- s = '%s PUBLIC "%s"\n "%s">' \
- % (s, notation.publicId, notation.systemId)
- else:
- s = '%s SYSTEM "%s">' % (s, notation.systemId)
- for i in range(doctype.entities.length):
- entity = doctype.entities.item(i)
- if s:
- s = s + "\n "
- s = "%s<!ENTITY %s" % (s, entity.nodeName)
- if entity.publicId:
- s = '%s PUBLIC "%s"\n "%s"' \
- % (s, entity.publicId, entity.systemId)
- elif entity.systemId:
- s = '%s SYSTEM "%s"' % (s, entity.systemId)
- else:
- s = '%s "%s"' % (s, entity.firstChild.data)
- if entity.notationName:
- s = "%s NOTATION %s" % (s, entity.notationName)
- s = s + ">"
- return s
-
- def _getNSattrs(self):
- return ""
-
- def external_entity_ref_handler(self, context, base, systemId, publicId):
- if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
- # this entref is the one that we made to put the subtree
- # in; all of our given input is parsed in here.
- old_document = self.document
- old_cur_node = self.curNode
- parser = self._parser.ExternalEntityParserCreate(context)
- # put the real document back, parse into the fragment to return
- self.document = self.originalDocument
- self.fragment = self.document.createDocumentFragment()
- self.curNode = self.fragment
- try:
- parser.Parse(self._source, 1)
- finally:
- self.curNode = old_cur_node
- self.document = old_document
- self._source = None
- return -1
- else:
- return ExpatBuilder.external_entity_ref_handler(
- self, context, base, systemId, publicId)
-
-
-class Namespaces:
- """Mix-in class for builders; adds support for namespaces."""
-
- def _initNamespaces(self):
- # list of (prefix, uri) ns declarations. Namespace attrs are
- # constructed from this and added to the element's attrs.
- self._ns_ordered_prefixes = []
-
- def createParser(self):
- """Create a new namespace-handling parser."""
- parser = expat.ParserCreate(namespace_separator=" ")
- parser.namespace_prefixes = True
- return parser
-
- def install(self, parser):
- """Insert the namespace-handlers onto the parser."""
- ExpatBuilder.install(self, parser)
- if self._options.namespace_declarations:
- parser.StartNamespaceDeclHandler = (
- self.start_namespace_decl_handler)
-
- def start_namespace_decl_handler(self, prefix, uri):
- """Push this namespace declaration on our storage."""
- self._ns_ordered_prefixes.append((prefix, uri))
-
- def start_element_handler(self, name, attributes):
- if ' ' in name:
- uri, localname, prefix, qname = _parse_ns_name(self, name)
- else:
- uri = EMPTY_NAMESPACE
- qname = name
- localname = None
- prefix = EMPTY_PREFIX
- node = minidom.Element(qname, uri, prefix, localname)
- node.ownerDocument = self.document
- _append_child(self.curNode, node)
- self.curNode = node
-
- if self._ns_ordered_prefixes:
- for prefix, uri in self._ns_ordered_prefixes:
- if prefix:
- a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
- XMLNS_NAMESPACE, prefix, "xmlns")
- else:
- a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
- "xmlns", EMPTY_PREFIX)
- d = a.childNodes[0].__dict__
- d['data'] = d['nodeValue'] = uri
- d = a.__dict__
- d['value'] = d['nodeValue'] = uri
- d['ownerDocument'] = self.document
- _set_attribute_node(node, a)
- del self._ns_ordered_prefixes[:]
-
- if attributes:
- _attrs = node._attrs
- _attrsNS = node._attrsNS
- for i in range(0, len(attributes), 2):
- aname = attributes[i]
- value = attributes[i+1]
- if ' ' in aname:
- uri, localname, prefix, qname = _parse_ns_name(self, aname)
- a = minidom.Attr(qname, uri, localname, prefix)
- _attrs[qname] = a
- _attrsNS[(uri, localname)] = a
- else:
- a = minidom.Attr(aname, EMPTY_NAMESPACE,
- aname, EMPTY_PREFIX)
- _attrs[aname] = a
- _attrsNS[(EMPTY_NAMESPACE, aname)] = a
- d = a.childNodes[0].__dict__
- d['data'] = d['nodeValue'] = value
- d = a.__dict__
- d['ownerDocument'] = self.document
- d['value'] = d['nodeValue'] = value
- d['ownerElement'] = node
-
- if __debug__:
- # This only adds some asserts to the original
- # end_element_handler(), so we only define this when -O is not
- # used. If changing one, be sure to check the other to see if
- # it needs to be changed as well.
- #
- def end_element_handler(self, name):
- curNode = self.curNode
- if ' ' in name:
- uri, localname, prefix, qname = _parse_ns_name(self, name)
- assert (curNode.namespaceURI == uri
- and curNode.localName == localname
- and curNode.prefix == prefix), \
- "element stack messed up! (namespace)"
- else:
- assert curNode.nodeName == name, \
- "element stack messed up - bad nodeName"
- assert curNode.namespaceURI == EMPTY_NAMESPACE, \
- "element stack messed up - bad namespaceURI"
- self.curNode = curNode.parentNode
- self._finish_end_element(curNode)
-
-
-class ExpatBuilderNS(Namespaces, ExpatBuilder):
- """Document builder that supports namespaces."""
-
- def reset(self):
- ExpatBuilder.reset(self)
- self._initNamespaces()
-
-
-class FragmentBuilderNS(Namespaces, FragmentBuilder):
- """Fragment builder that supports namespaces."""
-
- def reset(self):
- FragmentBuilder.reset(self)
- self._initNamespaces()
-
- def _getNSattrs(self):
- """Return string of namespace attributes from this element and
- ancestors."""
- # XXX This needs to be re-written to walk the ancestors of the
- # context to build up the namespace information from
- # declarations, elements, and attributes found in context.
- # Otherwise we have to store a bunch more data on the DOM
- # (though that *might* be more reliable -- not clear).
- attrs = ""
- context = self.context
- L = []
- while context:
- if hasattr(context, '_ns_prefix_uri'):
- for prefix, uri in context._ns_prefix_uri.items():
- # add every new NS decl from context to L and attrs string
- if prefix in L:
- continue
- L.append(prefix)
- if prefix:
- declname = "xmlns:" + prefix
- else:
- declname = "xmlns"
- if attrs:
- attrs = "%s\n %s='%s'" % (attrs, declname, uri)
- else:
- attrs = " %s='%s'" % (declname, uri)
- context = context.parentNode
- return attrs
-
-
-class ParseEscape(Exception):
- """Exception raised to short-circuit parsing in InternalSubsetExtractor."""
- pass
-
-class InternalSubsetExtractor(ExpatBuilder):
- """XML processor which can rip out the internal document type subset."""
-
- subset = None
-
- def getSubset(self):
- """Return the internal subset as a string."""
- return self.subset
-
- def parseFile(self, file):
- try:
- ExpatBuilder.parseFile(self, file)
- except ParseEscape:
- pass
-
- def parseString(self, string):
- try:
- ExpatBuilder.parseString(self, string)
- except ParseEscape:
- pass
-
- def install(self, parser):
- parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
- parser.StartElementHandler = self.start_element_handler
-
- def start_doctype_decl_handler(self, name, publicId, systemId,
- has_internal_subset):
- if has_internal_subset:
- parser = self.getParser()
- self.subset = []
- parser.DefaultHandler = self.subset.append
- parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
- else:
- raise ParseEscape()
-
- def end_doctype_decl_handler(self):
- s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
- self.subset = s
- raise ParseEscape()
-
- def start_element_handler(self, name, attrs):
- raise ParseEscape()
-
-
-def parse(file, namespaces=True):
- """Parse a document, returning the resulting Document node.
-
- 'file' may be either a file name or an open file object.
- """
- if namespaces:
- builder = ExpatBuilderNS()
- else:
- builder = ExpatBuilder()
-
- if isinstance(file, StringTypes):
- fp = open(file, 'rb')
- try:
- result = builder.parseFile(fp)
- finally:
- fp.close()
- else:
- result = builder.parseFile(file)
- return result
-
-
-def parseString(string, namespaces=True):
- """Parse a document from a string, returning the resulting
- Document node.
- """
- if namespaces:
- builder = ExpatBuilderNS()
- else:
- builder = ExpatBuilder()
- return builder.parseString(string)
-
-
-def parseFragment(file, context, namespaces=True):
- """Parse a fragment of a document, given the context from which it
- was originally extracted. context should be the parent of the
- node(s) which are in the fragment.
-
- 'file' may be either a file name or an open file object.
- """
- if namespaces:
- builder = FragmentBuilderNS(context)
- else:
- builder = FragmentBuilder(context)
-
- if isinstance(file, StringTypes):
- fp = open(file, 'rb')
- try:
- result = builder.parseFile(fp)
- finally:
- fp.close()
- else:
- result = builder.parseFile(file)
- return result
-
-
-def parseFragmentString(string, context, namespaces=True):
- """Parse a fragment of a document from a string, given the context
- from which it was originally extracted. context should be the
- parent of the node(s) which are in the fragment.
- """
- if namespaces:
- builder = FragmentBuilderNS(context)
- else:
- builder = FragmentBuilder(context)
- return builder.parseString(string)
-
-
-def makeBuilder(options):
- """Create a builder based on an Options object."""
- if options.namespaces:
- return ExpatBuilderNS(options)
- else:
- return ExpatBuilder(options)
diff --git a/sys/lib/python/xml/dom/minicompat.py b/sys/lib/python/xml/dom/minicompat.py
deleted file mode 100644
index d491fb69f..000000000
--- a/sys/lib/python/xml/dom/minicompat.py
+++ /dev/null
@@ -1,110 +0,0 @@
-"""Python version compatibility support for minidom."""
-
-# This module should only be imported using "import *".
-#
-# The following names are defined:
-#
-# NodeList -- lightest possible NodeList implementation
-#
-# EmptyNodeList -- lightest possible NodeList that is guarateed to
-# remain empty (immutable)
-#
-# StringTypes -- tuple of defined string types
-#
-# defproperty -- function used in conjunction with GetattrMagic;
-# using these together is needed to make them work
-# as efficiently as possible in both Python 2.2+
-# and older versions. For example:
-#
-# class MyClass(GetattrMagic):
-# def _get_myattr(self):
-# return something
-#
-# defproperty(MyClass, "myattr",
-# "return some value")
-#
-# For Python 2.2 and newer, this will construct a
-# property object on the class, which avoids
-# needing to override __getattr__(). It will only
-# work for read-only attributes.
-#
-# For older versions of Python, inheriting from
-# GetattrMagic will use the traditional
-# __getattr__() hackery to achieve the same effect,
-# but less efficiently.
-#
-# defproperty() should be used for each version of
-# the relevant _get_<property>() function.
-
-__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"]
-
-import xml.dom
-
-try:
- unicode
-except NameError:
- StringTypes = type(''),
-else:
- StringTypes = type(''), type(unicode(''))
-
-
-class NodeList(list):
- __slots__ = ()
-
- def item(self, index):
- if 0 <= index < len(self):
- return self[index]
-
- def _get_length(self):
- return len(self)
-
- def _set_length(self, value):
- raise xml.dom.NoModificationAllowedErr(
- "attempt to modify read-only attribute 'length'")
-
- length = property(_get_length, _set_length,
- doc="The number of nodes in the NodeList.")
-
- def __getstate__(self):
- return list(self)
-
- def __setstate__(self, state):
- self[:] = state
-
-
-class EmptyNodeList(tuple):
- __slots__ = ()
-
- def __add__(self, other):
- NL = NodeList()
- NL.extend(other)
- return NL
-
- def __radd__(self, other):
- NL = NodeList()
- NL.extend(other)
- return NL
-
- def item(self, index):
- return None
-
- def _get_length(self):
- return 0
-
- def _set_length(self, value):
- raise xml.dom.NoModificationAllowedErr(
- "attempt to modify read-only attribute 'length'")
-
- length = property(_get_length, _set_length,
- doc="The number of nodes in the NodeList.")
-
-
-def defproperty(klass, name, doc):
- get = getattr(klass, ("_get_" + name)).im_func
- def set(self, value, name=name):
- raise xml.dom.NoModificationAllowedErr(
- "attempt to modify read-only attribute " + repr(name))
- assert not hasattr(klass, "_set_" + name), \
- "expected not to find _set_" + name
- prop = property(get, set, doc=doc)
- setattr(klass, name, prop)
diff --git a/sys/lib/python/xml/dom/minidom.py b/sys/lib/python/xml/dom/minidom.py
deleted file mode 100644
index 3a3578162..000000000
--- a/sys/lib/python/xml/dom/minidom.py
+++ /dev/null
@@ -1,1936 +0,0 @@
-"""\
-minidom.py -- a lightweight DOM implementation.
-
-parse("foo.xml")
-
-parseString("<foo><bar/></foo>")
-
-Todo:
-=====
- * convenience methods for getting elements and text.
- * more testing
- * bring some of the writer and linearizer code into conformance with this
- interface
- * SAX 2 namespaces
-"""
-
-import xml.dom
-
-from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg
-from xml.dom.minicompat import *
-from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS
-
-# This is used by the ID-cache invalidation checks; the list isn't
-# actually complete, since the nodes being checked will never be the
-# DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is
-# the node being added or removed, not the node being modified.)
-#
-_nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE,
- xml.dom.Node.ENTITY_REFERENCE_NODE)
-
-
-class Node(xml.dom.Node):
- namespaceURI = None # this is non-null only for elements and attributes
- parentNode = None
- ownerDocument = None
- nextSibling = None
- previousSibling = None
-
- prefix = EMPTY_PREFIX # non-null only for NS elements and attributes
-
- def __nonzero__(self):
- return True
-
- def toxml(self, encoding = None):
- return self.toprettyxml("", "", encoding)
-
- def toprettyxml(self, indent="\t", newl="\n", encoding = None):
- # indent = the indentation string to prepend, per level
- # newl = the newline string to append
- writer = _get_StringIO()
- if encoding is not None:
- import codecs
- # Can't use codecs.getwriter to preserve 2.0 compatibility
- writer = codecs.lookup(encoding)[3](writer)
- if self.nodeType == Node.DOCUMENT_NODE:
- # Can pass encoding only to document, to put it into XML header
- self.writexml(writer, "", indent, newl, encoding)
- else:
- self.writexml(writer, "", indent, newl)
- return writer.getvalue()
-
- def hasChildNodes(self):
- if self.childNodes:
- return True
- else:
- return False
-
- def _get_childNodes(self):
- return self.childNodes
-
- def _get_firstChild(self):
- if self.childNodes:
- return self.childNodes[0]
-
- def _get_lastChild(self):
- if self.childNodes:
- return self.childNodes[-1]
-
- def insertBefore(self, newChild, refChild):
- if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
- for c in tuple(newChild.childNodes):
- self.insertBefore(c, refChild)
- ### The DOM does not clearly specify what to return in this case
- return newChild
- if newChild.nodeType not in self._child_node_types:
- raise xml.dom.HierarchyRequestErr(
- "%s cannot be child of %s" % (repr(newChild), repr(self)))
- if newChild.parentNode is not None:
- newChild.parentNode.removeChild(newChild)
- if refChild is None:
- self.appendChild(newChild)
- else:
- try:
- index = self.childNodes.index(refChild)
- except ValueError:
- raise xml.dom.NotFoundErr()
- if newChild.nodeType in _nodeTypes_with_children:
- _clear_id_cache(self)
- self.childNodes.insert(index, newChild)
- newChild.nextSibling = refChild
- refChild.previousSibling = newChild
- if index:
- node = self.childNodes[index-1]
- node.nextSibling = newChild
- newChild.previousSibling = node
- else:
- newChild.previousSibling = None
- newChild.parentNode = self
- return newChild
-
- def appendChild(self, node):
- if node.nodeType == self.DOCUMENT_FRAGMENT_NODE:
- for c in tuple(node.childNodes):
- self.appendChild(c)
- ### The DOM does not clearly specify what to return in this case
- return node
- if node.nodeType not in self._child_node_types:
- raise xml.dom.HierarchyRequestErr(
- "%s cannot be child of %s" % (repr(node), repr(self)))
- elif node.nodeType in _nodeTypes_with_children:
- _clear_id_cache(self)
- if node.parentNode is not None:
- node.parentNode.removeChild(node)
- _append_child(self, node)
- node.nextSibling = None
- return node
-
- def replaceChild(self, newChild, oldChild):
- if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE:
- refChild = oldChild.nextSibling
- self.removeChild(oldChild)
- return self.insertBefore(newChild, refChild)
- if newChild.nodeType not in self._child_node_types:
- raise xml.dom.HierarchyRequestErr(
- "%s cannot be child of %s" % (repr(newChild), repr(self)))
- if newChild is oldChild:
- return
- if newChild.parentNode is not None:
- newChild.parentNode.removeChild(newChild)
- try:
- index = self.childNodes.index(oldChild)
- except ValueError:
- raise xml.dom.NotFoundErr()
- self.childNodes[index] = newChild
- newChild.parentNode = self
- oldChild.parentNode = None
- if (newChild.nodeType in _nodeTypes_with_children
- or oldChild.nodeType in _nodeTypes_with_children):
- _clear_id_cache(self)
- newChild.nextSibling = oldChild.nextSibling
- newChild.previousSibling = oldChild.previousSibling
- oldChild.nextSibling = None
- oldChild.previousSibling = None
- if newChild.previousSibling:
- newChild.previousSibling.nextSibling = newChild
- if newChild.nextSibling:
- newChild.nextSibling.previousSibling = newChild
- return oldChild
-
- def removeChild(self, oldChild):
- try:
- self.childNodes.remove(oldChild)
- except ValueError:
- raise xml.dom.NotFoundErr()
- if oldChild.nextSibling is not None:
- oldChild.nextSibling.previousSibling = oldChild.previousSibling
- if oldChild.previousSibling is not None:
- oldChild.previousSibling.nextSibling = oldChild.nextSibling
- oldChild.nextSibling = oldChild.previousSibling = None
- if oldChild.nodeType in _nodeTypes_with_children:
- _clear_id_cache(self)
-
- oldChild.parentNode = None
- return oldChild
-
- def normalize(self):
- L = []
- for child in self.childNodes:
- if child.nodeType == Node.TEXT_NODE:
- data = child.data
- if data and L and L[-1].nodeType == child.nodeType:
- # collapse text node
- node = L[-1]
- node.data = node.data + child.data
- node.nextSibling = child.nextSibling
- child.unlink()
- elif data:
- if L:
- L[-1].nextSibling = child
- child.previousSibling = L[-1]
- else:
- child.previousSibling = None
- L.append(child)
- else:
- # empty text node; discard
- child.unlink()
- else:
- if L:
- L[-1].nextSibling = child
- child.previousSibling = L[-1]
- else:
- child.previousSibling = None
- L.append(child)
- if child.nodeType == Node.ELEMENT_NODE:
- child.normalize()
- self.childNodes[:] = L
-
- def cloneNode(self, deep):
- return _clone_node(self, deep, self.ownerDocument or self)
-
- def isSupported(self, feature, version):
- return self.ownerDocument.implementation.hasFeature(feature, version)
-
- def _get_localName(self):
- # Overridden in Element and Attr where localName can be Non-Null
- return None
-
- # Node interfaces from Level 3 (WD 9 April 2002)
-
- def isSameNode(self, other):
- return self is other
-
- def getInterface(self, feature):
- if self.isSupported(feature, None):
- return self
- else:
- return None
-
- # The "user data" functions use a dictionary that is only present
- # if some user data has been set, so be careful not to assume it
- # exists.
-
- def getUserData(self, key):
- try:
- return self._user_data[key][0]
- except (AttributeError, KeyError):
- return None
-
- def setUserData(self, key, data, handler):
- old = None
- try:
- d = self._user_data
- except AttributeError:
- d = {}
- self._user_data = d
- if d.has_key(key):
- old = d[key][0]
- if data is None:
- # ignore handlers passed for None
- handler = None
- if old is not None:
- del d[key]
- else:
- d[key] = (data, handler)
- return old
-
- def _call_user_data_handler(self, operation, src, dst):
- if hasattr(self, "_user_data"):
- for key, (data, handler) in self._user_data.items():
- if handler is not None:
- handler.handle(operation, key, data, src, dst)
-
- # minidom-specific API:
-
- def unlink(self):
- self.parentNode = self.ownerDocument = None
- if self.childNodes:
- for child in self.childNodes:
- child.unlink()
- self.childNodes = NodeList()
- self.previousSibling = None
- self.nextSibling = None
-
-defproperty(Node, "firstChild", doc="First child node, or None.")
-defproperty(Node, "lastChild", doc="Last child node, or None.")
-defproperty(Node, "localName", doc="Namespace-local name of this node.")
-
-
-def _append_child(self, node):
- # fast path with less checks; usable by DOM builders if careful
- childNodes = self.childNodes
- if childNodes:
- last = childNodes[-1]
- node.__dict__["previousSibling"] = last
- last.__dict__["nextSibling"] = node
- childNodes.append(node)
- node.__dict__["parentNode"] = self
-
-def _in_document(node):
- # return True iff node is part of a document tree
- while node is not None:
- if node.nodeType == Node.DOCUMENT_NODE:
- return True
- node = node.parentNode
- return False
-
-def _write_data(writer, data):
- "Writes datachars to writer."
- data = data.replace("&", "&amp;").replace("<", "&lt;")
- data = data.replace("\"", "&quot;").replace(">", "&gt;")
- writer.write(data)
-
-def _get_elements_by_tagName_helper(parent, name, rc):
- for node in parent.childNodes:
- if node.nodeType == Node.ELEMENT_NODE and \
- (name == "*" or node.tagName == name):
- rc.append(node)
- _get_elements_by_tagName_helper(node, name, rc)
- return rc
-
-def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc):
- for node in parent.childNodes:
- if node.nodeType == Node.ELEMENT_NODE:
- if ((localName == "*" or node.localName == localName) and
- (nsURI == "*" or node.namespaceURI == nsURI)):
- rc.append(node)
- _get_elements_by_tagName_ns_helper(node, nsURI, localName, rc)
- return rc
-
-class DocumentFragment(Node):
- nodeType = Node.DOCUMENT_FRAGMENT_NODE
- nodeName = "#document-fragment"
- nodeValue = None
- attributes = None
- parentNode = None
- _child_node_types = (Node.ELEMENT_NODE,
- Node.TEXT_NODE,
- Node.CDATA_SECTION_NODE,
- Node.ENTITY_REFERENCE_NODE,
- Node.PROCESSING_INSTRUCTION_NODE,
- Node.COMMENT_NODE,
- Node.NOTATION_NODE)
-
- def __init__(self):
- self.childNodes = NodeList()
-
-
-class Attr(Node):
- nodeType = Node.ATTRIBUTE_NODE
- attributes = None
- ownerElement = None
- specified = False
- _is_id = False
-
- _child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE)
-
- def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None,
- prefix=None):
- # skip setattr for performance
- d = self.__dict__
- d["nodeName"] = d["name"] = qName
- d["namespaceURI"] = namespaceURI
- d["prefix"] = prefix
- d['childNodes'] = NodeList()
-
- # Add the single child node that represents the value of the attr
- self.childNodes.append(Text())
-
- # nodeValue and value are set elsewhere
-
- def _get_localName(self):
- return self.nodeName.split(":", 1)[-1]
-
- def _get_name(self):
- return self.name
-
- def _get_specified(self):
- return self.specified
-
- def __setattr__(self, name, value):
- d = self.__dict__
- if name in ("value", "nodeValue"):
- d["value"] = d["nodeValue"] = value
- d2 = self.childNodes[0].__dict__
- d2["data"] = d2["nodeValue"] = value
- if self.ownerElement is not None:
- _clear_id_cache(self.ownerElement)
- elif name in ("name", "nodeName"):
- d["name"] = d["nodeName"] = value
- if self.ownerElement is not None:
- _clear_id_cache(self.ownerElement)
- else:
- d[name] = value
-
- def _set_prefix(self, prefix):
- nsuri = self.namespaceURI
- if prefix == "xmlns":
- if nsuri and nsuri != XMLNS_NAMESPACE:
- raise xml.dom.NamespaceErr(
- "illegal use of 'xmlns' prefix for the wrong namespace")
- d = self.__dict__
- d['prefix'] = prefix
- if prefix is None:
- newName = self.localName
- else:
- newName = "%s:%s" % (prefix, self.localName)
- if self.ownerElement:
- _clear_id_cache(self.ownerElement)
- d['nodeName'] = d['name'] = newName
-
- def _set_value(self, value):
- d = self.__dict__
- d['value'] = d['nodeValue'] = value
- if self.ownerElement:
- _clear_id_cache(self.ownerElement)
- self.childNodes[0].data = value
-
- def unlink(self):
- # This implementation does not call the base implementation
- # since most of that is not needed, and the expense of the
- # method call is not warranted. We duplicate the removal of
- # children, but that's all we needed from the base class.
- elem = self.ownerElement
- if elem is not None:
- del elem._attrs[self.nodeName]
- del elem._attrsNS[(self.namespaceURI, self.localName)]
- if self._is_id:
- self._is_id = False
- elem._magic_id_nodes -= 1
- self.ownerDocument._magic_id_count -= 1
- for child in self.childNodes:
- child.unlink()
- del self.childNodes[:]
-
- def _get_isId(self):
- if self._is_id:
- return True
- doc = self.ownerDocument
- elem = self.ownerElement
- if doc is None or elem is None:
- return False
-
- info = doc._get_elem_info(elem)
- if info is None:
- return False
- if self.namespaceURI:
- return info.isIdNS(self.namespaceURI, self.localName)
- else:
- return info.isId(self.nodeName)
-
- def _get_schemaType(self):
- doc = self.ownerDocument
- elem = self.ownerElement
- if doc is None or elem is None:
- return _no_type
-
- info = doc._get_elem_info(elem)
- if info is None:
- return _no_type
- if self.namespaceURI:
- return info.getAttributeTypeNS(self.namespaceURI, self.localName)
- else:
- return info.getAttributeType(self.nodeName)
-
-defproperty(Attr, "isId", doc="True if this attribute is an ID.")
-defproperty(Attr, "localName", doc="Namespace-local name of this attribute.")
-defproperty(Attr, "schemaType", doc="Schema type for this attribute.")
-
-
-class NamedNodeMap(object):
- """The attribute list is a transient interface to the underlying
- dictionaries. Mutations here will change the underlying element's
- dictionary.
-
- Ordering is imposed artificially and does not reflect the order of
- attributes as found in an input document.
- """
-
- __slots__ = ('_attrs', '_attrsNS', '_ownerElement')
-
- def __init__(self, attrs, attrsNS, ownerElement):
- self._attrs = attrs
- self._attrsNS = attrsNS
- self._ownerElement = ownerElement
-
- def _get_length(self):
- return len(self._attrs)
-
- def item(self, index):
- try:
- return self[self._attrs.keys()[index]]
- except IndexError:
- return None
-
- def items(self):
- L = []
- for node in self._attrs.values():
- L.append((node.nodeName, node.value))
- return L
-
- def itemsNS(self):
- L = []
- for node in self._attrs.values():
- L.append(((node.namespaceURI, node.localName), node.value))
- return L
-
- def has_key(self, key):
- if isinstance(key, StringTypes):
- return self._attrs.has_key(key)
- else:
- return self._attrsNS.has_key(key)
-
- def keys(self):
- return self._attrs.keys()
-
- def keysNS(self):
- return self._attrsNS.keys()
-
- def values(self):
- return self._attrs.values()
-
- def get(self, name, value=None):
- return self._attrs.get(name, value)
-
- __len__ = _get_length
-
- def __cmp__(self, other):
- if self._attrs is getattr(other, "_attrs", None):
- return 0
- else:
- return cmp(id(self), id(other))
-
- def __getitem__(self, attname_or_tuple):
- if isinstance(attname_or_tuple, tuple):
- return self._attrsNS[attname_or_tuple]
- else:
- return self._attrs[attname_or_tuple]
-
- # same as set
- def __setitem__(self, attname, value):
- if isinstance(value, StringTypes):
- try:
- node = self._attrs[attname]
- except KeyError:
- node = Attr(attname)
- node.ownerDocument = self._ownerElement.ownerDocument
- self.setNamedItem(node)
- node.value = value
- else:
- if not isinstance(value, Attr):
- raise TypeError, "value must be a string or Attr object"
- node = value
- self.setNamedItem(node)
-
- def getNamedItem(self, name):
- try:
- return self._attrs[name]
- except KeyError:
- return None
-
- def getNamedItemNS(self, namespaceURI, localName):
- try:
- return self._attrsNS[(namespaceURI, localName)]
- except KeyError:
- return None
-
- def removeNamedItem(self, name):
- n = self.getNamedItem(name)
- if n is not None:
- _clear_id_cache(self._ownerElement)
- del self._attrs[n.nodeName]
- del self._attrsNS[(n.namespaceURI, n.localName)]
- if n.__dict__.has_key('ownerElement'):
- n.__dict__['ownerElement'] = None
- return n
- else:
- raise xml.dom.NotFoundErr()
-
- def removeNamedItemNS(self, namespaceURI, localName):
- n = self.getNamedItemNS(namespaceURI, localName)
- if n is not None:
- _clear_id_cache(self._ownerElement)
- del self._attrsNS[(n.namespaceURI, n.localName)]
- del self._attrs[n.nodeName]
- if n.__dict__.has_key('ownerElement'):
- n.__dict__['ownerElement'] = None
- return n
- else:
- raise xml.dom.NotFoundErr()
-
- def setNamedItem(self, node):
- if not isinstance(node, Attr):
- raise xml.dom.HierarchyRequestErr(
- "%s cannot be child of %s" % (repr(node), repr(self)))
- old = self._attrs.get(node.name)
- if old:
- old.unlink()
- self._attrs[node.name] = node
- self._attrsNS[(node.namespaceURI, node.localName)] = node
- node.ownerElement = self._ownerElement
- _clear_id_cache(node.ownerElement)
- return old
-
- def setNamedItemNS(self, node):
- return self.setNamedItem(node)
-
- def __delitem__(self, attname_or_tuple):
- node = self[attname_or_tuple]
- _clear_id_cache(node.ownerElement)
- node.unlink()
-
- def __getstate__(self):
- return self._attrs, self._attrsNS, self._ownerElement
-
- def __setstate__(self, state):
- self._attrs, self._attrsNS, self._ownerElement = state
-
-defproperty(NamedNodeMap, "length",
- doc="Number of nodes in the NamedNodeMap.")
-
-AttributeList = NamedNodeMap
-
-
-class TypeInfo(object):
- __slots__ = 'namespace', 'name'
-
- def __init__(self, namespace, name):
- self.namespace = namespace
- self.name = name
-
- def __repr__(self):
- if self.namespace:
- return "<TypeInfo %r (from %r)>" % (self.name, self.namespace)
- else:
- return "<TypeInfo %r>" % self.name
-
- def _get_name(self):
- return self.name
-
- def _get_namespace(self):
- return self.namespace
-
-_no_type = TypeInfo(None, None)
-
-class Element(Node):
- nodeType = Node.ELEMENT_NODE
- nodeValue = None
- schemaType = _no_type
-
- _magic_id_nodes = 0
-
- _child_node_types = (Node.ELEMENT_NODE,
- Node.PROCESSING_INSTRUCTION_NODE,
- Node.COMMENT_NODE,
- Node.TEXT_NODE,
- Node.CDATA_SECTION_NODE,
- Node.ENTITY_REFERENCE_NODE)
-
- def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None,
- localName=None):
- self.tagName = self.nodeName = tagName
- self.prefix = prefix
- self.namespaceURI = namespaceURI
- self.childNodes = NodeList()
-
- self._attrs = {} # attributes are double-indexed:
- self._attrsNS = {} # tagName -> Attribute
- # URI,localName -> Attribute
- # in the future: consider lazy generation
- # of attribute objects this is too tricky
- # for now because of headaches with
- # namespaces.
-
- def _get_localName(self):
- return self.tagName.split(":", 1)[-1]
-
- def _get_tagName(self):
- return self.tagName
-
- def unlink(self):
- for attr in self._attrs.values():
- attr.unlink()
- self._attrs = None
- self._attrsNS = None
- Node.unlink(self)
-
- def getAttribute(self, attname):
- try:
- return self._attrs[attname].value
- except KeyError:
- return ""
-
- def getAttributeNS(self, namespaceURI, localName):
- try:
- return self._attrsNS[(namespaceURI, localName)].value
- except KeyError:
- return ""
-
- def setAttribute(self, attname, value):
- attr = self.getAttributeNode(attname)
- if attr is None:
- attr = Attr(attname)
- # for performance
- d = attr.__dict__
- d["value"] = d["nodeValue"] = value
- d["ownerDocument"] = self.ownerDocument
- self.setAttributeNode(attr)
- elif value != attr.value:
- d = attr.__dict__
- d["value"] = d["nodeValue"] = value
- if attr.isId:
- _clear_id_cache(self)
-
- def setAttributeNS(self, namespaceURI, qualifiedName, value):
- prefix, localname = _nssplit(qualifiedName)
- attr = self.getAttributeNodeNS(namespaceURI, localname)
- if attr is None:
- # for performance
- attr = Attr(qualifiedName, namespaceURI, localname, prefix)
- d = attr.__dict__
- d["prefix"] = prefix
- d["nodeName"] = qualifiedName
- d["value"] = d["nodeValue"] = value
- d["ownerDocument"] = self.ownerDocument
- self.setAttributeNode(attr)
- else:
- d = attr.__dict__
- if value != attr.value:
- d["value"] = d["nodeValue"] = value
- if attr.isId:
- _clear_id_cache(self)
- if attr.prefix != prefix:
- d["prefix"] = prefix
- d["nodeName"] = qualifiedName
-
- def getAttributeNode(self, attrname):
- return self._attrs.get(attrname)
-
- def getAttributeNodeNS(self, namespaceURI, localName):
- return self._attrsNS.get((namespaceURI, localName))
-
- def setAttributeNode(self, attr):
- if attr.ownerElement not in (None, self):
- raise xml.dom.InuseAttributeErr("attribute node already owned")
- old1 = self._attrs.get(attr.name, None)
- if old1 is not None:
- self.removeAttributeNode(old1)
- old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None)
- if old2 is not None and old2 is not old1:
- self.removeAttributeNode(old2)
- _set_attribute_node(self, attr)
-
- if old1 is not attr:
- # It might have already been part of this node, in which case
- # it doesn't represent a change, and should not be returned.
- return old1
- if old2 is not attr:
- return old2
-
- setAttributeNodeNS = setAttributeNode
-
- def removeAttribute(self, name):
- try:
- attr = self._attrs[name]
- except KeyError:
- raise xml.dom.NotFoundErr()
- self.removeAttributeNode(attr)
-
- def removeAttributeNS(self, namespaceURI, localName):
- try:
- attr = self._attrsNS[(namespaceURI, localName)]
- except KeyError:
- raise xml.dom.NotFoundErr()
- self.removeAttributeNode(attr)
-
- def removeAttributeNode(self, node):
- if node is None:
- raise xml.dom.NotFoundErr()
- try:
- self._attrs[node.name]
- except KeyError:
- raise xml.dom.NotFoundErr()
- _clear_id_cache(self)
- node.unlink()
- # Restore this since the node is still useful and otherwise
- # unlinked
- node.ownerDocument = self.ownerDocument
-
- removeAttributeNodeNS = removeAttributeNode
-
- def hasAttribute(self, name):
- return self._attrs.has_key(name)
-
- def hasAttributeNS(self, namespaceURI, localName):
- return self._attrsNS.has_key((namespaceURI, localName))
-
- def getElementsByTagName(self, name):
- return _get_elements_by_tagName_helper(self, name, NodeList())
-
- def getElementsByTagNameNS(self, namespaceURI, localName):
- return _get_elements_by_tagName_ns_helper(
- self, namespaceURI, localName, NodeList())
-
- def __repr__(self):
- return "<DOM Element: %s at %#x>" % (self.tagName, id(self))
-
- def writexml(self, writer, indent="", addindent="", newl=""):
- # indent = current indentation
- # addindent = indentation to add to higher levels
- # newl = newline string
- writer.write(indent+"<" + self.tagName)
-
- attrs = self._get_attributes()
- a_names = attrs.keys()
- a_names.sort()
-
- for a_name in a_names:
- writer.write(" %s=\"" % a_name)
- _write_data(writer, attrs[a_name].value)
- writer.write("\"")
- if self.childNodes:
- writer.write(">%s"%(newl))
- for node in self.childNodes:
- node.writexml(writer,indent+addindent,addindent,newl)
- writer.write("%s</%s>%s" % (indent,self.tagName,newl))
- else:
- writer.write("/>%s"%(newl))
-
- def _get_attributes(self):
- return NamedNodeMap(self._attrs, self._attrsNS, self)
-
- def hasAttributes(self):
- if self._attrs:
- return True
- else:
- return False
-
- # DOM Level 3 attributes, based on the 22 Oct 2002 draft
-
- def setIdAttribute(self, name):
- idAttr = self.getAttributeNode(name)
- self.setIdAttributeNode(idAttr)
-
- def setIdAttributeNS(self, namespaceURI, localName):
- idAttr = self.getAttributeNodeNS(namespaceURI, localName)
- self.setIdAttributeNode(idAttr)
-
- def setIdAttributeNode(self, idAttr):
- if idAttr is None or not self.isSameNode(idAttr.ownerElement):
- raise xml.dom.NotFoundErr()
- if _get_containing_entref(self) is not None:
- raise xml.dom.NoModificationAllowedErr()
- if not idAttr._is_id:
- idAttr.__dict__['_is_id'] = True
- self._magic_id_nodes += 1
- self.ownerDocument._magic_id_count += 1
- _clear_id_cache(self)
-
-defproperty(Element, "attributes",
- doc="NamedNodeMap of attributes on the element.")
-defproperty(Element, "localName",
- doc="Namespace-local name of this element.")
-
-
-def _set_attribute_node(element, attr):
- _clear_id_cache(element)
- element._attrs[attr.name] = attr
- element._attrsNS[(attr.namespaceURI, attr.localName)] = attr
-
- # This creates a circular reference, but Element.unlink()
- # breaks the cycle since the references to the attribute
- # dictionaries are tossed.
- attr.__dict__['ownerElement'] = element
-
-
-class Childless:
- """Mixin that makes childless-ness easy to implement and avoids
- the complexity of the Node methods that deal with children.
- """
-
- attributes = None
- childNodes = EmptyNodeList()
- firstChild = None
- lastChild = None
-
- def _get_firstChild(self):
- return None
-
- def _get_lastChild(self):
- return None
-
- def appendChild(self, node):
- raise xml.dom.HierarchyRequestErr(
- self.nodeName + " nodes cannot have children")
-
- def hasChildNodes(self):
- return False
-
- def insertBefore(self, newChild, refChild):
- raise xml.dom.HierarchyRequestErr(
- self.nodeName + " nodes do not have children")
-
- def removeChild(self, oldChild):
- raise xml.dom.NotFoundErr(
- self.nodeName + " nodes do not have children")
-
- def replaceChild(self, newChild, oldChild):
- raise xml.dom.HierarchyRequestErr(
- self.nodeName + " nodes do not have children")
-
-
-class ProcessingInstruction(Childless, Node):
- nodeType = Node.PROCESSING_INSTRUCTION_NODE
-
- def __init__(self, target, data):
- self.target = self.nodeName = target
- self.data = self.nodeValue = data
-
- def _get_data(self):
- return self.data
- def _set_data(self, value):
- d = self.__dict__
- d['data'] = d['nodeValue'] = value
-
- def _get_target(self):
- return self.target
- def _set_target(self, value):
- d = self.__dict__
- d['target'] = d['nodeName'] = value
-
- def __setattr__(self, name, value):
- if name == "data" or name == "nodeValue":
- self.__dict__['data'] = self.__dict__['nodeValue'] = value
- elif name == "target" or name == "nodeName":
- self.__dict__['target'] = self.__dict__['nodeName'] = value
- else:
- self.__dict__[name] = value
-
- def writexml(self, writer, indent="", addindent="", newl=""):
- writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl))
-
-
-class CharacterData(Childless, Node):
- def _get_length(self):
- return len(self.data)
- __len__ = _get_length
-
- def _get_data(self):
- return self.__dict__['data']
- def _set_data(self, data):
- d = self.__dict__
- d['data'] = d['nodeValue'] = data
-
- _get_nodeValue = _get_data
- _set_nodeValue = _set_data
-
- def __setattr__(self, name, value):
- if name == "data" or name == "nodeValue":
- self.__dict__['data'] = self.__dict__['nodeValue'] = value
- else:
- self.__dict__[name] = value
-
- def __repr__(self):
- data = self.data
- if len(data) > 10:
- dotdotdot = "..."
- else:
- dotdotdot = ""
- return "<DOM %s node \"%s%s\">" % (
- self.__class__.__name__, data[0:10], dotdotdot)
-
- def substringData(self, offset, count):
- if offset < 0:
- raise xml.dom.IndexSizeErr("offset cannot be negative")
- if offset >= len(self.data):
- raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
- if count < 0:
- raise xml.dom.IndexSizeErr("count cannot be negative")
- return self.data[offset:offset+count]
-
- def appendData(self, arg):
- self.data = self.data + arg
-
- def insertData(self, offset, arg):
- if offset < 0:
- raise xml.dom.IndexSizeErr("offset cannot be negative")
- if offset >= len(self.data):
- raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
- if arg:
- self.data = "%s%s%s" % (
- self.data[:offset], arg, self.data[offset:])
-
- def deleteData(self, offset, count):
- if offset < 0:
- raise xml.dom.IndexSizeErr("offset cannot be negative")
- if offset >= len(self.data):
- raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
- if count < 0:
- raise xml.dom.IndexSizeErr("count cannot be negative")
- if count:
- self.data = self.data[:offset] + self.data[offset+count:]
-
- def replaceData(self, offset, count, arg):
- if offset < 0:
- raise xml.dom.IndexSizeErr("offset cannot be negative")
- if offset >= len(self.data):
- raise xml.dom.IndexSizeErr("offset cannot be beyond end of data")
- if count < 0:
- raise xml.dom.IndexSizeErr("count cannot be negative")
- if count:
- self.data = "%s%s%s" % (
- self.data[:offset], arg, self.data[offset+count:])
-
-defproperty(CharacterData, "length", doc="Length of the string data.")
-
-
-class Text(CharacterData):
- # Make sure we don't add an instance __dict__ if we don't already
- # have one, at least when that's possible:
- # XXX this does not work, CharacterData is an old-style class
- # __slots__ = ()
-
- nodeType = Node.TEXT_NODE
- nodeName = "#text"
- attributes = None
-
- def splitText(self, offset):
- if offset < 0 or offset > len(self.data):
- raise xml.dom.IndexSizeErr("illegal offset value")
- newText = self.__class__()
- newText.data = self.data[offset:]
- newText.ownerDocument = self.ownerDocument
- next = self.nextSibling
- if self.parentNode and self in self.parentNode.childNodes:
- if next is None:
- self.parentNode.appendChild(newText)
- else:
- self.parentNode.insertBefore(newText, next)
- self.data = self.data[:offset]
- return newText
-
- def writexml(self, writer, indent="", addindent="", newl=""):
- _write_data(writer, "%s%s%s"%(indent, self.data, newl))
-
- # DOM Level 3 (WD 9 April 2002)
-
- def _get_wholeText(self):
- L = [self.data]
- n = self.previousSibling
- while n is not None:
- if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
- L.insert(0, n.data)
- n = n.previousSibling
- else:
- break
- n = self.nextSibling
- while n is not None:
- if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
- L.append(n.data)
- n = n.nextSibling
- else:
- break
- return ''.join(L)
-
- def replaceWholeText(self, content):
- # XXX This needs to be seriously changed if minidom ever
- # supports EntityReference nodes.
- parent = self.parentNode
- n = self.previousSibling
- while n is not None:
- if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
- next = n.previousSibling
- parent.removeChild(n)
- n = next
- else:
- break
- n = self.nextSibling
- if not content:
- parent.removeChild(self)
- while n is not None:
- if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
- next = n.nextSibling
- parent.removeChild(n)
- n = next
- else:
- break
- if content:
- d = self.__dict__
- d['data'] = content
- d['nodeValue'] = content
- return self
- else:
- return None
-
- def _get_isWhitespaceInElementContent(self):
- if self.data.strip():
- return False
- elem = _get_containing_element(self)
- if elem is None:
- return False
- info = self.ownerDocument._get_elem_info(elem)
- if info is None:
- return False
- else:
- return info.isElementContent()
-
-defproperty(Text, "isWhitespaceInElementContent",
- doc="True iff this text node contains only whitespace"
- " and is in element content.")
-defproperty(Text, "wholeText",
- doc="The text of all logically-adjacent text nodes.")
-
-
-def _get_containing_element(node):
- c = node.parentNode
- while c is not None:
- if c.nodeType == Node.ELEMENT_NODE:
- return c
- c = c.parentNode
- return None
-
-def _get_containing_entref(node):
- c = node.parentNode
- while c is not None:
- if c.nodeType == Node.ENTITY_REFERENCE_NODE:
- return c
- c = c.parentNode
- return None
-
-
-class Comment(Childless, CharacterData):
- nodeType = Node.COMMENT_NODE
- nodeName = "#comment"
-
- def __init__(self, data):
- self.data = self.nodeValue = data
-
- def writexml(self, writer, indent="", addindent="", newl=""):
- writer.write("%s<!--%s-->%s" % (indent, self.data, newl))
-
-
-class CDATASection(Text):
- # Make sure we don't add an instance __dict__ if we don't already
- # have one, at least when that's possible:
- # XXX this does not work, Text is an old-style class
- # __slots__ = ()
-
- nodeType = Node.CDATA_SECTION_NODE
- nodeName = "#cdata-section"
-
- def writexml(self, writer, indent="", addindent="", newl=""):
- if self.data.find("]]>") >= 0:
- raise ValueError("']]>' not allowed in a CDATA section")
- writer.write("<![CDATA[%s]]>" % self.data)
-
-
-class ReadOnlySequentialNamedNodeMap(object):
- __slots__ = '_seq',
-
- def __init__(self, seq=()):
- # seq should be a list or tuple
- self._seq = seq
-
- def __len__(self):
- return len(self._seq)
-
- def _get_length(self):
- return len(self._seq)
-
- def getNamedItem(self, name):
- for n in self._seq:
- if n.nodeName == name:
- return n
-
- def getNamedItemNS(self, namespaceURI, localName):
- for n in self._seq:
- if n.namespaceURI == namespaceURI and n.localName == localName:
- return n
-
- def __getitem__(self, name_or_tuple):
- if isinstance(name_or_tuple, tuple):
- node = self.getNamedItemNS(*name_or_tuple)
- else:
- node = self.getNamedItem(name_or_tuple)
- if node is None:
- raise KeyError, name_or_tuple
- return node
-
- def item(self, index):
- if index < 0:
- return None
- try:
- return self._seq[index]
- except IndexError:
- return None
-
- def removeNamedItem(self, name):
- raise xml.dom.NoModificationAllowedErr(
- "NamedNodeMap instance is read-only")
-
- def removeNamedItemNS(self, namespaceURI, localName):
- raise xml.dom.NoModificationAllowedErr(
- "NamedNodeMap instance is read-only")
-
- def setNamedItem(self, node):
- raise xml.dom.NoModificationAllowedErr(
- "NamedNodeMap instance is read-only")
-
- def setNamedItemNS(self, node):
- raise xml.dom.NoModificationAllowedErr(
- "NamedNodeMap instance is read-only")
-
- def __getstate__(self):
- return [self._seq]
-
- def __setstate__(self, state):
- self._seq = state[0]
-
-defproperty(ReadOnlySequentialNamedNodeMap, "length",
- doc="Number of entries in the NamedNodeMap.")
-
-
-class Identified:
- """Mix-in class that supports the publicId and systemId attributes."""
-
- # XXX this does not work, this is an old-style class
- # __slots__ = 'publicId', 'systemId'
-
- def _identified_mixin_init(self, publicId, systemId):
- self.publicId = publicId
- self.systemId = systemId
-
- def _get_publicId(self):
- return self.publicId
-
- def _get_systemId(self):
- return self.systemId
-
-class DocumentType(Identified, Childless, Node):
- nodeType = Node.DOCUMENT_TYPE_NODE
- nodeValue = None
- name = None
- publicId = None
- systemId = None
- internalSubset = None
-
- def __init__(self, qualifiedName):
- self.entities = ReadOnlySequentialNamedNodeMap()
- self.notations = ReadOnlySequentialNamedNodeMap()
- if qualifiedName:
- prefix, localname = _nssplit(qualifiedName)
- self.name = localname
- self.nodeName = self.name
-
- def _get_internalSubset(self):
- return self.internalSubset
-
- def cloneNode(self, deep):
- if self.ownerDocument is None:
- # it's ok
- clone = DocumentType(None)
- clone.name = self.name
- clone.nodeName = self.name
- operation = xml.dom.UserDataHandler.NODE_CLONED
- if deep:
- clone.entities._seq = []
- clone.notations._seq = []
- for n in self.notations._seq:
- notation = Notation(n.nodeName, n.publicId, n.systemId)
- clone.notations._seq.append(notation)
- n._call_user_data_handler(operation, n, notation)
- for e in self.entities._seq:
- entity = Entity(e.nodeName, e.publicId, e.systemId,
- e.notationName)
- entity.actualEncoding = e.actualEncoding
- entity.encoding = e.encoding
- entity.version = e.version
- clone.entities._seq.append(entity)
- e._call_user_data_handler(operation, n, entity)
- self._call_user_data_handler(operation, self, clone)
- return clone
- else:
- return None
-
- def writexml(self, writer, indent="", addindent="", newl=""):
- writer.write("<!DOCTYPE ")
- writer.write(self.name)
- if self.publicId:
- writer.write("%s PUBLIC '%s'%s '%s'"
- % (newl, self.publicId, newl, self.systemId))
- elif self.systemId:
- writer.write("%s SYSTEM '%s'" % (newl, self.systemId))
- if self.internalSubset is not None:
- writer.write(" [")
- writer.write(self.internalSubset)
- writer.write("]")
- writer.write(">"+newl)
-
-class Entity(Identified, Node):
- attributes = None
- nodeType = Node.ENTITY_NODE
- nodeValue = None
-
- actualEncoding = None
- encoding = None
- version = None
-
- def __init__(self, name, publicId, systemId, notation):
- self.nodeName = name
- self.notationName = notation
- self.childNodes = NodeList()
- self._identified_mixin_init(publicId, systemId)
-
- def _get_actualEncoding(self):
- return self.actualEncoding
-
- def _get_encoding(self):
- return self.encoding
-
- def _get_version(self):
- return self.version
-
- def appendChild(self, newChild):
- raise xml.dom.HierarchyRequestErr(
- "cannot append children to an entity node")
-
- def insertBefore(self, newChild, refChild):
- raise xml.dom.HierarchyRequestErr(
- "cannot insert children below an entity node")
-
- def removeChild(self, oldChild):
- raise xml.dom.HierarchyRequestErr(
- "cannot remove children from an entity node")
-
- def replaceChild(self, newChild, oldChild):
- raise xml.dom.HierarchyRequestErr(
- "cannot replace children of an entity node")
-
-class Notation(Identified, Childless, Node):
- nodeType = Node.NOTATION_NODE
- nodeValue = None
-
- def __init__(self, name, publicId, systemId):
- self.nodeName = name
- self._identified_mixin_init(publicId, systemId)
-
-
-class DOMImplementation(DOMImplementationLS):
- _features = [("core", "1.0"),
- ("core", "2.0"),
- ("core", "3.0"),
- ("core", None),
- ("xml", "1.0"),
- ("xml", "2.0"),
- ("xml", "3.0"),
- ("xml", None),
- ("ls-load", "3.0"),
- ("ls-load", None),
- ]
-
- def hasFeature(self, feature, version):
- if version == "":
- version = None
- return (feature.lower(), version) in self._features
-
- def createDocument(self, namespaceURI, qualifiedName, doctype):
- if doctype and doctype.parentNode is not None:
- raise xml.dom.WrongDocumentErr(
- "doctype object owned by another DOM tree")
- doc = self._create_document()
-
- add_root_element = not (namespaceURI is None
- and qualifiedName is None
- and doctype is None)
-
- if not qualifiedName and add_root_element:
- # The spec is unclear what to raise here; SyntaxErr
- # would be the other obvious candidate. Since Xerces raises
- # InvalidCharacterErr, and since SyntaxErr is not listed
- # for createDocument, that seems to be the better choice.
- # XXX: need to check for illegal characters here and in
- # createElement.
-
- # DOM Level III clears this up when talking about the return value
- # of this function. If namespaceURI, qName and DocType are
- # Null the document is returned without a document element
- # Otherwise if doctype or namespaceURI are not None
- # Then we go back to the above problem
- raise xml.dom.InvalidCharacterErr("Element with no name")
-
- if add_root_element:
- prefix, localname = _nssplit(qualifiedName)
- if prefix == "xml" \
- and namespaceURI != "http://www.w3.org/XML/1998/namespace":
- raise xml.dom.NamespaceErr("illegal use of 'xml' prefix")
- if prefix and not namespaceURI:
- raise xml.dom.NamespaceErr(
- "illegal use of prefix without namespaces")
- element = doc.createElementNS(namespaceURI, qualifiedName)
- if doctype:
- doc.appendChild(doctype)
- doc.appendChild(element)
-
- if doctype:
- doctype.parentNode = doctype.ownerDocument = doc
-
- doc.doctype = doctype
- doc.implementation = self
- return doc
-
- def createDocumentType(self, qualifiedName, publicId, systemId):
- doctype = DocumentType(qualifiedName)
- doctype.publicId = publicId
- doctype.systemId = systemId
- return doctype
-
- # DOM Level 3 (WD 9 April 2002)
-
- def getInterface(self, feature):
- if self.hasFeature(feature, None):
- return self
- else:
- return None
-
- # internal
- def _create_document(self):
- return Document()
-
-class ElementInfo(object):
- """Object that represents content-model information for an element.
-
- This implementation is not expected to be used in practice; DOM
- builders should provide implementations which do the right thing
- using information available to it.
-
- """
-
- __slots__ = 'tagName',
-
- def __init__(self, name):
- self.tagName = name
-
- def getAttributeType(self, aname):
- return _no_type
-
- def getAttributeTypeNS(self, namespaceURI, localName):
- return _no_type
-
- def isElementContent(self):
- return False
-
- def isEmpty(self):
- """Returns true iff this element is declared to have an EMPTY
- content model."""
- return False
-
- def isId(self, aname):
- """Returns true iff the named attribte is a DTD-style ID."""
- return False
-
- def isIdNS(self, namespaceURI, localName):
- """Returns true iff the identified attribute is a DTD-style ID."""
- return False
-
- def __getstate__(self):
- return self.tagName
-
- def __setstate__(self, state):
- self.tagName = state
-
-def _clear_id_cache(node):
- if node.nodeType == Node.DOCUMENT_NODE:
- node._id_cache.clear()
- node._id_search_stack = None
- elif _in_document(node):
- node.ownerDocument._id_cache.clear()
- node.ownerDocument._id_search_stack= None
-
-class Document(Node, DocumentLS):
- _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE,
- Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE)
-
- nodeType = Node.DOCUMENT_NODE
- nodeName = "#document"
- nodeValue = None
- attributes = None
- doctype = None
- parentNode = None
- previousSibling = nextSibling = None
-
- implementation = DOMImplementation()
-
- # Document attributes from Level 3 (WD 9 April 2002)
-
- actualEncoding = None
- encoding = None
- standalone = None
- version = None
- strictErrorChecking = False
- errorHandler = None
- documentURI = None
-
- _magic_id_count = 0
-
- def __init__(self):
- self.childNodes = NodeList()
- # mapping of (namespaceURI, localName) -> ElementInfo
- # and tagName -> ElementInfo
- self._elem_info = {}
- self._id_cache = {}
- self._id_search_stack = None
-
- def _get_elem_info(self, element):
- if element.namespaceURI:
- key = element.namespaceURI, element.localName
- else:
- key = element.tagName
- return self._elem_info.get(key)
-
- def _get_actualEncoding(self):
- return self.actualEncoding
-
- def _get_doctype(self):
- return self.doctype
-
- def _get_documentURI(self):
- return self.documentURI
-
- def _get_encoding(self):
- return self.encoding
-
- def _get_errorHandler(self):
- return self.errorHandler
-
- def _get_standalone(self):
- return self.standalone
-
- def _get_strictErrorChecking(self):
- return self.strictErrorChecking
-
- def _get_version(self):
- return self.version
-
- def appendChild(self, node):
- if node.nodeType not in self._child_node_types:
- raise xml.dom.HierarchyRequestErr(
- "%s cannot be child of %s" % (repr(node), repr(self)))
- if node.parentNode is not None:
- # This needs to be done before the next test since this
- # may *be* the document element, in which case it should
- # end up re-ordered to the end.
- node.parentNode.removeChild(node)
-
- if node.nodeType == Node.ELEMENT_NODE \
- and self._get_documentElement():
- raise xml.dom.HierarchyRequestErr(
- "two document elements disallowed")
- return Node.appendChild(self, node)
-
- def removeChild(self, oldChild):
- try:
- self.childNodes.remove(oldChild)
- except ValueError:
- raise xml.dom.NotFoundErr()
- oldChild.nextSibling = oldChild.previousSibling = None
- oldChild.parentNode = None
- if self.documentElement is oldChild:
- self.documentElement = None
-
- return oldChild
-
- def _get_documentElement(self):
- for node in self.childNodes:
- if node.nodeType == Node.ELEMENT_NODE:
- return node
-
- def unlink(self):
- if self.doctype is not None:
- self.doctype.unlink()
- self.doctype = None
- Node.unlink(self)
-
- def cloneNode(self, deep):
- if not deep:
- return None
- clone = self.implementation.createDocument(None, None, None)
- clone.encoding = self.encoding
- clone.standalone = self.standalone
- clone.version = self.version
- for n in self.childNodes:
- childclone = _clone_node(n, deep, clone)
- assert childclone.ownerDocument.isSameNode(clone)
- clone.childNodes.append(childclone)
- if childclone.nodeType == Node.DOCUMENT_NODE:
- assert clone.documentElement is None
- elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE:
- assert clone.doctype is None
- clone.doctype = childclone
- childclone.parentNode = clone
- self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED,
- self, clone)
- return clone
-
- def createDocumentFragment(self):
- d = DocumentFragment()
- d.ownerDocument = self
- return d
-
- def createElement(self, tagName):
- e = Element(tagName)
- e.ownerDocument = self
- return e
-
- def createTextNode(self, data):
- if not isinstance(data, StringTypes):
- raise TypeError, "node contents must be a string"
- t = Text()
- t.data = data
- t.ownerDocument = self
- return t
-
- def createCDATASection(self, data):
- if not isinstance(data, StringTypes):
- raise TypeError, "node contents must be a string"
- c = CDATASection()
- c.data = data
- c.ownerDocument = self
- return c
-
- def createComment(self, data):
- c = Comment(data)
- c.ownerDocument = self
- return c
-
- def createProcessingInstruction(self, target, data):
- p = ProcessingInstruction(target, data)
- p.ownerDocument = self
- return p
-
- def createAttribute(self, qName):
- a = Attr(qName)
- a.ownerDocument = self
- a.value = ""
- return a
-
- def createElementNS(self, namespaceURI, qualifiedName):
- prefix, localName = _nssplit(qualifiedName)
- e = Element(qualifiedName, namespaceURI, prefix)
- e.ownerDocument = self
- return e
-
- def createAttributeNS(self, namespaceURI, qualifiedName):
- prefix, localName = _nssplit(qualifiedName)
- a = Attr(qualifiedName, namespaceURI, localName, prefix)
- a.ownerDocument = self
- a.value = ""
- return a
-
- # A couple of implementation-specific helpers to create node types
- # not supported by the W3C DOM specs:
-
- def _create_entity(self, name, publicId, systemId, notationName):
- e = Entity(name, publicId, systemId, notationName)
- e.ownerDocument = self
- return e
-
- def _create_notation(self, name, publicId, systemId):
- n = Notation(name, publicId, systemId)
- n.ownerDocument = self
- return n
-
- def getElementById(self, id):
- if self._id_cache.has_key(id):
- return self._id_cache[id]
- if not (self._elem_info or self._magic_id_count):
- return None
-
- stack = self._id_search_stack
- if stack is None:
- # we never searched before, or the cache has been cleared
- stack = [self.documentElement]
- self._id_search_stack = stack
- elif not stack:
- # Previous search was completed and cache is still valid;
- # no matching node.
- return None
-
- result = None
- while stack:
- node = stack.pop()
- # add child elements to stack for continued searching
- stack.extend([child for child in node.childNodes
- if child.nodeType in _nodeTypes_with_children])
- # check this node
- info = self._get_elem_info(node)
- if info:
- # We have to process all ID attributes before
- # returning in order to get all the attributes set to
- # be IDs using Element.setIdAttribute*().
- for attr in node.attributes.values():
- if attr.namespaceURI:
- if info.isIdNS(attr.namespaceURI, attr.localName):
- self._id_cache[attr.value] = node
- if attr.value == id:
- result = node
- elif not node._magic_id_nodes:
- break
- elif info.isId(attr.name):
- self._id_cache[attr.value] = node
- if attr.value == id:
- result = node
- elif not node._magic_id_nodes:
- break
- elif attr._is_id:
- self._id_cache[attr.value] = node
- if attr.value == id:
- result = node
- elif node._magic_id_nodes == 1:
- break
- elif node._magic_id_nodes:
- for attr in node.attributes.values():
- if attr._is_id:
- self._id_cache[attr.value] = node
- if attr.value == id:
- result = node
- if result is not None:
- break
- return result
-
- def getElementsByTagName(self, name):
- return _get_elements_by_tagName_helper(self, name, NodeList())
-
- def getElementsByTagNameNS(self, namespaceURI, localName):
- return _get_elements_by_tagName_ns_helper(
- self, namespaceURI, localName, NodeList())
-
- def isSupported(self, feature, version):
- return self.implementation.hasFeature(feature, version)
-
- def importNode(self, node, deep):
- if node.nodeType == Node.DOCUMENT_NODE:
- raise xml.dom.NotSupportedErr("cannot import document nodes")
- elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
- raise xml.dom.NotSupportedErr("cannot import document type nodes")
- return _clone_node(node, deep, self)
-
- def writexml(self, writer, indent="", addindent="", newl="",
- encoding = None):
- if encoding is None:
- writer.write('<?xml version="1.0" ?>'+newl)
- else:
- writer.write('<?xml version="1.0" encoding="%s"?>%s' % (encoding, newl))
- for node in self.childNodes:
- node.writexml(writer, indent, addindent, newl)
-
- # DOM Level 3 (WD 9 April 2002)
-
- def renameNode(self, n, namespaceURI, name):
- if n.ownerDocument is not self:
- raise xml.dom.WrongDocumentErr(
- "cannot rename nodes from other documents;\n"
- "expected %s,\nfound %s" % (self, n.ownerDocument))
- if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE):
- raise xml.dom.NotSupportedErr(
- "renameNode() only applies to element and attribute nodes")
- if namespaceURI != EMPTY_NAMESPACE:
- if ':' in name:
- prefix, localName = name.split(':', 1)
- if ( prefix == "xmlns"
- and namespaceURI != xml.dom.XMLNS_NAMESPACE):
- raise xml.dom.NamespaceErr(
- "illegal use of 'xmlns' prefix")
- else:
- if ( name == "xmlns"
- and namespaceURI != xml.dom.XMLNS_NAMESPACE
- and n.nodeType == Node.ATTRIBUTE_NODE):
- raise xml.dom.NamespaceErr(
- "illegal use of the 'xmlns' attribute")
- prefix = None
- localName = name
- else:
- prefix = None
- localName = None
- if n.nodeType == Node.ATTRIBUTE_NODE:
- element = n.ownerElement
- if element is not None:
- is_id = n._is_id
- element.removeAttributeNode(n)
- else:
- element = None
- # avoid __setattr__
- d = n.__dict__
- d['prefix'] = prefix
- d['localName'] = localName
- d['namespaceURI'] = namespaceURI
- d['nodeName'] = name
- if n.nodeType == Node.ELEMENT_NODE:
- d['tagName'] = name
- else:
- # attribute node
- d['name'] = name
- if element is not None:
- element.setAttributeNode(n)
- if is_id:
- element.setIdAttributeNode(n)
- # It's not clear from a semantic perspective whether we should
- # call the user data handlers for the NODE_RENAMED event since
- # we're re-using the existing node. The draft spec has been
- # interpreted as meaning "no, don't call the handler unless a
- # new node is created."
- return n
-
-defproperty(Document, "documentElement",
- doc="Top-level element of this document.")
-
-
-def _clone_node(node, deep, newOwnerDocument):
- """
- Clone a node and give it the new owner document.
- Called by Node.cloneNode and Document.importNode
- """
- if node.ownerDocument.isSameNode(newOwnerDocument):
- operation = xml.dom.UserDataHandler.NODE_CLONED
- else:
- operation = xml.dom.UserDataHandler.NODE_IMPORTED
- if node.nodeType == Node.ELEMENT_NODE:
- clone = newOwnerDocument.createElementNS(node.namespaceURI,
- node.nodeName)
- for attr in node.attributes.values():
- clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value)
- a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName)
- a.specified = attr.specified
-
- if deep:
- for child in node.childNodes:
- c = _clone_node(child, deep, newOwnerDocument)
- clone.appendChild(c)
-
- elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
- clone = newOwnerDocument.createDocumentFragment()
- if deep:
- for child in node.childNodes:
- c = _clone_node(child, deep, newOwnerDocument)
- clone.appendChild(c)
-
- elif node.nodeType == Node.TEXT_NODE:
- clone = newOwnerDocument.createTextNode(node.data)
- elif node.nodeType == Node.CDATA_SECTION_NODE:
- clone = newOwnerDocument.createCDATASection(node.data)
- elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
- clone = newOwnerDocument.createProcessingInstruction(node.target,
- node.data)
- elif node.nodeType == Node.COMMENT_NODE:
- clone = newOwnerDocument.createComment(node.data)
- elif node.nodeType == Node.ATTRIBUTE_NODE:
- clone = newOwnerDocument.createAttributeNS(node.namespaceURI,
- node.nodeName)
- clone.specified = True
- clone.value = node.value
- elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
- assert node.ownerDocument is not newOwnerDocument
- operation = xml.dom.UserDataHandler.NODE_IMPORTED
- clone = newOwnerDocument.implementation.createDocumentType(
- node.name, node.publicId, node.systemId)
- clone.ownerDocument = newOwnerDocument
- if deep:
- clone.entities._seq = []
- clone.notations._seq = []
- for n in node.notations._seq:
- notation = Notation(n.nodeName, n.publicId, n.systemId)
- notation.ownerDocument = newOwnerDocument
- clone.notations._seq.append(notation)
- if hasattr(n, '_call_user_data_handler'):
- n._call_user_data_handler(operation, n, notation)
- for e in node.entities._seq:
- entity = Entity(e.nodeName, e.publicId, e.systemId,
- e.notationName)
- entity.actualEncoding = e.actualEncoding
- entity.encoding = e.encoding
- entity.version = e.version
- entity.ownerDocument = newOwnerDocument
- clone.entities._seq.append(entity)
- if hasattr(e, '_call_user_data_handler'):
- e._call_user_data_handler(operation, n, entity)
- else:
- # Note the cloning of Document and DocumentType nodes is
- # implemenetation specific. minidom handles those cases
- # directly in the cloneNode() methods.
- raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node))
-
- # Check for _call_user_data_handler() since this could conceivably
- # used with other DOM implementations (one of the FourThought
- # DOMs, perhaps?).
- if hasattr(node, '_call_user_data_handler'):
- node._call_user_data_handler(operation, node, clone)
- return clone
-
-
-def _nssplit(qualifiedName):
- fields = qualifiedName.split(':', 1)
- if len(fields) == 2:
- return fields
- else:
- return (None, fields[0])
-
-
-def _get_StringIO():
- # we can't use cStringIO since it doesn't support Unicode strings
- from StringIO import StringIO
- return StringIO()
-
-def _do_pulldom_parse(func, args, kwargs):
- events = func(*args, **kwargs)
- toktype, rootNode = events.getEvent()
- events.expandNode(rootNode)
- events.clear()
- return rootNode
-
-def parse(file, parser=None, bufsize=None):
- """Parse a file into a DOM by filename or file object."""
- if parser is None and not bufsize:
- from xml.dom import expatbuilder
- return expatbuilder.parse(file)
- else:
- from xml.dom import pulldom
- return _do_pulldom_parse(pulldom.parse, (file,),
- {'parser': parser, 'bufsize': bufsize})
-
-def parseString(string, parser=None):
- """Parse a file into a DOM from a string."""
- if parser is None:
- from xml.dom import expatbuilder
- return expatbuilder.parseString(string)
- else:
- from xml.dom import pulldom
- return _do_pulldom_parse(pulldom.parseString, (string,),
- {'parser': parser})
-
-def getDOMImplementation(features=None):
- if features:
- if isinstance(features, StringTypes):
- features = domreg._parse_feature_string(features)
- for f, v in features:
- if not Document.implementation.hasFeature(f, v):
- return None
- return Document.implementation
diff --git a/sys/lib/python/xml/dom/pulldom.py b/sys/lib/python/xml/dom/pulldom.py
deleted file mode 100644
index 18f49b501..000000000
--- a/sys/lib/python/xml/dom/pulldom.py
+++ /dev/null
@@ -1,351 +0,0 @@
-import xml.sax
-import xml.sax.handler
-import types
-
-try:
- _StringTypes = [types.StringType, types.UnicodeType]
-except AttributeError:
- _StringTypes = [types.StringType]
-
-START_ELEMENT = "START_ELEMENT"
-END_ELEMENT = "END_ELEMENT"
-COMMENT = "COMMENT"
-START_DOCUMENT = "START_DOCUMENT"
-END_DOCUMENT = "END_DOCUMENT"
-PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
-IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
-CHARACTERS = "CHARACTERS"
-
-class PullDOM(xml.sax.ContentHandler):
- _locator = None
- document = None
-
- def __init__(self, documentFactory=None):
- from xml.dom import XML_NAMESPACE
- self.documentFactory = documentFactory
- self.firstEvent = [None, None]
- self.lastEvent = self.firstEvent
- self.elementStack = []
- self.push = self.elementStack.append
- try:
- self.pop = self.elementStack.pop
- except AttributeError:
- # use class' pop instead
- pass
- self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
- self._current_context = self._ns_contexts[-1]
- self.pending_events = []
-
- def pop(self):
- result = self.elementStack[-1]
- del self.elementStack[-1]
- return result
-
- def setDocumentLocator(self, locator):
- self._locator = locator
-
- def startPrefixMapping(self, prefix, uri):
- if not hasattr(self, '_xmlns_attrs'):
- self._xmlns_attrs = []
- self._xmlns_attrs.append((prefix or 'xmlns', uri))
- self._ns_contexts.append(self._current_context.copy())
- self._current_context[uri] = prefix or None
-
- def endPrefixMapping(self, prefix):
- self._current_context = self._ns_contexts.pop()
-
- def startElementNS(self, name, tagName , attrs):
- # Retrieve xml namespace declaration attributes.
- xmlns_uri = 'http://www.w3.org/2000/xmlns/'
- xmlns_attrs = getattr(self, '_xmlns_attrs', None)
- if xmlns_attrs is not None:
- for aname, value in xmlns_attrs:
- attrs._attrs[(xmlns_uri, aname)] = value
- self._xmlns_attrs = []
- uri, localname = name
- if uri:
- # When using namespaces, the reader may or may not
- # provide us with the original name. If not, create
- # *a* valid tagName from the current context.
- if tagName is None:
- prefix = self._current_context[uri]
- if prefix:
- tagName = prefix + ":" + localname
- else:
- tagName = localname
- if self.document:
- node = self.document.createElementNS(uri, tagName)
- else:
- node = self.buildDocument(uri, tagName)
- else:
- # When the tagname is not prefixed, it just appears as
- # localname
- if self.document:
- node = self.document.createElement(localname)
- else:
- node = self.buildDocument(None, localname)
-
- for aname,value in attrs.items():
- a_uri, a_localname = aname
- if a_uri == xmlns_uri:
- if a_localname == 'xmlns':
- qname = a_localname
- else:
- qname = 'xmlns:' + a_localname
- attr = self.document.createAttributeNS(a_uri, qname)
- node.setAttributeNodeNS(attr)
- elif a_uri:
- prefix = self._current_context[a_uri]
- if prefix:
- qname = prefix + ":" + a_localname
- else:
- qname = a_localname
- attr = self.document.createAttributeNS(a_uri, qname)
- node.setAttributeNodeNS(attr)
- else:
- attr = self.document.createAttribute(a_localname)
- node.setAttributeNode(attr)
- attr.value = value
-
- self.lastEvent[1] = [(START_ELEMENT, node), None]
- self.lastEvent = self.lastEvent[1]
- self.push(node)
-
- def endElementNS(self, name, tagName):
- self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
- self.lastEvent = self.lastEvent[1]
-
- def startElement(self, name, attrs):
- if self.document:
- node = self.document.createElement(name)
- else:
- node = self.buildDocument(None, name)
-
- for aname,value in attrs.items():
- attr = self.document.createAttribute(aname)
- attr.value = value
- node.setAttributeNode(attr)
-
- self.lastEvent[1] = [(START_ELEMENT, node), None]
- self.lastEvent = self.lastEvent[1]
- self.push(node)
-
- def endElement(self, name):
- self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
- self.lastEvent = self.lastEvent[1]
-
- def comment(self, s):
- if self.document:
- node = self.document.createComment(s)
- self.lastEvent[1] = [(COMMENT, node), None]
- self.lastEvent = self.lastEvent[1]
- else:
- event = [(COMMENT, s), None]
- self.pending_events.append(event)
-
- def processingInstruction(self, target, data):
- if self.document:
- node = self.document.createProcessingInstruction(target, data)
- self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
- self.lastEvent = self.lastEvent[1]
- else:
- event = [(PROCESSING_INSTRUCTION, target, data), None]
- self.pending_events.append(event)
-
- def ignorableWhitespace(self, chars):
- node = self.document.createTextNode(chars)
- self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
- self.lastEvent = self.lastEvent[1]
-
- def characters(self, chars):
- node = self.document.createTextNode(chars)
- self.lastEvent[1] = [(CHARACTERS, node), None]
- self.lastEvent = self.lastEvent[1]
-
- def startDocument(self):
- if self.documentFactory is None:
- import xml.dom.minidom
- self.documentFactory = xml.dom.minidom.Document.implementation
-
- def buildDocument(self, uri, tagname):
- # Can't do that in startDocument, since we need the tagname
- # XXX: obtain DocumentType
- node = self.documentFactory.createDocument(uri, tagname, None)
- self.document = node
- self.lastEvent[1] = [(START_DOCUMENT, node), None]
- self.lastEvent = self.lastEvent[1]
- self.push(node)
- # Put everything we have seen so far into the document
- for e in self.pending_events:
- if e[0][0] == PROCESSING_INSTRUCTION:
- _,target,data = e[0]
- n = self.document.createProcessingInstruction(target, data)
- e[0] = (PROCESSING_INSTRUCTION, n)
- elif e[0][0] == COMMENT:
- n = self.document.createComment(e[0][1])
- e[0] = (COMMENT, n)
- else:
- raise AssertionError("Unknown pending event ",e[0][0])
- self.lastEvent[1] = e
- self.lastEvent = e
- self.pending_events = None
- return node.firstChild
-
- def endDocument(self):
- self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
- self.pop()
-
- def clear(self):
- "clear(): Explicitly release parsing structures"
- self.document = None
-
-class ErrorHandler:
- def warning(self, exception):
- print exception
- def error(self, exception):
- raise exception
- def fatalError(self, exception):
- raise exception
-
-class DOMEventStream:
- def __init__(self, stream, parser, bufsize):
- self.stream = stream
- self.parser = parser
- self.bufsize = bufsize
- if not hasattr(self.parser, 'feed'):
- self.getEvent = self._slurp
- self.reset()
-
- def reset(self):
- self.pulldom = PullDOM()
- # This content handler relies on namespace support
- self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
- self.parser.setContentHandler(self.pulldom)
-
- def __getitem__(self, pos):
- rc = self.getEvent()
- if rc:
- return rc
- raise IndexError
-
- def next(self):
- rc = self.getEvent()
- if rc:
- return rc
- raise StopIteration
-
- def __iter__(self):
- return self
-
- def expandNode(self, node):
- event = self.getEvent()
- parents = [node]
- while event:
- token, cur_node = event
- if cur_node is node:
- return
- if token != END_ELEMENT:
- parents[-1].appendChild(cur_node)
- if token == START_ELEMENT:
- parents.append(cur_node)
- elif token == END_ELEMENT:
- del parents[-1]
- event = self.getEvent()
-
- def getEvent(self):
- # use IncrementalParser interface, so we get the desired
- # pull effect
- if not self.pulldom.firstEvent[1]:
- self.pulldom.lastEvent = self.pulldom.firstEvent
- while not self.pulldom.firstEvent[1]:
- buf = self.stream.read(self.bufsize)
- if not buf:
- self.parser.close()
- return None
- self.parser.feed(buf)
- rc = self.pulldom.firstEvent[1][0]
- self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
- return rc
-
- def _slurp(self):
- """ Fallback replacement for getEvent() using the
- standard SAX2 interface, which means we slurp the
- SAX events into memory (no performance gain, but
- we are compatible to all SAX parsers).
- """
- self.parser.parse(self.stream)
- self.getEvent = self._emit
- return self._emit()
-
- def _emit(self):
- """ Fallback replacement for getEvent() that emits
- the events that _slurp() read previously.
- """
- rc = self.pulldom.firstEvent[1][0]
- self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
- return rc
-
- def clear(self):
- """clear(): Explicitly release parsing objects"""
- self.pulldom.clear()
- del self.pulldom
- self.parser = None
- self.stream = None
-
-class SAX2DOM(PullDOM):
-
- def startElementNS(self, name, tagName , attrs):
- PullDOM.startElementNS(self, name, tagName, attrs)
- curNode = self.elementStack[-1]
- parentNode = self.elementStack[-2]
- parentNode.appendChild(curNode)
-
- def startElement(self, name, attrs):
- PullDOM.startElement(self, name, attrs)
- curNode = self.elementStack[-1]
- parentNode = self.elementStack[-2]
- parentNode.appendChild(curNode)
-
- def processingInstruction(self, target, data):
- PullDOM.processingInstruction(self, target, data)
- node = self.lastEvent[0][1]
- parentNode = self.elementStack[-1]
- parentNode.appendChild(node)
-
- def ignorableWhitespace(self, chars):
- PullDOM.ignorableWhitespace(self, chars)
- node = self.lastEvent[0][1]
- parentNode = self.elementStack[-1]
- parentNode.appendChild(node)
-
- def characters(self, chars):
- PullDOM.characters(self, chars)
- node = self.lastEvent[0][1]
- parentNode = self.elementStack[-1]
- parentNode.appendChild(node)
-
-
-default_bufsize = (2 ** 14) - 20
-
-def parse(stream_or_string, parser=None, bufsize=None):
- if bufsize is None:
- bufsize = default_bufsize
- if type(stream_or_string) in _StringTypes:
- stream = open(stream_or_string)
- else:
- stream = stream_or_string
- if not parser:
- parser = xml.sax.make_parser()
- return DOMEventStream(stream, parser, bufsize)
-
-def parseString(string, parser=None):
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
-
- bufsize = len(string)
- buf = StringIO(string)
- if not parser:
- parser = xml.sax.make_parser()
- return DOMEventStream(buf, parser, bufsize)
diff --git a/sys/lib/python/xml/dom/xmlbuilder.py b/sys/lib/python/xml/dom/xmlbuilder.py
deleted file mode 100644
index ac1d448f0..000000000
--- a/sys/lib/python/xml/dom/xmlbuilder.py
+++ /dev/null
@@ -1,386 +0,0 @@
-"""Implementation of the DOM Level 3 'LS-Load' feature."""
-
-import copy
-import xml.dom
-
-from xml.dom.NodeFilter import NodeFilter
-
-
-__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
-
-
-class Options:
- """Features object that has variables set for each DOMBuilder feature.
-
- The DOMBuilder class uses an instance of this class to pass settings to
- the ExpatBuilder class.
- """
-
- # Note that the DOMBuilder class in LoadSave constrains which of these
- # values can be set using the DOM Level 3 LoadSave feature.
-
- namespaces = 1
- namespace_declarations = True
- validation = False
- external_parameter_entities = True
- external_general_entities = True
- external_dtd_subset = True
- validate_if_schema = False
- validate = False
- datatype_normalization = False
- create_entity_ref_nodes = True
- entities = True
- whitespace_in_element_content = True
- cdata_sections = True
- comments = True
- charset_overrides_xml_encoding = True
- infoset = False
- supported_mediatypes_only = False
-
- errorHandler = None
- filter = None
-
-
-class DOMBuilder:
- entityResolver = None
- errorHandler = None
- filter = None
-
- ACTION_REPLACE = 1
- ACTION_APPEND_AS_CHILDREN = 2
- ACTION_INSERT_AFTER = 3
- ACTION_INSERT_BEFORE = 4
-
- _legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
- ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)
-
- def __init__(self):
- self._options = Options()
-
- def _get_entityResolver(self):
- return self.entityResolver
- def _set_entityResolver(self, entityResolver):
- self.entityResolver = entityResolver
-
- def _get_errorHandler(self):
- return self.errorHandler
- def _set_errorHandler(self, errorHandler):
- self.errorHandler = errorHandler
-
- def _get_filter(self):
- return self.filter
- def _set_filter(self, filter):
- self.filter = filter
-
- def setFeature(self, name, state):
- if self.supportsFeature(name):
- state = state and 1 or 0
- try:
- settings = self._settings[(_name_xform(name), state)]
- except KeyError:
- raise xml.dom.NotSupportedErr(
- "unsupported feature: %r" % (name,))
- else:
- for name, value in settings:
- setattr(self._options, name, value)
- else:
- raise xml.dom.NotFoundErr("unknown feature: " + repr(name))
-
- def supportsFeature(self, name):
- return hasattr(self._options, _name_xform(name))
-
- def canSetFeature(self, name, state):
- key = (_name_xform(name), state and 1 or 0)
- return self._settings.has_key(key)
-
- # This dictionary maps from (feature,value) to a list of
- # (option,value) pairs that should be set on the Options object.
- # If a (feature,value) setting is not in this dictionary, it is
- # not supported by the DOMBuilder.
- #
- _settings = {
- ("namespace_declarations", 0): [
- ("namespace_declarations", 0)],
- ("namespace_declarations", 1): [
- ("namespace_declarations", 1)],
- ("validation", 0): [
- ("validation", 0)],
- ("external_general_entities", 0): [
- ("external_general_entities", 0)],
- ("external_general_entities", 1): [
- ("external_general_entities", 1)],
- ("external_parameter_entities", 0): [
- ("external_parameter_entities", 0)],
- ("external_parameter_entities", 1): [
- ("external_parameter_entities", 1)],
- ("validate_if_schema", 0): [
- ("validate_if_schema", 0)],
- ("create_entity_ref_nodes", 0): [
- ("create_entity_ref_nodes", 0)],
- ("create_entity_ref_nodes", 1): [
- ("create_entity_ref_nodes", 1)],
- ("entities", 0): [
- ("create_entity_ref_nodes", 0),
- ("entities", 0)],
- ("entities", 1): [
- ("entities", 1)],
- ("whitespace_in_element_content", 0): [
- ("whitespace_in_element_content", 0)],
- ("whitespace_in_element_content", 1): [
- ("whitespace_in_element_content", 1)],
- ("cdata_sections", 0): [
- ("cdata_sections", 0)],
- ("cdata_sections", 1): [
- ("cdata_sections", 1)],
- ("comments", 0): [
- ("comments", 0)],
- ("comments", 1): [
- ("comments", 1)],
- ("charset_overrides_xml_encoding", 0): [
- ("charset_overrides_xml_encoding", 0)],
- ("charset_overrides_xml_encoding", 1): [
- ("charset_overrides_xml_encoding", 1)],
- ("infoset", 0): [],
- ("infoset", 1): [
- ("namespace_declarations", 0),
- ("validate_if_schema", 0),
- ("create_entity_ref_nodes", 0),
- ("entities", 0),
- ("cdata_sections", 0),
- ("datatype_normalization", 1),
- ("whitespace_in_element_content", 1),
- ("comments", 1),
- ("charset_overrides_xml_encoding", 1)],
- ("supported_mediatypes_only", 0): [
- ("supported_mediatypes_only", 0)],
- ("namespaces", 0): [
- ("namespaces", 0)],
- ("namespaces", 1): [
- ("namespaces", 1)],
- }
-
- def getFeature(self, name):
- xname = _name_xform(name)
- try:
- return getattr(self._options, xname)
- except AttributeError:
- if name == "infoset":
- options = self._options
- return (options.datatype_normalization
- and options.whitespace_in_element_content
- and options.comments
- and options.charset_overrides_xml_encoding
- and not (options.namespace_declarations
- or options.validate_if_schema
- or options.create_entity_ref_nodes
- or options.entities
- or options.cdata_sections))
- raise xml.dom.NotFoundErr("feature %s not known" % repr(name))
-
- def parseURI(self, uri):
- if self.entityResolver:
- input = self.entityResolver.resolveEntity(None, uri)
- else:
- input = DOMEntityResolver().resolveEntity(None, uri)
- return self.parse(input)
-
- def parse(self, input):
- options = copy.copy(self._options)
- options.filter = self.filter
- options.errorHandler = self.errorHandler
- fp = input.byteStream
- if fp is None and options.systemId:
- import urllib2
- fp = urllib2.urlopen(input.systemId)
- return self._parse_bytestream(fp, options)
-
- def parseWithContext(self, input, cnode, action):
- if action not in self._legal_actions:
- raise ValueError("not a legal action")
- raise NotImplementedError("Haven't written this yet...")
-
- def _parse_bytestream(self, stream, options):
- import xml.dom.expatbuilder
- builder = xml.dom.expatbuilder.makeBuilder(options)
- return builder.parseFile(stream)
-
-
-def _name_xform(name):
- return name.lower().replace('-', '_')
-
-
-class DOMEntityResolver(object):
- __slots__ = '_opener',
-
- def resolveEntity(self, publicId, systemId):
- assert systemId is not None
- source = DOMInputSource()
- source.publicId = publicId
- source.systemId = systemId
- source.byteStream = self._get_opener().open(systemId)
-
- # determine the encoding if the transport provided it
- source.encoding = self._guess_media_encoding(source)
-
- # determine the base URI is we can
- import posixpath, urlparse
- parts = urlparse.urlparse(systemId)
- scheme, netloc, path, params, query, fragment = parts
- # XXX should we check the scheme here as well?
- if path and not path.endswith("/"):
- path = posixpath.dirname(path) + "/"
- parts = scheme, netloc, path, params, query, fragment
- source.baseURI = urlparse.urlunparse(parts)
-
- return source
-
- def _get_opener(self):
- try:
- return self._opener
- except AttributeError:
- self._opener = self._create_opener()
- return self._opener
-
- def _create_opener(self):
- import urllib2
- return urllib2.build_opener()
-
- def _guess_media_encoding(self, source):
- info = source.byteStream.info()
- if info.has_key("Content-Type"):
- for param in info.getplist():
- if param.startswith("charset="):
- return param.split("=", 1)[1].lower()
-
-
-class DOMInputSource(object):
- __slots__ = ('byteStream', 'characterStream', 'stringData',
- 'encoding', 'publicId', 'systemId', 'baseURI')
-
- def __init__(self):
- self.byteStream = None
- self.characterStream = None
- self.stringData = None
- self.encoding = None
- self.publicId = None
- self.systemId = None
- self.baseURI = None
-
- def _get_byteStream(self):
- return self.byteStream
- def _set_byteStream(self, byteStream):
- self.byteStream = byteStream
-
- def _get_characterStream(self):
- return self.characterStream
- def _set_characterStream(self, characterStream):
- self.characterStream = characterStream
-
- def _get_stringData(self):
- return self.stringData
- def _set_stringData(self, data):
- self.stringData = data
-
- def _get_encoding(self):
- return self.encoding
- def _set_encoding(self, encoding):
- self.encoding = encoding
-
- def _get_publicId(self):
- return self.publicId
- def _set_publicId(self, publicId):
- self.publicId = publicId
-
- def _get_systemId(self):
- return self.systemId
- def _set_systemId(self, systemId):
- self.systemId = systemId
-
- def _get_baseURI(self):
- return self.baseURI
- def _set_baseURI(self, uri):
- self.baseURI = uri
-
-
-class DOMBuilderFilter:
- """Element filter which can be used to tailor construction of
- a DOM instance.
- """
-
- # There's really no need for this class; concrete implementations
- # should just implement the endElement() and startElement()
- # methods as appropriate. Using this makes it easy to only
- # implement one of them.
-
- FILTER_ACCEPT = 1
- FILTER_REJECT = 2
- FILTER_SKIP = 3
- FILTER_INTERRUPT = 4
-
- whatToShow = NodeFilter.SHOW_ALL
-
- def _get_whatToShow(self):
- return self.whatToShow
-
- def acceptNode(self, element):
- return self.FILTER_ACCEPT
-
- def startContainer(self, element):
- return self.FILTER_ACCEPT
-
-del NodeFilter
-
-
-class DocumentLS:
- """Mixin to create documents that conform to the load/save spec."""
-
- async = False
-
- def _get_async(self):
- return False
- def _set_async(self, async):
- if async:
- raise xml.dom.NotSupportedErr(
- "asynchronous document loading is not supported")
-
- def abort(self):
- # What does it mean to "clear" a document? Does the
- # documentElement disappear?
- raise NotImplementedError(
- "haven't figured out what this means yet")
-
- def load(self, uri):
- raise NotImplementedError("haven't written this yet")
-
- def loadXML(self, source):
- raise NotImplementedError("haven't written this yet")
-
- def saveXML(self, snode):
- if snode is None:
- snode = self
- elif snode.ownerDocument is not self:
- raise xml.dom.WrongDocumentErr()
- return snode.toxml()
-
-
-class DOMImplementationLS:
- MODE_SYNCHRONOUS = 1
- MODE_ASYNCHRONOUS = 2
-
- def createDOMBuilder(self, mode, schemaType):
- if schemaType is not None:
- raise xml.dom.NotSupportedErr(
- "schemaType not yet supported")
- if mode == self.MODE_SYNCHRONOUS:
- return DOMBuilder()
- if mode == self.MODE_ASYNCHRONOUS:
- raise xml.dom.NotSupportedErr(
- "asynchronous builders are not supported")
- raise ValueError("unknown value for mode")
-
- def createDOMWriter(self):
- raise NotImplementedError(
- "the writer interface hasn't been written yet!")
-
- def createDOMInputSource(self):
- return DOMInputSource()
diff --git a/sys/lib/python/xml/etree/ElementInclude.py b/sys/lib/python/xml/etree/ElementInclude.py
deleted file mode 100644
index 974cc2146..000000000
--- a/sys/lib/python/xml/etree/ElementInclude.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#
-# ElementTree
-# $Id: ElementInclude.py 1862 2004-06-18 07:31:02Z Fredrik $
-#
-# limited xinclude support for element trees
-#
-# history:
-# 2003-08-15 fl created
-# 2003-11-14 fl fixed default loader
-#
-# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
-#
-# fredrik@pythonware.com
-# http://www.pythonware.com
-#
-# --------------------------------------------------------------------
-# The ElementTree toolkit is
-#
-# Copyright (c) 1999-2004 by Fredrik Lundh
-#
-# By obtaining, using, and/or copying this software and/or its
-# associated documentation, you agree that you have read, understood,
-# and will comply with the following terms and conditions:
-#
-# Permission to use, copy, modify, and distribute this software and
-# its associated documentation for any purpose and without fee is
-# hereby granted, provided that the above copyright notice appears in
-# all copies, and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Secret Labs AB or the author not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
-# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
-# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
-# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-# --------------------------------------------------------------------
-
-# Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/2.4/license for licensing details.
-
-##
-# Limited XInclude support for the ElementTree package.
-##
-
-import copy
-import ElementTree
-
-XINCLUDE = "{http://www.w3.org/2001/XInclude}"
-
-XINCLUDE_INCLUDE = XINCLUDE + "include"
-XINCLUDE_FALLBACK = XINCLUDE + "fallback"
-
-##
-# Fatal include error.
-
-class FatalIncludeError(SyntaxError):
- pass
-
-##
-# Default loader. This loader reads an included resource from disk.
-#
-# @param href Resource reference.
-# @param parse Parse mode. Either "xml" or "text".
-# @param encoding Optional text encoding.
-# @return The expanded resource. If the parse mode is "xml", this
-# is an ElementTree instance. If the parse mode is "text", this
-# is a Unicode string. If the loader fails, it can return None
-# or raise an IOError exception.
-# @throws IOError If the loader fails to load the resource.
-
-def default_loader(href, parse, encoding=None):
- file = open(href)
- if parse == "xml":
- data = ElementTree.parse(file).getroot()
- else:
- data = file.read()
- if encoding:
- data = data.decode(encoding)
- file.close()
- return data
-
-##
-# Expand XInclude directives.
-#
-# @param elem Root element.
-# @param loader Optional resource loader. If omitted, it defaults
-# to {@link default_loader}. If given, it should be a callable
-# that implements the same interface as <b>default_loader</b>.
-# @throws FatalIncludeError If the function fails to include a given
-# resource, or if the tree contains malformed XInclude elements.
-# @throws IOError If the function fails to load a given resource.
-
-def include(elem, loader=None):
- if loader is None:
- loader = default_loader
- # look for xinclude elements
- i = 0
- while i < len(elem):
- e = elem[i]
- if e.tag == XINCLUDE_INCLUDE:
- # process xinclude directive
- href = e.get("href")
- parse = e.get("parse", "xml")
- if parse == "xml":
- node = loader(href, parse)
- if node is None:
- raise FatalIncludeError(
- "cannot load %r as %r" % (href, parse)
- )
- node = copy.copy(node)
- if e.tail:
- node.tail = (node.tail or "") + e.tail
- elem[i] = node
- elif parse == "text":
- text = loader(href, parse, e.get("encoding"))
- if text is None:
- raise FatalIncludeError(
- "cannot load %r as %r" % (href, parse)
- )
- if i:
- node = elem[i-1]
- node.tail = (node.tail or "") + text
- else:
- elem.text = (elem.text or "") + text + (e.tail or "")
- del elem[i]
- continue
- else:
- raise FatalIncludeError(
- "unknown parse type in xi:include tag (%r)" % parse
- )
- elif e.tag == XINCLUDE_FALLBACK:
- raise FatalIncludeError(
- "xi:fallback tag must be child of xi:include (%r)" % e.tag
- )
- else:
- include(e, loader)
- i = i + 1
diff --git a/sys/lib/python/xml/etree/ElementPath.py b/sys/lib/python/xml/etree/ElementPath.py
deleted file mode 100644
index 00dbe9d54..000000000
--- a/sys/lib/python/xml/etree/ElementPath.py
+++ /dev/null
@@ -1,198 +0,0 @@
-#
-# ElementTree
-# $Id: ElementPath.py 1858 2004-06-17 21:31:41Z Fredrik $
-#
-# limited xpath support for element trees
-#
-# history:
-# 2003-05-23 fl created
-# 2003-05-28 fl added support for // etc
-# 2003-08-27 fl fixed parsing of periods in element names
-#
-# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
-#
-# fredrik@pythonware.com
-# http://www.pythonware.com
-#
-# --------------------------------------------------------------------
-# The ElementTree toolkit is
-#
-# Copyright (c) 1999-2004 by Fredrik Lundh
-#
-# By obtaining, using, and/or copying this software and/or its
-# associated documentation, you agree that you have read, understood,
-# and will comply with the following terms and conditions:
-#
-# Permission to use, copy, modify, and distribute this software and
-# its associated documentation for any purpose and without fee is
-# hereby granted, provided that the above copyright notice appears in
-# all copies, and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Secret Labs AB or the author not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
-# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
-# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
-# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-# --------------------------------------------------------------------
-
-# Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/2.4/license for licensing details.
-
-##
-# Implementation module for XPath support. There's usually no reason
-# to import this module directly; the <b>ElementTree</b> does this for
-# you, if needed.
-##
-
-import re
-
-xpath_tokenizer = re.compile(
- "(::|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/:\[\]\(\)@=\s]+)|\s+"
- ).findall
-
-class xpath_descendant_or_self:
- pass
-
-##
-# Wrapper for a compiled XPath.
-
-class Path:
-
- ##
- # Create an Path instance from an XPath expression.
-
- def __init__(self, path):
- tokens = xpath_tokenizer(path)
- # the current version supports 'path/path'-style expressions only
- self.path = []
- self.tag = None
- if tokens and tokens[0][0] == "/":
- raise SyntaxError("cannot use absolute path on element")
- while tokens:
- op, tag = tokens.pop(0)
- if tag or op == "*":
- self.path.append(tag or op)
- elif op == ".":
- pass
- elif op == "/":
- self.path.append(xpath_descendant_or_self())
- continue
- else:
- raise SyntaxError("unsupported path syntax (%s)" % op)
- if tokens:
- op, tag = tokens.pop(0)
- if op != "/":
- raise SyntaxError(
- "expected path separator (%s)" % (op or tag)
- )
- if self.path and isinstance(self.path[-1], xpath_descendant_or_self):
- raise SyntaxError("path cannot end with //")
- if len(self.path) == 1 and isinstance(self.path[0], type("")):
- self.tag = self.path[0]
-
- ##
- # Find first matching object.
-
- def find(self, element):
- tag = self.tag
- if tag is None:
- nodeset = self.findall(element)
- if not nodeset:
- return None
- return nodeset[0]
- for elem in element:
- if elem.tag == tag:
- return elem
- return None
-
- ##
- # Find text for first matching object.
-
- def findtext(self, element, default=None):
- tag = self.tag
- if tag is None:
- nodeset = self.findall(element)
- if not nodeset:
- return default
- return nodeset[0].text or ""
- for elem in element:
- if elem.tag == tag:
- return elem.text or ""
- return default
-
- ##
- # Find all matching objects.
-
- def findall(self, element):
- nodeset = [element]
- index = 0
- while 1:
- try:
- path = self.path[index]
- index = index + 1
- except IndexError:
- return nodeset
- set = []
- if isinstance(path, xpath_descendant_or_self):
- try:
- tag = self.path[index]
- if not isinstance(tag, type("")):
- tag = None
- else:
- index = index + 1
- except IndexError:
- tag = None # invalid path
- for node in nodeset:
- new = list(node.getiterator(tag))
- if new and new[0] is node:
- set.extend(new[1:])
- else:
- set.extend(new)
- else:
- for node in nodeset:
- for node in node:
- if path == "*" or node.tag == path:
- set.append(node)
- if not set:
- return []
- nodeset = set
-
-_cache = {}
-
-##
-# (Internal) Compile path.
-
-def _compile(path):
- p = _cache.get(path)
- if p is not None:
- return p
- p = Path(path)
- if len(_cache) >= 100:
- _cache.clear()
- _cache[path] = p
- return p
-
-##
-# Find first matching object.
-
-def find(element, path):
- return _compile(path).find(element)
-
-##
-# Find text for first matching object.
-
-def findtext(element, path, default=None):
- return _compile(path).findtext(element, default)
-
-##
-# Find all matching objects.
-
-def findall(element, path):
- return _compile(path).findall(element)
diff --git a/sys/lib/python/xml/etree/ElementTree.py b/sys/lib/python/xml/etree/ElementTree.py
deleted file mode 100644
index 7dbc72e78..000000000
--- a/sys/lib/python/xml/etree/ElementTree.py
+++ /dev/null
@@ -1,1260 +0,0 @@
-#
-# ElementTree
-# $Id: ElementTree.py 2326 2005-03-17 07:45:21Z fredrik $
-#
-# light-weight XML support for Python 1.5.2 and later.
-#
-# history:
-# 2001-10-20 fl created (from various sources)
-# 2001-11-01 fl return root from parse method
-# 2002-02-16 fl sort attributes in lexical order
-# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
-# 2002-05-01 fl finished TreeBuilder refactoring
-# 2002-07-14 fl added basic namespace support to ElementTree.write
-# 2002-07-25 fl added QName attribute support
-# 2002-10-20 fl fixed encoding in write
-# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
-# 2002-11-27 fl accept file objects or file names for parse/write
-# 2002-12-04 fl moved XMLTreeBuilder back to this module
-# 2003-01-11 fl fixed entity encoding glitch for us-ascii
-# 2003-02-13 fl added XML literal factory
-# 2003-02-21 fl added ProcessingInstruction/PI factory
-# 2003-05-11 fl added tostring/fromstring helpers
-# 2003-05-26 fl added ElementPath support
-# 2003-07-05 fl added makeelement factory method
-# 2003-07-28 fl added more well-known namespace prefixes
-# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas Dartsch)
-# 2003-09-04 fl fall back on emulator if ElementPath is not installed
-# 2003-10-31 fl markup updates
-# 2003-11-15 fl fixed nested namespace bug
-# 2004-03-28 fl added XMLID helper
-# 2004-06-02 fl added default support to findtext
-# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
-# 2004-08-23 fl take advantage of post-2.1 expat features
-# 2005-02-01 fl added iterparse implementation
-# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
-#
-# Copyright (c) 1999-2005 by Fredrik Lundh. All rights reserved.
-#
-# fredrik@pythonware.com
-# http://www.pythonware.com
-#
-# --------------------------------------------------------------------
-# The ElementTree toolkit is
-#
-# Copyright (c) 1999-2005 by Fredrik Lundh
-#
-# By obtaining, using, and/or copying this software and/or its
-# associated documentation, you agree that you have read, understood,
-# and will comply with the following terms and conditions:
-#
-# Permission to use, copy, modify, and distribute this software and
-# its associated documentation for any purpose and without fee is
-# hereby granted, provided that the above copyright notice appears in
-# all copies, and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Secret Labs AB or the author not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
-# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
-# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
-# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-# --------------------------------------------------------------------
-
-# Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/2.4/license for licensing details.
-
-__all__ = [
- # public symbols
- "Comment",
- "dump",
- "Element", "ElementTree",
- "fromstring",
- "iselement", "iterparse",
- "parse",
- "PI", "ProcessingInstruction",
- "QName",
- "SubElement",
- "tostring",
- "TreeBuilder",
- "VERSION", "XML",
- "XMLParser", "XMLTreeBuilder",
- ]
-
-##
-# The <b>Element</b> type is a flexible container object, designed to
-# store hierarchical data structures in memory. The type can be
-# described as a cross between a list and a dictionary.
-# <p>
-# Each element has a number of properties associated with it:
-# <ul>
-# <li>a <i>tag</i>. This is a string identifying what kind of data
-# this element represents (the element type, in other words).</li>
-# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
-# <li>a <i>text</i> string.</li>
-# <li>an optional <i>tail</i> string.</li>
-# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
-# </ul>
-#
-# To create an element instance, use the {@link #Element} or {@link
-# #SubElement} factory functions.
-# <p>
-# The {@link #ElementTree} class can be used to wrap an element
-# structure, and convert it from and to XML.
-##
-
-import string, sys, re
-
-class _SimpleElementPath:
- # emulate pre-1.2 find/findtext/findall behaviour
- def find(self, element, tag):
- for elem in element:
- if elem.tag == tag:
- return elem
- return None
- def findtext(self, element, tag, default=None):
- for elem in element:
- if elem.tag == tag:
- return elem.text or ""
- return default
- def findall(self, element, tag):
- if tag[:3] == ".//":
- return element.getiterator(tag[3:])
- result = []
- for elem in element:
- if elem.tag == tag:
- result.append(elem)
- return result
-
-try:
- import ElementPath
-except ImportError:
- # FIXME: issue warning in this case?
- ElementPath = _SimpleElementPath()
-
-# TODO: add support for custom namespace resolvers/default namespaces
-# TODO: add improved support for incremental parsing
-
-VERSION = "1.2.6"
-
-##
-# Internal element class. This class defines the Element interface,
-# and provides a reference implementation of this interface.
-# <p>
-# You should not create instances of this class directly. Use the
-# appropriate factory functions instead, such as {@link #Element}
-# and {@link #SubElement}.
-#
-# @see Element
-# @see SubElement
-# @see Comment
-# @see ProcessingInstruction
-
-class _ElementInterface:
- # <tag attrib>text<child/>...</tag>tail
-
- ##
- # (Attribute) Element tag.
-
- tag = None
-
- ##
- # (Attribute) Element attribute dictionary. Where possible, use
- # {@link #_ElementInterface.get},
- # {@link #_ElementInterface.set},
- # {@link #_ElementInterface.keys}, and
- # {@link #_ElementInterface.items} to access
- # element attributes.
-
- attrib = None
-
- ##
- # (Attribute) Text before first subelement. This is either a
- # string or the value None, if there was no text.
-
- text = None
-
- ##
- # (Attribute) Text after this element's end tag, but before the
- # next sibling element's start tag. This is either a string or
- # the value None, if there was no text.
-
- tail = None # text after end tag, if any
-
- def __init__(self, tag, attrib):
- self.tag = tag
- self.attrib = attrib
- self._children = []
-
- def __repr__(self):
- return "<Element %s at %x>" % (self.tag, id(self))
-
- ##
- # Creates a new element object of the same type as this element.
- #
- # @param tag Element tag.
- # @param attrib Element attributes, given as a dictionary.
- # @return A new element instance.
-
- def makeelement(self, tag, attrib):
- return Element(tag, attrib)
-
- ##
- # Returns the number of subelements.
- #
- # @return The number of subelements.
-
- def __len__(self):
- return len(self._children)
-
- ##
- # Returns the given subelement.
- #
- # @param index What subelement to return.
- # @return The given subelement.
- # @exception IndexError If the given element does not exist.
-
- def __getitem__(self, index):
- return self._children[index]
-
- ##
- # Replaces the given subelement.
- #
- # @param index What subelement to replace.
- # @param element The new element value.
- # @exception IndexError If the given element does not exist.
- # @exception AssertionError If element is not a valid object.
-
- def __setitem__(self, index, element):
- assert iselement(element)
- self._children[index] = element
-
- ##
- # Deletes the given subelement.
- #
- # @param index What subelement to delete.
- # @exception IndexError If the given element does not exist.
-
- def __delitem__(self, index):
- del self._children[index]
-
- ##
- # Returns a list containing subelements in the given range.
- #
- # @param start The first subelement to return.
- # @param stop The first subelement that shouldn't be returned.
- # @return A sequence object containing subelements.
-
- def __getslice__(self, start, stop):
- return self._children[start:stop]
-
- ##
- # Replaces a number of subelements with elements from a sequence.
- #
- # @param start The first subelement to replace.
- # @param stop The first subelement that shouldn't be replaced.
- # @param elements A sequence object with zero or more elements.
- # @exception AssertionError If a sequence member is not a valid object.
-
- def __setslice__(self, start, stop, elements):
- for element in elements:
- assert iselement(element)
- self._children[start:stop] = list(elements)
-
- ##
- # Deletes a number of subelements.
- #
- # @param start The first subelement to delete.
- # @param stop The first subelement to leave in there.
-
- def __delslice__(self, start, stop):
- del self._children[start:stop]
-
- ##
- # Adds a subelement to the end of this element.
- #
- # @param element The element to add.
- # @exception AssertionError If a sequence member is not a valid object.
-
- def append(self, element):
- assert iselement(element)
- self._children.append(element)
-
- ##
- # Inserts a subelement at the given position in this element.
- #
- # @param index Where to insert the new subelement.
- # @exception AssertionError If the element is not a valid object.
-
- def insert(self, index, element):
- assert iselement(element)
- self._children.insert(index, element)
-
- ##
- # Removes a matching subelement. Unlike the <b>find</b> methods,
- # this method compares elements based on identity, not on tag
- # value or contents.
- #
- # @param element What element to remove.
- # @exception ValueError If a matching element could not be found.
- # @exception AssertionError If the element is not a valid object.
-
- def remove(self, element):
- assert iselement(element)
- self._children.remove(element)
-
- ##
- # Returns all subelements. The elements are returned in document
- # order.
- #
- # @return A list of subelements.
- # @defreturn list of Element instances
-
- def getchildren(self):
- return self._children
-
- ##
- # Finds the first matching subelement, by tag name or path.
- #
- # @param path What element to look for.
- # @return The first matching element, or None if no element was found.
- # @defreturn Element or None
-
- def find(self, path):
- return ElementPath.find(self, path)
-
- ##
- # Finds text for the first matching subelement, by tag name or path.
- #
- # @param path What element to look for.
- # @param default What to return if the element was not found.
- # @return The text content of the first matching element, or the
- # default value no element was found. Note that if the element
- # has is found, but has no text content, this method returns an
- # empty string.
- # @defreturn string
-
- def findtext(self, path, default=None):
- return ElementPath.findtext(self, path, default)
-
- ##
- # Finds all matching subelements, by tag name or path.
- #
- # @param path What element to look for.
- # @return A list or iterator containing all matching elements,
- # in document order.
- # @defreturn list of Element instances
-
- def findall(self, path):
- return ElementPath.findall(self, path)
-
- ##
- # Resets an element. This function removes all subelements, clears
- # all attributes, and sets the text and tail attributes to None.
-
- def clear(self):
- self.attrib.clear()
- self._children = []
- self.text = self.tail = None
-
- ##
- # Gets an element attribute.
- #
- # @param key What attribute to look for.
- # @param default What to return if the attribute was not found.
- # @return The attribute value, or the default value, if the
- # attribute was not found.
- # @defreturn string or None
-
- def get(self, key, default=None):
- return self.attrib.get(key, default)
-
- ##
- # Sets an element attribute.
- #
- # @param key What attribute to set.
- # @param value The attribute value.
-
- def set(self, key, value):
- self.attrib[key] = value
-
- ##
- # Gets a list of attribute names. The names are returned in an
- # arbitrary order (just like for an ordinary Python dictionary).
- #
- # @return A list of element attribute names.
- # @defreturn list of strings
-
- def keys(self):
- return self.attrib.keys()
-
- ##
- # Gets element attributes, as a sequence. The attributes are
- # returned in an arbitrary order.
- #
- # @return A list of (name, value) tuples for all attributes.
- # @defreturn list of (string, string) tuples
-
- def items(self):
- return self.attrib.items()
-
- ##
- # Creates a tree iterator. The iterator loops over this element
- # and all subelements, in document order, and returns all elements
- # with a matching tag.
- # <p>
- # If the tree structure is modified during iteration, the result
- # is undefined.
- #
- # @param tag What tags to look for (default is to return all elements).
- # @return A list or iterator containing all the matching elements.
- # @defreturn list or iterator
-
- def getiterator(self, tag=None):
- nodes = []
- if tag == "*":
- tag = None
- if tag is None or self.tag == tag:
- nodes.append(self)
- for node in self._children:
- nodes.extend(node.getiterator(tag))
- return nodes
-
-# compatibility
-_Element = _ElementInterface
-
-##
-# Element factory. This function returns an object implementing the
-# standard Element interface. The exact class or type of that object
-# is implementation dependent, but it will always be compatible with
-# the {@link #_ElementInterface} class in this module.
-# <p>
-# The element name, attribute names, and attribute values can be
-# either 8-bit ASCII strings or Unicode strings.
-#
-# @param tag The element name.
-# @param attrib An optional dictionary, containing element attributes.
-# @param **extra Additional attributes, given as keyword arguments.
-# @return An element instance.
-# @defreturn Element
-
-def Element(tag, attrib={}, **extra):
- attrib = attrib.copy()
- attrib.update(extra)
- return _ElementInterface(tag, attrib)
-
-##
-# Subelement factory. This function creates an element instance, and
-# appends it to an existing element.
-# <p>
-# The element name, attribute names, and attribute values can be
-# either 8-bit ASCII strings or Unicode strings.
-#
-# @param parent The parent element.
-# @param tag The subelement name.
-# @param attrib An optional dictionary, containing element attributes.
-# @param **extra Additional attributes, given as keyword arguments.
-# @return An element instance.
-# @defreturn Element
-
-def SubElement(parent, tag, attrib={}, **extra):
- attrib = attrib.copy()
- attrib.update(extra)
- element = parent.makeelement(tag, attrib)
- parent.append(element)
- return element
-
-##
-# Comment element factory. This factory function creates a special
-# element that will be serialized as an XML comment.
-# <p>
-# The comment string can be either an 8-bit ASCII string or a Unicode
-# string.
-#
-# @param text A string containing the comment string.
-# @return An element instance, representing a comment.
-# @defreturn Element
-
-def Comment(text=None):
- element = Element(Comment)
- element.text = text
- return element
-
-##
-# PI element factory. This factory function creates a special element
-# that will be serialized as an XML processing instruction.
-#
-# @param target A string containing the PI target.
-# @param text A string containing the PI contents, if any.
-# @return An element instance, representing a PI.
-# @defreturn Element
-
-def ProcessingInstruction(target, text=None):
- element = Element(ProcessingInstruction)
- element.text = target
- if text:
- element.text = element.text + " " + text
- return element
-
-PI = ProcessingInstruction
-
-##
-# QName wrapper. This can be used to wrap a QName attribute value, in
-# order to get proper namespace handling on output.
-#
-# @param text A string containing the QName value, in the form {uri}local,
-# or, if the tag argument is given, the URI part of a QName.
-# @param tag Optional tag. If given, the first argument is interpreted as
-# an URI, and this argument is interpreted as a local name.
-# @return An opaque object, representing the QName.
-
-class QName:
- def __init__(self, text_or_uri, tag=None):
- if tag:
- text_or_uri = "{%s}%s" % (text_or_uri, tag)
- self.text = text_or_uri
- def __str__(self):
- return self.text
- def __hash__(self):
- return hash(self.text)
- def __cmp__(self, other):
- if isinstance(other, QName):
- return cmp(self.text, other.text)
- return cmp(self.text, other)
-
-##
-# ElementTree wrapper class. This class represents an entire element
-# hierarchy, and adds some extra support for serialization to and from
-# standard XML.
-#
-# @param element Optional root element.
-# @keyparam file Optional file handle or name. If given, the
-# tree is initialized with the contents of this XML file.
-
-class ElementTree:
-
- def __init__(self, element=None, file=None):
- assert element is None or iselement(element)
- self._root = element # first node
- if file:
- self.parse(file)
-
- ##
- # Gets the root element for this tree.
- #
- # @return An element instance.
- # @defreturn Element
-
- def getroot(self):
- return self._root
-
- ##
- # Replaces the root element for this tree. This discards the
- # current contents of the tree, and replaces it with the given
- # element. Use with care.
- #
- # @param element An element instance.
-
- def _setroot(self, element):
- assert iselement(element)
- self._root = element
-
- ##
- # Loads an external XML document into this element tree.
- #
- # @param source A file name or file object.
- # @param parser An optional parser instance. If not given, the
- # standard {@link XMLTreeBuilder} parser is used.
- # @return The document root element.
- # @defreturn Element
-
- def parse(self, source, parser=None):
- if not hasattr(source, "read"):
- source = open(source, "rb")
- if not parser:
- parser = XMLTreeBuilder()
- while 1:
- data = source.read(32768)
- if not data:
- break
- parser.feed(data)
- self._root = parser.close()
- return self._root
-
- ##
- # Creates a tree iterator for the root element. The iterator loops
- # over all elements in this tree, in document order.
- #
- # @param tag What tags to look for (default is to return all elements)
- # @return An iterator.
- # @defreturn iterator
-
- def getiterator(self, tag=None):
- assert self._root is not None
- return self._root.getiterator(tag)
-
- ##
- # Finds the first toplevel element with given tag.
- # Same as getroot().find(path).
- #
- # @param path What element to look for.
- # @return The first matching element, or None if no element was found.
- # @defreturn Element or None
-
- def find(self, path):
- assert self._root is not None
- if path[:1] == "/":
- path = "." + path
- return self._root.find(path)
-
- ##
- # Finds the element text for the first toplevel element with given
- # tag. Same as getroot().findtext(path).
- #
- # @param path What toplevel element to look for.
- # @param default What to return if the element was not found.
- # @return The text content of the first matching element, or the
- # default value no element was found. Note that if the element
- # has is found, but has no text content, this method returns an
- # empty string.
- # @defreturn string
-
- def findtext(self, path, default=None):
- assert self._root is not None
- if path[:1] == "/":
- path = "." + path
- return self._root.findtext(path, default)
-
- ##
- # Finds all toplevel elements with the given tag.
- # Same as getroot().findall(path).
- #
- # @param path What element to look for.
- # @return A list or iterator containing all matching elements,
- # in document order.
- # @defreturn list of Element instances
-
- def findall(self, path):
- assert self._root is not None
- if path[:1] == "/":
- path = "." + path
- return self._root.findall(path)
-
- ##
- # Writes the element tree to a file, as XML.
- #
- # @param file A file name, or a file object opened for writing.
- # @param encoding Optional output encoding (default is US-ASCII).
-
- def write(self, file, encoding="us-ascii"):
- assert self._root is not None
- if not hasattr(file, "write"):
- file = open(file, "wb")
- if not encoding:
- encoding = "us-ascii"
- elif encoding != "utf-8" and encoding != "us-ascii":
- file.write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
- self._write(file, self._root, encoding, {})
-
- def _write(self, file, node, encoding, namespaces):
- # write XML to file
- tag = node.tag
- if tag is Comment:
- file.write("<!-- %s -->" % _escape_cdata(node.text, encoding))
- elif tag is ProcessingInstruction:
- file.write("<?%s?>" % _escape_cdata(node.text, encoding))
- else:
- items = node.items()
- xmlns_items = [] # new namespaces in this scope
- try:
- if isinstance(tag, QName) or tag[:1] == "{":
- tag, xmlns = fixtag(tag, namespaces)
- if xmlns: xmlns_items.append(xmlns)
- except TypeError:
- _raise_serialization_error(tag)
- file.write("<" + _encode(tag, encoding))
- if items or xmlns_items:
- items.sort() # lexical order
- for k, v in items:
- try:
- if isinstance(k, QName) or k[:1] == "{":
- k, xmlns = fixtag(k, namespaces)
- if xmlns: xmlns_items.append(xmlns)
- except TypeError:
- _raise_serialization_error(k)
- try:
- if isinstance(v, QName):
- v, xmlns = fixtag(v, namespaces)
- if xmlns: xmlns_items.append(xmlns)
- except TypeError:
- _raise_serialization_error(v)
- file.write(" %s=\"%s\"" % (_encode(k, encoding),
- _escape_attrib(v, encoding)))
- for k, v in xmlns_items:
- file.write(" %s=\"%s\"" % (_encode(k, encoding),
- _escape_attrib(v, encoding)))
- if node.text or len(node):
- file.write(">")
- if node.text:
- file.write(_escape_cdata(node.text, encoding))
- for n in node:
- self._write(file, n, encoding, namespaces)
- file.write("</" + _encode(tag, encoding) + ">")
- else:
- file.write(" />")
- for k, v in xmlns_items:
- del namespaces[v]
- if node.tail:
- file.write(_escape_cdata(node.tail, encoding))
-
-# --------------------------------------------------------------------
-# helpers
-
-##
-# Checks if an object appears to be a valid element object.
-#
-# @param An element instance.
-# @return A true value if this is an element object.
-# @defreturn flag
-
-def iselement(element):
- # FIXME: not sure about this; might be a better idea to look
- # for tag/attrib/text attributes
- return isinstance(element, _ElementInterface) or hasattr(element, "tag")
-
-##
-# Writes an element tree or element structure to sys.stdout. This
-# function should be used for debugging only.
-# <p>
-# The exact output format is implementation dependent. In this
-# version, it's written as an ordinary XML file.
-#
-# @param elem An element tree or an individual element.
-
-def dump(elem):
- # debugging
- if not isinstance(elem, ElementTree):
- elem = ElementTree(elem)
- elem.write(sys.stdout)
- tail = elem.getroot().tail
- if not tail or tail[-1] != "\n":
- sys.stdout.write("\n")
-
-def _encode(s, encoding):
- try:
- return s.encode(encoding)
- except AttributeError:
- return s # 1.5.2: assume the string uses the right encoding
-
-if sys.version[:3] == "1.5":
- _escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
-else:
- _escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
-
-_escape_map = {
- "&": "&amp;",
- "<": "&lt;",
- ">": "&gt;",
- '"': "&quot;",
-}
-
-_namespace_map = {
- # "well-known" namespace prefixes
- "http://www.w3.org/XML/1998/namespace": "xml",
- "http://www.w3.org/1999/xhtml": "html",
- "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
- "http://schemas.xmlsoap.org/wsdl/": "wsdl",
-}
-
-def _raise_serialization_error(text):
- raise TypeError(
- "cannot serialize %r (type %s)" % (text, type(text).__name__)
- )
-
-def _encode_entity(text, pattern=_escape):
- # map reserved and non-ascii characters to numerical entities
- def escape_entities(m, map=_escape_map):
- out = []
- append = out.append
- for char in m.group():
- text = map.get(char)
- if text is None:
- text = "&#%d;" % ord(char)
- append(text)
- return string.join(out, "")
- try:
- return _encode(pattern.sub(escape_entities, text), "ascii")
- except TypeError:
- _raise_serialization_error(text)
-
-#
-# the following functions assume an ascii-compatible encoding
-# (or "utf-16")
-
-def _escape_cdata(text, encoding=None, replace=string.replace):
- # escape character data
- try:
- if encoding:
- try:
- text = _encode(text, encoding)
- except UnicodeError:
- return _encode_entity(text)
- text = replace(text, "&", "&amp;")
- text = replace(text, "<", "&lt;")
- text = replace(text, ">", "&gt;")
- return text
- except (TypeError, AttributeError):
- _raise_serialization_error(text)
-
-def _escape_attrib(text, encoding=None, replace=string.replace):
- # escape attribute value
- try:
- if encoding:
- try:
- text = _encode(text, encoding)
- except UnicodeError:
- return _encode_entity(text)
- text = replace(text, "&", "&amp;")
- text = replace(text, "'", "&apos;") # FIXME: overkill
- text = replace(text, "\"", "&quot;")
- text = replace(text, "<", "&lt;")
- text = replace(text, ">", "&gt;")
- return text
- except (TypeError, AttributeError):
- _raise_serialization_error(text)
-
-def fixtag(tag, namespaces):
- # given a decorated tag (of the form {uri}tag), return prefixed
- # tag and namespace declaration, if any
- if isinstance(tag, QName):
- tag = tag.text
- namespace_uri, tag = string.split(tag[1:], "}", 1)
- prefix = namespaces.get(namespace_uri)
- if prefix is None:
- prefix = _namespace_map.get(namespace_uri)
- if prefix is None:
- prefix = "ns%d" % len(namespaces)
- namespaces[namespace_uri] = prefix
- if prefix == "xml":
- xmlns = None
- else:
- xmlns = ("xmlns:%s" % prefix, namespace_uri)
- else:
- xmlns = None
- return "%s:%s" % (prefix, tag), xmlns
-
-##
-# Parses an XML document into an element tree.
-#
-# @param source A filename or file object containing XML data.
-# @param parser An optional parser instance. If not given, the
-# standard {@link XMLTreeBuilder} parser is used.
-# @return An ElementTree instance
-
-def parse(source, parser=None):
- tree = ElementTree()
- tree.parse(source, parser)
- return tree
-
-##
-# Parses an XML document into an element tree incrementally, and reports
-# what's going on to the user.
-#
-# @param source A filename or file object containing XML data.
-# @param events A list of events to report back. If omitted, only "end"
-# events are reported.
-# @return A (event, elem) iterator.
-
-class iterparse:
-
- def __init__(self, source, events=None):
- if not hasattr(source, "read"):
- source = open(source, "rb")
- self._file = source
- self._events = []
- self._index = 0
- self.root = self._root = None
- self._parser = XMLTreeBuilder()
- # wire up the parser for event reporting
- parser = self._parser._parser
- append = self._events.append
- if events is None:
- events = ["end"]
- for event in events:
- if event == "start":
- try:
- parser.ordered_attributes = 1
- parser.specified_attributes = 1
- def handler(tag, attrib_in, event=event, append=append,
- start=self._parser._start_list):
- append((event, start(tag, attrib_in)))
- parser.StartElementHandler = handler
- except AttributeError:
- def handler(tag, attrib_in, event=event, append=append,
- start=self._parser._start):
- append((event, start(tag, attrib_in)))
- parser.StartElementHandler = handler
- elif event == "end":
- def handler(tag, event=event, append=append,
- end=self._parser._end):
- append((event, end(tag)))
- parser.EndElementHandler = handler
- elif event == "start-ns":
- def handler(prefix, uri, event=event, append=append):
- try:
- uri = _encode(uri, "ascii")
- except UnicodeError:
- pass
- append((event, (prefix or "", uri)))
- parser.StartNamespaceDeclHandler = handler
- elif event == "end-ns":
- def handler(prefix, event=event, append=append):
- append((event, None))
- parser.EndNamespaceDeclHandler = handler
-
- def next(self):
- while 1:
- try:
- item = self._events[self._index]
- except IndexError:
- if self._parser is None:
- self.root = self._root
- try:
- raise StopIteration
- except NameError:
- raise IndexError
- # load event buffer
- del self._events[:]
- self._index = 0
- data = self._file.read(16384)
- if data:
- self._parser.feed(data)
- else:
- self._root = self._parser.close()
- self._parser = None
- else:
- self._index = self._index + 1
- return item
-
- try:
- iter
- def __iter__(self):
- return self
- except NameError:
- def __getitem__(self, index):
- return self.next()
-
-##
-# Parses an XML document from a string constant. This function can
-# be used to embed "XML literals" in Python code.
-#
-# @param source A string containing XML data.
-# @return An Element instance.
-# @defreturn Element
-
-def XML(text):
- parser = XMLTreeBuilder()
- parser.feed(text)
- return parser.close()
-
-##
-# Parses an XML document from a string constant, and also returns
-# a dictionary which maps from element id:s to elements.
-#
-# @param source A string containing XML data.
-# @return A tuple containing an Element instance and a dictionary.
-# @defreturn (Element, dictionary)
-
-def XMLID(text):
- parser = XMLTreeBuilder()
- parser.feed(text)
- tree = parser.close()
- ids = {}
- for elem in tree.getiterator():
- id = elem.get("id")
- if id:
- ids[id] = elem
- return tree, ids
-
-##
-# Parses an XML document from a string constant. Same as {@link #XML}.
-#
-# @def fromstring(text)
-# @param source A string containing XML data.
-# @return An Element instance.
-# @defreturn Element
-
-fromstring = XML
-
-##
-# Generates a string representation of an XML element, including all
-# subelements.
-#
-# @param element An Element instance.
-# @return An encoded string containing the XML data.
-# @defreturn string
-
-def tostring(element, encoding=None):
- class dummy:
- pass
- data = []
- file = dummy()
- file.write = data.append
- ElementTree(element).write(file, encoding)
- return string.join(data, "")
-
-##
-# Generic element structure builder. This builder converts a sequence
-# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
-# #TreeBuilder.end} method calls to a well-formed element structure.
-# <p>
-# You can use this class to build an element structure using a custom XML
-# parser, or a parser for some other XML-like format.
-#
-# @param element_factory Optional element factory. This factory
-# is called to create new Element instances, as necessary.
-
-class TreeBuilder:
-
- def __init__(self, element_factory=None):
- self._data = [] # data collector
- self._elem = [] # element stack
- self._last = None # last element
- self._tail = None # true if we're after an end tag
- if element_factory is None:
- element_factory = _ElementInterface
- self._factory = element_factory
-
- ##
- # Flushes the parser buffers, and returns the toplevel documen
- # element.
- #
- # @return An Element instance.
- # @defreturn Element
-
- def close(self):
- assert len(self._elem) == 0, "missing end tags"
- assert self._last != None, "missing toplevel element"
- return self._last
-
- def _flush(self):
- if self._data:
- if self._last is not None:
- text = string.join(self._data, "")
- if self._tail:
- assert self._last.tail is None, "internal error (tail)"
- self._last.tail = text
- else:
- assert self._last.text is None, "internal error (text)"
- self._last.text = text
- self._data = []
-
- ##
- # Adds text to the current element.
- #
- # @param data A string. This should be either an 8-bit string
- # containing ASCII text, or a Unicode string.
-
- def data(self, data):
- self._data.append(data)
-
- ##
- # Opens a new element.
- #
- # @param tag The element name.
- # @param attrib A dictionary containing element attributes.
- # @return The opened element.
- # @defreturn Element
-
- def start(self, tag, attrs):
- self._flush()
- self._last = elem = self._factory(tag, attrs)
- if self._elem:
- self._elem[-1].append(elem)
- self._elem.append(elem)
- self._tail = 0
- return elem
-
- ##
- # Closes the current element.
- #
- # @param tag The element name.
- # @return The closed element.
- # @defreturn Element
-
- def end(self, tag):
- self._flush()
- self._last = self._elem.pop()
- assert self._last.tag == tag,\
- "end tag mismatch (expected %s, got %s)" % (
- self._last.tag, tag)
- self._tail = 1
- return self._last
-
-##
-# Element structure builder for XML source data, based on the
-# <b>expat</b> parser.
-#
-# @keyparam target Target object. If omitted, the builder uses an
-# instance of the standard {@link #TreeBuilder} class.
-# @keyparam html Predefine HTML entities. This flag is not supported
-# by the current implementation.
-# @see #ElementTree
-# @see #TreeBuilder
-
-class XMLTreeBuilder:
-
- def __init__(self, html=0, target=None):
- try:
- from xml.parsers import expat
- except ImportError:
- raise ImportError(
- "No module named expat; use SimpleXMLTreeBuilder instead"
- )
- self._parser = parser = expat.ParserCreate(None, "}")
- if target is None:
- target = TreeBuilder()
- self._target = target
- self._names = {} # name memo cache
- # callbacks
- parser.DefaultHandlerExpand = self._default
- parser.StartElementHandler = self._start
- parser.EndElementHandler = self._end
- parser.CharacterDataHandler = self._data
- # let expat do the buffering, if supported
- try:
- self._parser.buffer_text = 1
- except AttributeError:
- pass
- # use new-style attribute handling, if supported
- try:
- self._parser.ordered_attributes = 1
- self._parser.specified_attributes = 1
- parser.StartElementHandler = self._start_list
- except AttributeError:
- pass
- encoding = None
- if not parser.returns_unicode:
- encoding = "utf-8"
- # target.xml(encoding, None)
- self._doctype = None
- self.entity = {}
-
- def _fixtext(self, text):
- # convert text string to ascii, if possible
- try:
- return _encode(text, "ascii")
- except UnicodeError:
- return text
-
- def _fixname(self, key):
- # expand qname, and convert name string to ascii, if possible
- try:
- name = self._names[key]
- except KeyError:
- name = key
- if "}" in name:
- name = "{" + name
- self._names[key] = name = self._fixtext(name)
- return name
-
- def _start(self, tag, attrib_in):
- fixname = self._fixname
- tag = fixname(tag)
- attrib = {}
- for key, value in attrib_in.items():
- attrib[fixname(key)] = self._fixtext(value)
- return self._target.start(tag, attrib)
-
- def _start_list(self, tag, attrib_in):
- fixname = self._fixname
- tag = fixname(tag)
- attrib = {}
- if attrib_in:
- for i in range(0, len(attrib_in), 2):
- attrib[fixname(attrib_in[i])] = self._fixtext(attrib_in[i+1])
- return self._target.start(tag, attrib)
-
- def _data(self, text):
- return self._target.data(self._fixtext(text))
-
- def _end(self, tag):
- return self._target.end(self._fixname(tag))
-
- def _default(self, text):
- prefix = text[:1]
- if prefix == "&":
- # deal with undefined entities
- try:
- self._target.data(self.entity[text[1:-1]])
- except KeyError:
- from xml.parsers import expat
- raise expat.error(
- "undefined entity %s: line %d, column %d" %
- (text, self._parser.ErrorLineNumber,
- self._parser.ErrorColumnNumber)
- )
- elif prefix == "<" and text[:9] == "<!DOCTYPE":
- self._doctype = [] # inside a doctype declaration
- elif self._doctype is not None:
- # parse doctype contents
- if prefix == ">":
- self._doctype = None
- return
- text = string.strip(text)
- if not text:
- return
- self._doctype.append(text)
- n = len(self._doctype)
- if n > 2:
- type = self._doctype[1]
- if type == "PUBLIC" and n == 4:
- name, type, pubid, system = self._doctype
- elif type == "SYSTEM" and n == 3:
- name, type, system = self._doctype
- pubid = None
- else:
- return
- if pubid:
- pubid = pubid[1:-1]
- self.doctype(name, pubid, system[1:-1])
- self._doctype = None
-
- ##
- # Handles a doctype declaration.
- #
- # @param name Doctype name.
- # @param pubid Public identifier.
- # @param system System identifier.
-
- def doctype(self, name, pubid, system):
- pass
-
- ##
- # Feeds data to the parser.
- #
- # @param data Encoded data.
-
- def feed(self, data):
- self._parser.Parse(data, 0)
-
- ##
- # Finishes feeding data to the parser.
- #
- # @return An element structure.
- # @defreturn Element
-
- def close(self):
- self._parser.Parse("", 1) # end of data
- tree = self._target.close()
- del self._target, self._parser # get rid of circular references
- return tree
-
-# compatibility
-XMLParser = XMLTreeBuilder
diff --git a/sys/lib/python/xml/etree/__init__.py b/sys/lib/python/xml/etree/__init__.py
deleted file mode 100644
index 3dd2c929e..000000000
--- a/sys/lib/python/xml/etree/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# $Id: __init__.py 1821 2004-06-03 16:57:49Z fredrik $
-# elementtree package
-
-# --------------------------------------------------------------------
-# The ElementTree toolkit is
-#
-# Copyright (c) 1999-2004 by Fredrik Lundh
-#
-# By obtaining, using, and/or copying this software and/or its
-# associated documentation, you agree that you have read, understood,
-# and will comply with the following terms and conditions:
-#
-# Permission to use, copy, modify, and distribute this software and
-# its associated documentation for any purpose and without fee is
-# hereby granted, provided that the above copyright notice appears in
-# all copies, and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Secret Labs AB or the author not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
-# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
-# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
-# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-# --------------------------------------------------------------------
-
-# Licensed to PSF under a Contributor Agreement.
-# See http://www.python.org/2.4/license for licensing details.
diff --git a/sys/lib/python/xml/etree/cElementTree.py b/sys/lib/python/xml/etree/cElementTree.py
deleted file mode 100644
index a6f127abd..000000000
--- a/sys/lib/python/xml/etree/cElementTree.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# Wrapper module for _elementtree
-
-from _elementtree import *
diff --git a/sys/lib/python/xml/parsers/__init__.py b/sys/lib/python/xml/parsers/__init__.py
deleted file mode 100644
index eb314a3b4..000000000
--- a/sys/lib/python/xml/parsers/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""Python interfaces to XML parsers.
-
-This package contains one module:
-
-expat -- Python wrapper for James Clark's Expat parser, with namespace
- support.
-
-"""
diff --git a/sys/lib/python/xml/parsers/expat.py b/sys/lib/python/xml/parsers/expat.py
deleted file mode 100644
index 00b5c78ec..000000000
--- a/sys/lib/python/xml/parsers/expat.py
+++ /dev/null
@@ -1,4 +0,0 @@
-"""Interface to the Expat non-validating XML parser."""
-__version__ = '$Revision: 17640 $'
-
-from pyexpat import *
diff --git a/sys/lib/python/xml/sax/__init__.py b/sys/lib/python/xml/sax/__init__.py
deleted file mode 100644
index 6b1b1ba00..000000000
--- a/sys/lib/python/xml/sax/__init__.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""Simple API for XML (SAX) implementation for Python.
-
-This module provides an implementation of the SAX 2 interface;
-information about the Java version of the interface can be found at
-http://www.megginson.com/SAX/. The Python version of the interface is
-documented at <...>.
-
-This package contains the following modules:
-
-handler -- Base classes and constants which define the SAX 2 API for
- the 'client-side' of SAX for Python.
-
-saxutils -- Implementation of the convenience classes commonly used to
- work with SAX.
-
-xmlreader -- Base classes and constants which define the SAX 2 API for
- the parsers used with SAX for Python.
-
-expatreader -- Driver that allows use of the Expat parser with SAX.
-"""
-
-from xmlreader import InputSource
-from handler import ContentHandler, ErrorHandler
-from _exceptions import SAXException, SAXNotRecognizedException, \
- SAXParseException, SAXNotSupportedException, \
- SAXReaderNotAvailable
-
-
-def parse(source, handler, errorHandler=ErrorHandler()):
- parser = make_parser()
- parser.setContentHandler(handler)
- parser.setErrorHandler(errorHandler)
- parser.parse(source)
-
-def parseString(string, handler, errorHandler=ErrorHandler()):
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
-
- if errorHandler is None:
- errorHandler = ErrorHandler()
- parser = make_parser()
- parser.setContentHandler(handler)
- parser.setErrorHandler(errorHandler)
-
- inpsrc = InputSource()
- inpsrc.setByteStream(StringIO(string))
- parser.parse(inpsrc)
-
-# this is the parser list used by the make_parser function if no
-# alternatives are given as parameters to the function
-
-default_parser_list = ["xml.sax.expatreader"]
-
-# tell modulefinder that importing sax potentially imports expatreader
-_false = 0
-if _false:
- import xml.sax.expatreader
-
-import os, sys
-if os.environ.has_key("PY_SAX_PARSER"):
- default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
-del os
-
-_key = "python.xml.sax.parser"
-if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
- default_parser_list = sys.registry.getProperty(_key).split(",")
-
-
-def make_parser(parser_list = []):
- """Creates and returns a SAX parser.
-
- Creates the first parser it is able to instantiate of the ones
- given in the list created by doing parser_list +
- default_parser_list. The lists must contain the names of Python
- modules containing both a SAX parser and a create_parser function."""
-
- for parser_name in parser_list + default_parser_list:
- try:
- return _create_parser(parser_name)
- except ImportError,e:
- import sys
- if sys.modules.has_key(parser_name):
- # The parser module was found, but importing it
- # failed unexpectedly, pass this exception through
- raise
- except SAXReaderNotAvailable:
- # The parser module detected that it won't work properly,
- # so try the next one
- pass
-
- raise SAXReaderNotAvailable("No parsers found", None)
-
-# --- Internal utility methods used by make_parser
-
-if sys.platform[ : 4] == "java":
- def _create_parser(parser_name):
- from org.python.core import imp
- drv_module = imp.importName(parser_name, 0, globals())
- return drv_module.create_parser()
-
-else:
- def _create_parser(parser_name):
- drv_module = __import__(parser_name,{},{},['create_parser'])
- return drv_module.create_parser()
-
-del sys
diff --git a/sys/lib/python/xml/sax/_exceptions.py b/sys/lib/python/xml/sax/_exceptions.py
deleted file mode 100644
index fdd614aee..000000000
--- a/sys/lib/python/xml/sax/_exceptions.py
+++ /dev/null
@@ -1,131 +0,0 @@
-"""Different kinds of SAX Exceptions"""
-import sys
-if sys.platform[:4] == "java":
- from java.lang import Exception
-del sys
-
-# ===== SAXEXCEPTION =====
-
-class SAXException(Exception):
- """Encapsulate an XML error or warning. This class can contain
- basic error or warning information from either the XML parser or
- the application: you can subclass it to provide additional
- functionality, or to add localization. Note that although you will
- receive a SAXException as the argument to the handlers in the
- ErrorHandler interface, you are not actually required to throw
- the exception; instead, you can simply read the information in
- it."""
-
- def __init__(self, msg, exception=None):
- """Creates an exception. The message is required, but the exception
- is optional."""
- self._msg = msg
- self._exception = exception
- Exception.__init__(self, msg)
-
- def getMessage(self):
- "Return a message for this exception."
- return self._msg
-
- def getException(self):
- "Return the embedded exception, or None if there was none."
- return self._exception
-
- def __str__(self):
- "Create a string representation of the exception."
- return self._msg
-
- def __getitem__(self, ix):
- """Avoids weird error messages if someone does exception[ix] by
- mistake, since Exception has __getitem__ defined."""
- raise AttributeError("__getitem__")
-
-
-# ===== SAXPARSEEXCEPTION =====
-
-class SAXParseException(SAXException):
- """Encapsulate an XML parse error or warning.
-
- This exception will include information for locating the error in
- the original XML document. Note that although the application will
- receive a SAXParseException as the argument to the handlers in the
- ErrorHandler interface, the application is not actually required
- to throw the exception; instead, it can simply read the
- information in it and take a different action.
-
- Since this exception is a subclass of SAXException, it inherits
- the ability to wrap another exception."""
-
- def __init__(self, msg, exception, locator):
- "Creates the exception. The exception parameter is allowed to be None."
- SAXException.__init__(self, msg, exception)
- self._locator = locator
-
- # We need to cache this stuff at construction time.
- # If this exception is thrown, the objects through which we must
- # traverse to get this information may be deleted by the time
- # it gets caught.
- self._systemId = self._locator.getSystemId()
- self._colnum = self._locator.getColumnNumber()
- self._linenum = self._locator.getLineNumber()
-
- def getColumnNumber(self):
- """The column number of the end of the text where the exception
- occurred."""
- return self._colnum
-
- def getLineNumber(self):
- "The line number of the end of the text where the exception occurred."
- return self._linenum
-
- def getPublicId(self):
- "Get the public identifier of the entity where the exception occurred."
- return self._locator.getPublicId()
-
- def getSystemId(self):
- "Get the system identifier of the entity where the exception occurred."
- return self._systemId
-
- def __str__(self):
- "Create a string representation of the exception."
- sysid = self.getSystemId()
- if sysid is None:
- sysid = "<unknown>"
- linenum = self.getLineNumber()
- if linenum is None:
- linenum = "?"
- colnum = self.getColumnNumber()
- if colnum is None:
- colnum = "?"
- return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
-
-
-# ===== SAXNOTRECOGNIZEDEXCEPTION =====
-
-class SAXNotRecognizedException(SAXException):
- """Exception class for an unrecognized identifier.
-
- An XMLReader will raise this exception when it is confronted with an
- unrecognized feature or property. SAX applications and extensions may
- use this class for similar purposes."""
-
-
-# ===== SAXNOTSUPPORTEDEXCEPTION =====
-
-class SAXNotSupportedException(SAXException):
- """Exception class for an unsupported operation.
-
- An XMLReader will raise this exception when a service it cannot
- perform is requested (specifically setting a state or value). SAX
- applications and extensions may use this class for similar
- purposes."""
-
-# ===== SAXNOTSUPPORTEDEXCEPTION =====
-
-class SAXReaderNotAvailable(SAXNotSupportedException):
- """Exception class for a missing driver.
-
- An XMLReader module (driver) should raise this exception when it
- is first imported, e.g. when a support module cannot be imported.
- It also may be raised during parsing, e.g. if executing an external
- program is not permitted."""
diff --git a/sys/lib/python/xml/sax/expatreader.py b/sys/lib/python/xml/sax/expatreader.py
deleted file mode 100644
index bb9c294e5..000000000
--- a/sys/lib/python/xml/sax/expatreader.py
+++ /dev/null
@@ -1,414 +0,0 @@
-"""
-SAX driver for the pyexpat C module. This driver works with
-pyexpat.__version__ == '2.22'.
-"""
-
-version = "0.20"
-
-from xml.sax._exceptions import *
-from xml.sax.handler import feature_validation, feature_namespaces
-from xml.sax.handler import feature_namespace_prefixes
-from xml.sax.handler import feature_external_ges, feature_external_pes
-from xml.sax.handler import feature_string_interning
-from xml.sax.handler import property_xml_string, property_interning_dict
-
-# xml.parsers.expat does not raise ImportError in Jython
-import sys
-if sys.platform[:4] == "java":
- raise SAXReaderNotAvailable("expat not available in Java", None)
-del sys
-
-try:
- from xml.parsers import expat
-except ImportError:
- raise SAXReaderNotAvailable("expat not supported", None)
-else:
- if not hasattr(expat, "ParserCreate"):
- raise SAXReaderNotAvailable("expat not supported", None)
-from xml.sax import xmlreader, saxutils, handler
-
-AttributesImpl = xmlreader.AttributesImpl
-AttributesNSImpl = xmlreader.AttributesNSImpl
-
-# If we're using a sufficiently recent version of Python, we can use
-# weak references to avoid cycles between the parser and content
-# handler, otherwise we'll just have to pretend.
-try:
- import _weakref
-except ImportError:
- def _mkproxy(o):
- return o
-else:
- import weakref
- _mkproxy = weakref.proxy
- del weakref, _weakref
-
-# --- ExpatLocator
-
-class ExpatLocator(xmlreader.Locator):
- """Locator for use with the ExpatParser class.
-
- This uses a weak reference to the parser object to avoid creating
- a circular reference between the parser and the content handler.
- """
- def __init__(self, parser):
- self._ref = _mkproxy(parser)
-
- def getColumnNumber(self):
- parser = self._ref
- if parser._parser is None:
- return None
- return parser._parser.ErrorColumnNumber
-
- def getLineNumber(self):
- parser = self._ref
- if parser._parser is None:
- return 1
- return parser._parser.ErrorLineNumber
-
- def getPublicId(self):
- parser = self._ref
- if parser is None:
- return None
- return parser._source.getPublicId()
-
- def getSystemId(self):
- parser = self._ref
- if parser is None:
- return None
- return parser._source.getSystemId()
-
-
-# --- ExpatParser
-
-class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator):
- """SAX driver for the pyexpat C module."""
-
- def __init__(self, namespaceHandling=0, bufsize=2**16-20):
- xmlreader.IncrementalParser.__init__(self, bufsize)
- self._source = xmlreader.InputSource()
- self._parser = None
- self._namespaces = namespaceHandling
- self._lex_handler_prop = None
- self._parsing = 0
- self._entity_stack = []
- self._external_ges = 1
- self._interning = None
-
- # XMLReader methods
-
- def parse(self, source):
- "Parse an XML document from a URL or an InputSource."
- source = saxutils.prepare_input_source(source)
-
- self._source = source
- self.reset()
- self._cont_handler.setDocumentLocator(ExpatLocator(self))
- xmlreader.IncrementalParser.parse(self, source)
-
- def prepareParser(self, source):
- if source.getSystemId() != None:
- self._parser.SetBase(source.getSystemId())
-
- # Redefined setContentHandler to allow changing handlers during parsing
-
- def setContentHandler(self, handler):
- xmlreader.IncrementalParser.setContentHandler(self, handler)
- if self._parsing:
- self._reset_cont_handler()
-
- def getFeature(self, name):
- if name == feature_namespaces:
- return self._namespaces
- elif name == feature_string_interning:
- return self._interning is not None
- elif name in (feature_validation, feature_external_pes,
- feature_namespace_prefixes):
- return 0
- elif name == feature_external_ges:
- return self._external_ges
- raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
-
- def setFeature(self, name, state):
- if self._parsing:
- raise SAXNotSupportedException("Cannot set features while parsing")
-
- if name == feature_namespaces:
- self._namespaces = state
- elif name == feature_external_ges:
- self._external_ges = state
- elif name == feature_string_interning:
- if state:
- if self._interning is None:
- self._interning = {}
- else:
- self._interning = None
- elif name == feature_validation:
- if state:
- raise SAXNotSupportedException(
- "expat does not support validation")
- elif name == feature_external_pes:
- if state:
- raise SAXNotSupportedException(
- "expat does not read external parameter entities")
- elif name == feature_namespace_prefixes:
- if state:
- raise SAXNotSupportedException(
- "expat does not report namespace prefixes")
- else:
- raise SAXNotRecognizedException(
- "Feature '%s' not recognized" % name)
-
- def getProperty(self, name):
- if name == handler.property_lexical_handler:
- return self._lex_handler_prop
- elif name == property_interning_dict:
- return self._interning
- elif name == property_xml_string:
- if self._parser:
- if hasattr(self._parser, "GetInputContext"):
- return self._parser.GetInputContext()
- else:
- raise SAXNotRecognizedException(
- "This version of expat does not support getting"
- " the XML string")
- else:
- raise SAXNotSupportedException(
- "XML string cannot be returned when not parsing")
- raise SAXNotRecognizedException("Property '%s' not recognized" % name)
-
- def setProperty(self, name, value):
- if name == handler.property_lexical_handler:
- self._lex_handler_prop = value
- if self._parsing:
- self._reset_lex_handler_prop()
- elif name == property_interning_dict:
- self._interning = value
- elif name == property_xml_string:
- raise SAXNotSupportedException("Property '%s' cannot be set" %
- name)
- else:
- raise SAXNotRecognizedException("Property '%s' not recognized" %
- name)
-
- # IncrementalParser methods
-
- def feed(self, data, isFinal = 0):
- if not self._parsing:
- self.reset()
- self._parsing = 1
- self._cont_handler.startDocument()
-
- try:
- # The isFinal parameter is internal to the expat reader.
- # If it is set to true, expat will check validity of the entire
- # document. When feeding chunks, they are not normally final -
- # except when invoked from close.
- self._parser.Parse(data, isFinal)
- except expat.error, e:
- exc = SAXParseException(expat.ErrorString(e.code), e, self)
- # FIXME: when to invoke error()?
- self._err_handler.fatalError(exc)
-
- def close(self):
- if self._entity_stack:
- # If we are completing an external entity, do nothing here
- return
- self.feed("", isFinal = 1)
- self._cont_handler.endDocument()
- self._parsing = 0
- # break cycle created by expat handlers pointing to our methods
- self._parser = None
-
- def _reset_cont_handler(self):
- self._parser.ProcessingInstructionHandler = \
- self._cont_handler.processingInstruction
- self._parser.CharacterDataHandler = self._cont_handler.characters
-
- def _reset_lex_handler_prop(self):
- lex = self._lex_handler_prop
- parser = self._parser
- if lex is None:
- parser.CommentHandler = None
- parser.StartCdataSectionHandler = None
- parser.EndCdataSectionHandler = None
- parser.StartDoctypeDeclHandler = None
- parser.EndDoctypeDeclHandler = None
- else:
- parser.CommentHandler = lex.comment
- parser.StartCdataSectionHandler = lex.startCDATA
- parser.EndCdataSectionHandler = lex.endCDATA
- parser.StartDoctypeDeclHandler = self.start_doctype_decl
- parser.EndDoctypeDeclHandler = lex.endDTD
-
- def reset(self):
- if self._namespaces:
- self._parser = expat.ParserCreate(self._source.getEncoding(), " ",
- intern=self._interning)
- self._parser.namespace_prefixes = 1
- self._parser.StartElementHandler = self.start_element_ns
- self._parser.EndElementHandler = self.end_element_ns
- else:
- self._parser = expat.ParserCreate(self._source.getEncoding(),
- intern = self._interning)
- self._parser.StartElementHandler = self.start_element
- self._parser.EndElementHandler = self.end_element
-
- self._reset_cont_handler()
- self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
- self._parser.NotationDeclHandler = self.notation_decl
- self._parser.StartNamespaceDeclHandler = self.start_namespace_decl
- self._parser.EndNamespaceDeclHandler = self.end_namespace_decl
-
- self._decl_handler_prop = None
- if self._lex_handler_prop:
- self._reset_lex_handler_prop()
-# self._parser.DefaultHandler =
-# self._parser.DefaultHandlerExpand =
-# self._parser.NotStandaloneHandler =
- self._parser.ExternalEntityRefHandler = self.external_entity_ref
- try:
- self._parser.SkippedEntityHandler = self.skipped_entity_handler
- except AttributeError:
- # This pyexpat does not support SkippedEntity
- pass
- self._parser.SetParamEntityParsing(
- expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE)
-
- self._parsing = 0
- self._entity_stack = []
-
- # Locator methods
-
- def getColumnNumber(self):
- if self._parser is None:
- return None
- return self._parser.ErrorColumnNumber
-
- def getLineNumber(self):
- if self._parser is None:
- return 1
- return self._parser.ErrorLineNumber
-
- def getPublicId(self):
- return self._source.getPublicId()
-
- def getSystemId(self):
- return self._source.getSystemId()
-
- # event handlers
- def start_element(self, name, attrs):
- self._cont_handler.startElement(name, AttributesImpl(attrs))
-
- def end_element(self, name):
- self._cont_handler.endElement(name)
-
- def start_element_ns(self, name, attrs):
- pair = name.split()
- if len(pair) == 1:
- # no namespace
- pair = (None, name)
- elif len(pair) == 3:
- pair = pair[0], pair[1]
- else:
- # default namespace
- pair = tuple(pair)
-
- newattrs = {}
- qnames = {}
- for (aname, value) in attrs.items():
- parts = aname.split()
- length = len(parts)
- if length == 1:
- # no namespace
- qname = aname
- apair = (None, aname)
- elif length == 3:
- qname = "%s:%s" % (parts[2], parts[1])
- apair = parts[0], parts[1]
- else:
- # default namespace
- qname = parts[1]
- apair = tuple(parts)
-
- newattrs[apair] = value
- qnames[apair] = qname
-
- self._cont_handler.startElementNS(pair, None,
- AttributesNSImpl(newattrs, qnames))
-
- def end_element_ns(self, name):
- pair = name.split()
- if len(pair) == 1:
- pair = (None, name)
- elif len(pair) == 3:
- pair = pair[0], pair[1]
- else:
- pair = tuple(pair)
-
- self._cont_handler.endElementNS(pair, None)
-
- # this is not used (call directly to ContentHandler)
- def processing_instruction(self, target, data):
- self._cont_handler.processingInstruction(target, data)
-
- # this is not used (call directly to ContentHandler)
- def character_data(self, data):
- self._cont_handler.characters(data)
-
- def start_namespace_decl(self, prefix, uri):
- self._cont_handler.startPrefixMapping(prefix, uri)
-
- def end_namespace_decl(self, prefix):
- self._cont_handler.endPrefixMapping(prefix)
-
- def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
- self._lex_handler_prop.startDTD(name, pubid, sysid)
-
- def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
- self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name)
-
- def notation_decl(self, name, base, sysid, pubid):
- self._dtd_handler.notationDecl(name, pubid, sysid)
-
- def external_entity_ref(self, context, base, sysid, pubid):
- if not self._external_ges:
- return 1
-
- source = self._ent_handler.resolveEntity(pubid, sysid)
- source = saxutils.prepare_input_source(source,
- self._source.getSystemId() or
- "")
-
- self._entity_stack.append((self._parser, self._source))
- self._parser = self._parser.ExternalEntityParserCreate(context)
- self._source = source
-
- try:
- xmlreader.IncrementalParser.parse(self, source)
- except:
- return 0 # FIXME: save error info here?
-
- (self._parser, self._source) = self._entity_stack[-1]
- del self._entity_stack[-1]
- return 1
-
- def skipped_entity_handler(self, name, is_pe):
- if is_pe:
- # The SAX spec requires to report skipped PEs with a '%'
- name = '%'+name
- self._cont_handler.skippedEntity(name)
-
-# ---
-
-def create_parser(*args, **kwargs):
- return ExpatParser(*args, **kwargs)
-
-# ---
-
-if __name__ == "__main__":
- import xml.sax
- p = create_parser()
- p.setContentHandler(xml.sax.XMLGenerator())
- p.setErrorHandler(xml.sax.ErrorHandler())
- p.parse("../../../hamlet.xml")
diff --git a/sys/lib/python/xml/sax/handler.py b/sys/lib/python/xml/sax/handler.py
deleted file mode 100644
index 11783b0b5..000000000
--- a/sys/lib/python/xml/sax/handler.py
+++ /dev/null
@@ -1,342 +0,0 @@
-"""
-This module contains the core classes of version 2.0 of SAX for Python.
-This file provides only default classes with absolutely minimum
-functionality, from which drivers and applications can be subclassed.
-
-Many of these classes are empty and are included only as documentation
-of the interfaces.
-
-$Id: handler.py 35816 2004-05-06 03:47:48Z fdrake $
-"""
-
-version = '2.0beta'
-
-#============================================================================
-#
-# HANDLER INTERFACES
-#
-#============================================================================
-
-# ===== ERRORHANDLER =====
-
-class ErrorHandler:
- """Basic interface for SAX error handlers.
-
- If you create an object that implements this interface, then
- register the object with your XMLReader, the parser will call the
- methods in your object to report all warnings and errors. There
- are three levels of errors available: warnings, (possibly)
- recoverable errors, and unrecoverable errors. All methods take a
- SAXParseException as the only parameter."""
-
- def error(self, exception):
- "Handle a recoverable error."
- raise exception
-
- def fatalError(self, exception):
- "Handle a non-recoverable error."
- raise exception
-
- def warning(self, exception):
- "Handle a warning."
- print exception
-
-
-# ===== CONTENTHANDLER =====
-
-class ContentHandler:
- """Interface for receiving logical document content events.
-
- This is the main callback interface in SAX, and the one most
- important to applications. The order of events in this interface
- mirrors the order of the information in the document."""
-
- def __init__(self):
- self._locator = None
-
- def setDocumentLocator(self, locator):
- """Called by the parser to give the application a locator for
- locating the origin of document events.
-
- SAX parsers are strongly encouraged (though not absolutely
- required) to supply a locator: if it does so, it must supply
- the locator to the application by invoking this method before
- invoking any of the other methods in the DocumentHandler
- interface.
-
- The locator allows the application to determine the end
- position of any document-related event, even if the parser is
- not reporting an error. Typically, the application will use
- this information for reporting its own errors (such as
- character content that does not match an application's
- business rules). The information returned by the locator is
- probably not sufficient for use with a search engine.
-
- Note that the locator will return correct information only
- during the invocation of the events in this interface. The
- application should not attempt to use it at any other time."""
- self._locator = locator
-
- def startDocument(self):
- """Receive notification of the beginning of a document.
-
- The SAX parser will invoke this method only once, before any
- other methods in this interface or in DTDHandler (except for
- setDocumentLocator)."""
-
- def endDocument(self):
- """Receive notification of the end of a document.
-
- The SAX parser will invoke this method only once, and it will
- be the last method invoked during the parse. The parser shall
- not invoke this method until it has either abandoned parsing
- (because of an unrecoverable error) or reached the end of
- input."""
-
- def startPrefixMapping(self, prefix, uri):
- """Begin the scope of a prefix-URI Namespace mapping.
-
- The information from this event is not necessary for normal
- Namespace processing: the SAX XML reader will automatically
- replace prefixes for element and attribute names when the
- http://xml.org/sax/features/namespaces feature is true (the
- default).
-
- There are cases, however, when applications need to use
- prefixes in character data or in attribute values, where they
- cannot safely be expanded automatically; the
- start/endPrefixMapping event supplies the information to the
- application to expand prefixes in those contexts itself, if
- necessary.
-
- Note that start/endPrefixMapping events are not guaranteed to
- be properly nested relative to each-other: all
- startPrefixMapping events will occur before the corresponding
- startElement event, and all endPrefixMapping events will occur
- after the corresponding endElement event, but their order is
- not guaranteed."""
-
- def endPrefixMapping(self, prefix):
- """End the scope of a prefix-URI mapping.
-
- See startPrefixMapping for details. This event will always
- occur after the corresponding endElement event, but the order
- of endPrefixMapping events is not otherwise guaranteed."""
-
- def startElement(self, name, attrs):
- """Signals the start of an element in non-namespace mode.
-
- The name parameter contains the raw XML 1.0 name of the
- element type as a string and the attrs parameter holds an
- instance of the Attributes class containing the attributes of
- the element."""
-
- def endElement(self, name):
- """Signals the end of an element in non-namespace mode.
-
- The name parameter contains the name of the element type, just
- as with the startElement event."""
-
- def startElementNS(self, name, qname, attrs):
- """Signals the start of an element in namespace mode.
-
- The name parameter contains the name of the element type as a
- (uri, localname) tuple, the qname parameter the raw XML 1.0
- name used in the source document, and the attrs parameter
- holds an instance of the Attributes class containing the
- attributes of the element.
-
- The uri part of the name tuple is None for elements which have
- no namespace."""
-
- def endElementNS(self, name, qname):
- """Signals the end of an element in namespace mode.
-
- The name parameter contains the name of the element type, just
- as with the startElementNS event."""
-
- def characters(self, content):
- """Receive notification of character data.
-
- The Parser will call this method to report each chunk of
- character data. SAX parsers may return all contiguous
- character data in a single chunk, or they may split it into
- several chunks; however, all of the characters in any single
- event must come from the same external entity so that the
- Locator provides useful information."""
-
- def ignorableWhitespace(self, whitespace):
- """Receive notification of ignorable whitespace in element content.
-
- Validating Parsers must use this method to report each chunk
- of ignorable whitespace (see the W3C XML 1.0 recommendation,
- section 2.10): non-validating parsers may also use this method
- if they are capable of parsing and using content models.
-
- SAX parsers may return all contiguous whitespace in a single
- chunk, or they may split it into several chunks; however, all
- of the characters in any single event must come from the same
- external entity, so that the Locator provides useful
- information."""
-
- def processingInstruction(self, target, data):
- """Receive notification of a processing instruction.
-
- The Parser will invoke this method once for each processing
- instruction found: note that processing instructions may occur
- before or after the main document element.
-
- A SAX parser should never report an XML declaration (XML 1.0,
- section 2.8) or a text declaration (XML 1.0, section 4.3.1)
- using this method."""
-
- def skippedEntity(self, name):
- """Receive notification of a skipped entity.
-
- The Parser will invoke this method once for each entity
- skipped. Non-validating processors may skip entities if they
- have not seen the declarations (because, for example, the
- entity was declared in an external DTD subset). All processors
- may skip external entities, depending on the values of the
- http://xml.org/sax/features/external-general-entities and the
- http://xml.org/sax/features/external-parameter-entities
- properties."""
-
-
-# ===== DTDHandler =====
-
-class DTDHandler:
- """Handle DTD events.
-
- This interface specifies only those DTD events required for basic
- parsing (unparsed entities and attributes)."""
-
- def notationDecl(self, name, publicId, systemId):
- "Handle a notation declaration event."
-
- def unparsedEntityDecl(self, name, publicId, systemId, ndata):
- "Handle an unparsed entity declaration event."
-
-
-# ===== ENTITYRESOLVER =====
-
-class EntityResolver:
- """Basic interface for resolving entities. If you create an object
- implementing this interface, then register the object with your
- Parser, the parser will call the method in your object to
- resolve all external entities. Note that DefaultHandler implements
- this interface with the default behaviour."""
-
- def resolveEntity(self, publicId, systemId):
- """Resolve the system identifier of an entity and return either
- the system identifier to read from as a string, or an InputSource
- to read from."""
- return systemId
-
-
-#============================================================================
-#
-# CORE FEATURES
-#
-#============================================================================
-
-feature_namespaces = "http://xml.org/sax/features/namespaces"
-# true: Perform Namespace processing (default).
-# false: Optionally do not perform Namespace processing
-# (implies namespace-prefixes).
-# access: (parsing) read-only; (not parsing) read/write
-
-feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
-# true: Report the original prefixed names and attributes used for Namespace
-# declarations.
-# false: Do not report attributes used for Namespace declarations, and
-# optionally do not report original prefixed names (default).
-# access: (parsing) read-only; (not parsing) read/write
-
-feature_string_interning = "http://xml.org/sax/features/string-interning"
-# true: All element names, prefixes, attribute names, Namespace URIs, and
-# local names are interned using the built-in intern function.
-# false: Names are not necessarily interned, although they may be (default).
-# access: (parsing) read-only; (not parsing) read/write
-
-feature_validation = "http://xml.org/sax/features/validation"
-# true: Report all validation errors (implies external-general-entities and
-# external-parameter-entities).
-# false: Do not report validation errors.
-# access: (parsing) read-only; (not parsing) read/write
-
-feature_external_ges = "http://xml.org/sax/features/external-general-entities"
-# true: Include all external general (text) entities.
-# false: Do not include external general entities.
-# access: (parsing) read-only; (not parsing) read/write
-
-feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
-# true: Include all external parameter entities, including the external
-# DTD subset.
-# false: Do not include any external parameter entities, even the external
-# DTD subset.
-# access: (parsing) read-only; (not parsing) read/write
-
-all_features = [feature_namespaces,
- feature_namespace_prefixes,
- feature_string_interning,
- feature_validation,
- feature_external_ges,
- feature_external_pes]
-
-
-#============================================================================
-#
-# CORE PROPERTIES
-#
-#============================================================================
-
-property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
-# data type: xml.sax.sax2lib.LexicalHandler
-# description: An optional extension handler for lexical events like comments.
-# access: read/write
-
-property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
-# data type: xml.sax.sax2lib.DeclHandler
-# description: An optional extension handler for DTD-related events other
-# than notations and unparsed entities.
-# access: read/write
-
-property_dom_node = "http://xml.org/sax/properties/dom-node"
-# data type: org.w3c.dom.Node
-# description: When parsing, the current DOM node being visited if this is
-# a DOM iterator; when not parsing, the root DOM node for
-# iteration.
-# access: (parsing) read-only; (not parsing) read/write
-
-property_xml_string = "http://xml.org/sax/properties/xml-string"
-# data type: String
-# description: The literal string of characters that was the source for
-# the current event.
-# access: read-only
-
-property_encoding = "http://www.python.org/sax/properties/encoding"
-# data type: String
-# description: The name of the encoding to assume for input data.
-# access: write: set the encoding, e.g. established by a higher-level
-# protocol. May change during parsing (e.g. after
-# processing a META tag)
-# read: return the current encoding (possibly established through
-# auto-detection.
-# initial value: UTF-8
-#
-
-property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
-# data type: Dictionary
-# description: The dictionary used to intern common strings in the document
-# access: write: Request that the parser uses a specific dictionary, to
-# allow interning across different documents
-# read: return the current interning dictionary, or None
-#
-
-all_properties = [property_lexical_handler,
- property_dom_node,
- property_declaration_handler,
- property_xml_string,
- property_encoding,
- property_interning_dict]
diff --git a/sys/lib/python/xml/sax/saxutils.py b/sys/lib/python/xml/sax/saxutils.py
deleted file mode 100644
index 46818f3d8..000000000
--- a/sys/lib/python/xml/sax/saxutils.py
+++ /dev/null
@@ -1,302 +0,0 @@
-"""\
-A library of useful helper classes to the SAX classes, for the
-convenience of application and driver writers.
-"""
-
-import os, urlparse, urllib, types
-import handler
-import xmlreader
-
-try:
- _StringTypes = [types.StringType, types.UnicodeType]
-except AttributeError:
- _StringTypes = [types.StringType]
-
-# See whether the xmlcharrefreplace error handler is
-# supported
-try:
- from codecs import xmlcharrefreplace_errors
- _error_handling = "xmlcharrefreplace"
- del xmlcharrefreplace_errors
-except ImportError:
- _error_handling = "strict"
-
-def __dict_replace(s, d):
- """Replace substrings of a string using a dictionary."""
- for key, value in d.items():
- s = s.replace(key, value)
- return s
-
-def escape(data, entities={}):
- """Escape &, <, and > in a string of data.
-
- You can escape other strings of data by passing a dictionary as
- the optional entities parameter. The keys and values must all be
- strings; each key will be replaced with its corresponding value.
- """
-
- # must do ampersand first
- data = data.replace("&", "&amp;")
- data = data.replace(">", "&gt;")
- data = data.replace("<", "&lt;")
- if entities:
- data = __dict_replace(data, entities)
- return data
-
-def unescape(data, entities={}):
- """Unescape &amp;, &lt;, and &gt; in a string of data.
-
- You can unescape other strings of data by passing a dictionary as
- the optional entities parameter. The keys and values must all be
- strings; each key will be replaced with its corresponding value.
- """
- data = data.replace("&lt;", "<")
- data = data.replace("&gt;", ">")
- if entities:
- data = __dict_replace(data, entities)
- # must do ampersand last
- return data.replace("&amp;", "&")
-
-def quoteattr(data, entities={}):
- """Escape and quote an attribute value.
-
- Escape &, <, and > in a string of data, then quote it for use as
- an attribute value. The \" character will be escaped as well, if
- necessary.
-
- You can escape other strings of data by passing a dictionary as
- the optional entities parameter. The keys and values must all be
- strings; each key will be replaced with its corresponding value.
- """
- entities = entities.copy()
- entities.update({'\n': '&#10;', '\r': '&#13;', '\t':'&#9;'})
- data = escape(data, entities)
- if '"' in data:
- if "'" in data:
- data = '"%s"' % data.replace('"', "&quot;")
- else:
- data = "'%s'" % data
- else:
- data = '"%s"' % data
- return data
-
-
-class XMLGenerator(handler.ContentHandler):
-
- def __init__(self, out=None, encoding="iso-8859-1"):
- if out is None:
- import sys
- out = sys.stdout
- handler.ContentHandler.__init__(self)
- self._out = out
- self._ns_contexts = [{}] # contains uri -> prefix dicts
- self._current_context = self._ns_contexts[-1]
- self._undeclared_ns_maps = []
- self._encoding = encoding
-
- def _write(self, text):
- if isinstance(text, str):
- self._out.write(text)
- else:
- self._out.write(text.encode(self._encoding, _error_handling))
-
- def _qname(self, name):
- """Builds a qualified name from a (ns_url, localname) pair"""
- if name[0]:
- # The name is in a non-empty namespace
- prefix = self._current_context[name[0]]
- if prefix:
- # If it is not the default namespace, prepend the prefix
- return prefix + ":" + name[1]
- # Return the unqualified name
- return name[1]
-
- # ContentHandler methods
-
- def startDocument(self):
- self._write('<?xml version="1.0" encoding="%s"?>\n' %
- self._encoding)
-
- def startPrefixMapping(self, prefix, uri):
- self._ns_contexts.append(self._current_context.copy())
- self._current_context[uri] = prefix
- self._undeclared_ns_maps.append((prefix, uri))
-
- def endPrefixMapping(self, prefix):
- self._current_context = self._ns_contexts[-1]
- del self._ns_contexts[-1]
-
- def startElement(self, name, attrs):
- self._write('<' + name)
- for (name, value) in attrs.items():
- self._write(' %s=%s' % (name, quoteattr(value)))
- self._write('>')
-
- def endElement(self, name):
- self._write('</%s>' % name)
-
- def startElementNS(self, name, qname, attrs):
- self._write('<' + self._qname(name))
-
- for prefix, uri in self._undeclared_ns_maps:
- if prefix:
- self._out.write(' xmlns:%s="%s"' % (prefix, uri))
- else:
- self._out.write(' xmlns="%s"' % uri)
- self._undeclared_ns_maps = []
-
- for (name, value) in attrs.items():
- self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
- self._write('>')
-
- def endElementNS(self, name, qname):
- self._write('</%s>' % self._qname(name))
-
- def characters(self, content):
- self._write(escape(content))
-
- def ignorableWhitespace(self, content):
- self._write(content)
-
- def processingInstruction(self, target, data):
- self._write('<?%s %s?>' % (target, data))
-
-
-class XMLFilterBase(xmlreader.XMLReader):
- """This class is designed to sit between an XMLReader and the
- client application's event handlers. By default, it does nothing
- but pass requests up to the reader and events on to the handlers
- unmodified, but subclasses can override specific methods to modify
- the event stream or the configuration requests as they pass
- through."""
-
- def __init__(self, parent = None):
- xmlreader.XMLReader.__init__(self)
- self._parent = parent
-
- # ErrorHandler methods
-
- def error(self, exception):
- self._err_handler.error(exception)
-
- def fatalError(self, exception):
- self._err_handler.fatalError(exception)
-
- def warning(self, exception):
- self._err_handler.warning(exception)
-
- # ContentHandler methods
-
- def setDocumentLocator(self, locator):
- self._cont_handler.setDocumentLocator(locator)
-
- def startDocument(self):
- self._cont_handler.startDocument()
-
- def endDocument(self):
- self._cont_handler.endDocument()
-
- def startPrefixMapping(self, prefix, uri):
- self._cont_handler.startPrefixMapping(prefix, uri)
-
- def endPrefixMapping(self, prefix):
- self._cont_handler.endPrefixMapping(prefix)
-
- def startElement(self, name, attrs):
- self._cont_handler.startElement(name, attrs)
-
- def endElement(self, name):
- self._cont_handler.endElement(name)
-
- def startElementNS(self, name, qname, attrs):
- self._cont_handler.startElementNS(name, qname, attrs)
-
- def endElementNS(self, name, qname):
- self._cont_handler.endElementNS(name, qname)
-
- def characters(self, content):
- self._cont_handler.characters(content)
-
- def ignorableWhitespace(self, chars):
- self._cont_handler.ignorableWhitespace(chars)
-
- def processingInstruction(self, target, data):
- self._cont_handler.processingInstruction(target, data)
-
- def skippedEntity(self, name):
- self._cont_handler.skippedEntity(name)
-
- # DTDHandler methods
-
- def notationDecl(self, name, publicId, systemId):
- self._dtd_handler.notationDecl(name, publicId, systemId)
-
- def unparsedEntityDecl(self, name, publicId, systemId, ndata):
- self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
-
- # EntityResolver methods
-
- def resolveEntity(self, publicId, systemId):
- return self._ent_handler.resolveEntity(publicId, systemId)
-
- # XMLReader methods
-
- def parse(self, source):
- self._parent.setContentHandler(self)
- self._parent.setErrorHandler(self)
- self._parent.setEntityResolver(self)
- self._parent.setDTDHandler(self)
- self._parent.parse(source)
-
- def setLocale(self, locale):
- self._parent.setLocale(locale)
-
- def getFeature(self, name):
- return self._parent.getFeature(name)
-
- def setFeature(self, name, state):
- self._parent.setFeature(name, state)
-
- def getProperty(self, name):
- return self._parent.getProperty(name)
-
- def setProperty(self, name, value):
- self._parent.setProperty(name, value)
-
- # XMLFilter methods
-
- def getParent(self):
- return self._parent
-
- def setParent(self, parent):
- self._parent = parent
-
-# --- Utility functions
-
-def prepare_input_source(source, base = ""):
- """This function takes an InputSource and an optional base URL and
- returns a fully resolved InputSource object ready for reading."""
-
- if type(source) in _StringTypes:
- source = xmlreader.InputSource(source)
- elif hasattr(source, "read"):
- f = source
- source = xmlreader.InputSource()
- source.setByteStream(f)
- if hasattr(f, "name"):
- source.setSystemId(f.name)
-
- if source.getByteStream() is None:
- sysid = source.getSystemId()
- basehead = os.path.dirname(os.path.normpath(base))
- sysidfilename = os.path.join(basehead, sysid)
- if os.path.isfile(sysidfilename):
- source.setSystemId(sysidfilename)
- f = open(sysidfilename, "rb")
- else:
- source.setSystemId(urlparse.urljoin(base, sysid))
- f = urllib.urlopen(source.getSystemId())
-
- source.setByteStream(f)
-
- return source
diff --git a/sys/lib/python/xml/sax/xmlreader.py b/sys/lib/python/xml/sax/xmlreader.py
deleted file mode 100644
index 9a2361e34..000000000
--- a/sys/lib/python/xml/sax/xmlreader.py
+++ /dev/null
@@ -1,381 +0,0 @@
-"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
-should be based on this code. """
-
-import handler
-
-from _exceptions import SAXNotSupportedException, SAXNotRecognizedException
-
-
-# ===== XMLREADER =====
-
-class XMLReader:
- """Interface for reading an XML document using callbacks.
-
- XMLReader is the interface that an XML parser's SAX2 driver must
- implement. This interface allows an application to set and query
- features and properties in the parser, to register event handlers
- for document processing, and to initiate a document parse.
-
- All SAX interfaces are assumed to be synchronous: the parse
- methods must not return until parsing is complete, and readers
- must wait for an event-handler callback to return before reporting
- the next event."""
-
- def __init__(self):
- self._cont_handler = handler.ContentHandler()
- self._dtd_handler = handler.DTDHandler()
- self._ent_handler = handler.EntityResolver()
- self._err_handler = handler.ErrorHandler()
-
- def parse(self, source):
- "Parse an XML document from a system identifier or an InputSource."
- raise NotImplementedError("This method must be implemented!")
-
- def getContentHandler(self):
- "Returns the current ContentHandler."
- return self._cont_handler
-
- def setContentHandler(self, handler):
- "Registers a new object to receive document content events."
- self._cont_handler = handler
-
- def getDTDHandler(self):
- "Returns the current DTD handler."
- return self._dtd_handler
-
- def setDTDHandler(self, handler):
- "Register an object to receive basic DTD-related events."
- self._dtd_handler = handler
-
- def getEntityResolver(self):
- "Returns the current EntityResolver."
- return self._ent_handler
-
- def setEntityResolver(self, resolver):
- "Register an object to resolve external entities."
- self._ent_handler = resolver
-
- def getErrorHandler(self):
- "Returns the current ErrorHandler."
- return self._err_handler
-
- def setErrorHandler(self, handler):
- "Register an object to receive error-message events."
- self._err_handler = handler
-
- def setLocale(self, locale):
- """Allow an application to set the locale for errors and warnings.
-
- SAX parsers are not required to provide localization for errors
- and warnings; if they cannot support the requested locale,
- however, they must throw a SAX exception. Applications may
- request a locale change in the middle of a parse."""
- raise SAXNotSupportedException("Locale support not implemented")
-
- def getFeature(self, name):
- "Looks up and returns the state of a SAX2 feature."
- raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
-
- def setFeature(self, name, state):
- "Sets the state of a SAX2 feature."
- raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
-
- def getProperty(self, name):
- "Looks up and returns the value of a SAX2 property."
- raise SAXNotRecognizedException("Property '%s' not recognized" % name)
-
- def setProperty(self, name, value):
- "Sets the value of a SAX2 property."
- raise SAXNotRecognizedException("Property '%s' not recognized" % name)
-
-class IncrementalParser(XMLReader):
- """This interface adds three extra methods to the XMLReader
- interface that allow XML parsers to support incremental
- parsing. Support for this interface is optional, since not all
- underlying XML parsers support this functionality.
-
- When the parser is instantiated it is ready to begin accepting
- data from the feed method immediately. After parsing has been
- finished with a call to close the reset method must be called to
- make the parser ready to accept new data, either from feed or
- using the parse method.
-
- Note that these methods must _not_ be called during parsing, that
- is, after parse has been called and before it returns.
-
- By default, the class also implements the parse method of the XMLReader
- interface using the feed, close and reset methods of the
- IncrementalParser interface as a convenience to SAX 2.0 driver
- writers."""
-
- def __init__(self, bufsize=2**16):
- self._bufsize = bufsize
- XMLReader.__init__(self)
-
- def parse(self, source):
- import saxutils
- source = saxutils.prepare_input_source(source)
-
- self.prepareParser(source)
- file = source.getByteStream()
- buffer = file.read(self._bufsize)
- while buffer != "":
- self.feed(buffer)
- buffer = file.read(self._bufsize)
- self.close()
-
- def feed(self, data):
- """This method gives the raw XML data in the data parameter to
- the parser and makes it parse the data, emitting the
- corresponding events. It is allowed for XML constructs to be
- split across several calls to feed.
-
- feed may raise SAXException."""
- raise NotImplementedError("This method must be implemented!")
-
- def prepareParser(self, source):
- """This method is called by the parse implementation to allow
- the SAX 2.0 driver to prepare itself for parsing."""
- raise NotImplementedError("prepareParser must be overridden!")
-
- def close(self):
- """This method is called when the entire XML document has been
- passed to the parser through the feed method, to notify the
- parser that there are no more data. This allows the parser to
- do the final checks on the document and empty the internal
- data buffer.
-
- The parser will not be ready to parse another document until
- the reset method has been called.
-
- close may raise SAXException."""
- raise NotImplementedError("This method must be implemented!")
-
- def reset(self):
- """This method is called after close has been called to reset
- the parser so that it is ready to parse new documents. The
- results of calling parse or feed after close without calling
- reset are undefined."""
- raise NotImplementedError("This method must be implemented!")
-
-# ===== LOCATOR =====
-
-class Locator:
- """Interface for associating a SAX event with a document
- location. A locator object will return valid results only during
- calls to DocumentHandler methods; at any other time, the
- results are unpredictable."""
-
- def getColumnNumber(self):
- "Return the column number where the current event ends."
- return -1
-
- def getLineNumber(self):
- "Return the line number where the current event ends."
- return -1
-
- def getPublicId(self):
- "Return the public identifier for the current event."
- return None
-
- def getSystemId(self):
- "Return the system identifier for the current event."
- return None
-
-# ===== INPUTSOURCE =====
-
-class InputSource:
- """Encapsulation of the information needed by the XMLReader to
- read entities.
-
- This class may include information about the public identifier,
- system identifier, byte stream (possibly with character encoding
- information) and/or the character stream of an entity.
-
- Applications will create objects of this class for use in the
- XMLReader.parse method and for returning from
- EntityResolver.resolveEntity.
-
- An InputSource belongs to the application, the XMLReader is not
- allowed to modify InputSource objects passed to it from the
- application, although it may make copies and modify those."""
-
- def __init__(self, system_id = None):
- self.__system_id = system_id
- self.__public_id = None
- self.__encoding = None
- self.__bytefile = None
- self.__charfile = None
-
- def setPublicId(self, public_id):
- "Sets the public identifier of this InputSource."
- self.__public_id = public_id
-
- def getPublicId(self):
- "Returns the public identifier of this InputSource."
- return self.__public_id
-
- def setSystemId(self, system_id):
- "Sets the system identifier of this InputSource."
- self.__system_id = system_id
-
- def getSystemId(self):
- "Returns the system identifier of this InputSource."
- return self.__system_id
-
- def setEncoding(self, encoding):
- """Sets the character encoding of this InputSource.
-
- The encoding must be a string acceptable for an XML encoding
- declaration (see section 4.3.3 of the XML recommendation).
-
- The encoding attribute of the InputSource is ignored if the
- InputSource also contains a character stream."""
- self.__encoding = encoding
-
- def getEncoding(self):
- "Get the character encoding of this InputSource."
- return self.__encoding
-
- def setByteStream(self, bytefile):
- """Set the byte stream (a Python file-like object which does
- not perform byte-to-character conversion) for this input
- source.
-
- The SAX parser will ignore this if there is also a character
- stream specified, but it will use a byte stream in preference
- to opening a URI connection itself.
-
- If the application knows the character encoding of the byte
- stream, it should set it with the setEncoding method."""
- self.__bytefile = bytefile
-
- def getByteStream(self):
- """Get the byte stream for this input source.
-
- The getEncoding method will return the character encoding for
- this byte stream, or None if unknown."""
- return self.__bytefile
-
- def setCharacterStream(self, charfile):
- """Set the character stream for this input source. (The stream
- must be a Python 2.0 Unicode-wrapped file-like that performs
- conversion to Unicode strings.)
-
- If there is a character stream specified, the SAX parser will
- ignore any byte stream and will not attempt to open a URI
- connection to the system identifier."""
- self.__charfile = charfile
-
- def getCharacterStream(self):
- "Get the character stream for this input source."
- return self.__charfile
-
-# ===== ATTRIBUTESIMPL =====
-
-class AttributesImpl:
-
- def __init__(self, attrs):
- """Non-NS-aware implementation.
-
- attrs should be of the form {name : value}."""
- self._attrs = attrs
-
- def getLength(self):
- return len(self._attrs)
-
- def getType(self, name):
- return "CDATA"
-
- def getValue(self, name):
- return self._attrs[name]
-
- def getValueByQName(self, name):
- return self._attrs[name]
-
- def getNameByQName(self, name):
- if not self._attrs.has_key(name):
- raise KeyError, name
- return name
-
- def getQNameByName(self, name):
- if not self._attrs.has_key(name):
- raise KeyError, name
- return name
-
- def getNames(self):
- return self._attrs.keys()
-
- def getQNames(self):
- return self._attrs.keys()
-
- def __len__(self):
- return len(self._attrs)
-
- def __getitem__(self, name):
- return self._attrs[name]
-
- def keys(self):
- return self._attrs.keys()
-
- def has_key(self, name):
- return self._attrs.has_key(name)
-
- def __contains__(self, name):
- return self._attrs.has_key(name)
-
- def get(self, name, alternative=None):
- return self._attrs.get(name, alternative)
-
- def copy(self):
- return self.__class__(self._attrs)
-
- def items(self):
- return self._attrs.items()
-
- def values(self):
- return self._attrs.values()
-
-# ===== ATTRIBUTESNSIMPL =====
-
-class AttributesNSImpl(AttributesImpl):
-
- def __init__(self, attrs, qnames):
- """NS-aware implementation.
-
- attrs should be of the form {(ns_uri, lname): value, ...}.
- qnames of the form {(ns_uri, lname): qname, ...}."""
- self._attrs = attrs
- self._qnames = qnames
-
- def getValueByQName(self, name):
- for (nsname, qname) in self._qnames.items():
- if qname == name:
- return self._attrs[nsname]
-
- raise KeyError, name
-
- def getNameByQName(self, name):
- for (nsname, qname) in self._qnames.items():
- if qname == name:
- return nsname
-
- raise KeyError, name
-
- def getQNameByName(self, name):
- return self._qnames[name]
-
- def getQNames(self):
- return self._qnames.values()
-
- def copy(self):
- return self.__class__(self._attrs, self._qnames)
-
-
-def _test():
- XMLReader()
- IncrementalParser()
- Locator()
-
-if __name__ == "__main__":
- _test()
diff --git a/sys/lib/python/xmllib.py b/sys/lib/python/xmllib.py
deleted file mode 100644
index 2a189cdd8..000000000
--- a/sys/lib/python/xmllib.py
+++ /dev/null
@@ -1,929 +0,0 @@
-"""A parser for XML, using the derived class as static DTD."""
-
-# Author: Sjoerd Mullender.
-
-import re
-import string
-
-import warnings
-warnings.warn("The xmllib module is obsolete. Use xml.sax instead.", DeprecationWarning)
-del warnings
-
-version = '0.3'
-
-class Error(RuntimeError):
- pass
-
-# Regular expressions used for parsing
-
-_S = '[ \t\r\n]+' # white space
-_opS = '[ \t\r\n]*' # optional white space
-_Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*' # valid XML name
-_QStr = "(?:'[^']*'|\"[^\"]*\")" # quoted XML string
-illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content
-interesting = re.compile('[]&<]')
-
-amp = re.compile('&')
-ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]')
-entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]')
-charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])')
-space = re.compile(_S + '$')
-newline = re.compile('\n')
-
-attrfind = re.compile(
- _S + '(?P<name>' + _Name + ')'
- '(' + _opS + '=' + _opS +
- '(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?')
-starttagopen = re.compile('<' + _Name)
-starttagend = re.compile(_opS + '(?P<slash>/?)>')
-starttagmatch = re.compile('<(?P<tagname>'+_Name+')'
- '(?P<attrs>(?:'+attrfind.pattern+')*)'+
- starttagend.pattern)
-endtagopen = re.compile('</')
-endbracket = re.compile(_opS + '>')
-endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>')
-tagfind = re.compile(_Name)
-cdataopen = re.compile(r'<!\[CDATA\[')
-cdataclose = re.compile(r'\]\]>')
-# this matches one of the following:
-# SYSTEM SystemLiteral
-# PUBLIC PubidLiteral SystemLiteral
-_SystemLiteral = '(?P<%s>'+_QStr+')'
-_PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \
- "'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')"
-_ExternalId = '(?:SYSTEM|' \
- 'PUBLIC'+_S+_PublicLiteral%'pubid'+ \
- ')'+_S+_SystemLiteral%'syslit'
-doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')'
- '(?:'+_S+_ExternalId+')?'+_opS)
-xmldecl = re.compile('<\?xml'+_S+
- 'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+
- '(?:'+_S+'encoding'+_opS+'='+_opS+
- "(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|"
- '"[A-Za-z][-A-Za-z0-9._]*"))?'
- '(?:'+_S+'standalone'+_opS+'='+_opS+
- '(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+
- _opS+'\?>')
-procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS)
-procclose = re.compile(_opS + r'\?>')
-commentopen = re.compile('<!--')
-commentclose = re.compile('-->')
-doubledash = re.compile('--')
-attrtrans = string.maketrans(' \r\n\t', ' ')
-
-# definitions for XML namespaces
-_NCName = '[a-zA-Z_][-a-zA-Z0-9._]*' # XML Name, minus the ":"
-ncname = re.compile(_NCName + '$')
-qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix
- '(?P<local>' + _NCName + ')$')
-
-xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$')
-
-# XML parser base class -- find tags and call handler functions.
-# Usage: p = XMLParser(); p.feed(data); ...; p.close().
-# The dtd is defined by deriving a class which defines methods with
-# special names to handle tags: start_foo and end_foo to handle <foo>
-# and </foo>, respectively. The data between tags is passed to the
-# parser by calling self.handle_data() with some data as argument (the
-# data may be split up in arbitrary chunks).
-
-class XMLParser:
- attributes = {} # default, to be overridden
- elements = {} # default, to be overridden
-
- # parsing options, settable using keyword args in __init__
- __accept_unquoted_attributes = 0
- __accept_missing_endtag_name = 0
- __map_case = 0
- __accept_utf8 = 0
- __translate_attribute_references = 1
-
- # Interface -- initialize and reset this instance
- def __init__(self, **kw):
- self.__fixed = 0
- if 'accept_unquoted_attributes' in kw:
- self.__accept_unquoted_attributes = kw['accept_unquoted_attributes']
- if 'accept_missing_endtag_name' in kw:
- self.__accept_missing_endtag_name = kw['accept_missing_endtag_name']
- if 'map_case' in kw:
- self.__map_case = kw['map_case']
- if 'accept_utf8' in kw:
- self.__accept_utf8 = kw['accept_utf8']
- if 'translate_attribute_references' in kw:
- self.__translate_attribute_references = kw['translate_attribute_references']
- self.reset()
-
- def __fixelements(self):
- self.__fixed = 1
- self.elements = {}
- self.__fixdict(self.__dict__)
- self.__fixclass(self.__class__)
-
- def __fixclass(self, kl):
- self.__fixdict(kl.__dict__)
- for k in kl.__bases__:
- self.__fixclass(k)
-
- def __fixdict(self, dict):
- for key in dict.keys():
- if key[:6] == 'start_':
- tag = key[6:]
- start, end = self.elements.get(tag, (None, None))
- if start is None:
- self.elements[tag] = getattr(self, key), end
- elif key[:4] == 'end_':
- tag = key[4:]
- start, end = self.elements.get(tag, (None, None))
- if end is None:
- self.elements[tag] = start, getattr(self, key)
-
- # Interface -- reset this instance. Loses all unprocessed data
- def reset(self):
- self.rawdata = ''
- self.stack = []
- self.nomoretags = 0
- self.literal = 0
- self.lineno = 1
- self.__at_start = 1
- self.__seen_doctype = None
- self.__seen_starttag = 0
- self.__use_namespaces = 0
- self.__namespaces = {'xml':None} # xml is implicitly declared
- # backward compatibility hack: if elements not overridden,
- # fill it in ourselves
- if self.elements is XMLParser.elements:
- self.__fixelements()
-
- # For derived classes only -- enter literal mode (CDATA) till EOF
- def setnomoretags(self):
- self.nomoretags = self.literal = 1
-
- # For derived classes only -- enter literal mode (CDATA)
- def setliteral(self, *args):
- self.literal = 1
-
- # Interface -- feed some data to the parser. Call this as
- # often as you want, with as little or as much text as you
- # want (may include '\n'). (This just saves the text, all the
- # processing is done by goahead().)
- def feed(self, data):
- self.rawdata = self.rawdata + data
- self.goahead(0)
-
- # Interface -- handle the remaining data
- def close(self):
- self.goahead(1)
- if self.__fixed:
- self.__fixed = 0
- # remove self.elements so that we don't leak
- del self.elements
-
- # Interface -- translate references
- def translate_references(self, data, all = 1):
- if not self.__translate_attribute_references:
- return data
- i = 0
- while 1:
- res = amp.search(data, i)
- if res is None:
- return data
- s = res.start(0)
- res = ref.match(data, s)
- if res is None:
- self.syntax_error("bogus `&'")
- i = s+1
- continue
- i = res.end(0)
- str = res.group(1)
- rescan = 0
- if str[0] == '#':
- if str[1] == 'x':
- str = chr(int(str[2:], 16))
- else:
- str = chr(int(str[1:]))
- if data[i - 1] != ';':
- self.syntax_error("`;' missing after char reference")
- i = i-1
- elif all:
- if str in self.entitydefs:
- str = self.entitydefs[str]
- rescan = 1
- elif data[i - 1] != ';':
- self.syntax_error("bogus `&'")
- i = s + 1 # just past the &
- continue
- else:
- self.syntax_error("reference to unknown entity `&%s;'" % str)
- str = '&' + str + ';'
- elif data[i - 1] != ';':
- self.syntax_error("bogus `&'")
- i = s + 1 # just past the &
- continue
-
- # when we get here, str contains the translated text and i points
- # to the end of the string that is to be replaced
- data = data[:s] + str + data[i:]
- if rescan:
- i = s
- else:
- i = s + len(str)
-
- # Interface - return a dictionary of all namespaces currently valid
- def getnamespace(self):
- nsdict = {}
- for t, d, nst in self.stack:
- nsdict.update(d)
- return nsdict
-
- # Internal -- handle data as far as reasonable. May leave state
- # and data to be processed by a subsequent call. If 'end' is
- # true, force handling all data as if followed by EOF marker.
- def goahead(self, end):
- rawdata = self.rawdata
- i = 0
- n = len(rawdata)
- while i < n:
- if i > 0:
- self.__at_start = 0
- if self.nomoretags:
- data = rawdata[i:n]
- self.handle_data(data)
- self.lineno = self.lineno + data.count('\n')
- i = n
- break
- res = interesting.search(rawdata, i)
- if res:
- j = res.start(0)
- else:
- j = n
- if i < j:
- data = rawdata[i:j]
- if self.__at_start and space.match(data) is None:
- self.syntax_error('illegal data at start of file')
- self.__at_start = 0
- if not self.stack and space.match(data) is None:
- self.syntax_error('data not in content')
- if not self.__accept_utf8 and illegal.search(data):
- self.syntax_error('illegal character in content')
- self.handle_data(data)
- self.lineno = self.lineno + data.count('\n')
- i = j
- if i == n: break
- if rawdata[i] == '<':
- if starttagopen.match(rawdata, i):
- if self.literal:
- data = rawdata[i]
- self.handle_data(data)
- self.lineno = self.lineno + data.count('\n')
- i = i+1
- continue
- k = self.parse_starttag(i)
- if k < 0: break
- self.__seen_starttag = 1
- self.lineno = self.lineno + rawdata[i:k].count('\n')
- i = k
- continue
- if endtagopen.match(rawdata, i):
- k = self.parse_endtag(i)
- if k < 0: break
- self.lineno = self.lineno + rawdata[i:k].count('\n')
- i = k
- continue
- if commentopen.match(rawdata, i):
- if self.literal:
- data = rawdata[i]
- self.handle_data(data)
- self.lineno = self.lineno + data.count('\n')
- i = i+1
- continue
- k = self.parse_comment(i)
- if k < 0: break
- self.lineno = self.lineno + rawdata[i:k].count('\n')
- i = k
- continue
- if cdataopen.match(rawdata, i):
- k = self.parse_cdata(i)
- if k < 0: break
- self.lineno = self.lineno + rawdata[i:k].count('\n')
- i = k
- continue
- res = xmldecl.match(rawdata, i)
- if res:
- if not self.__at_start:
- self.syntax_error("<?xml?> declaration not at start of document")
- version, encoding, standalone = res.group('version',
- 'encoding',
- 'standalone')
- if version[1:-1] != '1.0':
- raise Error('only XML version 1.0 supported')
- if encoding: encoding = encoding[1:-1]
- if standalone: standalone = standalone[1:-1]
- self.handle_xml(encoding, standalone)
- i = res.end(0)
- continue
- res = procopen.match(rawdata, i)
- if res:
- k = self.parse_proc(i)
- if k < 0: break
- self.lineno = self.lineno + rawdata[i:k].count('\n')
- i = k
- continue
- res = doctype.match(rawdata, i)
- if res:
- if self.literal:
- data = rawdata[i]
- self.handle_data(data)
- self.lineno = self.lineno + data.count('\n')
- i = i+1
- continue
- if self.__seen_doctype:
- self.syntax_error('multiple DOCTYPE elements')
- if self.__seen_starttag:
- self.syntax_error('DOCTYPE not at beginning of document')
- k = self.parse_doctype(res)
- if k < 0: break
- self.__seen_doctype = res.group('name')
- if self.__map_case:
- self.__seen_doctype = self.__seen_doctype.lower()
- self.lineno = self.lineno + rawdata[i:k].count('\n')
- i = k
- continue
- elif rawdata[i] == '&':
- if self.literal:
- data = rawdata[i]
- self.handle_data(data)
- i = i+1
- continue
- res = charref.match(rawdata, i)
- if res is not None:
- i = res.end(0)
- if rawdata[i-1] != ';':
- self.syntax_error("`;' missing in charref")
- i = i-1
- if not self.stack:
- self.syntax_error('data not in content')
- self.handle_charref(res.group('char')[:-1])
- self.lineno = self.lineno + res.group(0).count('\n')
- continue
- res = entityref.match(rawdata, i)
- if res is not None:
- i = res.end(0)
- if rawdata[i-1] != ';':
- self.syntax_error("`;' missing in entityref")
- i = i-1
- name = res.group('name')
- if self.__map_case:
- name = name.lower()
- if name in self.entitydefs:
- self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:]
- n = len(rawdata)
- i = res.start(0)
- else:
- self.unknown_entityref(name)
- self.lineno = self.lineno + res.group(0).count('\n')
- continue
- elif rawdata[i] == ']':
- if self.literal:
- data = rawdata[i]
- self.handle_data(data)
- i = i+1
- continue
- if n-i < 3:
- break
- if cdataclose.match(rawdata, i):
- self.syntax_error("bogus `]]>'")
- self.handle_data(rawdata[i])
- i = i+1
- continue
- else:
- raise Error('neither < nor & ??')
- # We get here only if incomplete matches but
- # nothing else
- break
- # end while
- if i > 0:
- self.__at_start = 0
- if end and i < n:
- data = rawdata[i]
- self.syntax_error("bogus `%s'" % data)
- if not self.__accept_utf8 and illegal.search(data):
- self.syntax_error('illegal character in content')
- self.handle_data(data)
- self.lineno = self.lineno + data.count('\n')
- self.rawdata = rawdata[i+1:]
- return self.goahead(end)
- self.rawdata = rawdata[i:]
- if end:
- if not self.__seen_starttag:
- self.syntax_error('no elements in file')
- if self.stack:
- self.syntax_error('missing end tags')
- while self.stack:
- self.finish_endtag(self.stack[-1][0])
-
- # Internal -- parse comment, return length or -1 if not terminated
- def parse_comment(self, i):
- rawdata = self.rawdata
- if rawdata[i:i+4] != '<!--':
- raise Error('unexpected call to handle_comment')
- res = commentclose.search(rawdata, i+4)
- if res is None:
- return -1
- if doubledash.search(rawdata, i+4, res.start(0)):
- self.syntax_error("`--' inside comment")
- if rawdata[res.start(0)-1] == '-':
- self.syntax_error('comment cannot end in three dashes')
- if not self.__accept_utf8 and \
- illegal.search(rawdata, i+4, res.start(0)):
- self.syntax_error('illegal character in comment')
- self.handle_comment(rawdata[i+4: res.start(0)])
- return res.end(0)
-
- # Internal -- handle DOCTYPE tag, return length or -1 if not terminated
- def parse_doctype(self, res):
- rawdata = self.rawdata
- n = len(rawdata)
- name = res.group('name')
- if self.__map_case:
- name = name.lower()
- pubid, syslit = res.group('pubid', 'syslit')
- if pubid is not None:
- pubid = pubid[1:-1] # remove quotes
- pubid = ' '.join(pubid.split()) # normalize
- if syslit is not None: syslit = syslit[1:-1] # remove quotes
- j = k = res.end(0)
- if k >= n:
- return -1
- if rawdata[k] == '[':
- level = 0
- k = k+1
- dq = sq = 0
- while k < n:
- c = rawdata[k]
- if not sq and c == '"':
- dq = not dq
- elif not dq and c == "'":
- sq = not sq
- elif sq or dq:
- pass
- elif level <= 0 and c == ']':
- res = endbracket.match(rawdata, k+1)
- if res is None:
- return -1
- self.handle_doctype(name, pubid, syslit, rawdata[j+1:k])
- return res.end(0)
- elif c == '<':
- level = level + 1
- elif c == '>':
- level = level - 1
- if level < 0:
- self.syntax_error("bogus `>' in DOCTYPE")
- k = k+1
- res = endbracketfind.match(rawdata, k)
- if res is None:
- return -1
- if endbracket.match(rawdata, k) is None:
- self.syntax_error('garbage in DOCTYPE')
- self.handle_doctype(name, pubid, syslit, None)
- return res.end(0)
-
- # Internal -- handle CDATA tag, return length or -1 if not terminated
- def parse_cdata(self, i):
- rawdata = self.rawdata
- if rawdata[i:i+9] != '<![CDATA[':
- raise Error('unexpected call to parse_cdata')
- res = cdataclose.search(rawdata, i+9)
- if res is None:
- return -1
- if not self.__accept_utf8 and \
- illegal.search(rawdata, i+9, res.start(0)):
- self.syntax_error('illegal character in CDATA')
- if not self.stack:
- self.syntax_error('CDATA not in content')
- self.handle_cdata(rawdata[i+9:res.start(0)])
- return res.end(0)
-
- __xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None}
- # Internal -- handle a processing instruction tag
- def parse_proc(self, i):
- rawdata = self.rawdata
- end = procclose.search(rawdata, i)
- if end is None:
- return -1
- j = end.start(0)
- if not self.__accept_utf8 and illegal.search(rawdata, i+2, j):
- self.syntax_error('illegal character in processing instruction')
- res = tagfind.match(rawdata, i+2)
- if res is None:
- raise Error('unexpected call to parse_proc')
- k = res.end(0)
- name = res.group(0)
- if self.__map_case:
- name = name.lower()
- if name == 'xml:namespace':
- self.syntax_error('old-fashioned namespace declaration')
- self.__use_namespaces = -1
- # namespace declaration
- # this must come after the <?xml?> declaration (if any)
- # and before the <!DOCTYPE> (if any).
- if self.__seen_doctype or self.__seen_starttag:
- self.syntax_error('xml:namespace declaration too late in document')
- attrdict, namespace, k = self.parse_attributes(name, k, j)
- if namespace:
- self.syntax_error('namespace declaration inside namespace declaration')
- for attrname in attrdict.keys():
- if not attrname in self.__xml_namespace_attributes:
- self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname)
- if not 'ns' in attrdict or not 'prefix' in attrdict:
- self.syntax_error('xml:namespace without required attributes')
- prefix = attrdict.get('prefix')
- if ncname.match(prefix) is None:
- self.syntax_error('xml:namespace illegal prefix value')
- return end.end(0)
- if prefix in self.__namespaces:
- self.syntax_error('xml:namespace prefix not unique')
- self.__namespaces[prefix] = attrdict['ns']
- else:
- if name.lower() == 'xml':
- self.syntax_error('illegal processing instruction target name')
- self.handle_proc(name, rawdata[k:j])
- return end.end(0)
-
- # Internal -- parse attributes between i and j
- def parse_attributes(self, tag, i, j):
- rawdata = self.rawdata
- attrdict = {}
- namespace = {}
- while i < j:
- res = attrfind.match(rawdata, i)
- if res is None:
- break
- attrname, attrvalue = res.group('name', 'value')
- if self.__map_case:
- attrname = attrname.lower()
- i = res.end(0)
- if attrvalue is None:
- self.syntax_error("no value specified for attribute `%s'" % attrname)
- attrvalue = attrname
- elif attrvalue[:1] == "'" == attrvalue[-1:] or \
- attrvalue[:1] == '"' == attrvalue[-1:]:
- attrvalue = attrvalue[1:-1]
- elif not self.__accept_unquoted_attributes:
- self.syntax_error("attribute `%s' value not quoted" % attrname)
- res = xmlns.match(attrname)
- if res is not None:
- # namespace declaration
- ncname = res.group('ncname')
- namespace[ncname or ''] = attrvalue or None
- if not self.__use_namespaces:
- self.__use_namespaces = len(self.stack)+1
- continue
- if '<' in attrvalue:
- self.syntax_error("`<' illegal in attribute value")
- if attrname in attrdict:
- self.syntax_error("attribute `%s' specified twice" % attrname)
- attrvalue = attrvalue.translate(attrtrans)
- attrdict[attrname] = self.translate_references(attrvalue)
- return attrdict, namespace, i
-
- # Internal -- handle starttag, return length or -1 if not terminated
- def parse_starttag(self, i):
- rawdata = self.rawdata
- # i points to start of tag
- end = endbracketfind.match(rawdata, i+1)
- if end is None:
- return -1
- tag = starttagmatch.match(rawdata, i)
- if tag is None or tag.end(0) != end.end(0):
- self.syntax_error('garbage in starttag')
- return end.end(0)
- nstag = tagname = tag.group('tagname')
- if self.__map_case:
- nstag = tagname = nstag.lower()
- if not self.__seen_starttag and self.__seen_doctype and \
- tagname != self.__seen_doctype:
- self.syntax_error('starttag does not match DOCTYPE')
- if self.__seen_starttag and not self.stack:
- self.syntax_error('multiple elements on top level')
- k, j = tag.span('attrs')
- attrdict, nsdict, k = self.parse_attributes(tagname, k, j)
- self.stack.append((tagname, nsdict, nstag))
- if self.__use_namespaces:
- res = qname.match(tagname)
- else:
- res = None
- if res is not None:
- prefix, nstag = res.group('prefix', 'local')
- if prefix is None:
- prefix = ''
- ns = None
- for t, d, nst in self.stack:
- if prefix in d:
- ns = d[prefix]
- if ns is None and prefix != '':
- ns = self.__namespaces.get(prefix)
- if ns is not None:
- nstag = ns + ' ' + nstag
- elif prefix != '':
- nstag = prefix + ':' + nstag # undo split
- self.stack[-1] = tagname, nsdict, nstag
- # translate namespace of attributes
- attrnamemap = {} # map from new name to old name (used for error reporting)
- for key in attrdict.keys():
- attrnamemap[key] = key
- if self.__use_namespaces:
- nattrdict = {}
- for key, val in attrdict.items():
- okey = key
- res = qname.match(key)
- if res is not None:
- aprefix, key = res.group('prefix', 'local')
- if self.__map_case:
- key = key.lower()
- if aprefix is not None:
- ans = None
- for t, d, nst in self.stack:
- if aprefix in d:
- ans = d[aprefix]
- if ans is None:
- ans = self.__namespaces.get(aprefix)
- if ans is not None:
- key = ans + ' ' + key
- else:
- key = aprefix + ':' + key
- nattrdict[key] = val
- attrnamemap[key] = okey
- attrdict = nattrdict
- attributes = self.attributes.get(nstag)
- if attributes is not None:
- for key in attrdict.keys():
- if not key in attributes:
- self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname))
- for key, val in attributes.items():
- if val is not None and not key in attrdict:
- attrdict[key] = val
- method = self.elements.get(nstag, (None, None))[0]
- self.finish_starttag(nstag, attrdict, method)
- if tag.group('slash') == '/':
- self.finish_endtag(tagname)
- return tag.end(0)
-
- # Internal -- parse endtag
- def parse_endtag(self, i):
- rawdata = self.rawdata
- end = endbracketfind.match(rawdata, i+1)
- if end is None:
- return -1
- res = tagfind.match(rawdata, i+2)
- if res is None:
- if self.literal:
- self.handle_data(rawdata[i])
- return i+1
- if not self.__accept_missing_endtag_name:
- self.syntax_error('no name specified in end tag')
- tag = self.stack[-1][0]
- k = i+2
- else:
- tag = res.group(0)
- if self.__map_case:
- tag = tag.lower()
- if self.literal:
- if not self.stack or tag != self.stack[-1][0]:
- self.handle_data(rawdata[i])
- return i+1
- k = res.end(0)
- if endbracket.match(rawdata, k) is None:
- self.syntax_error('garbage in end tag')
- self.finish_endtag(tag)
- return end.end(0)
-
- # Internal -- finish processing of start tag
- def finish_starttag(self, tagname, attrdict, method):
- if method is not None:
- self.handle_starttag(tagname, method, attrdict)
- else:
- self.unknown_starttag(tagname, attrdict)
-
- # Internal -- finish processing of end tag
- def finish_endtag(self, tag):
- self.literal = 0
- if not tag:
- self.syntax_error('name-less end tag')
- found = len(self.stack) - 1
- if found < 0:
- self.unknown_endtag(tag)
- return
- else:
- found = -1
- for i in range(len(self.stack)):
- if tag == self.stack[i][0]:
- found = i
- if found == -1:
- self.syntax_error('unopened end tag')
- return
- while len(self.stack) > found:
- if found < len(self.stack) - 1:
- self.syntax_error('missing close tag for %s' % self.stack[-1][2])
- nstag = self.stack[-1][2]
- method = self.elements.get(nstag, (None, None))[1]
- if method is not None:
- self.handle_endtag(nstag, method)
- else:
- self.unknown_endtag(nstag)
- if self.__use_namespaces == len(self.stack):
- self.__use_namespaces = 0
- del self.stack[-1]
-
- # Overridable -- handle xml processing instruction
- def handle_xml(self, encoding, standalone):
- pass
-
- # Overridable -- handle DOCTYPE
- def handle_doctype(self, tag, pubid, syslit, data):
- pass
-
- # Overridable -- handle start tag
- def handle_starttag(self, tag, method, attrs):
- method(attrs)
-
- # Overridable -- handle end tag
- def handle_endtag(self, tag, method):
- method()
-
- # Example -- handle character reference, no need to override
- def handle_charref(self, name):
- try:
- if name[0] == 'x':
- n = int(name[1:], 16)
- else:
- n = int(name)
- except ValueError:
- self.unknown_charref(name)
- return
- if not 0 <= n <= 255:
- self.unknown_charref(name)
- return
- self.handle_data(chr(n))
-
- # Definition of entities -- derived classes may override
- entitydefs = {'lt': '&#60;', # must use charref
- 'gt': '&#62;',
- 'amp': '&#38;', # must use charref
- 'quot': '&#34;',
- 'apos': '&#39;',
- }
-
- # Example -- handle data, should be overridden
- def handle_data(self, data):
- pass
-
- # Example -- handle cdata, could be overridden
- def handle_cdata(self, data):
- pass
-
- # Example -- handle comment, could be overridden
- def handle_comment(self, data):
- pass
-
- # Example -- handle processing instructions, could be overridden
- def handle_proc(self, name, data):
- pass
-
- # Example -- handle relatively harmless syntax errors, could be overridden
- def syntax_error(self, message):
- raise Error('Syntax error at line %d: %s' % (self.lineno, message))
-
- # To be overridden -- handlers for unknown objects
- def unknown_starttag(self, tag, attrs): pass
- def unknown_endtag(self, tag): pass
- def unknown_charref(self, ref): pass
- def unknown_entityref(self, name):
- self.syntax_error("reference to unknown entity `&%s;'" % name)
-
-
-class TestXMLParser(XMLParser):
-
- def __init__(self, **kw):
- self.testdata = ""
- XMLParser.__init__(self, **kw)
-
- def handle_xml(self, encoding, standalone):
- self.flush()
- print 'xml: encoding =',encoding,'standalone =',standalone
-
- def handle_doctype(self, tag, pubid, syslit, data):
- self.flush()
- print 'DOCTYPE:',tag, repr(data)
-
- def handle_data(self, data):
- self.testdata = self.testdata + data
- if len(repr(self.testdata)) >= 70:
- self.flush()
-
- def flush(self):
- data = self.testdata
- if data:
- self.testdata = ""
- print 'data:', repr(data)
-
- def handle_cdata(self, data):
- self.flush()
- print 'cdata:', repr(data)
-
- def handle_proc(self, name, data):
- self.flush()
- print 'processing:',name,repr(data)
-
- def handle_comment(self, data):
- self.flush()
- r = repr(data)
- if len(r) > 68:
- r = r[:32] + '...' + r[-32:]
- print 'comment:', r
-
- def syntax_error(self, message):
- print 'error at line %d:' % self.lineno, message
-
- def unknown_starttag(self, tag, attrs):
- self.flush()
- if not attrs:
- print 'start tag: <' + tag + '>'
- else:
- print 'start tag: <' + tag,
- for name, value in attrs.items():
- print name + '=' + '"' + value + '"',
- print '>'
-
- def unknown_endtag(self, tag):
- self.flush()
- print 'end tag: </' + tag + '>'
-
- def unknown_entityref(self, ref):
- self.flush()
- print '*** unknown entity ref: &' + ref + ';'
-
- def unknown_charref(self, ref):
- self.flush()
- print '*** unknown char ref: &#' + ref + ';'
-
- def close(self):
- XMLParser.close(self)
- self.flush()
-
-def test(args = None):
- import sys, getopt
- from time import time
-
- if not args:
- args = sys.argv[1:]
-
- opts, args = getopt.getopt(args, 'st')
- klass = TestXMLParser
- do_time = 0
- for o, a in opts:
- if o == '-s':
- klass = XMLParser
- elif o == '-t':
- do_time = 1
-
- if args:
- file = args[0]
- else:
- file = 'test.xml'
-
- if file == '-':
- f = sys.stdin
- else:
- try:
- f = open(file, 'r')
- except IOError, msg:
- print file, ":", msg
- sys.exit(1)
-
- data = f.read()
- if f is not sys.stdin:
- f.close()
-
- x = klass()
- t0 = time()
- try:
- if do_time:
- x.feed(data)
- x.close()
- else:
- for c in data:
- x.feed(c)
- x.close()
- except Error, msg:
- t1 = time()
- print msg
- if do_time:
- print 'total time: %g' % (t1-t0)
- sys.exit(1)
- t1 = time()
- if do_time:
- print 'total time: %g' % (t1-t0)
-
-
-if __name__ == '__main__':
- test()
diff --git a/sys/lib/python/xmlrpclib.py b/sys/lib/python/xmlrpclib.py
deleted file mode 100644
index 9305e1018..000000000
--- a/sys/lib/python/xmlrpclib.py
+++ /dev/null
@@ -1,1488 +0,0 @@
-#
-# XML-RPC CLIENT LIBRARY
-# $Id: xmlrpclib.py 41594 2005-12-04 19:11:17Z andrew.kuchling $
-#
-# an XML-RPC client interface for Python.
-#
-# the marshalling and response parser code can also be used to
-# implement XML-RPC servers.
-#
-# Notes:
-# this version is designed to work with Python 2.1 or newer.
-#
-# History:
-# 1999-01-14 fl Created
-# 1999-01-15 fl Changed dateTime to use localtime
-# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service
-# 1999-01-19 fl Fixed array data element (from Skip Montanaro)
-# 1999-01-21 fl Fixed dateTime constructor, etc.
-# 1999-02-02 fl Added fault handling, handle empty sequences, etc.
-# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro)
-# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8)
-# 2000-11-28 fl Changed boolean to check the truth value of its argument
-# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches
-# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1)
-# 2001-03-28 fl Make sure response tuple is a singleton
-# 2001-03-29 fl Don't require empty params element (from Nicholas Riley)
-# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2)
-# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod)
-# 2001-09-03 fl Allow Transport subclass to override getparser
-# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup)
-# 2001-10-01 fl Remove containers from memo cache when done with them
-# 2001-10-01 fl Use faster escape method (80% dumps speedup)
-# 2001-10-02 fl More dumps microtuning
-# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum)
-# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow
-# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems)
-# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix)
-# 2002-03-17 fl Avoid buffered read when possible (from James Rucker)
-# 2002-04-07 fl Added pythondoc comments
-# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers
-# 2002-05-15 fl Added error constants (from Andrew Kuchling)
-# 2002-06-27 fl Merged with Python CVS version
-# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby)
-# 2003-01-22 sm Add support for the bool type
-# 2003-02-27 gvr Remove apply calls
-# 2003-04-24 sm Use cStringIO if available
-# 2003-04-25 ak Add support for nil
-# 2003-06-15 gn Add support for time.struct_time
-# 2003-07-12 gp Correct marshalling of Faults
-# 2003-10-31 mvl Add multicall support
-# 2004-08-20 mvl Bump minimum supported Python version to 2.1
-#
-# Copyright (c) 1999-2002 by Secret Labs AB.
-# Copyright (c) 1999-2002 by Fredrik Lundh.
-#
-# info@pythonware.com
-# http://www.pythonware.com
-#
-# --------------------------------------------------------------------
-# The XML-RPC client interface is
-#
-# Copyright (c) 1999-2002 by Secret Labs AB
-# Copyright (c) 1999-2002 by Fredrik Lundh
-#
-# By obtaining, using, and/or copying this software and/or its
-# associated documentation, you agree that you have read, understood,
-# and will comply with the following terms and conditions:
-#
-# Permission to use, copy, modify, and distribute this software and
-# its associated documentation for any purpose and without fee is
-# hereby granted, provided that the above copyright notice appears in
-# all copies, and that both that copyright notice and this permission
-# notice appear in supporting documentation, and that the name of
-# Secret Labs AB or the author not be used in advertising or publicity
-# pertaining to distribution of the software without specific, written
-# prior permission.
-#
-# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
-# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
-# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
-# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
-# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
-# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
-# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
-# OF THIS SOFTWARE.
-# --------------------------------------------------------------------
-
-#
-# things to look into some day:
-
-# TODO: sort out True/False/boolean issues for Python 2.3
-
-"""
-An XML-RPC client interface for Python.
-
-The marshalling and response parser code can also be used to
-implement XML-RPC servers.
-
-Exported exceptions:
-
- Error Base class for client errors
- ProtocolError Indicates an HTTP protocol error
- ResponseError Indicates a broken response package
- Fault Indicates an XML-RPC fault package
-
-Exported classes:
-
- ServerProxy Represents a logical connection to an XML-RPC server
-
- MultiCall Executor of boxcared xmlrpc requests
- Boolean boolean wrapper to generate a "boolean" XML-RPC value
- DateTime dateTime wrapper for an ISO 8601 string or time tuple or
- localtime integer value to generate a "dateTime.iso8601"
- XML-RPC value
- Binary binary data wrapper
-
- SlowParser Slow but safe standard parser (based on xmllib)
- Marshaller Generate an XML-RPC params chunk from a Python data structure
- Unmarshaller Unmarshal an XML-RPC response from incoming XML event message
- Transport Handles an HTTP transaction to an XML-RPC server
- SafeTransport Handles an HTTPS transaction to an XML-RPC server
-
-Exported constants:
-
- True
- False
-
-Exported functions:
-
- boolean Convert any Python value to an XML-RPC boolean
- getparser Create instance of the fastest available parser & attach
- to an unmarshalling object
- dumps Convert an argument tuple or a Fault instance to an XML-RPC
- request (or response, if the methodresponse option is used).
- loads Convert an XML-RPC packet to unmarshalled data plus a method
- name (None if not present).
-"""
-
-import re, string, time, operator
-
-from types import *
-
-# --------------------------------------------------------------------
-# Internal stuff
-
-try:
- unicode
-except NameError:
- unicode = None # unicode support not available
-
-try:
- import datetime
-except ImportError:
- datetime = None
-
-try:
- _bool_is_builtin = False.__class__.__name__ == "bool"
-except NameError:
- _bool_is_builtin = 0
-
-def _decode(data, encoding, is8bit=re.compile("[\x80-\xff]").search):
- # decode non-ascii string (if possible)
- if unicode and encoding and is8bit(data):
- data = unicode(data, encoding)
- return data
-
-def escape(s, replace=string.replace):
- s = replace(s, "&", "&amp;")
- s = replace(s, "<", "&lt;")
- return replace(s, ">", "&gt;",)
-
-if unicode:
- def _stringify(string):
- # convert to 7-bit ascii if possible
- try:
- return string.encode("ascii")
- except UnicodeError:
- return string
-else:
- def _stringify(string):
- return string
-
-__version__ = "1.0.1"
-
-# xmlrpc integer limits
-MAXINT = 2L**31-1
-MININT = -2L**31
-
-# --------------------------------------------------------------------
-# Error constants (from Dan Libby's specification at
-# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php)
-
-# Ranges of errors
-PARSE_ERROR = -32700
-SERVER_ERROR = -32600
-APPLICATION_ERROR = -32500
-SYSTEM_ERROR = -32400
-TRANSPORT_ERROR = -32300
-
-# Specific errors
-NOT_WELLFORMED_ERROR = -32700
-UNSUPPORTED_ENCODING = -32701
-INVALID_ENCODING_CHAR = -32702
-INVALID_XMLRPC = -32600
-METHOD_NOT_FOUND = -32601
-INVALID_METHOD_PARAMS = -32602
-INTERNAL_ERROR = -32603
-
-# --------------------------------------------------------------------
-# Exceptions
-
-##
-# Base class for all kinds of client-side errors.
-
-class Error(Exception):
- """Base class for client errors."""
- def __str__(self):
- return repr(self)
-
-##
-# Indicates an HTTP-level protocol error. This is raised by the HTTP
-# transport layer, if the server returns an error code other than 200
-# (OK).
-#
-# @param url The target URL.
-# @param errcode The HTTP error code.
-# @param errmsg The HTTP error message.
-# @param headers The HTTP header dictionary.
-
-class ProtocolError(Error):
- """Indicates an HTTP protocol error."""
- def __init__(self, url, errcode, errmsg, headers):
- Error.__init__(self)
- self.url = url
- self.errcode = errcode
- self.errmsg = errmsg
- self.headers = headers
- def __repr__(self):
- return (
- "<ProtocolError for %s: %s %s>" %
- (self.url, self.errcode, self.errmsg)
- )
-
-##
-# Indicates a broken XML-RPC response package. This exception is
-# raised by the unmarshalling layer, if the XML-RPC response is
-# malformed.
-
-class ResponseError(Error):
- """Indicates a broken response package."""
- pass
-
-##
-# Indicates an XML-RPC fault response package. This exception is
-# raised by the unmarshalling layer, if the XML-RPC response contains
-# a fault string. This exception can also used as a class, to
-# generate a fault XML-RPC message.
-#
-# @param faultCode The XML-RPC fault code.
-# @param faultString The XML-RPC fault string.
-
-class Fault(Error):
- """Indicates an XML-RPC fault package."""
- def __init__(self, faultCode, faultString, **extra):
- Error.__init__(self)
- self.faultCode = faultCode
- self.faultString = faultString
- def __repr__(self):
- return (
- "<Fault %s: %s>" %
- (self.faultCode, repr(self.faultString))
- )
-
-# --------------------------------------------------------------------
-# Special values
-
-##
-# Wrapper for XML-RPC boolean values. Use the xmlrpclib.True and
-# xmlrpclib.False constants, or the xmlrpclib.boolean() function, to
-# generate boolean XML-RPC values.
-#
-# @param value A boolean value. Any true value is interpreted as True,
-# all other values are interpreted as False.
-
-if _bool_is_builtin:
- boolean = Boolean = bool
- # to avoid breaking code which references xmlrpclib.{True,False}
- True, False = True, False
-else:
- class Boolean:
- """Boolean-value wrapper.
-
- Use True or False to generate a "boolean" XML-RPC value.
- """
-
- def __init__(self, value = 0):
- self.value = operator.truth(value)
-
- def encode(self, out):
- out.write("<value><boolean>%d</boolean></value>\n" % self.value)
-
- def __cmp__(self, other):
- if isinstance(other, Boolean):
- other = other.value
- return cmp(self.value, other)
-
- def __repr__(self):
- if self.value:
- return "<Boolean True at %x>" % id(self)
- else:
- return "<Boolean False at %x>" % id(self)
-
- def __int__(self):
- return self.value
-
- def __nonzero__(self):
- return self.value
-
- True, False = Boolean(1), Boolean(0)
-
- ##
- # Map true or false value to XML-RPC boolean values.
- #
- # @def boolean(value)
- # @param value A boolean value. Any true value is mapped to True,
- # all other values are mapped to False.
- # @return xmlrpclib.True or xmlrpclib.False.
- # @see Boolean
- # @see True
- # @see False
-
- def boolean(value, _truefalse=(False, True)):
- """Convert any Python value to XML-RPC 'boolean'."""
- return _truefalse[operator.truth(value)]
-
-##
-# Wrapper for XML-RPC DateTime values. This converts a time value to
-# the format used by XML-RPC.
-# <p>
-# The value can be given as a string in the format
-# "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by
-# time.localtime()), or an integer value (as returned by time.time()).
-# The wrapper uses time.localtime() to convert an integer to a time
-# tuple.
-#
-# @param value The time, given as an ISO 8601 string, a time
-# tuple, or a integer time value.
-
-class DateTime:
- """DateTime wrapper for an ISO 8601 string or time tuple or
- localtime integer value to generate 'dateTime.iso8601' XML-RPC
- value.
- """
-
- def __init__(self, value=0):
- if not isinstance(value, StringType):
- if datetime and isinstance(value, datetime.datetime):
- self.value = value.strftime("%Y%m%dT%H:%M:%S")
- return
- if datetime and isinstance(value, datetime.date):
- self.value = value.strftime("%Y%m%dT%H:%M:%S")
- return
- if datetime and isinstance(value, datetime.time):
- today = datetime.datetime.now().strftime("%Y%m%d")
- self.value = value.strftime(today+"T%H:%M:%S")
- return
- if not isinstance(value, (TupleType, time.struct_time)):
- if value == 0:
- value = time.time()
- value = time.localtime(value)
- value = time.strftime("%Y%m%dT%H:%M:%S", value)
- self.value = value
-
- def __cmp__(self, other):
- if isinstance(other, DateTime):
- other = other.value
- return cmp(self.value, other)
-
- ##
- # Get date/time value.
- #
- # @return Date/time value, as an ISO 8601 string.
-
- def __str__(self):
- return self.value
-
- def __repr__(self):
- return "<DateTime %s at %x>" % (repr(self.value), id(self))
-
- def decode(self, data):
- data = str(data)
- self.value = string.strip(data)
-
- def encode(self, out):
- out.write("<value><dateTime.iso8601>")
- out.write(self.value)
- out.write("</dateTime.iso8601></value>\n")
-
-def _datetime(data):
- # decode xml element contents into a DateTime structure.
- value = DateTime()
- value.decode(data)
- return value
-
-def _datetime_type(data):
- t = time.strptime(data, "%Y%m%dT%H:%M:%S")
- return datetime.datetime(*tuple(t)[:6])
-
-##
-# Wrapper for binary data. This can be used to transport any kind
-# of binary data over XML-RPC, using BASE64 encoding.
-#
-# @param data An 8-bit string containing arbitrary data.
-
-import base64
-try:
- import cStringIO as StringIO
-except ImportError:
- import StringIO
-
-class Binary:
- """Wrapper for binary data."""
-
- def __init__(self, data=None):
- self.data = data
-
- ##
- # Get buffer contents.
- #
- # @return Buffer contents, as an 8-bit string.
-
- def __str__(self):
- return self.data or ""
-
- def __cmp__(self, other):
- if isinstance(other, Binary):
- other = other.data
- return cmp(self.data, other)
-
- def decode(self, data):
- self.data = base64.decodestring(data)
-
- def encode(self, out):
- out.write("<value><base64>\n")
- base64.encode(StringIO.StringIO(self.data), out)
- out.write("</base64></value>\n")
-
-def _binary(data):
- # decode xml element contents into a Binary structure
- value = Binary()
- value.decode(data)
- return value
-
-WRAPPERS = (DateTime, Binary)
-if not _bool_is_builtin:
- WRAPPERS = WRAPPERS + (Boolean,)
-
-# --------------------------------------------------------------------
-# XML parsers
-
-try:
- # optional xmlrpclib accelerator
- import _xmlrpclib
- FastParser = _xmlrpclib.Parser
- FastUnmarshaller = _xmlrpclib.Unmarshaller
-except (AttributeError, ImportError):
- FastParser = FastUnmarshaller = None
-
-try:
- import _xmlrpclib
- FastMarshaller = _xmlrpclib.Marshaller
-except (AttributeError, ImportError):
- FastMarshaller = None
-
-#
-# the SGMLOP parser is about 15x faster than Python's builtin
-# XML parser. SGMLOP sources can be downloaded from:
-#
-# http://www.pythonware.com/products/xml/sgmlop.htm
-#
-
-try:
- import sgmlop
- if not hasattr(sgmlop, "XMLParser"):
- raise ImportError
-except ImportError:
- SgmlopParser = None # sgmlop accelerator not available
-else:
- class SgmlopParser:
- def __init__(self, target):
-
- # setup callbacks
- self.finish_starttag = target.start
- self.finish_endtag = target.end
- self.handle_data = target.data
- self.handle_xml = target.xml
-
- # activate parser
- self.parser = sgmlop.XMLParser()
- self.parser.register(self)
- self.feed = self.parser.feed
- self.entity = {
- "amp": "&", "gt": ">", "lt": "<",
- "apos": "'", "quot": '"'
- }
-
- def close(self):
- try:
- self.parser.close()
- finally:
- self.parser = self.feed = None # nuke circular reference
-
- def handle_proc(self, tag, attr):
- m = re.search("encoding\s*=\s*['\"]([^\"']+)[\"']", attr)
- if m:
- self.handle_xml(m.group(1), 1)
-
- def handle_entityref(self, entity):
- # <string> entity
- try:
- self.handle_data(self.entity[entity])
- except KeyError:
- self.handle_data("&%s;" % entity)
-
-try:
- from xml.parsers import expat
- if not hasattr(expat, "ParserCreate"):
- raise ImportError
-except ImportError:
- ExpatParser = None # expat not available
-else:
- class ExpatParser:
- # fast expat parser for Python 2.0 and later. this is about
- # 50% slower than sgmlop, on roundtrip testing
- def __init__(self, target):
- self._parser = parser = expat.ParserCreate(None, None)
- self._target = target
- parser.StartElementHandler = target.start
- parser.EndElementHandler = target.end
- parser.CharacterDataHandler = target.data
- encoding = None
- if not parser.returns_unicode:
- encoding = "utf-8"
- target.xml(encoding, None)
-
- def feed(self, data):
- self._parser.Parse(data, 0)
-
- def close(self):
- self._parser.Parse("", 1) # end of data
- del self._target, self._parser # get rid of circular references
-
-class SlowParser:
- """Default XML parser (based on xmllib.XMLParser)."""
- # this is about 10 times slower than sgmlop, on roundtrip
- # testing.
- def __init__(self, target):
- import xmllib # lazy subclassing (!)
- if xmllib.XMLParser not in SlowParser.__bases__:
- SlowParser.__bases__ = (xmllib.XMLParser,)
- self.handle_xml = target.xml
- self.unknown_starttag = target.start
- self.handle_data = target.data
- self.handle_cdata = target.data
- self.unknown_endtag = target.end
- try:
- xmllib.XMLParser.__init__(self, accept_utf8=1)
- except TypeError:
- xmllib.XMLParser.__init__(self) # pre-2.0
-
-# --------------------------------------------------------------------
-# XML-RPC marshalling and unmarshalling code
-
-##
-# XML-RPC marshaller.
-#
-# @param encoding Default encoding for 8-bit strings. The default
-# value is None (interpreted as UTF-8).
-# @see dumps
-
-class Marshaller:
- """Generate an XML-RPC params chunk from a Python data structure.
-
- Create a Marshaller instance for each set of parameters, and use
- the "dumps" method to convert your data (represented as a tuple)
- to an XML-RPC params chunk. To write a fault response, pass a
- Fault instance instead. You may prefer to use the "dumps" module
- function for this purpose.
- """
-
- # by the way, if you don't understand what's going on in here,
- # that's perfectly ok.
-
- def __init__(self, encoding=None, allow_none=0):
- self.memo = {}
- self.data = None
- self.encoding = encoding
- self.allow_none = allow_none
-
- dispatch = {}
-
- def dumps(self, values):
- out = []
- write = out.append
- dump = self.__dump
- if isinstance(values, Fault):
- # fault instance
- write("<fault>\n")
- dump({'faultCode': values.faultCode,
- 'faultString': values.faultString},
- write)
- write("</fault>\n")
- else:
- # parameter block
- # FIXME: the xml-rpc specification allows us to leave out
- # the entire <params> block if there are no parameters.
- # however, changing this may break older code (including
- # old versions of xmlrpclib.py), so this is better left as
- # is for now. See @XMLRPC3 for more information. /F
- write("<params>\n")
- for v in values:
- write("<param>\n")
- dump(v, write)
- write("</param>\n")
- write("</params>\n")
- result = string.join(out, "")
- return result
-
- def __dump(self, value, write):
- try:
- f = self.dispatch[type(value)]
- except KeyError:
- raise TypeError, "cannot marshal %s objects" % type(value)
- else:
- f(self, value, write)
-
- def dump_nil (self, value, write):
- if not self.allow_none:
- raise TypeError, "cannot marshal None unless allow_none is enabled"
- write("<value><nil/></value>")
- dispatch[NoneType] = dump_nil
-
- def dump_int(self, value, write):
- # in case ints are > 32 bits
- if value > MAXINT or value < MININT:
- raise OverflowError, "int exceeds XML-RPC limits"
- write("<value><int>")
- write(str(value))
- write("</int></value>\n")
- dispatch[IntType] = dump_int
-
- if _bool_is_builtin:
- def dump_bool(self, value, write):
- write("<value><boolean>")
- write(value and "1" or "0")
- write("</boolean></value>\n")
- dispatch[bool] = dump_bool
-
- def dump_long(self, value, write):
- if value > MAXINT or value < MININT:
- raise OverflowError, "long int exceeds XML-RPC limits"
- write("<value><int>")
- write(str(int(value)))
- write("</int></value>\n")
- dispatch[LongType] = dump_long
-
- def dump_double(self, value, write):
- write("<value><double>")
- write(repr(value))
- write("</double></value>\n")
- dispatch[FloatType] = dump_double
-
- def dump_string(self, value, write, escape=escape):
- write("<value><string>")
- write(escape(value))
- write("</string></value>\n")
- dispatch[StringType] = dump_string
-
- if unicode:
- def dump_unicode(self, value, write, escape=escape):
- value = value.encode(self.encoding)
- write("<value><string>")
- write(escape(value))
- write("</string></value>\n")
- dispatch[UnicodeType] = dump_unicode
-
- def dump_array(self, value, write):
- i = id(value)
- if self.memo.has_key(i):
- raise TypeError, "cannot marshal recursive sequences"
- self.memo[i] = None
- dump = self.__dump
- write("<value><array><data>\n")
- for v in value:
- dump(v, write)
- write("</data></array></value>\n")
- del self.memo[i]
- dispatch[TupleType] = dump_array
- dispatch[ListType] = dump_array
-
- def dump_struct(self, value, write, escape=escape):
- i = id(value)
- if self.memo.has_key(i):
- raise TypeError, "cannot marshal recursive dictionaries"
- self.memo[i] = None
- dump = self.__dump
- write("<value><struct>\n")
- for k, v in value.items():
- write("<member>\n")
- if type(k) is not StringType:
- if unicode and type(k) is UnicodeType:
- k = k.encode(self.encoding)
- else:
- raise TypeError, "dictionary key must be string"
- write("<name>%s</name>\n" % escape(k))
- dump(v, write)
- write("</member>\n")
- write("</struct></value>\n")
- del self.memo[i]
- dispatch[DictType] = dump_struct
-
- if datetime:
- def dump_datetime(self, value, write):
- write("<value><dateTime.iso8601>")
- write(value.strftime("%Y%m%dT%H:%M:%S"))
- write("</dateTime.iso8601></value>\n")
- dispatch[datetime.datetime] = dump_datetime
-
- def dump_date(self, value, write):
- write("<value><dateTime.iso8601>")
- write(value.strftime("%Y%m%dT00:00:00"))
- write("</dateTime.iso8601></value>\n")
- dispatch[datetime.date] = dump_date
-
- def dump_time(self, value, write):
- write("<value><dateTime.iso8601>")
- write(datetime.datetime.now().date().strftime("%Y%m%dT"))
- write(value.strftime("%H:%M:%S"))
- write("</dateTime.iso8601></value>\n")
- dispatch[datetime.time] = dump_time
-
- def dump_instance(self, value, write):
- # check for special wrappers
- if value.__class__ in WRAPPERS:
- self.write = write
- value.encode(self)
- del self.write
- else:
- # store instance attributes as a struct (really?)
- self.dump_struct(value.__dict__, write)
- dispatch[InstanceType] = dump_instance
-
-##
-# XML-RPC unmarshaller.
-#
-# @see loads
-
-class Unmarshaller:
- """Unmarshal an XML-RPC response, based on incoming XML event
- messages (start, data, end). Call close() to get the resulting
- data structure.
-
- Note that this reader is fairly tolerant, and gladly accepts bogus
- XML-RPC data without complaining (but not bogus XML).
- """
-
- # and again, if you don't understand what's going on in here,
- # that's perfectly ok.
-
- def __init__(self, use_datetime=0):
- self._type = None
- self._stack = []
- self._marks = []
- self._data = []
- self._methodname = None
- self._encoding = "utf-8"
- self.append = self._stack.append
- self._use_datetime = use_datetime
- if use_datetime and not datetime:
- raise ValueError, "the datetime module is not available"
-
- def close(self):
- # return response tuple and target method
- if self._type is None or self._marks:
- raise ResponseError()
- if self._type == "fault":
- raise Fault(**self._stack[0])
- return tuple(self._stack)
-
- def getmethodname(self):
- return self._methodname
-
- #
- # event handlers
-
- def xml(self, encoding, standalone):
- self._encoding = encoding
- # FIXME: assert standalone == 1 ???
-
- def start(self, tag, attrs):
- # prepare to handle this element
- if tag == "array" or tag == "struct":
- self._marks.append(len(self._stack))
- self._data = []
- self._value = (tag == "value")
-
- def data(self, text):
- self._data.append(text)
-
- def end(self, tag, join=string.join):
- # call the appropriate end tag handler
- try:
- f = self.dispatch[tag]
- except KeyError:
- pass # unknown tag ?
- else:
- return f(self, join(self._data, ""))
-
- #
- # accelerator support
-
- def end_dispatch(self, tag, data):
- # dispatch data
- try:
- f = self.dispatch[tag]
- except KeyError:
- pass # unknown tag ?
- else:
- return f(self, data)
-
- #
- # element decoders
-
- dispatch = {}
-
- def end_nil (self, data):
- self.append(None)
- self._value = 0
- dispatch["nil"] = end_nil
-
- def end_boolean(self, data):
- if data == "0":
- self.append(False)
- elif data == "1":
- self.append(True)
- else:
- raise TypeError, "bad boolean value"
- self._value = 0
- dispatch["boolean"] = end_boolean
-
- def end_int(self, data):
- self.append(int(data))
- self._value = 0
- dispatch["i4"] = end_int
- dispatch["int"] = end_int
-
- def end_double(self, data):
- self.append(float(data))
- self._value = 0
- dispatch["double"] = end_double
-
- def end_string(self, data):
- if self._encoding:
- data = _decode(data, self._encoding)
- self.append(_stringify(data))
- self._value = 0
- dispatch["string"] = end_string
- dispatch["name"] = end_string # struct keys are always strings
-
- def end_array(self, data):
- mark = self._marks.pop()
- # map arrays to Python lists
- self._stack[mark:] = [self._stack[mark:]]
- self._value = 0
- dispatch["array"] = end_array
-
- def end_struct(self, data):
- mark = self._marks.pop()
- # map structs to Python dictionaries
- dict = {}
- items = self._stack[mark:]
- for i in range(0, len(items), 2):
- dict[_stringify(items[i])] = items[i+1]
- self._stack[mark:] = [dict]
- self._value = 0
- dispatch["struct"] = end_struct
-
- def end_base64(self, data):
- value = Binary()
- value.decode(data)
- self.append(value)
- self._value = 0
- dispatch["base64"] = end_base64
-
- def end_dateTime(self, data):
- value = DateTime()
- value.decode(data)
- if self._use_datetime:
- value = _datetime_type(data)
- self.append(value)
- dispatch["dateTime.iso8601"] = end_dateTime
-
- def end_value(self, data):
- # if we stumble upon a value element with no internal
- # elements, treat it as a string element
- if self._value:
- self.end_string(data)
- dispatch["value"] = end_value
-
- def end_params(self, data):
- self._type = "params"
- dispatch["params"] = end_params
-
- def end_fault(self, data):
- self._type = "fault"
- dispatch["fault"] = end_fault
-
- def end_methodName(self, data):
- if self._encoding:
- data = _decode(data, self._encoding)
- self._methodname = data
- self._type = "methodName" # no params
- dispatch["methodName"] = end_methodName
-
-## Multicall support
-#
-
-class _MultiCallMethod:
- # some lesser magic to store calls made to a MultiCall object
- # for batch execution
- def __init__(self, call_list, name):
- self.__call_list = call_list
- self.__name = name
- def __getattr__(self, name):
- return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name))
- def __call__(self, *args):
- self.__call_list.append((self.__name, args))
-
-class MultiCallIterator:
- """Iterates over the results of a multicall. Exceptions are
- thrown in response to xmlrpc faults."""
-
- def __init__(self, results):
- self.results = results
-
- def __getitem__(self, i):
- item = self.results[i]
- if type(item) == type({}):
- raise Fault(item['faultCode'], item['faultString'])
- elif type(item) == type([]):
- return item[0]
- else:
- raise ValueError,\
- "unexpected type in multicall result"
-
-class MultiCall:
- """server -> a object used to boxcar method calls
-
- server should be a ServerProxy object.
-
- Methods can be added to the MultiCall using normal
- method call syntax e.g.:
-
- multicall = MultiCall(server_proxy)
- multicall.add(2,3)
- multicall.get_address("Guido")
-
- To execute the multicall, call the MultiCall object e.g.:
-
- add_result, address = multicall()
- """
-
- def __init__(self, server):
- self.__server = server
- self.__call_list = []
-
- def __repr__(self):
- return "<MultiCall at %x>" % id(self)
-
- __str__ = __repr__
-
- def __getattr__(self, name):
- return _MultiCallMethod(self.__call_list, name)
-
- def __call__(self):
- marshalled_list = []
- for name, args in self.__call_list:
- marshalled_list.append({'methodName' : name, 'params' : args})
-
- return MultiCallIterator(self.__server.system.multicall(marshalled_list))
-
-# --------------------------------------------------------------------
-# convenience functions
-
-##
-# Create a parser object, and connect it to an unmarshalling instance.
-# This function picks the fastest available XML parser.
-#
-# return A (parser, unmarshaller) tuple.
-
-def getparser(use_datetime=0):
- """getparser() -> parser, unmarshaller
-
- Create an instance of the fastest available parser, and attach it
- to an unmarshalling object. Return both objects.
- """
- if use_datetime and not datetime:
- raise ValueError, "the datetime module is not available"
- if FastParser and FastUnmarshaller:
- if use_datetime:
- mkdatetime = _datetime_type
- else:
- mkdatetime = _datetime
- target = FastUnmarshaller(True, False, _binary, mkdatetime, Fault)
- parser = FastParser(target)
- else:
- target = Unmarshaller(use_datetime=use_datetime)
- if FastParser:
- parser = FastParser(target)
- elif SgmlopParser:
- parser = SgmlopParser(target)
- elif ExpatParser:
- parser = ExpatParser(target)
- else:
- parser = SlowParser(target)
- return parser, target
-
-##
-# Convert a Python tuple or a Fault instance to an XML-RPC packet.
-#
-# @def dumps(params, **options)
-# @param params A tuple or Fault instance.
-# @keyparam methodname If given, create a methodCall request for
-# this method name.
-# @keyparam methodresponse If given, create a methodResponse packet.
-# If used with a tuple, the tuple must be a singleton (that is,
-# it must contain exactly one element).
-# @keyparam encoding The packet encoding.
-# @return A string containing marshalled data.
-
-def dumps(params, methodname=None, methodresponse=None, encoding=None,
- allow_none=0):
- """data [,options] -> marshalled data
-
- Convert an argument tuple or a Fault instance to an XML-RPC
- request (or response, if the methodresponse option is used).
-
- In addition to the data object, the following options can be given
- as keyword arguments:
-
- methodname: the method name for a methodCall packet
-
- methodresponse: true to create a methodResponse packet.
- If this option is used with a tuple, the tuple must be
- a singleton (i.e. it can contain only one element).
-
- encoding: the packet encoding (default is UTF-8)
-
- All 8-bit strings in the data structure are assumed to use the
- packet encoding. Unicode strings are automatically converted,
- where necessary.
- """
-
- assert isinstance(params, TupleType) or isinstance(params, Fault),\
- "argument must be tuple or Fault instance"
-
- if isinstance(params, Fault):
- methodresponse = 1
- elif methodresponse and isinstance(params, TupleType):
- assert len(params) == 1, "response tuple must be a singleton"
-
- if not encoding:
- encoding = "utf-8"
-
- if FastMarshaller:
- m = FastMarshaller(encoding)
- else:
- m = Marshaller(encoding, allow_none)
-
- data = m.dumps(params)
-
- if encoding != "utf-8":
- xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
- else:
- xmlheader = "<?xml version='1.0'?>\n" # utf-8 is default
-
- # standard XML-RPC wrappings
- if methodname:
- # a method call
- if not isinstance(methodname, StringType):
- methodname = methodname.encode(encoding)
- data = (
- xmlheader,
- "<methodCall>\n"
- "<methodName>", methodname, "</methodName>\n",
- data,
- "</methodCall>\n"
- )
- elif methodresponse:
- # a method response, or a fault structure
- data = (
- xmlheader,
- "<methodResponse>\n",
- data,
- "</methodResponse>\n"
- )
- else:
- return data # return as is
- return string.join(data, "")
-
-##
-# Convert an XML-RPC packet to a Python object. If the XML-RPC packet
-# represents a fault condition, this function raises a Fault exception.
-#
-# @param data An XML-RPC packet, given as an 8-bit string.
-# @return A tuple containing the unpacked data, and the method name
-# (None if not present).
-# @see Fault
-
-def loads(data, use_datetime=0):
- """data -> unmarshalled data, method name
-
- Convert an XML-RPC packet to unmarshalled data plus a method
- name (None if not present).
-
- If the XML-RPC packet represents a fault condition, this function
- raises a Fault exception.
- """
- p, u = getparser(use_datetime=use_datetime)
- p.feed(data)
- p.close()
- return u.close(), u.getmethodname()
-
-
-# --------------------------------------------------------------------
-# request dispatcher
-
-class _Method:
- # some magic to bind an XML-RPC method to an RPC server.
- # supports "nested" methods (e.g. examples.getStateName)
- def __init__(self, send, name):
- self.__send = send
- self.__name = name
- def __getattr__(self, name):
- return _Method(self.__send, "%s.%s" % (self.__name, name))
- def __call__(self, *args):
- return self.__send(self.__name, args)
-
-##
-# Standard transport class for XML-RPC over HTTP.
-# <p>
-# You can create custom transports by subclassing this method, and
-# overriding selected methods.
-
-class Transport:
- """Handles an HTTP transaction to an XML-RPC server."""
-
- # client identifier (may be overridden)
- user_agent = "xmlrpclib.py/%s (by www.pythonware.com)" % __version__
-
- def __init__(self, use_datetime=0):
- self._use_datetime = use_datetime
-
- ##
- # Send a complete request, and parse the response.
- #
- # @param host Target host.
- # @param handler Target PRC handler.
- # @param request_body XML-RPC request body.
- # @param verbose Debugging flag.
- # @return Parsed response.
-
- def request(self, host, handler, request_body, verbose=0):
- # issue XML-RPC request
-
- h = self.make_connection(host)
- if verbose:
- h.set_debuglevel(1)
-
- self.send_request(h, handler, request_body)
- self.send_host(h, host)
- self.send_user_agent(h)
- self.send_content(h, request_body)
-
- errcode, errmsg, headers = h.getreply()
-
- if errcode != 200:
- raise ProtocolError(
- host + handler,
- errcode, errmsg,
- headers
- )
-
- self.verbose = verbose
-
- try:
- sock = h._conn.sock
- except AttributeError:
- sock = None
-
- return self._parse_response(h.getfile(), sock)
-
- ##
- # Create parser.
- #
- # @return A 2-tuple containing a parser and a unmarshaller.
-
- def getparser(self):
- # get parser and unmarshaller
- return getparser(use_datetime=self._use_datetime)
-
- ##
- # Get authorization info from host parameter
- # Host may be a string, or a (host, x509-dict) tuple; if a string,
- # it is checked for a "user:pw@host" format, and a "Basic
- # Authentication" header is added if appropriate.
- #
- # @param host Host descriptor (URL or (URL, x509 info) tuple).
- # @return A 3-tuple containing (actual host, extra headers,
- # x509 info). The header and x509 fields may be None.
-
- def get_host_info(self, host):
-
- x509 = {}
- if isinstance(host, TupleType):
- host, x509 = host
-
- import urllib
- auth, host = urllib.splituser(host)
-
- if auth:
- import base64
- auth = base64.encodestring(urllib.unquote(auth))
- auth = string.join(string.split(auth), "") # get rid of whitespace
- extra_headers = [
- ("Authorization", "Basic " + auth)
- ]
- else:
- extra_headers = None
-
- return host, extra_headers, x509
-
- ##
- # Connect to server.
- #
- # @param host Target host.
- # @return A connection handle.
-
- def make_connection(self, host):
- # create a HTTP connection object from a host descriptor
- import httplib
- host, extra_headers, x509 = self.get_host_info(host)
- return httplib.HTTP(host)
-
- ##
- # Send request header.
- #
- # @param connection Connection handle.
- # @param handler Target RPC handler.
- # @param request_body XML-RPC body.
-
- def send_request(self, connection, handler, request_body):
- connection.putrequest("POST", handler)
-
- ##
- # Send host name.
- #
- # @param connection Connection handle.
- # @param host Host name.
-
- def send_host(self, connection, host):
- host, extra_headers, x509 = self.get_host_info(host)
- connection.putheader("Host", host)
- if extra_headers:
- if isinstance(extra_headers, DictType):
- extra_headers = extra_headers.items()
- for key, value in extra_headers:
- connection.putheader(key, value)
-
- ##
- # Send user-agent identifier.
- #
- # @param connection Connection handle.
-
- def send_user_agent(self, connection):
- connection.putheader("User-Agent", self.user_agent)
-
- ##
- # Send request body.
- #
- # @param connection Connection handle.
- # @param request_body XML-RPC request body.
-
- def send_content(self, connection, request_body):
- connection.putheader("Content-Type", "text/xml")
- connection.putheader("Content-Length", str(len(request_body)))
- connection.endheaders()
- if request_body:
- connection.send(request_body)
-
- ##
- # Parse response.
- #
- # @param file Stream.
- # @return Response tuple and target method.
-
- def parse_response(self, file):
- # compatibility interface
- return self._parse_response(file, None)
-
- ##
- # Parse response (alternate interface). This is similar to the
- # parse_response method, but also provides direct access to the
- # underlying socket object (where available).
- #
- # @param file Stream.
- # @param sock Socket handle (or None, if the socket object
- # could not be accessed).
- # @return Response tuple and target method.
-
- def _parse_response(self, file, sock):
- # read response from input file/socket, and parse it
-
- p, u = self.getparser()
-
- while 1:
- if sock:
- response = sock.recv(1024)
- else:
- response = file.read(1024)
- if not response:
- break
- if self.verbose:
- print "body:", repr(response)
- p.feed(response)
-
- file.close()
- p.close()
-
- return u.close()
-
-##
-# Standard transport class for XML-RPC over HTTPS.
-
-class SafeTransport(Transport):
- """Handles an HTTPS transaction to an XML-RPC server."""
-
- # FIXME: mostly untested
-
- def make_connection(self, host):
- # create a HTTPS connection object from a host descriptor
- # host may be a string, or a (host, x509-dict) tuple
- import httplib
- host, extra_headers, x509 = self.get_host_info(host)
- try:
- HTTPS = httplib.HTTPS
- except AttributeError:
- raise NotImplementedError(
- "your version of httplib doesn't support HTTPS"
- )
- else:
- return HTTPS(host, None, **(x509 or {}))
-
-##
-# Standard server proxy. This class establishes a virtual connection
-# to an XML-RPC server.
-# <p>
-# This class is available as ServerProxy and Server. New code should
-# use ServerProxy, to avoid confusion.
-#
-# @def ServerProxy(uri, **options)
-# @param uri The connection point on the server.
-# @keyparam transport A transport factory, compatible with the
-# standard transport class.
-# @keyparam encoding The default encoding used for 8-bit strings
-# (default is UTF-8).
-# @keyparam verbose Use a true value to enable debugging output.
-# (printed to standard output).
-# @see Transport
-
-class ServerProxy:
- """uri [,options] -> a logical connection to an XML-RPC server
-
- uri is the connection point on the server, given as
- scheme://host/target.
-
- The standard implementation always supports the "http" scheme. If
- SSL socket support is available (Python 2.0), it also supports
- "https".
-
- If the target part and the slash preceding it are both omitted,
- "/RPC2" is assumed.
-
- The following options can be given as keyword arguments:
-
- transport: a transport factory
- encoding: the request encoding (default is UTF-8)
-
- All 8-bit strings passed to the server proxy are assumed to use
- the given encoding.
- """
-
- def __init__(self, uri, transport=None, encoding=None, verbose=0,
- allow_none=0, use_datetime=0):
- # establish a "logical" server connection
-
- # get the url
- import urllib
- type, uri = urllib.splittype(uri)
- if type not in ("http", "https"):
- raise IOError, "unsupported XML-RPC protocol"
- self.__host, self.__handler = urllib.splithost(uri)
- if not self.__handler:
- self.__handler = "/RPC2"
-
- if transport is None:
- if type == "https":
- transport = SafeTransport(use_datetime=use_datetime)
- else:
- transport = Transport(use_datetime=use_datetime)
- self.__transport = transport
-
- self.__encoding = encoding
- self.__verbose = verbose
- self.__allow_none = allow_none
-
- def __request(self, methodname, params):
- # call a method on the remote server
-
- request = dumps(params, methodname, encoding=self.__encoding,
- allow_none=self.__allow_none)
-
- response = self.__transport.request(
- self.__host,
- self.__handler,
- request,
- verbose=self.__verbose
- )
-
- if len(response) == 1:
- response = response[0]
-
- return response
-
- def __repr__(self):
- return (
- "<ServerProxy for %s%s>" %
- (self.__host, self.__handler)
- )
-
- __str__ = __repr__
-
- def __getattr__(self, name):
- # magic method dispatcher
- return _Method(self.__request, name)
-
- # note: to call a remote object with an non-standard name, use
- # result getattr(server, "strange-python-name")(args)
-
-# compatibility
-
-Server = ServerProxy
-
-# --------------------------------------------------------------------
-# test code
-
-if __name__ == "__main__":
-
- # simple test program (from the XML-RPC specification)
-
- # server = ServerProxy("http://localhost:8000") # local server
- server = ServerProxy("http://time.xmlrpc.com/RPC2")
-
- print server
-
- try:
- print server.currentTime.getCurrentTime()
- except Error, v:
- print "ERROR", v
-
- multi = MultiCall(server)
- multi.currentTime.getCurrentTime()
- multi.currentTime.getCurrentTime()
- try:
- for response in multi():
- print response
- except Error, v:
- print "ERROR", v
diff --git a/sys/lib/python/zipfile.py b/sys/lib/python/zipfile.py
deleted file mode 100644
index 5c3fff3e7..000000000
--- a/sys/lib/python/zipfile.py
+++ /dev/null
@@ -1,900 +0,0 @@
-"""
-Read and write ZIP files.
-"""
-import struct, os, time, sys
-import binascii, cStringIO
-
-try:
- import zlib # We may need its compression method
-except ImportError:
- zlib = None
-
-__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
- "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
-
-class BadZipfile(Exception):
- pass
-
-
-class LargeZipFile(Exception):
- """
- Raised when writing a zipfile, the zipfile requires ZIP64 extensions
- and those extensions are disabled.
- """
-
-error = BadZipfile # The exception raised by this module
-
-ZIP64_LIMIT= (1 << 31) - 1
-
-# constants for Zip file compression methods
-ZIP_STORED = 0
-ZIP_DEFLATED = 8
-# Other ZIP compression methods not supported
-
-# Here are some struct module formats for reading headers
-structEndArchive = "<4s4H2lH" # 9 items, end of archive, 22 bytes
-stringEndArchive = "PK\005\006" # magic number for end of archive record
-structCentralDir = "<4s4B4HlLL5HLl"# 19 items, central directory, 46 bytes
-stringCentralDir = "PK\001\002" # magic number for central directory
-structFileHeader = "<4s2B4HlLL2H" # 12 items, file header record, 30 bytes
-stringFileHeader = "PK\003\004" # magic number for file header
-structEndArchive64Locator = "<4slql" # 4 items, locate Zip64 header, 20 bytes
-stringEndArchive64Locator = "PK\x06\x07" # magic token for locator header
-structEndArchive64 = "<4sqhhllqqqq" # 10 items, end of archive (Zip64), 56 bytes
-stringEndArchive64 = "PK\x06\x06" # magic token for Zip64 header
-
-
-# indexes of entries in the central directory structure
-_CD_SIGNATURE = 0
-_CD_CREATE_VERSION = 1
-_CD_CREATE_SYSTEM = 2
-_CD_EXTRACT_VERSION = 3
-_CD_EXTRACT_SYSTEM = 4 # is this meaningful?
-_CD_FLAG_BITS = 5
-_CD_COMPRESS_TYPE = 6
-_CD_TIME = 7
-_CD_DATE = 8
-_CD_CRC = 9
-_CD_COMPRESSED_SIZE = 10
-_CD_UNCOMPRESSED_SIZE = 11
-_CD_FILENAME_LENGTH = 12
-_CD_EXTRA_FIELD_LENGTH = 13
-_CD_COMMENT_LENGTH = 14
-_CD_DISK_NUMBER_START = 15
-_CD_INTERNAL_FILE_ATTRIBUTES = 16
-_CD_EXTERNAL_FILE_ATTRIBUTES = 17
-_CD_LOCAL_HEADER_OFFSET = 18
-
-# indexes of entries in the local file header structure
-_FH_SIGNATURE = 0
-_FH_EXTRACT_VERSION = 1
-_FH_EXTRACT_SYSTEM = 2 # is this meaningful?
-_FH_GENERAL_PURPOSE_FLAG_BITS = 3
-_FH_COMPRESSION_METHOD = 4
-_FH_LAST_MOD_TIME = 5
-_FH_LAST_MOD_DATE = 6
-_FH_CRC = 7
-_FH_COMPRESSED_SIZE = 8
-_FH_UNCOMPRESSED_SIZE = 9
-_FH_FILENAME_LENGTH = 10
-_FH_EXTRA_FIELD_LENGTH = 11
-
-def is_zipfile(filename):
- """Quickly see if file is a ZIP file by checking the magic number."""
- try:
- fpin = open(filename, "rb")
- endrec = _EndRecData(fpin)
- fpin.close()
- if endrec:
- return True # file has correct magic number
- except IOError:
- pass
- return False
-
-def _EndRecData64(fpin, offset, endrec):
- """
- Read the ZIP64 end-of-archive records and use that to update endrec
- """
- locatorSize = struct.calcsize(structEndArchive64Locator)
- fpin.seek(offset - locatorSize, 2)
- data = fpin.read(locatorSize)
- sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
- if sig != stringEndArchive64Locator:
- return endrec
-
- if diskno != 0 or disks != 1:
- raise BadZipfile("zipfiles that span multiple disks are not supported")
-
- # Assume no 'zip64 extensible data'
- endArchiveSize = struct.calcsize(structEndArchive64)
- fpin.seek(offset - locatorSize - endArchiveSize, 2)
- data = fpin.read(endArchiveSize)
- sig, sz, create_version, read_version, disk_num, disk_dir, \
- dircount, dircount2, dirsize, diroffset = \
- struct.unpack(structEndArchive64, data)
- if sig != stringEndArchive64:
- return endrec
-
- # Update the original endrec using data from the ZIP64 record
- endrec[1] = disk_num
- endrec[2] = disk_dir
- endrec[3] = dircount
- endrec[4] = dircount2
- endrec[5] = dirsize
- endrec[6] = diroffset
- return endrec
-
-
-def _EndRecData(fpin):
- """Return data from the "End of Central Directory" record, or None.
-
- The data is a list of the nine items in the ZIP "End of central dir"
- record followed by a tenth item, the file seek offset of this record."""
- fpin.seek(-22, 2) # Assume no archive comment.
- filesize = fpin.tell() + 22 # Get file size
- data = fpin.read()
- if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
- endrec = struct.unpack(structEndArchive, data)
- endrec = list(endrec)
- endrec.append("") # Append the archive comment
- endrec.append(filesize - 22) # Append the record start offset
- if endrec[-4] == -1 or endrec[-4] == 0xffffffff:
- return _EndRecData64(fpin, -22, endrec)
- return endrec
- # Search the last END_BLOCK bytes of the file for the record signature.
- # The comment is appended to the ZIP file and has a 16 bit length.
- # So the comment may be up to 64K long. We limit the search for the
- # signature to a few Kbytes at the end of the file for efficiency.
- # also, the signature must not appear in the comment.
- END_BLOCK = min(filesize, 1024 * 4)
- fpin.seek(filesize - END_BLOCK, 0)
- data = fpin.read()
- start = data.rfind(stringEndArchive)
- if start >= 0: # Correct signature string was found
- endrec = struct.unpack(structEndArchive, data[start:start+22])
- endrec = list(endrec)
- comment = data[start+22:]
- if endrec[7] == len(comment): # Comment length checks out
- # Append the archive comment and start offset
- endrec.append(comment)
- endrec.append(filesize - END_BLOCK + start)
- if endrec[-4] == -1 or endrec[-4] == 0xffffffff:
- return _EndRecData64(fpin, - END_BLOCK + start, endrec)
- return endrec
- return # Error, return None
-
-
-class ZipInfo (object):
- """Class with attributes describing each file in the ZIP archive."""
-
- __slots__ = (
- 'orig_filename',
- 'filename',
- 'date_time',
- 'compress_type',
- 'comment',
- 'extra',
- 'create_system',
- 'create_version',
- 'extract_version',
- 'reserved',
- 'flag_bits',
- 'volume',
- 'internal_attr',
- 'external_attr',
- 'header_offset',
- 'CRC',
- 'compress_size',
- 'file_size',
- )
-
- def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
- self.orig_filename = filename # Original file name in archive
-
- # Terminate the file name at the first null byte. Null bytes in file
- # names are used as tricks by viruses in archives.
- null_byte = filename.find(chr(0))
- if null_byte >= 0:
- filename = filename[0:null_byte]
- # This is used to ensure paths in generated ZIP files always use
- # forward slashes as the directory separator, as required by the
- # ZIP format specification.
- if os.sep != "/" and os.sep in filename:
- filename = filename.replace(os.sep, "/")
-
- self.filename = filename # Normalized file name
- self.date_time = date_time # year, month, day, hour, min, sec
- # Standard values:
- self.compress_type = ZIP_STORED # Type of compression for the file
- self.comment = "" # Comment for each file
- self.extra = "" # ZIP extra data
- if sys.platform == 'win32':
- self.create_system = 0 # System which created ZIP archive
- else:
- # Assume everything else is unix-y
- self.create_system = 3 # System which created ZIP archive
- self.create_version = 20 # Version which created ZIP archive
- self.extract_version = 20 # Version needed to extract archive
- self.reserved = 0 # Must be zero
- self.flag_bits = 0 # ZIP flag bits
- self.volume = 0 # Volume number of file header
- self.internal_attr = 0 # Internal attributes
- self.external_attr = 0 # External file attributes
- # Other attributes are set by class ZipFile:
- # header_offset Byte offset to the file header
- # CRC CRC-32 of the uncompressed file
- # compress_size Size of the compressed file
- # file_size Size of the uncompressed file
-
- def FileHeader(self):
- """Return the per-file header as a string."""
- dt = self.date_time
- dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
- dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
- if self.flag_bits & 0x08:
- # Set these to zero because we write them after the file data
- CRC = compress_size = file_size = 0
- else:
- CRC = self.CRC
- compress_size = self.compress_size
- file_size = self.file_size
-
- extra = self.extra
-
- if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
- # File is larger than what fits into a 4 byte integer,
- # fall back to the ZIP64 extension
- fmt = '<hhqq'
- extra = extra + struct.pack(fmt,
- 1, struct.calcsize(fmt)-4, file_size, compress_size)
- file_size = 0xffffffff # -1
- compress_size = 0xffffffff # -1
- self.extract_version = max(45, self.extract_version)
- self.create_version = max(45, self.extract_version)
-
- header = struct.pack(structFileHeader, stringFileHeader,
- self.extract_version, self.reserved, self.flag_bits,
- self.compress_type, dostime, dosdate, CRC,
- compress_size, file_size,
- len(self.filename), len(extra))
- return header + self.filename + extra
-
- def _decodeExtra(self):
- # Try to decode the extra field.
- extra = self.extra
- unpack = struct.unpack
- while extra:
- tp, ln = unpack('<hh', extra[:4])
- if tp == 1:
- if ln >= 24:
- counts = unpack('<qqq', extra[4:28])
- elif ln == 16:
- counts = unpack('<qq', extra[4:20])
- elif ln == 8:
- counts = unpack('<q', extra[4:12])
- elif ln == 0:
- counts = ()
- else:
- raise RuntimeError, "Corrupt extra field %s"%(ln,)
-
- idx = 0
-
- # ZIP64 extension (large files and/or large archives)
- if self.file_size == -1 or self.file_size == 0xFFFFFFFFL:
- self.file_size = counts[idx]
- idx += 1
-
- if self.compress_size == -1 or self.compress_size == 0xFFFFFFFFL:
- self.compress_size = counts[idx]
- idx += 1
-
- if self.header_offset == -1 or self.header_offset == 0xffffffffL:
- old = self.header_offset
- self.header_offset = counts[idx]
- idx+=1
-
- extra = extra[ln+4:]
-
-
-class ZipFile:
- """ Class with methods to open, read, write, close, list zip files.
-
- z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True)
-
- file: Either the path to the file, or a file-like object.
- If it is a path, the file will be opened and closed by ZipFile.
- mode: The mode can be either read "r", write "w" or append "a".
- compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
- allowZip64: if True ZipFile will create files with ZIP64 extensions when
- needed, otherwise it will raise an exception when this would
- be necessary.
-
- """
-
- fp = None # Set here since __del__ checks it
-
- def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
- """Open the ZIP file with mode read "r", write "w" or append "a"."""
- self._allowZip64 = allowZip64
- self._didModify = False
- if compression == ZIP_STORED:
- pass
- elif compression == ZIP_DEFLATED:
- if not zlib:
- raise RuntimeError,\
- "Compression requires the (missing) zlib module"
- else:
- raise RuntimeError, "That compression method is not supported"
- self.debug = 0 # Level of printing: 0 through 3
- self.NameToInfo = {} # Find file info given name
- self.filelist = [] # List of ZipInfo instances for archive
- self.compression = compression # Method of compression
- self.mode = key = mode.replace('b', '')[0]
-
- # Check if we were passed a file-like object
- if isinstance(file, basestring):
- self._filePassed = 0
- self.filename = file
- modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
- self.fp = open(file, modeDict[mode])
- else:
- self._filePassed = 1
- self.fp = file
- self.filename = getattr(file, 'name', None)
-
- if key == 'r':
- self._GetContents()
- elif key == 'w':
- pass
- elif key == 'a':
- try: # See if file is a zip file
- self._RealGetContents()
- # seek to start of directory and overwrite
- self.fp.seek(self.start_dir, 0)
- except BadZipfile: # file is not a zip file, just append
- self.fp.seek(0, 2)
- else:
- if not self._filePassed:
- self.fp.close()
- self.fp = None
- raise RuntimeError, 'Mode must be "r", "w" or "a"'
-
- def _GetContents(self):
- """Read the directory, making sure we close the file if the format
- is bad."""
- try:
- self._RealGetContents()
- except BadZipfile:
- if not self._filePassed:
- self.fp.close()
- self.fp = None
- raise
-
- def _RealGetContents(self):
- """Read in the table of contents for the ZIP file."""
- fp = self.fp
- endrec = _EndRecData(fp)
- if not endrec:
- raise BadZipfile, "File is not a zip file"
- if self.debug > 1:
- print endrec
- size_cd = endrec[5] # bytes in central directory
- offset_cd = endrec[6] # offset of central directory
- self.comment = endrec[8] # archive comment
- # endrec[9] is the offset of the "End of Central Dir" record
- if endrec[9] > ZIP64_LIMIT:
- x = endrec[9] - size_cd - 56 - 20
- else:
- x = endrec[9] - size_cd
- # "concat" is zero, unless zip was concatenated to another file
- concat = x - offset_cd
- if self.debug > 2:
- print "given, inferred, offset", offset_cd, x, concat
- # self.start_dir: Position of start of central directory
- self.start_dir = offset_cd + concat
- fp.seek(self.start_dir, 0)
- data = fp.read(size_cd)
- fp = cStringIO.StringIO(data)
- total = 0
- while total < size_cd:
- centdir = fp.read(46)
- total = total + 46
- if centdir[0:4] != stringCentralDir:
- raise BadZipfile, "Bad magic number for central directory"
- centdir = struct.unpack(structCentralDir, centdir)
- if self.debug > 2:
- print centdir
- filename = fp.read(centdir[_CD_FILENAME_LENGTH])
- # Create ZipInfo instance to store file information
- x = ZipInfo(filename)
- x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
- x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
- total = (total + centdir[_CD_FILENAME_LENGTH]
- + centdir[_CD_EXTRA_FIELD_LENGTH]
- + centdir[_CD_COMMENT_LENGTH])
- x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
- (x.create_version, x.create_system, x.extract_version, x.reserved,
- x.flag_bits, x.compress_type, t, d,
- x.CRC, x.compress_size, x.file_size) = centdir[1:12]
- x.volume, x.internal_attr, x.external_attr = centdir[15:18]
- # Convert date/time code to (year, month, day, hour, min, sec)
- x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
- t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
-
- x._decodeExtra()
- x.header_offset = x.header_offset + concat
- self.filelist.append(x)
- self.NameToInfo[x.filename] = x
- if self.debug > 2:
- print "total", total
-
-
- def namelist(self):
- """Return a list of file names in the archive."""
- l = []
- for data in self.filelist:
- l.append(data.filename)
- return l
-
- def infolist(self):
- """Return a list of class ZipInfo instances for files in the
- archive."""
- return self.filelist
-
- def printdir(self):
- """Print a table of contents for the zip file."""
- print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
- for zinfo in self.filelist:
- date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time
- print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
-
- def testzip(self):
- """Read all the files and check the CRC."""
- for zinfo in self.filelist:
- try:
- self.read(zinfo.filename) # Check CRC-32
- except BadZipfile:
- return zinfo.filename
-
-
- def getinfo(self, name):
- """Return the instance of ZipInfo given 'name'."""
- return self.NameToInfo[name]
-
- def read(self, name):
- """Return file bytes (as a string) for name."""
- if self.mode not in ("r", "a"):
- raise RuntimeError, 'read() requires mode "r" or "a"'
- if not self.fp:
- raise RuntimeError, \
- "Attempt to read ZIP archive that was already closed"
- zinfo = self.getinfo(name)
- filepos = self.fp.tell()
-
- self.fp.seek(zinfo.header_offset, 0)
-
- # Skip the file header:
- fheader = self.fp.read(30)
- if fheader[0:4] != stringFileHeader:
- raise BadZipfile, "Bad magic number for file header"
-
- fheader = struct.unpack(structFileHeader, fheader)
- fname = self.fp.read(fheader[_FH_FILENAME_LENGTH])
- if fheader[_FH_EXTRA_FIELD_LENGTH]:
- self.fp.read(fheader[_FH_EXTRA_FIELD_LENGTH])
-
- if fname != zinfo.orig_filename:
- raise BadZipfile, \
- 'File name in directory "%s" and header "%s" differ.' % (
- zinfo.orig_filename, fname)
-
- bytes = self.fp.read(zinfo.compress_size)
- self.fp.seek(filepos, 0)
- if zinfo.compress_type == ZIP_STORED:
- pass
- elif zinfo.compress_type == ZIP_DEFLATED:
- if not zlib:
- raise RuntimeError, \
- "De-compression requires the (missing) zlib module"
- # zlib compress/decompress code by Jeremy Hylton of CNRI
- dc = zlib.decompressobj(-15)
- bytes = dc.decompress(bytes)
- # need to feed in unused pad byte so that zlib won't choke
- ex = dc.decompress('Z') + dc.flush()
- if ex:
- bytes = bytes + ex
- else:
- raise BadZipfile, \
- "Unsupported compression method %d for file %s" % \
- (zinfo.compress_type, name)
- crc = binascii.crc32(bytes)
- if crc != zinfo.CRC:
- raise BadZipfile, "Bad CRC-32 for file %s" % name
- return bytes
-
- def _writecheck(self, zinfo):
- """Check for errors before writing a file to the archive."""
- if zinfo.filename in self.NameToInfo:
- if self.debug: # Warning for duplicate names
- print "Duplicate name:", zinfo.filename
- if self.mode not in ("w", "a"):
- raise RuntimeError, 'write() requires mode "w" or "a"'
- if not self.fp:
- raise RuntimeError, \
- "Attempt to write ZIP archive that was already closed"
- if zinfo.compress_type == ZIP_DEFLATED and not zlib:
- raise RuntimeError, \
- "Compression requires the (missing) zlib module"
- if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
- raise RuntimeError, \
- "That compression method is not supported"
- if zinfo.file_size > ZIP64_LIMIT:
- if not self._allowZip64:
- raise LargeZipFile("Filesize would require ZIP64 extensions")
- if zinfo.header_offset > ZIP64_LIMIT:
- if not self._allowZip64:
- raise LargeZipFile("Zipfile size would require ZIP64 extensions")
-
- def write(self, filename, arcname=None, compress_type=None):
- """Put the bytes from filename into the archive under the name
- arcname."""
- st = os.stat(filename)
- mtime = time.localtime(st.st_mtime)
- date_time = mtime[0:6]
- # Create ZipInfo instance to store file information
- if arcname is None:
- arcname = filename
- arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
- while arcname[0] in (os.sep, os.altsep):
- arcname = arcname[1:]
- zinfo = ZipInfo(arcname, date_time)
- zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
- if compress_type is None:
- zinfo.compress_type = self.compression
- else:
- zinfo.compress_type = compress_type
-
- zinfo.file_size = st.st_size
- zinfo.flag_bits = 0x00
- zinfo.header_offset = self.fp.tell() # Start of header bytes
-
- self._writecheck(zinfo)
- self._didModify = True
- fp = open(filename, "rb")
- # Must overwrite CRC and sizes with correct data later
- zinfo.CRC = CRC = 0
- zinfo.compress_size = compress_size = 0
- zinfo.file_size = file_size = 0
- self.fp.write(zinfo.FileHeader())
- if zinfo.compress_type == ZIP_DEFLATED:
- cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
- zlib.DEFLATED, -15)
- else:
- cmpr = None
- while 1:
- buf = fp.read(1024 * 8)
- if not buf:
- break
- file_size = file_size + len(buf)
- CRC = binascii.crc32(buf, CRC)
- if cmpr:
- buf = cmpr.compress(buf)
- compress_size = compress_size + len(buf)
- self.fp.write(buf)
- fp.close()
- if cmpr:
- buf = cmpr.flush()
- compress_size = compress_size + len(buf)
- self.fp.write(buf)
- zinfo.compress_size = compress_size
- else:
- zinfo.compress_size = file_size
- zinfo.CRC = CRC
- zinfo.file_size = file_size
- # Seek backwards and write CRC and file sizes
- position = self.fp.tell() # Preserve current position in file
- self.fp.seek(zinfo.header_offset + 14, 0)
- self.fp.write(struct.pack("<lLL", zinfo.CRC, zinfo.compress_size,
- zinfo.file_size))
- self.fp.seek(position, 0)
- self.filelist.append(zinfo)
- self.NameToInfo[zinfo.filename] = zinfo
-
- def writestr(self, zinfo_or_arcname, bytes):
- """Write a file into the archive. The contents is the string
- 'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or
- the name of the file in the archive."""
- if not isinstance(zinfo_or_arcname, ZipInfo):
- zinfo = ZipInfo(filename=zinfo_or_arcname,
- date_time=time.localtime(time.time()))
- zinfo.compress_type = self.compression
- else:
- zinfo = zinfo_or_arcname
- zinfo.file_size = len(bytes) # Uncompressed size
- zinfo.header_offset = self.fp.tell() # Start of header bytes
- self._writecheck(zinfo)
- self._didModify = True
- zinfo.CRC = binascii.crc32(bytes) # CRC-32 checksum
- if zinfo.compress_type == ZIP_DEFLATED:
- co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
- zlib.DEFLATED, -15)
- bytes = co.compress(bytes) + co.flush()
- zinfo.compress_size = len(bytes) # Compressed size
- else:
- zinfo.compress_size = zinfo.file_size
- zinfo.header_offset = self.fp.tell() # Start of header bytes
- self.fp.write(zinfo.FileHeader())
- self.fp.write(bytes)
- self.fp.flush()
- if zinfo.flag_bits & 0x08:
- # Write CRC and file sizes after the file data
- self.fp.write(struct.pack("<lLL", zinfo.CRC, zinfo.compress_size,
- zinfo.file_size))
- self.filelist.append(zinfo)
- self.NameToInfo[zinfo.filename] = zinfo
-
- def __del__(self):
- """Call the "close()" method in case the user forgot."""
- self.close()
-
- def close(self):
- """Close the file, and for mode "w" and "a" write the ending
- records."""
- if self.fp is None:
- return
-
- if self.mode in ("w", "a") and self._didModify: # write ending records
- count = 0
- pos1 = self.fp.tell()
- for zinfo in self.filelist: # write central directory
- count = count + 1
- dt = zinfo.date_time
- dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
- dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
- extra = []
- if zinfo.file_size > ZIP64_LIMIT \
- or zinfo.compress_size > ZIP64_LIMIT:
- extra.append(zinfo.file_size)
- extra.append(zinfo.compress_size)
- file_size = 0xffffffff #-1
- compress_size = 0xffffffff #-1
- else:
- file_size = zinfo.file_size
- compress_size = zinfo.compress_size
-
- if zinfo.header_offset > ZIP64_LIMIT:
- extra.append(zinfo.header_offset)
- header_offset = -1 # struct "l" format: 32 one bits
- else:
- header_offset = zinfo.header_offset
-
- extra_data = zinfo.extra
- if extra:
- # Append a ZIP64 field to the extra's
- extra_data = struct.pack(
- '<hh' + 'q'*len(extra),
- 1, 8*len(extra), *extra) + extra_data
-
- extract_version = max(45, zinfo.extract_version)
- create_version = max(45, zinfo.create_version)
- else:
- extract_version = zinfo.extract_version
- create_version = zinfo.create_version
-
- centdir = struct.pack(structCentralDir,
- stringCentralDir, create_version,
- zinfo.create_system, extract_version, zinfo.reserved,
- zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
- zinfo.CRC, compress_size, file_size,
- len(zinfo.filename), len(extra_data), len(zinfo.comment),
- 0, zinfo.internal_attr, zinfo.external_attr,
- header_offset)
- self.fp.write(centdir)
- self.fp.write(zinfo.filename)
- self.fp.write(extra_data)
- self.fp.write(zinfo.comment)
-
- pos2 = self.fp.tell()
- # Write end-of-zip-archive record
- if pos1 > ZIP64_LIMIT:
- # Need to write the ZIP64 end-of-archive records
- zip64endrec = struct.pack(
- structEndArchive64, stringEndArchive64,
- 44, 45, 45, 0, 0, count, count, pos2 - pos1, pos1)
- self.fp.write(zip64endrec)
-
- zip64locrec = struct.pack(
- structEndArchive64Locator,
- stringEndArchive64Locator, 0, pos2, 1)
- self.fp.write(zip64locrec)
-
- # XXX Why is `pos3` computed next? It's never referenced.
- pos3 = self.fp.tell()
- endrec = struct.pack(structEndArchive, stringEndArchive,
- 0, 0, count, count, pos2 - pos1, -1, 0)
- self.fp.write(endrec)
-
- else:
- endrec = struct.pack(structEndArchive, stringEndArchive,
- 0, 0, count, count, pos2 - pos1, pos1, 0)
- self.fp.write(endrec)
- self.fp.flush()
- if not self._filePassed:
- self.fp.close()
- self.fp = None
-
-
class PyZipFile(ZipFile):
    """Class to create ZIP archives with Python library files and packages."""

    def writepy(self, pathname, basename = ""):
        """Add all files from "pathname" to the ZIP archive.

        If pathname is a package directory, search the directory and
        all package subdirectories recursively for all *.py and enter
        the modules into the archive. If pathname is a plain
        directory, listdir *.py and enter all modules. Else, pathname
        must be a Python *.py file and the module will be put into the
        archive. Added modules are always module.pyo or module.pyc.
        This method will compile the module.py into module.pyc if
        necessary.
        """
        dir, name = os.path.split(pathname)
        if os.path.isdir(pathname):
            initname = os.path.join(pathname, "__init__.py")
            if os.path.isfile(initname):
                # This is a package directory, add it
                if basename:
                    # Nested package: extend the archive path with '/'.
                    basename = "%s/%s" % (basename, name)
                else:
                    basename = name
                if self.debug:
                    print "Adding package in", pathname, "as", basename
                # Archive __init__ first so the package exists in the zip.
                fname, arcname = self._get_codename(initname[0:-3], basename)
                if self.debug:
                    print "Adding", arcname
                self.write(fname, arcname)
                # __init__.py was already written above, so drop it from
                # the directory listing before the loop.
                dirlist = os.listdir(pathname)
                dirlist.remove("__init__.py")
                # Add all *.py files and package subdirectories
                for filename in dirlist:
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if os.path.isdir(path):
                        if os.path.isfile(os.path.join(path, "__init__.py")):
                            # This is a package directory, add it
                            self.writepy(path, basename) # Recursive call
                        # Subdirectories without __init__.py are skipped.
                    elif ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                         basename)
                        if self.debug:
                            print "Adding", arcname
                        self.write(fname, arcname)
            else:
                # This is NOT a package directory, add its files at top level
                if self.debug:
                    print "Adding files from directory", pathname
                for filename in os.listdir(pathname):
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                         basename)
                        if self.debug:
                            print "Adding", arcname
                        self.write(fname, arcname)
        else:
            # Single-file case: pathname must name a .py source file.
            if pathname[-3:] != ".py":
                raise RuntimeError, \
                      'Files added with writepy() must end with ".py"'
            fname, arcname = self._get_codename(pathname[0:-3], basename)
            if self.debug:
                print "Adding file", arcname
            self.write(fname, arcname)

    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.

        Given a module name path, return the correct file path and
        archive name, compiling if necessary. For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        """
        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        file_pyo = pathname + ".pyo"
        # Prefer an up-to-date .pyo; otherwise (re)compile to .pyc when the
        # bytecode is missing or older than the source.
        if os.path.isfile(file_pyo) and \
                            os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
            fname = file_pyo # Use .pyo file
        elif not os.path.isfile(file_pyc) or \
             os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
            import py_compile
            if self.debug:
                print "Compiling", file_py
            try:
                py_compile.compile(file_py, file_pyc, None, True)
            except py_compile.PyCompileError,err:
                # NOTE(review): on a failed compile the error is printed and
                # the (possibly missing or stale) .pyc path is still
                # returned; the caller's write() will then fail on it.
                # Confirm this fall-through is intended.
                print err.msg
            fname = file_pyc
        else:
            fname = file_pyc
        archivename = os.path.split(fname)[1]
        if basename:
            archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)
-
-
-def main(args = None):
- import textwrap
- USAGE=textwrap.dedent("""\
- Usage:
- zipfile.py -l zipfile.zip # Show listing of a zipfile
- zipfile.py -t zipfile.zip # Test if a zipfile is valid
- zipfile.py -e zipfile.zip target # Extract zipfile into target dir
- zipfile.py -c zipfile.zip src ... # Create zipfile from sources
- """)
- if args is None:
- args = sys.argv[1:]
-
- if not args or args[0] not in ('-l', '-c', '-e', '-t'):
- print USAGE
- sys.exit(1)
-
- if args[0] == '-l':
- if len(args) != 2:
- print USAGE
- sys.exit(1)
- zf = ZipFile(args[1], 'r')
- zf.printdir()
- zf.close()
-
- elif args[0] == '-t':
- if len(args) != 2:
- print USAGE
- sys.exit(1)
- zf = ZipFile(args[1], 'r')
- zf.testzip()
- print "Done testing"
-
- elif args[0] == '-e':
- if len(args) != 3:
- print USAGE
- sys.exit(1)
-
- zf = ZipFile(args[1], 'r')
- out = args[2]
- for path in zf.namelist():
- if path.startswith('./'):
- tgt = os.path.join(out, path[2:])
- else:
- tgt = os.path.join(out, path)
-
- tgtdir = os.path.dirname(tgt)
- if not os.path.exists(tgtdir):
- os.makedirs(tgtdir)
- fp = open(tgt, 'wb')
- fp.write(zf.read(path))
- fp.close()
- zf.close()
-
- elif args[0] == '-c':
- if len(args) < 3:
- print USAGE
- sys.exit(1)
-
- def addToZip(zf, path, zippath):
- if os.path.isfile(path):
- zf.write(path, zippath, ZIP_DEFLATED)
- elif os.path.isdir(path):
- for nm in os.listdir(path):
- addToZip(zf,
- os.path.join(path, nm), os.path.join(zippath, nm))
- # else: ignore
-
- zf = ZipFile(args[1], 'w', allowZip64=True)
- for src in args[2:]:
- addToZip(zf, src, os.path.basename(src))
-
- zf.close()
-
# Run the command-line interface when this module is executed as a script.
if __name__ == "__main__":
    main()